Merge pull request #473 from cbodley/wip-copyobj-tails
author     Ali Maredia <amaredia@redhat.com>
           Thu, 6 Oct 2022 20:57:39 +0000 (16:57 -0400)
committer  GitHub <noreply@github.com>
           Thu, 6 Oct 2022 20:57:39 +0000 (16:57 -0400)
add test_object_copy_16m to test refcounting of tail objects

28 files changed:
.gitignore [new file with mode: 0644]
LICENSE [new file with mode: 0644]
README.rst [new file with mode: 0644]
bootstrap [new file with mode: 0755]
requirements.txt [new file with mode: 0644]
s3tests.conf.SAMPLE [new file with mode: 0644]
s3tests/__init__.py [new file with mode: 0644]
s3tests/common.py [new file with mode: 0644]
s3tests/functional/__init__.py [new file with mode: 0644]
s3tests/functional/policy.py [new file with mode: 0644]
s3tests/functional/test_headers.py [new file with mode: 0644]
s3tests/functional/test_s3.py [new file with mode: 0644]
s3tests/functional/test_s3_website.py [new file with mode: 0644]
s3tests/functional/test_utils.py [new file with mode: 0644]
s3tests/functional/utils.py [new file with mode: 0644]
s3tests_boto3/__init__.py [new file with mode: 0644]
s3tests_boto3/common.py [new file with mode: 0644]
s3tests_boto3/functional/__init__.py [new file with mode: 0644]
s3tests_boto3/functional/policy.py [new file with mode: 0644]
s3tests_boto3/functional/rgw_interactive.py [new file with mode: 0644]
s3tests_boto3/functional/test_headers.py [new file with mode: 0644]
s3tests_boto3/functional/test_iam.py [new file with mode: 0644]
s3tests_boto3/functional/test_s3.py [new file with mode: 0644]
s3tests_boto3/functional/test_s3select.py [new file with mode: 0644]
s3tests_boto3/functional/test_sts.py [new file with mode: 0644]
s3tests_boto3/functional/test_utils.py [new file with mode: 0644]
s3tests_boto3/functional/utils.py [new file with mode: 0644]
setup.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..bcbae80
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+*~
+.#*
+## the next line needs to start with a backslash to avoid looking like
+## a comment
+\#*#
+.*.swp
+
+*.pyc
+*.pyo
+
+/*.egg-info
+/virtualenv
+
+config.yaml
diff --git a/LICENSE b/LICENSE
new file mode 100644 (file)
index 0000000..10996d2
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2011 New Dream Network, LLC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/README.rst b/README.rst
new file mode 100644 (file)
index 0000000..42ad7d5
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,101 @@
+========================
+ S3 compatibility tests
+========================
+
+This is a set of unofficial Amazon AWS S3 compatibility
+tests that can be useful to people implementing software
+that exposes an S3-like API. The tests use the Boto2 and Boto3 libraries.
+
+The tests use the Nose test framework. To get started, ensure you have
+the ``virtualenv`` software installed; e.g. on Debian/Ubuntu::
+
+       sudo apt-get install python-virtualenv
+
+and then run::
+
+       ./bootstrap
+
+You will need to create a configuration file with the location of the
+service and two different credentials. A sample configuration file named
+``s3tests.conf.SAMPLE`` has been provided in this repo. This file can be
+used to run the s3 tests on a Ceph cluster started with vstart.
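+
+For example, starting from the sample (``your.conf`` is whatever name you
+pass in ``S3TEST_CONF``)::
+
+       cp s3tests.conf.SAMPLE your.conf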
+
+Once you have that file copied and edited, you can run the tests with::
+
+       S3TEST_CONF=your.conf ./virtualenv/bin/nosetests
+
+You can specify which directory of tests to run::
+
+       S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional
+
+You can specify which file of tests to run::
+
+       S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_s3
+
+You can specify which test to run::
+
+       S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_s3:test_bucket_list_empty
+
+To gather a list of tests being run, use the flags::
+
+        -v --collect-only
+
+Some tests have attributes set based on their current reliability and
+on quirks such as AWS not enforcing its spec strictly. You can filter
+tests based on their attributes::
+
+       S3TEST_CONF=aws.conf ./virtualenv/bin/nosetests -a '!fails_on_aws'
+
+Most of the tests have both Boto3 and Boto2 versions. Tests written in
+Boto2 are in the ``s3tests`` directory. Tests written in Boto3 are
+located in the ``s3tests_boto3`` directory.
+
+You can run only the boto3 tests with::
+
+        S3TEST_CONF=your.conf ./virtualenv/bin/nosetests -v -s -A 'not fails_on_rgw' s3tests_boto3.functional
+
+========================
+ STS compatibility tests
+========================
+
+This section contains some basic tests for the AssumeRole, GetSessionToken and AssumeRoleWithWebIdentity APIs. The test file is located under ``s3tests_boto3/functional``.
+
+You can run only the sts tests (all three APIs) with::
+
+        S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_sts
+
+You can filter tests based on their attributes. There is an attribute named ``test_of_sts`` that covers the AssumeRole and GetSessionToken tests, and ``webidentity_test`` that covers the AssumeRoleWithWebIdentity tests. If you want to execute only the ``test_of_sts`` tests, you can apply that filter as below::
+
+        S3TEST_CONF=your.conf ./virtualenv/bin/nosetests -v -s -A 'test_of_sts' s3tests_boto3.functional.test_sts
+
+For running the ``webidentity_test`` tests you'll need to have Keycloak running.
+
+In order to run any STS test you'll need to add an "iam" section to the config file. For further reference on how your config file should look, check ``s3tests.conf.SAMPLE``.
+
+========================
+ IAM policy tests
+========================
+
+This is a set of IAM policy tests.
+This section covers tests for user policies such as Put, Get, List and Delete, user policies with S3 actions, conflicting user policies, etc.
+These tests use the Boto3 library. They are located in the ``s3tests_boto3`` directory.
+
+These IAM policy tests use two users, with profile names "iam" and "s3 alt", as mentioned in ``s3tests.conf.SAMPLE``.
+If the Ceph cluster is started with vstart, these two users are created as part of vstart with the same access key, secret key, etc. as mentioned in ``s3tests.conf.SAMPLE``.
+Of those two users, the "iam" user has the capabilities ``--caps=user-policy=*`` and the "s3 alt" user has no capabilities.
+Adding the above capabilities to the "iam" user is also taken care of by vstart (if the Ceph cluster is started with vstart).
+
+To run these tests, create a configuration file with "iam" and "s3 alt" sections; refer to ``s3tests.conf.SAMPLE``.
+Once you have that configuration file copied and edited, you can run all the tests with::
+
+       S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_iam
+
+You can also specify a specific test to run::
+
+       S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_iam:test_put_user_policy
+
+Some tests have attributes set, such as "fails_on_rgw".
+You can filter tests based on their attributes::
+
+       S3TEST_CONF=your.conf ./virtualenv/bin/nosetests s3tests_boto3.functional.test_iam -a '!fails_on_rgw'
+
diff --git a/bootstrap b/bootstrap
new file mode 100755 (executable)
index 0000000..0bba312
--- /dev/null
+++ b/bootstrap
@@ -0,0 +1,76 @@
+#!/bin/bash
+set -e
+
+virtualenv="virtualenv"
+declare -a packages
+source /etc/os-release
+
+case "$ID" in
+    debian|ubuntu|devuan)
+        packages=(debianutils python3-pip python3-virtualenv python3-dev libevent-dev libffi-dev libxml2-dev libxslt-dev zlib1g-dev)
+        for package in ${packages[@]}; do
+            if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
+                # add a space after old values
+                missing="${missing:+$missing }$package"
+            fi
+        done
+
+        if [ -n "$missing" ]; then
+            echo "$0: missing required DEB packages. Installing via sudo." 1>&2
+            sudo apt-get -y install $missing
+        fi
+        ;;
+    centos|fedora|rhel|rocky|ol|virtuozzo)
+
+        packages=(which python3-virtualenv python36-devel libevent-devel libffi-devel libxml2-devel libxslt-devel zlib-devel)
+        for package in ${packages[@]}; do
+            # When the package is python36-devel we change it to python3-devel on Fedora
+            if [[ ${package} == "python36-devel" && -f /etc/fedora-release ]]; then
+                package=python3-devel
+            fi
+            if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
+                missing="${missing:+$missing }$package"
+            fi
+        done
+
+        if [ -n "$missing" ]; then
+            echo "$0: Missing required RPM packages: ${missing}." 1>&2
+            sudo yum -y install $missing
+        fi
+        ;;
+    opensuse*|suse|sles)
+
+        packages=(which python3-virtualenv python3-devel libev-devel libffi-devel libxml2-devel libxslt-devel zlib-devel)
+        for package in ${packages[@]}; do
+            if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then
+                missing="${missing:+$missing }$package"
+            fi
+        done
+
+        if [ -n "$missing" ]; then
+            echo "$0: Missing required RPM packages: ${missing}." 1>&2
+            sudo zypper --non-interactive install --no-recommends $missing
+        fi
+        ;;
+    *)
+        echo "Bootstrap script does not support this distro yet, consider adding the packages"
+        exit 1
+esac
+
+
+# s3-tests only works on python 3.6, not newer versions of python 3
+${virtualenv} --python=$(which python3.6) virtualenv
+
+# avoid pip bugs
+./virtualenv/bin/pip3 install --upgrade pip
+
+# pin setuptools; 44.1.0 was the last release to support python 2.7
+./virtualenv/bin/pip install setuptools==44.1.0
+
+./virtualenv/bin/pip3 install -r requirements.txt
+
+# forbid setuptools from using the network because it'll try to use
+# easy_install, and we really wanted pip; next line will fail if pip
+# requirements.txt does not match setup.py requirements -- sucky but
+# good enough for now
+./virtualenv/bin/python3 setup.py develop
diff --git a/requirements.txt b/requirements.txt
new file mode 100644 (file)
index 0000000..88e34a5
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+PyYAML
+nose >=1.0.0
+boto >=2.6.0
+boto3 >=1.0.0
+munch >=2.0.0
+# 0.14 switches to libev, which means bootstrap needs to change too
+gevent >=1.0
+isodate >=0.4.4
+requests >=2.23.0
+pytz >=2011k
+httplib2
+lxml
diff --git a/s3tests.conf.SAMPLE b/s3tests.conf.SAMPLE
new file mode 100644 (file)
index 0000000..9593fc1
--- /dev/null
+++ b/s3tests.conf.SAMPLE
@@ -0,0 +1,146 @@
+[DEFAULT]
+## this section is just used for host, port and bucket_prefix
+
+# host set for rgw in vstart.sh
+host = localhost
+
+# port set for rgw in vstart.sh
+port = 8000
+
+## say "False" to disable TLS
+is_secure = False
+
+## say "False" to disable SSL Verify
+ssl_verify = False
+
+[fixtures]
+## all the buckets created will start with this prefix;
+## {random} will be filled with random characters to pad
+## the prefix to 30 characters long, and avoid collisions
+bucket prefix = yournamehere-{random}-
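+## (illustrative: with the template above, test buckets get names like
+## yournamehere-s0m3r4nd0mf1ll3r-1)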
+
+[s3 main]
+# main display_name set in vstart.sh
+display_name = M. Tester
+
+# main user_id set in vstart.sh
+user_id = testid
+
+# main email set in vstart.sh
+email = tester@ceph.com
+
+# zonegroup api_name for bucket location
+api_name = default
+
+## main AWS access key
+access_key = 0555b35654ad1656d804
+
+## main AWS secret key
+secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+
+## replace with key id obtained when secret is created, or delete if KMS not tested
+#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef
+
+## Storage classes
+#storage_classes = "LUKEWARM, FROZEN"
+
+## Lifecycle debug interval (default: 10)
+#lc_debug_interval = 20
+
+[s3 alt]
+# alt display_name set in vstart.sh
+display_name = john.doe
+## alt email set in vstart.sh
+email = john.doe@example.com
+
+# alt user_id set in vstart.sh
+user_id = 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234
+
+# alt AWS access key set in vstart.sh
+access_key = NOPQRSTUVWXYZABCDEFG
+
+# alt AWS secret key set in vstart.sh
+secret_key = nopqrstuvwxyzabcdefghijklmnabcdefghijklm
+
+#[s3 cloud]
+## to run the testcases with the "cloud_transition" attribute.
+## Note: the waiting time may have to be tweaked depending on
+## the I/O latency to the cloud endpoint.
+
+## host set for cloud endpoint
+# host = localhost
+
+## port set for cloud endpoint
+# port = 8001
+
+## say "False" to disable TLS
+# is_secure = False
+
+## cloud endpoint credentials
+# access_key = 0555b35654ad1656d804
+# secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==
+
+## storage class configured as cloud tier on local rgw server
+# cloud_storage_class = CLOUDTIER
+
+## The options below are optional:
+
+## config options for the cloud tier storage class configured above
+# retain_head_object = false
+# target_storage_class = Target_SC
+# target_path = cloud-bucket
+
+## another regular storage class to test multiple transition rules,
+# storage_class = S1
+
+[s3 tenant]
+# tenant display_name set in vstart.sh
+display_name = testx$tenanteduser
+
+# tenant user_id set in vstart.sh
+user_id = 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+
+# tenant AWS access key set in vstart.sh
+access_key = HIJKLMNOPQRSTUVWXYZA
+
+# tenant AWS secret key set in vstart.sh
+secret_key = opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab
+
+# tenant email set in vstart.sh
+email = tenanteduser@example.com
+
+# the following section needs to be added for all sts-tests
+[iam]
+#used for iam operations in sts-tests
+#email from vstart.sh
+email = s3@example.com
+
+#user_id from vstart.sh
+user_id = 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+
+#access_key from vstart.sh
+access_key = ABCDEFGHIJKLMNOPQRST
+
+#secret_key from vstart.sh
+secret_key = abcdefghijklmnopqrstuvwxyzabcdefghijklmn
+
+#display_name from vstart.sh
+display_name = youruseridhere
+
+# the following section needs to be added when you want to run the AssumeRoleWithWebIdentity tests
+[webidentity]
+#used for assume role with web identity test in sts-tests
+#all parameters will be obtained from ceph/qa/tasks/keycloak.py
+token=<access_token>
+
+aud=<obtained after introspecting token>
+
+sub=<obtained after introspecting token>
+
+azp=<obtained after introspecting token>
+
+user_token=<access token for a user with the attribute Department=[Engineering, Marketing]>
+
+thumbprint=<obtained from x509 certificate>
+
+KC_REALM=<name of the realm>
diff --git a/s3tests/__init__.py b/s3tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/s3tests/common.py b/s3tests/common.py
new file mode 100644 (file)
index 0000000..53caa53
--- /dev/null
+++ b/s3tests/common.py
@@ -0,0 +1,302 @@
+import boto.s3.connection
+import munch
+import itertools
+import os
+import random
+import string
+import yaml
+import re
+from lxml import etree
+
+from doctest import Example
+from lxml.doctestcompare import LXMLOutputChecker
+
+s3 = munch.Munch()
+config = munch.Munch()
+prefix = ''
+
+bucket_counter = itertools.count(1)
+key_counter = itertools.count(1)
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
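+# For example (illustrative values), choose_bucket_prefix('test-{random}-')
+# returns something like 'test-k3q0z7f9m2c8v5b4n6x1j0d2-': the random
+# filler is trimmed until the formatted prefix fits within max_len.
+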
+def nuke_bucket(bucket):
+    try:
+        bucket.set_canned_acl('private')
+        # TODO: deleted_cnt and the while loop is a work around for rgw
+        # not sending the
+        deleted_cnt = 1
+        while deleted_cnt:
+            deleted_cnt = 0
+            for key in bucket.list():
+                print('Cleaning bucket {bucket} key {key}'.format(
+                    bucket=bucket,
+                    key=key,
+                    ))
+                key.set_canned_acl('private')
+                key.delete()
+                deleted_cnt += 1
+        bucket.delete()
+    except boto.exception.S3ResponseError as e:
+        # TODO workaround for buggy rgw that fails to send
+        # error_code, remove
+        if (e.status == 403
+            and e.error_code is None
+            and e.body == ''):
+            e.error_code = 'AccessDenied'
+        if e.error_code != 'AccessDenied':
+            print('GOT UNWANTED ERROR', e.error_code)
+            raise
+        # seems like we're not the owner of the bucket; ignore
+        pass
+
+def nuke_prefixed_buckets():
+    for name, conn in list(s3.items()):
+        print('Cleaning buckets from connection {name}'.format(name=name))
+        for bucket in conn.get_all_buckets():
+            if bucket.name.startswith(prefix):
+                print('Cleaning bucket {bucket}'.format(bucket=bucket))
+                nuke_bucket(bucket)
+
+    print('Done with cleanup of test buckets.')
+
+def read_config(fp):
+    config = munch.Munch()
+    g = yaml.safe_load_all(fp)
+    for new in g:
+        config.update(munch.munchify(new))
+    return config
+
+def connect(conf):
+    mapping = dict(
+        port='port',
+        host='host',
+        is_secure='is_secure',
+        access_key='aws_access_key_id',
+        secret_key='aws_secret_access_key',
+        )
+    kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
+    #process calling_format argument
+    calling_formats = dict(
+        ordinary=boto.s3.connection.OrdinaryCallingFormat(),
+        subdomain=boto.s3.connection.SubdomainCallingFormat(),
+        vhost=boto.s3.connection.VHostCallingFormat(),
+        )
+    kwargs['calling_format'] = calling_formats['ordinary']
+    if 'calling_format' in conf:
+        raw_calling_format = conf['calling_format']
+        try:
+            kwargs['calling_format'] = calling_formats[raw_calling_format]
+        except KeyError:
+            raise RuntimeError(
+                'calling_format unknown: %r' % raw_calling_format
+                )
+    # TODO test vhost calling format
+    conn = boto.s3.connection.S3Connection(**kwargs)
+    return conn
+
+def setup():
+    global s3, config, prefix
+    s3.clear()
+    config.clear()
+
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    with open(path) as f:
+        config.update(read_config(f))
+
+    # These 3 should always be present.
+    if 's3' not in config:
+        raise RuntimeError('Your config file is missing the s3 section!')
+    if 'defaults' not in config.s3:
+        raise RuntimeError('Your config file is missing the s3.defaults section!')
+    if 'fixtures' not in config:
+        raise RuntimeError('Your config file is missing the fixtures section!')
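+
+    # An illustrative YAML layout that satisfies these checks (hosts, ports
+    # and keys are hypothetical, not prescribed by the tests):
+    #
+    #   fixtures:
+    #     bucket prefix: test-{random}-
+    #   s3:
+    #     defaults:
+    #       host: localhost
+    #       port: 8000
+    #       is_secure: false
+    #     main:
+    #       access_key: XXXXXXXXXXXXXXXXXXXX
+    #       secret_key: YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY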
+
+    template = config.fixtures.get('bucket prefix', 'test-{random}-')
+    prefix = choose_bucket_prefix(template=template)
+    if prefix == '':
+        raise RuntimeError("Empty Prefix! Aborting!")
+
+    defaults = config.s3.defaults
+    for section in list(config.s3.keys()):
+        if section == 'defaults':
+            continue
+
+        conf = {}
+        conf.update(defaults)
+        conf.update(config.s3[section])
+        conn = connect(conf)
+        s3[section] = conn
+
+    # WARNING! we actively delete all buckets we see with the prefix
+    # we've chosen! Choose your prefix with care, and don't reuse
+    # credentials!
+
+    # We also assume nobody else is going to use buckets with that
+    # prefix. This is racy but given enough randomness, should not
+    # really fail.
+    nuke_prefixed_buckets()
+
+def get_new_bucket(connection=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if connection is None:
+        connection = s3.main
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    # the only way for this to fail with a pre-existing bucket is if
+    # someone raced us between setup nuke_prefixed_buckets and here;
+    # ignore that as astronomically unlikely
+    bucket = connection.create_bucket(name)
+    return bucket
+
+def teardown():
+    nuke_prefixed_buckets()
+
+def with_setup_kwargs(setup, teardown=None):
+    """Decorator to add setup and/or teardown methods to a test function::
+
+      @with_setup_kwargs(setup, teardown)
+      def test_something():
+          " ... "
+
+    The setup function should return a kwargs dict, which will be passed to
+    the test function and to the teardown function.
+
+    Note that `with_setup_kwargs` is useful *only* for test functions, not for test
+    methods or inside of TestCase subclasses.
+    """
+    def decorate(func):
+        kwargs = {}
+
+        def test_wrapped(*args, **kwargs2):
+            k2 = kwargs.copy()
+            k2.update(kwargs2)
+            k2['testname'] = func.__name__
+            func(*args, **k2)
+
+        test_wrapped.__name__ = func.__name__
+
+        def setup_wrapped():
+            k = setup()
+            kwargs.update(k)
+            if hasattr(func, 'setup'):
+                func.setup()
+        test_wrapped.setup = setup_wrapped
+
+        if teardown:
+            def teardown_wrapped():
+                if hasattr(func, 'teardown'):
+                    func.teardown()
+                teardown(**kwargs)
+
+            test_wrapped.teardown = teardown_wrapped
+        else:
+            if hasattr(func, 'teardown'):
+                test_wrapped.teardown = func.teardown
+        return test_wrapped
+    return decorate
+
+# Demo case for the above, when you run test_gen():
+# _test_gen will run twice,
+# with the following stderr printing
+# setup_func {'b': 2}
+# testcase ('1',) {'b': 2, 'testname': '_test_gen'}
+# teardown_func {'b': 2}
+# setup_func {'b': 2}
+# testcase () {'b': 2, 'testname': '_test_gen'}
+# teardown_func {'b': 2}
+# 
+#def setup_func():
+#    kwargs = {'b': 2}
+#    print("setup_func", kwargs, file=sys.stderr)
+#    return kwargs
+#
+#def teardown_func(**kwargs):
+#    print("teardown_func", kwargs, file=sys.stderr)
+#
+#@with_setup_kwargs(setup=setup_func, teardown=teardown_func)
+#def _test_gen(*args, **kwargs):
+#    print("testcase", args, kwargs, file=sys.stderr)
+#
+#def test_gen():
+#    yield _test_gen, '1'
+#    yield _test_gen
+
+def trim_xml(xml_str):
+    p = etree.XMLParser(encoding="utf-8", remove_blank_text=True)
+    xml_str = bytes(xml_str, "utf-8")
+    elem = etree.XML(xml_str, parser=p)
+    return etree.tostring(elem, encoding="unicode")
+
+def normalize_xml(xml, pretty_print=True):
+    if xml is None:
+        return xml
+
+    root = etree.fromstring(xml.encode(encoding='ascii'))
+
+    for element in root.iter('*'):
+        if element.text is not None and not element.text.strip():
+            element.text = None
+        if element.text is not None:
+            element.text = element.text.strip().replace("\n", "").replace("\r", "")
+        if element.tail is not None and not element.tail.strip():
+            element.tail = None
+        if element.tail is not None:
+            element.tail = element.tail.strip().replace("\n", "").replace("\r", "")
+
+    # Sort the elements
+    for parent in root.xpath('//*[./*]'):  # Search for parent elements
+        parent[:] = sorted(parent, key=lambda x: x.tag)
+
+    xmlstr = etree.tostring(root, encoding="unicode", pretty_print=pretty_print)
+    # there are two different DTD URIs
+    xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
+    xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
+    for uri in ['http://doc.s3.amazonaws.com/doc/2006-03-01/', 'http://s3.amazonaws.com/doc/2006-03-01/']:
+        xmlstr = xmlstr.replace(uri, 'URI-DTD')
+    #xmlstr = re.sub(r'>\s+', '>', xmlstr, count=0, flags=re.MULTILINE)
+    return xmlstr
+
+def assert_xml_equal(got, want):
+    assert want is not None, 'Wanted XML cannot be None'
+    if got is None:
+        raise AssertionError('Got input to validate was None')
+    checker = LXMLOutputChecker()
+    if not checker.check_output(want, got, 0):
+        message = checker.output_difference(Example("", want), got, 0)
+        raise AssertionError(message)
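+
+# A minimal, self-contained sketch of the XML helpers above; the element
+# names and payloads are invented for illustration:
+def _xml_helpers_example():
+    got = ('<Result xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
+           '<Owner><ID>abc</ID></Owner></Result>')
+    want = ('<Result xmlns="http://doc.s3.amazonaws.com/doc/2006-03-01/">'
+            '<Owner><ID>abc</ID></Owner></Result>')
+    # normalize_xml rewrites both DTD namespace URIs to the same
+    # placeholder, so the documents compare equal despite the difference
+    assert_xml_equal(normalize_xml(got), normalize_xml(want))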
diff --git a/s3tests/functional/__init__.py b/s3tests/functional/__init__.py
new file mode 100644 (file)
index 0000000..8911e02
--- /dev/null
+++ b/s3tests/functional/__init__.py
@@ -0,0 +1,488 @@
+import sys
+import configparser
+import boto.exception
+import boto.s3.connection
+import munch
+import itertools
+import os
+import random
+import string
+from http.client import HTTPConnection, HTTPSConnection
+from urllib.parse import urlparse
+
+from .utils import region_sync_meta
+
+s3 = munch.Munch()
+config = munch.Munch()
+targets = munch.Munch()
+
+# this will be assigned by setup()
+prefix = None
+
+calling_formats = dict(
+    ordinary=boto.s3.connection.OrdinaryCallingFormat(),
+    subdomain=boto.s3.connection.SubdomainCallingFormat(),
+    vhost=boto.s3.connection.VHostCallingFormat(),
+    )
+
+def get_prefix():
+    assert prefix is not None
+    return prefix
+
+def is_slow_backend():
+    return slow_backend
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
+
+def nuke_prefixed_buckets_on_conn(prefix, name, conn):
+    print('Cleaning buckets from connection {name} prefix {prefix!r}.'.format(
+        name=name,
+        prefix=prefix,
+        ))
+
+    for bucket in conn.get_all_buckets():
+        print('prefix=',prefix)
+        if bucket.name.startswith(prefix):
+            print('Cleaning bucket {bucket}'.format(bucket=bucket))
+            success = False
+            for i in range(2):
+                try:
+                    try:
+                        iterator = iter(bucket.list_versions())
+                        # peek into iterator to issue list operation
+                        try:
+                            keys = itertools.chain([next(iterator)], iterator)
+                        except StopIteration:
+                            keys = []  # empty iterator
+                    except boto.exception.S3ResponseError as e:
+                        # some S3 implementations do not support object
+                        # versioning - fall back to listing without versions
+                        if e.error_code != 'NotImplemented':
+                            raise e
+                        keys = bucket.list()
+                    for key in keys:
+                        print('Cleaning bucket {bucket} key {key}'.format(
+                            bucket=bucket,
+                            key=key,
+                            ))
+                        # key.set_canned_acl('private')
+                        bucket.delete_key(key.name, version_id = key.version_id)
+                    try:
+                        bucket.delete()
+                    except boto.exception.S3ResponseError as e:
+                        # if DELETE times out, the retry may see NoSuchBucket
+                        if e.error_code != 'NoSuchBucket':
+                            raise e
+                        pass
+                    success = True
+                except boto.exception.S3ResponseError as e:
+                    if e.error_code != 'AccessDenied':
+                        print('GOT UNWANTED ERROR', e.error_code)
+                        raise
+                    # seems like we don't have permissions set appropriately, we'll
+                    # modify permissions and retry
+                    pass
+
+                if success:
+                    break
+
+                bucket.set_canned_acl('private')
+
+
+def nuke_prefixed_buckets(prefix):
+    # If no regions are specified, use the simple method
+    if targets.main.master is None:
+        for name, conn in list(s3.items()):
+            print('Deleting buckets on {name}'.format(name=name))
+            nuke_prefixed_buckets_on_conn(prefix, name, conn)
+    else:
+        # First, delete all buckets on the master connection
+        for name, conn in list(s3.items()):
+            if conn == targets.main.master.connection:
+                print('Deleting buckets on {name} (master)'.format(name=name))
+                nuke_prefixed_buckets_on_conn(prefix, name, conn)
+
+        # Then sync to propagate deletes to secondaries
+        region_sync_meta(targets.main, targets.main.master.connection)
+        print('region-sync in nuke_prefixed_buckets')
+
+        # Now delete remaining buckets on any other connection
+        for name, conn in list(s3.items()):
+            if conn != targets.main.master.connection:
+                print('Deleting buckets on {name} (non-master)'.format(name=name))
+                nuke_prefixed_buckets_on_conn(prefix, name, conn)
+
+    print('Done with cleanup of test buckets.')
+
+class TargetConfig:
+    def __init__(self, cfg, section):
+        self.port = None
+        self.api_name = ''
+        self.is_master = False
+        self.is_secure = False
+        self.sync_agent_addr = None
+        self.sync_agent_port = 0
+        self.sync_meta_wait = 0
+        try:
+            self.api_name = cfg.get(section, 'api_name')
+        except (configparser.NoSectionError, configparser.NoOptionError):
+            pass
+        try:
+            self.port = cfg.getint(section, 'port')
+        except configparser.NoOptionError:
+            pass
+        try:
+            self.host=cfg.get(section, 'host')
+        except configparser.NoOptionError:
+            raise RuntimeError(
+                'host not specified for section {s}'.format(s=section)
+                )
+        try:
+            self.is_master=cfg.getboolean(section, 'is_master')
+        except configparser.NoOptionError:
+            pass
+
+        try:
+            self.is_secure=cfg.getboolean(section, 'is_secure')
+        except configparser.NoOptionError:
+            pass
+
+        try:
+            raw_calling_format = cfg.get(section, 'calling_format')
+        except configparser.NoOptionError:
+            raw_calling_format = 'ordinary'
+
+        try:
+            self.sync_agent_addr = cfg.get(section, 'sync_agent_addr')
+        except (configparser.NoSectionError, configparser.NoOptionError):
+            pass
+
+        try:
+            self.sync_agent_port = cfg.getint(section, 'sync_agent_port')
+        except (configparser.NoSectionError, configparser.NoOptionError):
+            pass
+
+        try:
+            self.sync_meta_wait = cfg.getint(section, 'sync_meta_wait')
+        except (configparser.NoSectionError, configparser.NoOptionError):
+            pass
+
+
+        try:
+            self.calling_format = calling_formats[raw_calling_format]
+        except KeyError:
+            raise RuntimeError(
+                'calling_format unknown: %r' % raw_calling_format
+                )
+
+class TargetConnection:
+    def __init__(self, conf, conn):
+        self.conf = conf
+        self.connection = conn
+
+
+
+class RegionsInfo:
+    def __init__(self):
+        self.m = munch.Munch()
+        self.master = None
+        self.secondaries = []
+
+    def add(self, name, region_config):
+        self.m[name] = region_config
+        if region_config.is_master:
+            if self.master is not None:
+                raise RuntimeError(
+                    'multiple regions defined as master'
+                    )
+            self.master = region_config
+        else:
+            self.secondaries.append(region_config)
+    def get(self, name=None):
+        if name is None:
+            return self.m
+        return self.m[name]
+    def items(self):
+        return self.m.items()
+
+regions = RegionsInfo()
+
+
+class RegionsConn:
+    def __init__(self):
+        self.m = munch.Munch()
+        self.default = None
+        self.master = None
+        self.secondaries = []
+
+    def items(self):
+        return self.m.items()
+
+    def set_default(self, conn):
+        self.default = conn
+
+    def add(self, name, conn):
+        self.m[name] = conn
+        if not self.default:
+            self.default = conn
+        if conn.conf.is_master:
+            self.master = conn
+        else:
+            self.secondaries.append(conn)
+
+
+# nosetests --processes=N with N>1 is safe
+_multiprocess_can_split_ = True
+
+def setup():
+
+    cfg = configparser.RawConfigParser()
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    cfg.read(path)
+
+    global prefix
+    global targets
+    global slow_backend
+
+    try:
+        template = cfg.get('fixtures', 'bucket prefix')
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        template = 'test-{random}-'
+    prefix = choose_bucket_prefix(template=template)
+
+    try:
+        slow_backend = cfg.getboolean('fixtures', 'slow backend')
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        slow_backend = False
+
+    # pull the default_region out, if it exists
+    try:
+        default_region = cfg.get('fixtures', 'default_region')
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        default_region = None
+
+    s3.clear()
+    config.clear()
+
+    for section in cfg.sections():
+        try:
+            (type_, name) = section.split(None, 1)
+        except ValueError:
+            continue
+        if type_ != 'region':
+            continue
+        regions.add(name, TargetConfig(cfg, section))
+
+    for section in cfg.sections():
+        try:
+            (type_, name) = section.split(None, 1)
+        except ValueError:
+            continue
+        if type_ != 's3':
+            continue
+
+        if len(regions.get()) == 0:
+            regions.add("default", TargetConfig(cfg, section))
+
+        config[name] = munch.Munch()
+        for var in [
+            'user_id',
+            'display_name',
+            'email',
+            's3website_domain',
+            'host',
+            'port',
+            'is_secure',
+            'kms_keyid',
+            'storage_classes',
+            ]:
+            try:
+                config[name][var] = cfg.get(section, var)
+            except configparser.NoOptionError:
+                pass
+
+        targets[name] = RegionsConn()
+
+        for (k, conf) in regions.items():
+            conn = boto.s3.connection.S3Connection(
+                aws_access_key_id=cfg.get(section, 'access_key'),
+                aws_secret_access_key=cfg.get(section, 'secret_key'),
+                is_secure=conf.is_secure,
+                port=conf.port,
+                host=conf.host,
+                # TODO test vhost calling format
+                calling_format=conf.calling_format,
+                )
+
+            temp_targetConn = TargetConnection(conf, conn)
+            targets[name].add(k, temp_targetConn)
+
+            # Explicitly test for and set the default region, if specified.
+            # If it was not specified, use the 'is_master' flag to set it.
+            if default_region:
+                if default_region == name:
+                    targets[name].set_default(temp_targetConn)
+            elif conf.is_master:
+                targets[name].set_default(temp_targetConn)
+
+        s3[name] = targets[name].default.connection
+
+    # WARNING! we actively delete all buckets we see with the prefix
+    # we've chosen! Choose your prefix with care, and don't reuse
+    # credentials!
+
+    # We also assume nobody else is going to use buckets with that
+    # prefix. This is racy but given enough randomness, should not
+    # really fail.
+    nuke_prefixed_buckets(prefix=prefix)
+
+
+def teardown():
+    # remove our buckets here also, to avoid littering
+    nuke_prefixed_buckets(prefix=prefix)
+
+
+bucket_counter = itertools.count(1)
+
+
+def get_new_bucket_name():
+    """
+    Get a bucket name that probably does not exist.
+
+    We make every attempt to use a unique random prefix, so if a
+    bucket by this name happens to exist, it's ok if tests give
+    false negatives.
+    """
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    return name
+
+
+def get_new_bucket(target=None, name=None, headers=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if target is None:
+        target = targets.main.default
+    connection = target.connection
+    if name is None:
+        name = get_new_bucket_name()
+    # the only way for this to fail with a pre-existing bucket is if
+    # someone raced us between setup nuke_prefixed_buckets and here;
+    # ignore that as astronomically unlikely
+    bucket = connection.create_bucket(name, location=target.conf.api_name, headers=headers)
+    return bucket
+
+def _make_request(method, bucket, key, body=None, authenticated=False, response_headers=None, request_headers=None, expires_in=100000, path_style=True, timeout=None):
+    """
+    issue a request for a specified method, on a specified <bucket,key>,
+    with a specified (optional) body (encrypted per the connection), and
+    return the response (status, reason).
+
+    If key is None, then this will be treated as a bucket-level request.
+
+    If the request or response headers are None, then default values will be
+    provided by later methods.
+    """
+    if not path_style:
+        conn = bucket.connection
+        if request_headers is None:
+            request_headers = {}
+        request_headers['Host'] = conn.calling_format.build_host(conn.server_name(), bucket.name)
+
+    if authenticated:
+        urlobj = None
+        if key is not None:
+            urlobj = key
+        elif bucket is not None:
+            urlobj = bucket
+        else:
+            raise RuntimeError('Unable to find bucket name')
+        url = urlobj.generate_url(expires_in, method=method, response_headers=response_headers, headers=request_headers)
+        o = urlparse(url)
+        path = o.path + '?' + o.query
+    else:
+        bucketobj = None
+        if key is not None:
+            path = '/{obj}'.format(obj=key.name)
+            bucketobj = key.bucket
+        elif bucket is not None:
+            path = '/'
+            bucketobj = bucket
+        else:
+            raise RuntimeError('Unable to find bucket name')
+        if path_style:
+            path = '/{bucket}'.format(bucket=bucketobj.name) + path
+
+    return _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path, body=body, request_headers=request_headers, secure=s3.main.is_secure, timeout=timeout)
+
+def _make_bucket_request(method, bucket, body=None, authenticated=False, response_headers=None, request_headers=None, expires_in=100000, path_style=True, timeout=None):
+    """
+    issue a request for a specified method, on a specified <bucket>,
+    with a specified (optional) body (encrypted per the connection), and
+    return the response (status, reason)
+    """
+    return _make_request(method=method, bucket=bucket, key=None, body=body, authenticated=authenticated, response_headers=response_headers, request_headers=request_headers, expires_in=expires_in, path_style=path_style, timeout=timeout)
+
+def _make_raw_request(host, port, method, path, body=None, request_headers=None, secure=False, timeout=None):
+    """
+    issue a request to a specific host & port, for a specified method, on a
+    specified path with a specified (optional) body (encrypted per the
+    connection), and return the response (status, reason).
+
+    This allows construction of special cases not covered by the bucket/key to
+    URL mapping of _make_request/_make_bucket_request.
+    """
+    if secure:
+        class_ = HTTPSConnection
+    else:
+        class_ = HTTPConnection
+
+    if request_headers is None:
+        request_headers = {}
+
+    c = class_(host, port=port, timeout=timeout)
+
+    # TODO: We might have to modify this in future if we need to interact with
+    # how httplib.request handles Accept-Encoding and Host.
+    c.request(method, path, body=body, headers=request_headers)
+
+    res = c.getresponse()
+    #c.close()
+
+    print(res.status, res.reason)
+    return res
+
+
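+# Illustrative sketch of the raw-request escape hatch; host, port and path
+# are placeholders, not values prescribed by the tests:
+def _raw_request_example():
+    # issue an unauthenticated bucket-level GET and report the result
+    res = _make_raw_request(host='localhost', port=8000, method='GET',
+                            path='/my-bucket', secure=False, timeout=30)
+    print(res.status, res.reason)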
diff --git a/s3tests/functional/policy.py b/s3tests/functional/policy.py
new file mode 100644 (file)
index 0000000..aae5454
--- /dev/null
+++ b/s3tests/functional/policy.py
@@ -0,0 +1,46 @@
+import json
+
+class Statement(object):
+    def __init__(self, action, resource, principal = {"AWS" : "*"}, effect= "Allow", condition = None):
+        self.principal = principal
+        self.action = action
+        self.resource = resource
+        self.condition = condition
+        self.effect = effect
+
+    def to_dict(self):
+        d = { "Action" : self.action,
+              "Principal" : self.principal,
+              "Effect" : self.effect,
+              "Resource" : self.resource
+        }
+
+        if self.condition is not None:
+            d["Condition"] = self.condition
+
+        return d
+
+class Policy(object):
+    def __init__(self):
+        self.statements = []
+
+    def add_statement(self, s):
+        self.statements.append(s)
+        return self
+
+    def to_json(self):
+        policy_dict = {
+            "Version" : "2012-10-17",
+            "Statement":
+            [s.to_dict() for s in self.statements]
+        }
+
+        return json.dumps(policy_dict)
+
+def make_json_policy(action, resource, principal={"AWS": "*"}, conditions=None):
+    """
+    Helper function to make single statement policies
+    """
+    s = Statement(action, resource, principal, condition=conditions)
+    p = Policy()
+    return p.add_statement(s).to_json()
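+
+# Illustrative sketch; the bucket ARN below is a placeholder:
+def _policy_example():
+    # build a single-statement policy allowing anyone ({"AWS": "*"})
+    # to read every object in the hypothetical bucket
+    return make_json_policy(
+        action='s3:GetObject',
+        resource='arn:aws:s3:::example-bucket/*')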
diff --git a/s3tests/functional/test_headers.py b/s3tests/functional/test_headers.py
new file mode 100644 (file)
index 0000000..63f0c56
--- /dev/null
+++ b/s3tests/functional/test_headers.py
@@ -0,0 +1,1054 @@
+from io import StringIO
+import boto.connection
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+import boto.utils
+import nose
+import operator
+import random
+import string
+import socket
+import ssl
+import os
+import re
+from email.utils import formatdate
+
+from urllib.parse import urlparse
+
+from boto.s3.connection import S3Connection
+
+from nose.tools import eq_ as eq
+from nose.plugins.attrib import attr
+from nose.plugins.skip import SkipTest
+
+from .utils import assert_raises
+
+from email.header import decode_header
+
+from . import (
+    _make_raw_request,
+    nuke_prefixed_buckets,
+    get_new_bucket,
+    s3,
+    config,
+    get_prefix,
+    TargetConnection,
+    targets,
+    )
+
+
+_orig_conn = {}
+_orig_authorize = None
+_custom_headers = {}
+_remove_headers = []
+boto_type = None
+
+
+# HeaderS3Connection and _our_authorize are necessary to be able to arbitrarily
+# overwrite headers. Depending on the version of boto, one or the other is
+# necessary. We later determine in setup what needs to be used.
+
+def _update_headers(headers):
+    """ update a set of headers with additions/removals
+    """
+    global _custom_headers, _remove_headers
+
+    headers.update(_custom_headers)
+
+    for header in _remove_headers:
+        try:
+            del headers[header]
+        except KeyError:
+            pass
+
+
+# Note: We need to update the headers twice. The first time so the
+# authentication signing is done correctly. The second time to overwrite any
+# headers modified or created in the authentication step.
+
+class HeaderS3Connection(S3Connection):
+    """ establish an authenticated connection w/customized headers
+    """
+    def fill_in_auth(self, http_request, **kwargs):
+        _update_headers(http_request.headers)
+        S3Connection.fill_in_auth(self, http_request, **kwargs)
+        _update_headers(http_request.headers)
+
+        return http_request
+
+
+def _our_authorize(self, connection, **kwargs):
+    """ perform an authentication w/customized headers
+    """
+    _update_headers(self.headers)
+    _orig_authorize(self, connection, **kwargs)
+    _update_headers(self.headers)
+
+
+def setup():
+    global boto_type
+
+    # we determine what we need to replace by the existence of particular
+    # attributes. boto 2.0rc1 has fill_in_auth for S3Connection, while boto 2.0
+    # has authorize for HTTPRequest.
+    if hasattr(S3Connection, 'fill_in_auth'):
+        global _orig_conn
+
+        boto_type = 'S3Connection'
+        for conn in s3:
+            _orig_conn[conn] = s3[conn]
+            header_conn = HeaderS3Connection(
+                aws_access_key_id=s3[conn].aws_access_key_id,
+                aws_secret_access_key=s3[conn].aws_secret_access_key,
+                is_secure=s3[conn].is_secure,
+                port=s3[conn].port,
+                host=s3[conn].host,
+                calling_format=s3[conn].calling_format
+                )
+
+            s3[conn] = header_conn
+    elif hasattr(boto.connection.HTTPRequest, 'authorize'):
+        global _orig_authorize
+
+        boto_type = 'HTTPRequest'
+
+        _orig_authorize = boto.connection.HTTPRequest.authorize
+        boto.connection.HTTPRequest.authorize = _our_authorize
+    else:
+        raise RuntimeError
+
+
+def teardown():
+    global boto_type
+
+    # replace original functionality depending on the boto version
+    if boto_type == 'S3Connection':
+        global _orig_conn
+        for conn in s3:
+            s3[conn] = _orig_conn[conn]
+        _orig_conn = {}
+    elif boto_type == 'HTTPRequest':
+        global _orig_authorize
+
+        boto.connection.HTTPRequest.authorize = _orig_authorize
+        _orig_authorize = None
+    else:
+        raise RuntimeError
+
+
+def _clear_custom_headers():
+    """ Eliminate any header customizations
+    """
+    global _custom_headers, _remove_headers
+    _custom_headers = {}
+    _remove_headers = []
+
+
+def _add_custom_headers(headers=None, remove=None):
+    """ Define header customizations (additions, replacements, removals)
+    """
+    global _custom_headers, _remove_headers
+    if not _custom_headers:
+        _custom_headers = {}
+
+    if headers is not None:
+        _custom_headers.update(headers)
+    if remove is not None:
+        _remove_headers.extend(remove)
+
+
+def _setup_bad_object(headers=None, remove=None):
+    """ Create a new bucket, add an object w/header customizations
+    """
+    bucket = get_new_bucket()
+
+    _add_custom_headers(headers=headers, remove=remove)
+    return bucket.new_key('foo')
+
+def tag(*tags):
+    def wrap(func):
+        for tag in tags:
+            setattr(func, tag, True)
+        return func
+    return wrap
+
+#
+# common tests
+#
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no content length')
+@attr(assertion='fails 411')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_contentlength_none():
+    key = _setup_bad_object(remove=('Content-Length',))
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 411)
+    eq(e.reason, 'Length Required')
+    eq(e.error_code,'MissingContentLength')
+
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/content length too long')
+@attr(assertion='fails 400')
+@nose.with_setup(teardown=_clear_custom_headers)
+@attr('fails_on_rgw')
+def test_object_create_bad_contentlength_mismatch_above():
+    content = 'bar'
+    length = len(content) + 1
+
+    key = _setup_bad_object({'Content-Length': length})
+
+    # Disable retries since key.should_retry will discard the response with
+    # PleaseRetryException.
+    def no_retry(response, chunked_transfer): return False
+    key.should_retry = no_retry
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content)
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'RequestTimeout')
+
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty authorization')
+@attr(assertion='fails 403')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_authorization_empty():
+    key = _setup_bad_object({'Authorization': ''})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'AccessDenied')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date and x-amz-date')
+@attr(assertion='succeeds')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_date_and_amz_date():
+    date = formatdate(usegmt=True)
+    key = _setup_bad_object({'Date': date, 'X-Amz-Date': date})
+    key.set_contents_from_string('bar')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/x-amz-date and no date')
+@attr(assertion='succeeds')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_amz_date_and_no_date():
+    date = formatdate(usegmt=True)
+    key = _setup_bad_object({'X-Amz-Date': date}, ('Date',))
+    key.set_contents_from_string('bar')
+
+
+# the teardown is really messed up here. check it out
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no authorization')
+@attr(assertion='fails 403')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_authorization_none():
+    key = _setup_bad_object(remove=('Authorization',))
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'AccessDenied')
+
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no content length')
+@attr(assertion='succeeds')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_contentlength_none():
+    _add_custom_headers(remove=('Content-Length',))
+    get_new_bucket()
+
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='acls')
+@attr(operation='set w/no content length')
+@attr(assertion='succeeds')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_acl_create_contentlength_none():
+    bucket = get_new_bucket()
+    key = bucket.new_key('foo')
+    key.set_contents_from_string('blah')
+
+    _add_custom_headers(remove=('Content-Length',))
+    key.set_acl('public-read')
+
+def _create_new_connection():
+    # We're going to need to manually build a connection using bad authorization info.
+    # But to save the day, let's just hijack the settings from s3.main. :)
+    main = s3.main
+    conn = HeaderS3Connection(
+        aws_access_key_id=main.aws_access_key_id,
+        aws_secret_access_key=main.aws_secret_access_key,
+        is_secure=main.is_secure,
+        port=main.port,
+        host=main.host,
+        calling_format=main.calling_format,
+        )
+    return TargetConnection(targets.main.default.conf, conn)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty content length')
+@attr(assertion='fails 400')
+@nose.with_setup(teardown=_clear_custom_headers)
+@attr('fails_on_rgw')
+def test_bucket_create_bad_contentlength_empty():
+    conn = _create_new_connection()
+    _add_custom_headers({'Content-Length': ''})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, conn)
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no content length')
+@attr(assertion='succeeds')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_contentlength_none():
+    _add_custom_headers(remove=('Content-Length',))
+    bucket = get_new_bucket()
+
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty authorization')
+@attr(assertion='fails 403')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_authorization_empty():
+    _add_custom_headers({'Authorization': ''})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'AccessDenied')
+
+
+# the teardown is really messed up here. check it out
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no authorization')
+@attr(assertion='fails 403')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_authorization_none():
+    _add_custom_headers(remove=('Authorization',))
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'AccessDenied')
+
+#
+# AWS2 specific tests
+#
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/content length too short')
+@attr(assertion='fails 400')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_contentlength_mismatch_below_aws2():
+    check_aws2_support()
+    content = 'bar'
+    length = len(content) - 1
+    key = _setup_bad_object({'Content-Length': length})
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content)
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'BadDigest')
+
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/incorrect authorization')
+@attr(assertion='fails 403')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_authorization_incorrect_aws2():
+    check_aws2_support()
+    key = _setup_bad_object({'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='})
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId')
+
+
+@tag('auth_aws2')
+@nose.with_setup(teardown=_clear_custom_headers)
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid authorization')
+@attr(assertion='fails 400')
+@attr('fails_on_dbstore')
+def test_object_create_bad_authorization_invalid_aws2():
+    check_aws2_support()
+    key = _setup_bad_object({'Authorization': 'AWS HAHAHA'})
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'InvalidArgument')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no date')
+@attr(assertion='fails 403')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_none_aws2():
+    check_aws2_support()
+    key = _setup_bad_object(remove=('Date',))
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'AccessDenied')
+
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/invalid authorization')
+@attr(assertion='fails 400')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_authorization_invalid_aws2():
+    check_aws2_support()
+    _add_custom_headers({'Authorization': 'AWS HAHAHA'})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'InvalidArgument')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no date')
+@attr(assertion='fails 403')
+@attr('fails_on_dbstore')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_date_none_aws2():
+    check_aws2_support()
+    _add_custom_headers(remove=('Date',))
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'AccessDenied')
+
+#
+# AWS4 specific tests
+#
+
+def check_aws4_support():
+    if 'S3_USE_SIGV4' not in os.environ:
+        raise SkipTest
+
+def check_aws2_support():
+    if 'S3_USE_SIGV4' in os.environ:
+        raise SkipTest
+
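+# Illustrative invocation (the exact runner flags are an assumption; adapt to
+# your environment): set S3_USE_SIGV4=1 to exercise the auth_aws4 tests and
+# skip the auth_aws2 ones:
+#   S3_USE_SIGV4=1 S3TEST_CONF=your.conf ./virtualenv/bin/nosetests -a 'auth_aws4'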
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid MD5')
+@attr(assertion='fails 400')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_md5_invalid_garbage_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Content-MD5':'AWS4 HAHAHA'})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'InvalidDigest')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/content length too short')
+@attr(assertion='fails 400')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_contentlength_mismatch_below_aws4():
+    check_aws4_support()
+    content = 'bar'
+    length = len(content) - 1
+    key = _setup_bad_object({'Content-Length': length})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, content)
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'XAmzContentSHA256Mismatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/incorrect authorization')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_authorization_incorrect_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=AKIAIGR7ZNNBHC5BKSUB/20150930/us-east-1/s3/aws4_request,SignedHeaders=host;user-agent,Signature=FWeDfwojDSdS2Ztmpfeubhd9isU='})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch', 'InvalidAccessKeyId')
+
+
+@tag('auth_aws4')
+@nose.with_setup(teardown=_clear_custom_headers)
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid authorization')
+@attr(assertion='fails 400')
+def test_object_create_bad_authorization_invalid_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Authorization': 'AWS4-HMAC-SHA256 Credential=HAHAHA'})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    assert e.error_code in ('AuthorizationHeaderMalformed', 'InvalidArgument')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty user agent')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_ua_empty_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'User-Agent': ''})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no user agent')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_ua_none_aws4():
+    check_aws4_support()
+    key = _setup_bad_object(remove=('User-Agent',))
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid date')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_invalid_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Date': 'Bad Date'})
+    key.set_contents_from_string('bar')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid x-amz-date')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_amz_date_invalid_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'X-Amz-Date': 'Bad Date'})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty date')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_empty_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Date': ''})
+    key.set_contents_from_string('bar')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty x-amz-date')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_amz_date_empty_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'X-Amz-Date': ''})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no date')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_none_aws4():
+    check_aws4_support()
+    key = _setup_bad_object(remove=('Date',))
+    key.set_contents_from_string('bar')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no x-amz-date')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_amz_date_none_aws4():
+    check_aws4_support()
+    key = _setup_bad_object(remove=('X-Amz-Date',))
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date in past')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_before_today_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
+    key.set_contents_from_string('bar')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/x-amz-date in past')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_amz_date_before_today_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'X-Amz-Date': '20100707T215304Z'})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date in future')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_after_today_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
+    key.set_contents_from_string('bar')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/x-amz-date in future')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_amz_date_after_today_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'X-Amz-Date': '20300707T215304Z'})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date before epoch')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_before_epoch_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
+    key.set_contents_from_string('bar')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/x-amz-date before epoch')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_amz_date_before_epoch_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'X-Amz-Date': '19500707T215304Z'})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date after 9999')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_date_after_end_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'Date': 'Tue, 07 Jul 9999 21:53:04 GMT'})
+    key.set_contents_from_string('bar')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/x-amz-date after 9999')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_bad_amz_date_after_end_aws4():
+    check_aws4_support()
+    key = _setup_bad_object({'X-Amz-Date': '99990707T215304Z'})
+
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_string, 'bar')
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create with missing signed custom header')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_missing_signed_custom_header_aws4():
+    check_aws4_support()
+    method = 'PUT'
+    expires_in = '100000'
+    bucket = get_new_bucket()
+    key = bucket.new_key('foo')
+    body = 'zoo'
+
+    # compute the signature with 'x-amz-foo=bar' in the headers...
+    request_headers = {'x-amz-foo':'bar'}
+    url = key.generate_url(expires_in, method=method, headers=request_headers)
+
+    o = urlparse(url)
+    path = o.path + '?' + o.query
+
+    # avoid sending 'x-amz-foo=bar' in the headers
+    request_headers.pop('x-amz-foo')
+
+    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path,
+                           body=body, request_headers=request_headers, secure=s3.main.is_secure)
+
+    eq(res.status, 403)
+    eq(res.reason, 'Forbidden')
+
+
+@tag('auth_aws4')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create with missing signed header')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_object_create_missing_signed_header_aws4():
+    check_aws4_support()
+    method = 'PUT'
+    expires_in = '100000'
+    bucket = get_new_bucket()
+    key = bucket.new_key('foo')
+    body = 'zoo'
+
+    # compute the signature...
+    request_headers = {}
+    url = key.generate_url(expires_in, method=method, headers=request_headers)
+
+    o = urlparse(url)
+    path = o.path + '?' + o.query
+
+    # 'X-Amz-Expires' is missing
+    target = r'&X-Amz-Expires=' + expires_in
+    path = re.sub(target, '', path)
+
+    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path,
+                           body=body, request_headers=request_headers, secure=s3.main.is_secure)
+
+    eq(res.status, 403)
+    eq(res.reason, 'Forbidden')
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/invalid authorization')
+@attr(assertion='fails 400')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_authorization_invalid_aws4():
+    check_aws4_support()
+    _add_custom_headers({'Authorization': 'AWS4 HAHAHA'})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'InvalidArgument')
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty user agent')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_ua_empty_aws4():
+    check_aws4_support()
+    _add_custom_headers({'User-Agent': ''})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'SignatureDoesNotMatch')
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no user agent')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_ua_none_aws4():
+    check_aws4_support()
+    _add_custom_headers(remove=('User-Agent',))
+
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/invalid date')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_date_invalid_aws4():
+    check_aws4_support()
+    _add_custom_headers({'Date': 'Bad Date'})
+    get_new_bucket()
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/invalid x-amz-date')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_amz_date_invalid_aws4():
+    check_aws4_support()
+    _add_custom_headers({'X-Amz-Date': 'Bad Date'})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty date')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_date_empty_aws4():
+    check_aws4_support()
+    _add_custom_headers({'Date': ''})
+    get_new_bucket()
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty x-amz-date')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_amz_date_empty_aws4():
+    check_aws4_support()
+    _add_custom_headers({'X-Amz-Date': ''})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no date')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_date_none_aws4():
+    check_aws4_support()
+    _add_custom_headers(remove=('Date',))
+    get_new_bucket()
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no x-amz-date')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_amz_date_none_aws4():
+    check_aws4_support()
+    _add_custom_headers(remove=('X-Amz-Date',))
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/date in past')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_date_before_today_aws4():
+    check_aws4_support()
+    _add_custom_headers({'Date': 'Tue, 07 Jul 2010 21:53:04 GMT'})
+    get_new_bucket()
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/x-amz-date in past')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_amz_date_before_today_aws4():
+    check_aws4_support()
+    _add_custom_headers({'X-Amz-Date': '20100707T215304Z'})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/date in future')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_date_after_today_aws4():
+    check_aws4_support()
+    _add_custom_headers({'Date': 'Tue, 07 Jul 2030 21:53:04 GMT'})
+    get_new_bucket()
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/x-amz-date in future')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_amz_date_after_today_aws4():
+    check_aws4_support()
+    _add_custom_headers({'X-Amz-Date': '20300707T215304Z'})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('RequestTimeTooSkewed', 'SignatureDoesNotMatch')
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/date before epoch')
+@attr(assertion='succeeds')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_date_before_epoch_aws4():
+    check_aws4_support()
+    _add_custom_headers({'Date': 'Tue, 07 Jul 1950 21:53:04 GMT'})
+    get_new_bucket()
+
+
+@tag('auth_aws4')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/x-amz-date before epoch')
+@attr(assertion='fails 403')
+@nose.with_setup(teardown=_clear_custom_headers)
+def test_bucket_create_bad_amz_date_before_epoch_aws4():
+    check_aws4_support()
+    _add_custom_headers({'X-Amz-Date': '19500707T215304Z'})
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket)
+
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    assert e.error_code in ('AccessDenied', 'SignatureDoesNotMatch')
diff --git a/s3tests/functional/test_s3.py b/s3tests/functional/test_s3.py
new file mode 100644 (file)
index 0000000..808f9fa
--- /dev/null
@@ -0,0 +1,842 @@
+from io import StringIO
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+import boto.s3.lifecycle
+import datetime
+import time
+import email.utils
+import isodate
+import nose
+import operator
+import socket
+import ssl
+import os
+import requests
+import base64
+import hmac
+import pytz
+import json
+import httplib2
+import threading
+import itertools
+import string
+import random
+import re
+
+from collections import defaultdict
+from urllib.parse import urlparse
+
+from nose.tools import eq_ as eq
+from nose.plugins.attrib import attr
+from nose.plugins.skip import SkipTest
+
+from . import utils
+from .utils import assert_raises
+
+from .policy import Policy, Statement, make_json_policy
+
+from . import (
+    nuke_prefixed_buckets,
+    get_new_bucket,
+    get_new_bucket_name,
+    s3,
+    targets,
+    config,
+    get_prefix,
+    is_slow_backend,
+    _make_request,
+    _make_bucket_request,
+    _make_raw_request,
+    )
+
+
+def check_access_denied(fn, *args, **kwargs):
+    e = assert_raises(boto.exception.S3ResponseError, fn, *args, **kwargs)
+    eq(e.status, 403)
+    eq(e.reason, 'Forbidden')
+    eq(e.error_code, 'AccessDenied')
+
+def check_bad_bucket_name(name):
+    """
+    Attempt to create a bucket with a specified name, and confirm
+    that the request fails because of an invalid bucket name.
+    """
+    e = assert_raises(boto.exception.S3ResponseError, get_new_bucket, targets.main.default, name)
+    eq(e.status, 400)
+    eq(e.reason.lower(), 'bad request') # some proxies vary the case
+    eq(e.error_code, 'InvalidBucketName')
+
+def _create_keys(bucket=None, keys=[]):
+    """
+    Populate a (specified or new) bucket with objects with
+    specified names (and contents identical to their names).
+    """
+    if bucket is None:
+        bucket = get_new_bucket()
+
+    for s in keys:
+        key = bucket.new_key(s)
+        key.set_contents_from_string(s)
+
+    return bucket
+
+
+def _get_alt_connection():
+    return boto.s3.connection.S3Connection(
+        aws_access_key_id=s3['alt'].aws_access_key_id,
+        aws_secret_access_key=s3['alt'].aws_secret_access_key,
+        is_secure=s3['alt'].is_secure,
+        port=s3['alt'].port,
+        host=s3['alt'].host,
+        calling_format=s3['alt'].calling_format,
+    )
+
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/! in name')
+@attr(assertion='fails with subdomain')
+def test_bucket_create_naming_bad_punctuation():
+    # characters other than [a-zA-Z0-9._-]
+    check_bad_bucket_name('alpha!soup')
+
+def check_versioning(bucket, status):
+    try:
+        eq(bucket.get_versioning_status()['Versioning'], status)
+    except KeyError:
+        eq(status, None)
+
+# Amazon is eventually consistent; retry a few times if the check fails.
+def check_configure_versioning_retry(bucket, status, expected_string):
+    bucket.configure_versioning(status)
+
+    read_status = None
+
+    for i in range(5):
+        try:
+            read_status = bucket.get_versioning_status()['Versioning']
+        except KeyError:
+            read_status = None
+
+        if expected_string == read_status:
+            break
+
+        time.sleep(1)
+
+    eq(expected_string, read_status)
+
+@attr(resource='object')
+@attr(method='create')
+@attr(operation='create versioned object, read nonexistent null version')
+@attr(assertion='read null version behaves correctly')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_versioning_obj_read_not_exist_null():
+    bucket = get_new_bucket()
+    check_versioning(bucket, None)
+
+    check_configure_versioning_retry(bucket, True, "Enabled")
+
+    content = 'fooz'
+    objname = 'testobj'
+
+    key = bucket.new_key(objname)
+    key.set_contents_from_string(content)
+
+    key = bucket.get_key(objname, version_id='null')
+    eq(key, None)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='append object')
+@attr(assertion='success')
+@attr('fails_on_aws')
+@attr('fails_with_subdomain')
+@attr('appendobject')
+@attr('fails_on_dbstore')
+def test_append_object():
+    bucket = get_new_bucket()
+    key = bucket.new_key('foo')
+    expires_in = 100000
+    url = key.generate_url(expires_in, method='PUT')
+    o = urlparse(url)
+    path = o.path + '?' + o.query
+    path1 = path + '&append&position=0'
+    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path1, body='abc', secure=s3.main.is_secure)
+    path2 = path + '&append&position=3'
+    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path2, body='abc', secure=s3.main.is_secure)
+    eq(res.status, 200)
+    eq(res.reason, 'OK')
+
+    key = bucket.get_key('foo')
+    eq(key.size, 6) 
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='append to normal object')
+@attr(assertion='fails 409')
+@attr('fails_on_aws')
+@attr('fails_with_subdomain')
+@attr('appendobject')
+@attr('fails_on_dbstore')
+def test_append_normal_object():
+    bucket = get_new_bucket()
+    key = bucket.new_key('foo')
+    key.set_contents_from_string('abc')
+    expires_in = 100000
+    url = key.generate_url(expires_in, method='PUT')
+    o = urlparse(url)
+    path = o.path + '?' + o.query
+    path = path + '&append&position=3'
+    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path, body='abc', secure=s3.main.is_secure)
+    eq(res.status, 409)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='append position not right')
+@attr(assertion='fails 409')
+@attr('fails_on_aws')
+@attr('fails_with_subdomain')
+@attr('appendobject')
+@attr('fails_on_dbstore')
+def test_append_object_position_wrong():
+    bucket = get_new_bucket()
+    key = bucket.new_key('foo')
+    expires_in = 100000
+    url = key.generate_url(expires_in, method='PUT')
+    o = urlparse(url)
+    path = o.path + '?' + o.query
+    path1 = path + '&append&position=0'
+    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path1, body='abc', secure=s3.main.is_secure)
+    path2 = path + '&append&position=9'
+    res = _make_raw_request(host=s3.main.host, port=s3.main.port, method='PUT', path=path2, body='abc', secure=s3.main.is_secure)
+    eq(res.status, 409)
+    eq(int(res.getheader('x-rgw-next-append-position')), 3)
+
+
+# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
+# http://tracker.newdream.net/issues/984
+@attr(resource='bucket.log')
+@attr(method='put')
+@attr(operation='set/enable/disable logging target')
+@attr(assertion='operations succeed')
+@attr('fails_on_rgw')
+def test_logging_toggle():
+    bucket = get_new_bucket()
+    log_bucket = get_new_bucket(targets.main.default, bucket.name + '-log')
+    log_bucket.set_as_logging_target()
+    bucket.enable_logging(target_bucket=log_bucket, target_prefix=bucket.name)
+    bucket.disable_logging()
+    # NOTE: this does not actually test whether or not logging works
+
+def list_bucket_storage_class(bucket):
+    result = defaultdict(list)
+    for k in bucket.get_all_versions():
+        result[k.storage_class].append(k)
+
+    return result
+
+def transfer_part(bucket, mp_id, mp_keyname, i, part, headers=None):
+    """Transfer a part of a multipart upload. Designed to be run in parallel.
+    """
+    mp = boto.s3.multipart.MultiPartUpload(bucket)
+    mp.key_name = mp_keyname
+    mp.id = mp_id
+    part_out = StringIO(part)
+    mp.upload_part_from_file(part_out, i+1, headers=headers)
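+
+# Parallel-use sketch (names illustrative): one thread per part.
+#   threads = [threading.Thread(target=transfer_part,
+#                               args=(bucket, upload.id, upload.key_name, i, part))
+#              for i, part in enumerate(parts)]
+#   for t in threads: t.start()
+#   for t in threads: t.join()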
+
+def generate_random(size, part_size=5*1024*1024):
+    """
+    Generate the specified amount of random data, yielded in chunks.
+    (actually each part is a repetition of its first KB)
+    """
+    chunk = 1024
+    allowed = string.ascii_letters
+    for x in range(0, size, part_size):
+        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
+        s = ''
+        left = size - x
+        this_part_size = min(left, part_size)
+        for y in range(this_part_size // chunk):
+            s = s + strpart
+        if this_part_size > len(s):
+            s = s + strpart[0:this_part_size - len(s)]
+        yield s
+        if x == size:
+            return
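+
+# Usage sketch: the yielded chunks concatenate to exactly `size` characters.
+#   data = ''.join(generate_random(10 * 1024 * 1024))
+#   assert len(data) == 10 * 1024 * 1024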
+
+def _multipart_upload(bucket, s3_key_name, size, part_size=5*1024*1024, do_list=None, headers=None, metadata=None, storage_class=None, resend_parts=[]):
+    """
+    Generate a multipart upload for random data of the specified size.
+    If requested, also list the in-progress multipart uploads.
+    Return the upload descriptor and the data that was sent.
+    """
+
+    if storage_class is not None:
+        if not headers:
+            headers = {}
+        headers['X-Amz-Storage-Class'] = storage_class
+
+    upload = bucket.initiate_multipart_upload(s3_key_name, headers=headers, metadata=metadata)
+    s = ''
+    for i, part in enumerate(generate_random(size, part_size)):
+        s += part
+        transfer_part(bucket, upload.id, upload.key_name, i, part, headers)
+        if i in resend_parts:
+            transfer_part(bucket, upload.id, upload.key_name, i, part, headers)
+
+    if do_list is not None:
+        l = bucket.list_multipart_uploads()
+        l = list(l)
+
+    return (upload, s)
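+
+# Example (hedged): upload 30 MiB in the default 5 MiB parts, then finish it.
+#   (upload, data) = _multipart_upload(bucket, 'mymultipart', 30 * 1024 * 1024)
+#   upload.complete_upload()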
+
+def _populate_key(bucket, keyname, size=7*1024*1024, storage_class=None):
+    if bucket is None:
+        bucket = get_new_bucket()
+    key = bucket.new_key(keyname)
+    if storage_class:
+        key.storage_class = storage_class
+    data_str = str(next(generate_random(size, size)))
+    data = StringIO(data_str)
+    key.set_contents_from_file(fp=data)
+    return (key, data_str)
+
+def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
+    return ''.join(random.choice(chars) for _ in range(size))
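+
+# e.g. gen_rand_string(8) might return 'QX3M0ZP1' (uppercase letters and digits).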
+
+def verify_object(bucket, k, data=None, storage_class=None):
+    if storage_class:
+        eq(k.storage_class, storage_class)
+
+    if data:
+        read_data = k.get_contents_as_string()
+
+        equal = data == read_data # avoid spamming log if data not equal
+        eq(equal, True)
+
+def copy_object_storage_class(src_bucket, src_key, dest_bucket, dest_key, storage_class):
+    query_args = None
+
+    if dest_key.version_id:
+        # note: assign to query_args so the versionId is actually sent
+        query_args = 'versionId={v}'.format(v=dest_key.version_id)
+
+    headers = {}
+    headers['X-Amz-Copy-Source'] = '/{bucket}/{object}'.format(bucket=src_bucket.name, object=src_key.name)
+    if src_key.version_id:
+        headers['X-Amz-Copy-Source-Version-Id'] = src_key.version_id
+    headers['X-Amz-Storage-Class'] = storage_class
+
+    res = dest_bucket.connection.make_request('PUT', dest_bucket.name, dest_key.name,
+            query_args=query_args, headers=headers)
+    eq(res.status, 200)
+
+def _populate_multipart_key(bucket, kname, size, storage_class=None):
+    (upload, data) = _multipart_upload(bucket, kname, size, storage_class=storage_class)
+    upload.complete_upload()
+
+    k = bucket.get_key(kname)
+
+    return (k, data)
+
+# Create a lifecycle config. Either days (int) and prefix (string) are given, or rules.
+# Rules is a list of dicts; each has 'prefix' and 'status' keys, plus optional 'days', 'transition', and 'id'.
+def create_lifecycle(days=None, prefix='test/', rules=None):
+    lifecycle = boto.s3.lifecycle.Lifecycle()
+    if rules is None:
+        expiration = boto.s3.lifecycle.Expiration(days=days)
+        rule = boto.s3.lifecycle.Rule(id=prefix, prefix=prefix, status='Enabled',
+                                      expiration=expiration)
+        lifecycle.append(rule)
+    else:
+        for rule in rules:
+            expiration = None
+            transition = None
+            try:
+                expiration = boto.s3.lifecycle.Expiration(days=rule['days'])
+            except KeyError:
+                pass
+
+            try:
+                transition = rule['transition']
+            except KeyError:
+                pass
+
+            _id = rule.get('id', None)
+            rule = boto.s3.lifecycle.Rule(id=_id, prefix=rule['prefix'],
+                                          status=rule['status'], expiration=expiration, transition=transition)
+            lifecycle.append(rule)
+    return lifecycle
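+
+# Illustrative `rules` argument (keys per the comment above; 'transition' and
+# 'id' are optional):
+#   [{'days': 1, 'prefix': 'expire/', 'status': 'Enabled'},
+#    {'days': 30, 'prefix': 'archive/', 'status': 'Enabled'}]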
+
+def set_lifecycle(rules=None):
+    bucket = get_new_bucket()
+    lifecycle = create_lifecycle(rules=rules)
+    bucket.configure_lifecycle(lifecycle)
+    return bucket
+
+def configured_storage_classes():
+    sc = [ 'STANDARD' ]
+
+    if 'storage_classes' in config['main']:
+        extra_sc = re.split(r'\W+', config['main']['storage_classes'])
+
+        for item in extra_sc:
+            if item != 'STANDARD':
+                sc.append(item)
+
+    return sc
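+
+# The entry parsed above comes from the test configuration; a delimited list,
+# e.g. (class names illustrative):
+#   storage_classes = LUKEWARM, FROZEN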
+
+def lc_transition(days=None, date=None, storage_class=None):
+    return boto.s3.lifecycle.Transition(days=days, date=date, storage_class=storage_class)
+
+def lc_transitions(transitions=None):
+    result = boto.s3.lifecycle.Transitions()
+    for t in transitions:
+        result.add_transition(days=t.days, date=t.date, storage_class=t.storage_class)
+
+    return result
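+
+# Sketch: build a two-step transition set (storage class names are assumptions
+# taken from your configuration):
+#   lc_transitions([lc_transition(days=30, storage_class='LUKEWARM'),
+#                   lc_transition(days=60, storage_class='FROZEN')])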
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='test create object with storage class')
+@attr('storage_class')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_object_storage_class():
+    sc = configured_storage_classes()
+    if len(sc) < 2:
+        raise SkipTest
+
+    bucket = get_new_bucket()
+
+    for storage_class in sc:
+        kname = 'foo-' + storage_class
+        k, data = _populate_key(bucket, kname, size=9*1024*1024, storage_class=storage_class)
+
+        verify_object(bucket, k, data, storage_class)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='test create multipart object with storage class')
+@attr('storage_class')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_object_storage_class_multipart():
+    sc = configured_storage_classes()
+    if len(sc) < 2:
+        raise SkipTest
+
+    bucket = get_new_bucket()
+    size = 11 * 1024 * 1024
+
+    for storage_class in sc:
+        key = "mymultipart-" + storage_class
+        (upload, data) = _multipart_upload(bucket, key, size, storage_class=storage_class)
+        upload.complete_upload()
+        key2 = bucket.get_key(key)
+        eq(key2.size, size)
+        eq(key2.storage_class, storage_class)
+
+def _do_test_object_modify_storage_class(obj_write_func, size):
+    sc = configured_storage_classes()
+    if len(sc) < 2:
+        raise SkipTest
+
+    bucket = get_new_bucket()
+
+    for storage_class in sc:
+        kname = 'foo-' + storage_class
+        k, data = obj_write_func(bucket, kname, size, storage_class=storage_class)
+
+        verify_object(bucket, k, data, storage_class)
+
+        for new_storage_class in sc:
+            if new_storage_class == storage_class:
+                continue
+
+            copy_object_storage_class(bucket, k, bucket, k, new_storage_class)
+            verify_object(bucket, k, data, storage_class)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='test changing objects storage class')
+@attr('storage_class')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_object_modify_storage_class():
+    _do_test_object_modify_storage_class(_populate_key, size=9*1024*1024)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='test changing objects storage class')
+@attr('storage_class')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_object_modify_storage_class_multipart():
+    _do_test_object_modify_storage_class(_populate_multipart_key, size=11*1024*1024)
+
+def _do_test_object_storage_class_copy(obj_write_func, size):
+    sc = configured_storage_classes()
+    if len(sc) < 2:
+        raise SkipTest
+
+    src_bucket = get_new_bucket()
+    dest_bucket = get_new_bucket()
+    kname = 'foo'
+
+    src_key, data = obj_write_func(src_bucket, kname, size)
+    verify_object(src_bucket, src_key, data)
+
+    for new_storage_class in sc:
+        if new_storage_class == src_key.storage_class:
+            continue
+
+        dest_key = dest_bucket.get_key('foo-' + new_storage_class, validate=False)
+
+        copy_object_storage_class(src_bucket, src_key, dest_bucket, dest_key, new_storage_class)
+        verify_object(dest_bucket, dest_key, data, new_storage_class)
+
+@attr(resource='object')
+@attr(method='copy')
+@attr(operation='test copy object to object with different storage class')
+@attr('storage_class')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_object_storage_class_copy():
+    _do_test_object_storage_class_copy(_populate_key, size=9*1024*1024)
+
+@attr(resource='object')
+@attr(method='copy')
+@attr(operation='test copy multipart object to object with different storage class')
+@attr('storage_class')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_object_storage_class_copy_multipart():
+    _do_test_object_storage_class_copy(_populate_multipart_key, size=9*1024*1024)
+
+class FakeFile(object):
+    """
+    file that simulates seek, tell, and current character
+    """
+    def __init__(self, char='A', interrupt=None):
+        self.offset = 0
+        self.char = bytes(char, 'utf-8')
+        self.interrupt = interrupt
+
+    def seek(self, offset, whence=os.SEEK_SET):
+        if whence == os.SEEK_SET:
+            self.offset = offset
+        elif whence == os.SEEK_END:
+            self.offset = self.size + offset
+        elif whence == os.SEEK_CUR:
+            self.offset += offset
+
+    def tell(self):
+        return self.offset
+
+class FakeWriteFile(FakeFile):
+    """
+    file that simulates interruptible reads of constant data
+    """
+    def __init__(self, size, char='A', interrupt=None):
+        FakeFile.__init__(self, char, interrupt)
+        self.size = size
+
+    def read(self, size=-1):
+        if size < 0:
+            size = self.size - self.offset
+        count = min(size, self.size - self.offset)
+        self.offset += count
+
+        # Sneaky! do stuff before we return (the last time)
+        if self.interrupt is not None and self.offset == self.size and count > 0:
+            self.interrupt()
+
+        return self.char*count
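+
+# Sketch (names illustrative): a 1 MiB stream of 'A's whose final read fires a
+# callback, e.g. to race a second writer or delete the bucket mid-upload:
+#   fp = FakeWriteFile(1024*1024, 'A', interrupt=lambda: bucket.delete())
+#   key.set_contents_from_file(fp)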
+
+class FakeFileVerifier(object):
+    """
+    file that verifies expected data has been written
+    """
+    def __init__(self, char=None):
+        self.char = char
+        self.size = 0
+
+    def write(self, data):
+        size = len(data)
+        if self.char is None:
+            self.char = data[0]
+        self.size += size
+        eq(data.decode(), self.char*size)
+
+def _verify_atomic_key_data(key, size=-1, char=None):
+    """
+    Make sure file is of the expected size and (simulated) content
+    """
+    fp_verify = FakeFileVerifier(char)
+    key.get_contents_to_file(fp_verify)
+    if size >= 0:
+        eq(fp_verify.size, size)
+
+def _test_atomic_dual_conditional_write(file_size):
+    """
+    Create an object with two sessions writing different contents;
+    confirm that the stored data is entirely one or the other.
+    """
+    bucket = get_new_bucket()
+    objname = 'testobj'
+    key = bucket.new_key(objname)
+
+    fp_a = FakeWriteFile(file_size, 'A')
+    key.set_contents_from_file(fp_a)
+    _verify_atomic_key_data(key, file_size, 'A')
+    etag_fp_a = key.etag.replace('"', '').strip()
+
+    # get a second key object (for the same key)
+    # so both can be writing without interfering
+    key2 = bucket.new_key(objname)
+
+    # write <file_size> file of C's
+    # but before we're done, try to write all B's
+    fp_b = FakeWriteFile(file_size, 'B')
+    fp_c = FakeWriteFile(file_size, 'C',
+        lambda: key2.set_contents_from_file(fp_b, rewind=True, headers={'If-Match': etag_fp_a})
+        )
+    # key.set_contents_from_file(fp_c, headers={'If-Match': etag_fp_a})
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_c,
+                      headers={'If-Match': etag_fp_a})
+    eq(e.status, 412)
+    eq(e.reason, 'Precondition Failed')
+    eq(e.error_code, 'PreconditionFailed')
+
+    # verify the file
+    _verify_atomic_key_data(key, file_size, 'B')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write one or the other')
+@attr(assertion='1MB successful')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_atomic_dual_conditional_write_1mb():
+    _test_atomic_dual_conditional_write(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write file in deleted bucket')
+@attr(assertion='fail 404')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_atomic_write_bucket_gone():
+    bucket = get_new_bucket()
+
+    def remove_bucket():
+        bucket.delete()
+
+    # create file of A's but delete the bucket it's in before we finish writing
+    # all of them
+    key = bucket.new_key('foo')
+    fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
+    e = assert_raises(boto.exception.S3ResponseError, key.set_contents_from_file, fp_a)
+    eq(e.status, 404)
+    eq(e.reason, 'Not Found')
+    eq(e.error_code, 'NoSuchBucket')
+
+def _multipart_upload_enc(bucket, s3_key_name, size, part_size=5*1024*1024,
+                          do_list=None, init_headers=None, part_headers=None,
+                          metadata=None, resend_parts=[]):
+    """
+    Generate a multipart upload for random data of the specified size.
+    If requested, also list the in-progress multipart uploads.
+    Return the upload descriptor and the data that was sent.
+    """
+    upload = bucket.initiate_multipart_upload(s3_key_name, headers=init_headers, metadata=metadata)
+    s = ''
+    for i, part in enumerate(generate_random(size, part_size)):
+        s += part
+        transfer_part(bucket, upload.id, upload.key_name, i, part, part_headers)
+        if i in resend_parts:
+            transfer_part(bucket, upload.id, upload.key_name, i, part, part_headers)
+
+    if do_list is not None:
+        l = bucket.list_multipart_uploads()
+        l = list(l)
+
+    return (upload, s)
+
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multipart upload with bad key for uploading chunks')
+@attr(assertion='fails 400')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encryption_sse_c_multipart_invalid_chunks_1():
+    bucket = get_new_bucket()
+    key = "multipart_enc"
+    content_type = 'text/bla'
+    objlen = 30 * 1024 * 1024
+    init_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
+        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
+    }
+    e = assert_raises(boto.exception.S3ResponseError,
+                      _multipart_upload_enc, bucket, key, objlen,
+                      init_headers=init_headers, part_headers=part_headers,
+                      metadata={'foo': 'bar'})
+    eq(e.status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multipart upload with bad md5 for chunks')
+@attr(assertion='fails 400')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encryption_sse_c_multipart_invalid_chunks_2():
+    bucket = get_new_bucket()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    init_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
+    }
+    e = assert_raises(boto.exception.S3ResponseError,
+                      _multipart_upload_enc, bucket, key, objlen,
+                      init_headers=init_headers, part_headers=part_headers,
+                      metadata={'foo': 'bar'})
+    eq(e.status, 400)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
+@attr(assertion='succeeds')
+@attr('fails_with_subdomain')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_different_tenant():
+    bucket = get_new_bucket()
+    key = bucket.new_key('asdf')
+    key.set_contents_from_string('asdf')
+    l = bucket.list()
+    resource1 = "arn:aws:s3::*:" + bucket.name
+    resource2 = "arn:aws:s3::*:" + bucket.name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+    bucket.set_policy(policy_document)
+
+    new_conn = boto.s3.connection.S3Connection(
+        aws_access_key_id=s3['tenant'].aws_access_key_id,
+        aws_secret_access_key=s3['tenant'].aws_secret_access_key,
+        is_secure=s3['tenant'].is_secure,
+        port=s3['tenant'].port,
+        host=s3['tenant'].host,
+        calling_format=s3['tenant'].calling_format,
+        )
+    bucket_name = ":" + bucket.name
+    b = new_conn.get_bucket(bucket_name)
+    b.get_all_keys()
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put condition operator ending with IfExists')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_set_condition_operator_end_with_IfExists():
+    bucket = _create_keys(keys=['foo'])
+    policy = '''{
+      "Version":"2012-10-17",
+      "Statement": [{
+        "Sid": "Allow Public Access to All Objects",
+        "Effect": "Allow",
+        "Principal": "*",
+        "Action": "s3:GetObject",
+        "Condition": {
+                    "StringLikeIfExists": {
+                        "aws:Referer": "http://www.example.com/*"
+                    }
+                },
+        "Resource": "arn:aws:s3:::%s/*"
+      }
+     ]
+    }''' % bucket.name
+    eq(bucket.set_policy(policy), True)
+    res = _make_request('GET', bucket.name, bucket.get_key("foo"),
+                        request_headers={'referer': 'http://www.example.com/'})
+    eq(res.status, 200)
+    res = _make_request('GET', bucket.name, bucket.get_key("foo"),
+                        request_headers={'referer': 'http://www.example.com/index.html'})
+    eq(res.status, 200)
+    res = _make_request('GET', bucket.name, bucket.get_key("foo"))
+    eq(res.status, 200)
+    res = _make_request('GET', bucket.name, bucket.get_key("foo"),
+                        request_headers={'referer': 'http://example.com'})
+    eq(res.status, 403)
+
+def _make_arn_resource(path="*"):
+    return "arn:aws:s3:::{}".format(path)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='put obj with RequestObjectTag')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_put_obj_request_obj_tag():
+
+    bucket = get_new_bucket()
+
+    tag_conditional = {"StringEquals": {
+        "s3:RequestObjectTag/security" : "public"
+    }}
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket.name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
+    policy_document = p.add_statement(s1).to_json()
+
+    bucket.set_policy(policy_document)
+
+    new_conn = _get_alt_connection()
+    bucket1 = new_conn.get_bucket(bucket.name, validate=False)
+    key1_str = 'testobj'
+    key1 = bucket1.new_key(key1_str)
+    check_access_denied(key1.set_contents_from_string, key1_str)
+
+    headers = {"x-amz-tagging" : "security=public"}
+    key1.set_contents_from_string(key1_str, headers=headers)
+
diff --git a/s3tests/functional/test_s3_website.py b/s3tests/functional/test_s3_website.py
new file mode 100644 (file)
index 0000000..3696004
--- /dev/null
@@ -0,0 +1,1249 @@
+
+import sys
+import collections
+import nose
+import string
+import random
+from pprint import pprint
+import time
+import boto.exception
+import socket
+
+from urllib.parse import urlparse
+
+from nose.tools import eq_ as eq, ok_ as ok
+from nose.plugins.attrib import attr
+from nose.tools import timed
+from nose.plugins.skip import SkipTest
+
+from .. import common
+
+from . import (
+    get_new_bucket,
+    get_new_bucket_name,
+    s3,
+    config,
+    _make_raw_request,
+    choose_bucket_prefix,
+    )
+
+IGNORE_FIELD = 'IGNORETHIS'
+
+SLEEP_INTERVAL = 0.01
+SLEEP_MAX = 2.0
+
+WEBSITE_CONFIGS_XMLFRAG = {
+        'IndexDoc': '<IndexDocument><Suffix>${IndexDocument_Suffix}</Suffix></IndexDocument>${RoutingRules}',
+        'IndexDocErrorDoc': '<IndexDocument><Suffix>${IndexDocument_Suffix}</Suffix></IndexDocument><ErrorDocument><Key>${ErrorDocument_Key}</Key></ErrorDocument>${RoutingRules}',
+        'RedirectAll': '<RedirectAllRequestsTo><HostName>${RedirectAllRequestsTo_HostName}</HostName></RedirectAllRequestsTo>${RoutingRules}',
+        'RedirectAll+Protocol': '<RedirectAllRequestsTo><HostName>${RedirectAllRequestsTo_HostName}</HostName><Protocol>${RedirectAllRequestsTo_Protocol}</Protocol></RedirectAllRequestsTo>${RoutingRules}',
+        }
+INDEXDOC_TEMPLATE = '<html><h1>IndexDoc</h1><body>{random}</body></html>'
+ERRORDOC_TEMPLATE = '<html><h1>ErrorDoc</h1><body>{random}</body></html>'
+
+CAN_WEBSITE = None
+
+@attr('fails_on_dbstore')
+def check_can_test_website():
+    global CAN_WEBSITE
+    # This is a bit expensive, so we cache this
+    if CAN_WEBSITE is None:
+        bucket = get_new_bucket()
+        try:
+            wsconf = bucket.get_website_configuration()
+            CAN_WEBSITE = True
+        except boto.exception.S3ResponseError as e:
+            if e.status == 404 and e.reason == 'Not Found' and e.error_code in ['NoSuchWebsiteConfiguration', 'NoSuchKey']:
+                CAN_WEBSITE = True
+            elif e.status == 405 and e.reason == 'Method Not Allowed' and e.error_code == 'MethodNotAllowed':
+                # rgw_enable_static_website is false
+                CAN_WEBSITE = False
+            elif e.status == 403 and e.reason == 'Forbidden' and e.error_code == 'SignatureDoesNotMatch':
+                # This is older versions that do not support the website code
+                CAN_WEBSITE = False
+            elif e.status == 501 and e.error_code == 'NotImplemented':
+                CAN_WEBSITE = False
+            else:
+                raise RuntimeError("Unknown response in checking if WebsiteConf is supported", e)
+        finally:
+            bucket.delete()
+
+    if CAN_WEBSITE is True:
+        return True
+    elif CAN_WEBSITE is False:
+        raise SkipTest
+    else:
+        raise RuntimeError("Unknown cached response in checking if WebsiteConf is supported")
+
+
+def make_website_config(xml_fragment):
+    """
+    Take the tedious stuff out of the config
+    """
+    return '<?xml version="1.0" encoding="UTF-8"?><WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' + xml_fragment + '</WebsiteConfiguration>'
+
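+# For instance, wrapping the substituted 'IndexDoc' fragment produces
+# (whitespace compressed; the suffix is randomly generated):
+#   <?xml version="1.0" encoding="UTF-8"?><WebsiteConfiguration
+#    xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><IndexDocument>
+#    <Suffix>index-abcd1234.html</Suffix></IndexDocument></WebsiteConfiguration>
+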
+def get_website_url(**kwargs):
+    """
+    Return the URL to a website page
+    """
+    proto, bucket, hostname, path = 'http', None, None, '/'
+
+    if 'proto' in kwargs:
+        proto = kwargs['proto']
+    if 'bucket' in kwargs:
+        bucket = kwargs['bucket']
+    if 'hostname' in kwargs:
+        hostname = kwargs['hostname']
+    if 'path' in kwargs:
+        path = kwargs['path']
+
+    if hostname is None and bucket is None:
+        return '/' + path.lstrip('/')
+
+    domain = config['main']['host']
+    if 's3website_domain' in config['main']:
+        domain = config['main']['s3website_domain']
+    elif 's3website_domain' in config['alt']:
+        domain = config['alt']['s3website_domain']
+    if hostname is None and bucket is not None:
+        hostname = '%s.%s' % (bucket, domain)
+    path = path.lstrip('/')
+    return "%s://%s/%s" % (proto, hostname, path)
+
+def _test_website_populate_fragment(xml_fragment, fields):
+    for k in ['RoutingRules']:
+        if k in fields and len(fields[k]) > 0:
+            fields[k] = '<%s>%s</%s>' % (k, fields[k], k)
+    f = {
+          'IndexDocument_Suffix': choose_bucket_prefix(template='index-{random}.html', max_len=32),
+          'ErrorDocument_Key': choose_bucket_prefix(template='error-{random}.html', max_len=32),
+          'RedirectAllRequestsTo_HostName': choose_bucket_prefix(template='{random}.{random}.com', max_len=32),
+          'RoutingRules': ''
+        }
+    f.update(fields)
+    xml_fragment = string.Template(xml_fragment).safe_substitute(**f)
+    return xml_fragment, f
+
+def _test_website_prep(bucket, xml_template, hardcoded_fields={}, expect_fail=None):
+    xml_fragment, f = _test_website_populate_fragment(xml_template, hardcoded_fields)
+    f['WebsiteConfiguration'] = ''
+    if not xml_template:
+        bucket.delete_website_configuration()
+        return f
+
+    config_xmlnew = make_website_config(xml_fragment)
+
+    config_xmlold = ''
+    try:
+        config_xmlold = common.normalize_xml(bucket.get_website_configuration_xml(), pretty_print=True)
+    except boto.exception.S3ResponseError as e:
+        if e.status == 404 \
+            and ('NoSuchWebsiteConfiguration' in e.body or 'NoSuchWebsiteConfiguration' in e.code or
+                    'NoSuchKey' in e.body or 'NoSuchKey' in e.code):
+            pass
+        else:
+            raise e
+
+    try:
+        bucket.set_website_configuration_xml(common.trim_xml(config_xmlnew))
+        config_xmlnew = common.normalize_xml(config_xmlnew, pretty_print=True)
+    except boto.exception.S3ResponseError as e:
+        if expect_fail is not None:
+            if isinstance(expect_fail, dict):
+                pass
+            elif isinstance(expect_fail, str):
+                pass
+        raise e
+
+    # TODO: in some cases, it takes non-zero time for the config to be applied by AmazonS3
+    # We should figure out how to poll for changes better
+    # WARNING: eu-west-1 as of 2015/06/22 was taking at least 4 seconds to propagate website configs, especially when you cycle between non-null configs
+    time.sleep(0.1)
+    config_xmlcmp = common.normalize_xml(bucket.get_website_configuration_xml(), pretty_print=True)
+
+    #if config_xmlold is not None:
+    #    print('old',config_xmlold.replace("\n",''))
+    #if config_xmlcmp is not None:
+    #    print('cmp',config_xmlcmp.replace("\n",''))
+    #if config_xmlnew is not None:
+    #    print('new',config_xmlnew.replace("\n",''))
+    # Cleanup for our validation
+    common.assert_xml_equal(config_xmlcmp, config_xmlnew)
+    #print("config_xmlcmp\n", config_xmlcmp)
+    #eq (config_xmlnew, config_xmlcmp)
+    f['WebsiteConfiguration'] = config_xmlcmp
+    return f
+
+def __website_expected_response_status(res, status, reason):
+    if not isinstance(status, collections.abc.Container):
+        status = set([status])
+    if not isinstance(reason, collections.abc.Container):
+        reason = set([reason])
+
+    if status is not IGNORE_FIELD:
+        ok(res.status in status, 'HTTP code was %s should be %s' % (res.status, status))
+    if reason is not IGNORE_FIELD:
+        ok(res.reason in reason, 'HTTP reason was %s, should be %s' % (res.reason, reason))
+
+def _website_expected_default_html(**kwargs):
+    fields = []
+    for k in list(kwargs.keys()):
+        # AmazonS3 seems to be inconsistent, some HTML errors include BucketName, but others do not.
+        if k == 'BucketName':
+            continue
+
+        v = kwargs[k]
+        if isinstance(v, str):
+            v = [v]
+        elif not isinstance(v, collections.abc.Container):
+            v = [v]
+        for v2 in v:
+            s = '<li>%s: %s</li>' % (k,v2)
+            fields.append(s)
+    return fields
+
+def _website_expected_error_response(res, bucket_name, status, reason, code, content=None, body=None):
+    if body is None:
+        body = res.read()
+        print(body)
+    __website_expected_response_status(res, status, reason)
+
+    # Argh, AmazonS3 is really inconsistent, so we have a conditional test!
+    # This is most visible if you have an ErrorDoc present
+    errorcode = res.getheader('x-amz-error-code', None)
+    if errorcode is not None:
+        if code is not IGNORE_FIELD:
+            eq(errorcode, code)
+
+    if not isinstance(content, collections.abc.Container):
+        content = set([content])
+    for f in content:
+        if f is not IGNORE_FIELD and f is not None:
+            f = bytes(f, 'utf-8')
+            ok(f in body, 'HTML should contain "%s"' % (f, ))
+
+def _website_expected_redirect_response(res, status, reason, new_url):
+    body = res.read()
+    print(body)
+    __website_expected_response_status(res, status, reason)
+    loc = res.getheader('Location', None)
+    eq(loc, new_url, 'Location header was "%s", expected "%s"' % (loc, new_url,))
+    ok(len(body) == 0, 'Body of a redirect should be empty')
+
+def _website_request(bucket_name, path, connect_hostname=None, method='GET', timeout=None):
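+    # issue a raw HTTP request against the website endpoint, optionally
+    # overriding the connect host while keeping the virtual-host Host header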
+    url = get_website_url(proto='http', bucket=bucket_name, path=path)
+    print("url", url)
+    o = urlparse(url)
+    if connect_hostname is None:
+        connect_hostname = o.hostname
+    path = o.path + ('?' + o.query if o.query else '')
+    request_headers={}
+    request_headers['Host'] = o.hostname
+    request_headers['Accept'] = '*/*'
+    print('Request: {method} {path}\n{headers}'.format(method=method, path=path, headers=''.join([t[0]+':'+t[1]+"\n" for t in list(request_headers.items())])))
+    res = _make_raw_request(connect_hostname, config.main.port, method, path, request_headers=request_headers, secure=False, timeout=timeout)
+    for (k,v) in res.getheaders():
+        print(k,v)
+    return res
+
+# ---------- Non-existent buckets via the website endpoint
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-existent bucket via website endpoint should give NoSuchBucket, exposing security risk')
+@attr('s3website')
+@attr('fails_on_rgw')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_nonexistent_bucket_s3():
+    bucket_name = get_new_bucket_name()
+    res = _website_request(bucket_name, '')
+    _website_expected_error_response(res, bucket_name, 404, 'Not Found', 'NoSuchBucket', content=_website_expected_default_html(Code='NoSuchBucket'))
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+#@attr(assertion='non-existent bucket via website endpoint should give Forbidden, keeping bucket identity secure')
+@attr(assertion='non-existent bucket via website endpoint should give NoSuchBucket')
+@attr('s3website')
+@attr('fails_on_s3')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_nonexistent_bucket_rgw():
+    bucket_name = get_new_bucket_name()
+    res = _website_request(bucket_name, '')
+    #_website_expected_error_response(res, bucket_name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+    _website_expected_error_response(res, bucket_name, 404, 'Not Found', 'NoSuchBucket', content=_website_expected_default_html(Code='NoSuchBucket'))
+
+#------------- IndexDocument only, successes
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty public buckets via s3website return page for /, where page is public')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@timed(10)
+def test_website_public_bucket_list_public_index():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.make_public()
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.make_public()
+    #time.sleep(1)
+    while bucket.get_key(f['IndexDocument_Suffix']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    body = res.read()
+    print(body)
+    indexstring = bytes(indexstring, 'utf-8')
+    eq(body, indexstring) # default content should match index.html set content
+    __website_expected_response_status(res, 200, 'OK')
+    indexhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty private buckets via s3website return page for /, where page is private')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_public_index():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.set_canned_acl('private')
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.make_public()
+    #time.sleep(1)
+    while bucket.get_key(f['IndexDocument_Suffix']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    __website_expected_response_status(res, 200, 'OK')
+    body = res.read()
+    print(body)
+    indexstring = bytes(indexstring, 'utf-8')
+    eq(body, indexstring, 'default content should match index.html set content')
+    indexhtml.delete()
+    bucket.delete()
+
+
+# ---------- IndexDocument only, failures
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty private buckets via s3website return a 403 for /')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_empty():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.set_canned_acl('private')
+    # TODO: wait for sync
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty public buckets via s3website return a 404 for /')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_empty():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.make_public()
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey'))
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty public buckets via s3website return page for /, where page is private')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_private_index():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.make_public()
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    #time.sleep(1)
+    while bucket.get_key(f['IndexDocument_Suffix']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+    indexhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty private buckets via s3website return page for /, where page is private')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_private_index():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.set_canned_acl('private')
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    #time.sleep(1)
+    while bucket.get_key(f['IndexDocument_Suffix']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+
+    indexhtml.delete()
+    bucket.delete()
+
+# ---------- IndexDocument & ErrorDocument, failures due to errordoc assigned but missing
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty private buckets via s3website return a 403 for /, missing errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_empty_missingerrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.set_canned_acl('private')
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty public buckets via s3website return a 404 for /, missing errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_empty_missingerrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey')
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty public buckets via s3website return page for /, where page is private, missing errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_private_index_missingerrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    #time.sleep(1)
+    while bucket.get_key(f['IndexDocument_Suffix']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+
+    indexhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty private buckets via s3website return page for /, where page is private, missing errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_private_index_missingerrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.set_canned_acl('private')
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    #time.sleep(1)
+    while bucket.get_key(f['IndexDocument_Suffix']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+
+    indexhtml.delete()
+    bucket.delete()
+
+# ---------- IndexDocument & ErrorDocument, failures due to errordoc assigned but not accessible
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty private buckets via s3website return a 403 for /, blocked errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_empty_blockederrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.set_canned_acl('private')
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('private')
+    #time.sleep(1)
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    body = res.read()
+    print(body)
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
+    errorstring = bytes(errorstring, 'utf-8')
+    ok(errorstring not in body, 'error content should NOT match error.html set content')
+
+    errorhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='check if there is an invalid payload after serving error doc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_public_errordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('public-read')
+
+    url = get_website_url(proto='http', bucket=bucket.name, path='')
+    o = urlparse(url)
+    host = o.hostname
+    port = s3.main.port
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect((host, port))
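+    # use a raw socket so we can observe any stray bytes the server leaves
+    # on the connection after the error document is served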
+
+    request = "GET / HTTP/1.1\r\nHost:%s.%s:%s\r\n\r\n" % (bucket.name, host, port)
+    sock.send(request.encode())
+
+    # receive the response header (assumes header and body arrive in
+    # separate recv() calls, which holds for these small responses)
+    resp = sock.recv(4096)
+    print(resp)
+
+    # receive the body
+    resp = sock.recv(4096)
+    print('payload length=%d' % len(resp))
+    print(resp)
+
+    # check that no additional payload is left on the socket
+    resp_len = 0
+    sock.settimeout(2)
+    try:
+        resp = sock.recv(4096)
+        resp_len = len(resp)
+        print('invalid payload length=%d' % resp_len)
+        print(resp)
+    except socket.timeout:
+        print('no invalid payload')
+
+    ok(resp_len == 0, 'unexpected extra payload after the error document')
+
+    errorhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty public buckets via s3website return a 404 for /, blocked errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_empty_blockederrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('private')
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    body = res.read()
+    print(body)
+    _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=_website_expected_default_html(Code='NoSuchKey'), body=body)
+    errorstring = bytes(errorstring, 'utf-8')
+    ok(errorstring not in body, 'error content should NOT match error.html set content')
+
+    errorhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty public buckets via s3website return page for /, where page is private, blocked errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_private_index_blockederrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('private')
+    #time.sleep(1)
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    body = res.read()
+    print(body)
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
+    errorstring = bytes(errorstring, 'utf-8')
+    ok(errorstring not in body, 'error content should NOT match error.html set content')
+
+    indexhtml.delete()
+    errorhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty private buckets via s3website return page for /, where page is private, blocked errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_private_index_blockederrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.set_canned_acl('private')
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('private')
+    #time.sleep(1)
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    body = res.read()
+    print(body)
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'), body=body)
+    errorstring = bytes(errorstring, 'utf-8')
+    ok(errorstring not in body, 'error content should NOT match error.html set content')
+
+    indexhtml.delete()
+    errorhtml.delete()
+    bucket.delete()
+
+# ---------- IndexDocument & ErrorDocument, failures with errordoc available
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty private buckets via s3website return a 403 for /, good errordoc')
+@attr('s3website')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+@attr('fails_on_dbstore')
+def test_website_private_bucket_list_empty_gooderrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.set_canned_acl('private')
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring, policy='public-read')
+    #time.sleep(1)
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=[errorstring])
+
+    errorhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty public buckets via s3website return a 404 for /, good errordoc')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_empty_gooderrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('public-read')
+    #time.sleep(1)
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchKey', content=[errorstring])
+
+    errorhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty public buckets via s3website return page for /, where page is private')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_public_bucket_list_private_index_gooderrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.make_public()
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('public-read')
+    #time.sleep(1)
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=[errorstring])
+
+    indexhtml.delete()
+    errorhtml.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-empty private buckets via s3website return page for /, where page is private')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_private_bucket_list_private_index_gooderrordoc():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+    bucket.set_canned_acl('private')
+    indexhtml = bucket.new_key(f['IndexDocument_Suffix'])
+    indexstring = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=256)
+    indexhtml.set_contents_from_string(indexstring)
+    indexhtml.set_canned_acl('private')
+    errorhtml = bucket.new_key(f['ErrorDocument_Key'])
+    errorstring = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=256)
+    errorhtml.set_contents_from_string(errorstring)
+    errorhtml.set_canned_acl('public-read')
+    #time.sleep(1)
+    while bucket.get_key(f['ErrorDocument_Key']) is None:
+        time.sleep(SLEEP_INTERVAL)
+
+    res = _website_request(bucket.name, '')
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=[errorstring])
+
+    indexhtml.delete()
+    errorhtml.delete()
+    bucket.delete()
+
+# ------ RedirectAll tests
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='RedirectAllRequestsTo without protocol should redirect via http')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_bucket_private_redirectall_base():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll'])
+    bucket.set_canned_acl('private')
+
+    res = _website_request(bucket.name, '')
+    new_url = 'http://%s/' % f['RedirectAllRequestsTo_HostName']
+    _website_expected_redirect_response(res, 301, ['Moved Permanently'], new_url)
+
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='RedirectAllRequestsTo without protocol should redirect via http, preserving the request path')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_bucket_private_redirectall_path():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll'])
+    bucket.set_canned_acl('private')
+
+    pathfragment = choose_bucket_prefix(template='/{random}', max_len=16)
+
+    res = _website_request(bucket.name, pathfragment)
+    new_url = 'http://%s%s' % (f['RedirectAllRequestsTo_HostName'], pathfragment)
+    _website_expected_redirect_response(res, 301, ['Moved Permanently'], new_url)
+
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='RedirectAllRequestsTo with Protocol=https should redirect via https, preserving the request path')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_bucket_private_redirectall_path_upgrade():
+    bucket = get_new_bucket()
+    x = string.Template(WEBSITE_CONFIGS_XMLFRAG['RedirectAll+Protocol']).safe_substitute(RedirectAllRequestsTo_Protocol='https')
+    f = _test_website_prep(bucket, x)
+    bucket.set_canned_acl('private')
+
+    pathfragment = choose_bucket_prefix(template='/{random}', max_len=16)
+
+    res = _website_request(bucket.name, pathfragment)
+    new_url = 'https://%s%s' % (f['RedirectAllRequestsTo_HostName'], pathfragment)
+    _website_expected_redirect_response(res, 301, ['Moved Permanently'], new_url)
+
+    bucket.delete()
+
+# ------ x-amz redirect tests
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='x-amz-website-redirect-location should not fire without websiteconf')
+@attr('s3website')
+@attr('x-amz-website-redirect-location')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_xredirect_nonwebsite():
+    bucket = get_new_bucket()
+    #f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['RedirectAll'])
+    #bucket.set_canned_acl('private')
+
+    k = bucket.new_key('page')
+    content = 'wrong-content'
+    redirect_dest = '/relative'
+    headers = {'x-amz-website-redirect-location': redirect_dest}
+    k.set_contents_from_string(content, headers=headers, policy='public-read')
+    redirect = k.get_redirect()
+    eq(redirect, redirect_dest)
+
+    res = _website_request(bucket.name, '/page')
+    body = res.read()
+    print(body)
+    expected_content = _website_expected_default_html(Code='NoSuchWebsiteConfiguration', BucketName=bucket.name)
+    # TODO: RGW does not have custom error messages for different 404s yet
+    #expected_content = _website_expected_default_html(Code='NoSuchWebsiteConfiguration', BucketName=bucket.name, Message='The specified bucket does not have a website configuration')
+    print(expected_content)
+    _website_expected_error_response(res, bucket.name, 404, 'Not Found', 'NoSuchWebsiteConfiguration', content=expected_content, body=body)
+
+    k.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='x-amz-website-redirect-location should fire websiteconf, relative path, public key')
+@attr('s3website')
+@attr('x-amz-website-redirect-location')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_xredirect_public_relative():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.make_public()
+
+    k = bucket.new_key('page')
+    content = 'wrong-content'
+    redirect_dest = '/relative'
+    headers = {'x-amz-website-redirect-location': redirect_dest}
+    k.set_contents_from_string(content, headers=headers, policy='public-read')
+    redirect = k.get_redirect()
+    eq(redirect, redirect_dest)
+
+    res = _website_request(bucket.name, '/page')
+    #new_url =  get_website_url(bucket_name=bucket.name, path=redirect_dest)
+    _website_expected_redirect_response(res, 301, ['Moved Permanently'], redirect_dest)
+
+    k.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='x-amz-website-redirect-location should fire websiteconf, absolute, public key')
+@attr('s3website')
+@attr('x-amz-website-redirect-location')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_xredirect_public_abs():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.make_public()
+
+    k = bucket.new_key('page')
+    content = 'wrong-content'
+    redirect_dest = 'http://example.com/foo'
+    headers = {'x-amz-website-redirect-location': redirect_dest}
+    k.set_contents_from_string(content, headers=headers, policy='public-read')
+    redirect = k.get_redirect()
+    eq(redirect, redirect_dest)
+
+    res = _website_request(bucket.name, '/page')
+    new_url = get_website_url(proto='http', hostname='example.com', path='/foo')
+    _website_expected_redirect_response(res, 301, ['Moved Permanently'], new_url)
+
+    k.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='x-amz-website-redirect-location should fire websiteconf, relative path, private key')
+@attr('s3website')
+@attr('x-amz-website-redirect-location')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_xredirect_private_relative():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.make_public()
+
+    k = bucket.new_key('page')
+    content = 'wrong-content'
+    redirect_dest = '/relative'
+    headers = {'x-amz-website-redirect-location': redirect_dest}
+    k.set_contents_from_string(content, headers=headers, policy='private')
+    redirect = k.get_redirect()
+    eq(redirect, redirect_dest)
+
+    res = _website_request(bucket.name, '/page')
+    # We get a 403 because the page is private
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+
+    k.delete()
+    bucket.delete()
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='x-amz-website-redirect-location should fire websiteconf, absolute, private key')
+@attr('s3website')
+@attr('x-amz-website-redirect-location')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_website_xredirect_private_abs():
+    bucket = get_new_bucket()
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDoc'])
+    bucket.make_public()
+
+    k = bucket.new_key('page')
+    content = 'wrong-content'
+    redirect_dest = 'http://example.com/foo'
+    headers = {'x-amz-website-redirect-location': redirect_dest}
+    k.set_contents_from_string(content, headers=headers, policy='private')
+    redirect = k.get_redirect()
+    eq(redirect, redirect_dest)
+
+    res = _website_request(bucket.name, '/page')
+    # We get a 403 because the page is private; no redirect is followed
+    _website_expected_error_response(res, bucket.name, 403, 'Forbidden', 'AccessDenied', content=_website_expected_default_html(Code='AccessDenied'))
+
+    k.delete()
+    bucket.delete()
+
+# ------ RoutingRules tests
+
+# RoutingRules
+ROUTING_RULES = {
+    'empty': '',
+    'AmazonExample1': \
+"""
+    <RoutingRule>
+    <Condition>
+      <KeyPrefixEquals>docs/</KeyPrefixEquals>
+    </Condition>
+    <Redirect>
+      <ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
+    </Redirect>
+    </RoutingRule>
+""",
+    'AmazonExample1+Protocol=https': \
+"""
+    <RoutingRule>
+    <Condition>
+      <KeyPrefixEquals>docs/</KeyPrefixEquals>
+    </Condition>
+    <Redirect>
+      <Protocol>https</Protocol>
+      <ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
+    </Redirect>
+    </RoutingRule>
+""",
+    'AmazonExample1+Protocol=https+Hostname=xyzzy': \
+"""
+    <RoutingRule>
+    <Condition>
+      <KeyPrefixEquals>docs/</KeyPrefixEquals>
+    </Condition>
+    <Redirect>
+      <Protocol>https</Protocol>
+      <HostName>xyzzy</HostName>
+      <ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
+    </Redirect>
+    </RoutingRule>
+""",
+    'AmazonExample1+Protocol=http2': \
+"""
+    <RoutingRule>
+    <Condition>
+      <KeyPrefixEquals>docs/</KeyPrefixEquals>
+    </Condition>
+    <Redirect>
+      <Protocol>http2</Protocol>
+      <ReplaceKeyPrefixWith>documents/</ReplaceKeyPrefixWith>
+    </Redirect>
+    </RoutingRule>
+""",
+   'AmazonExample2': \
+"""
+    <RoutingRule>
+    <Condition>
+       <KeyPrefixEquals>images/</KeyPrefixEquals>
+    </Condition>
+    <Redirect>
+      <ReplaceKeyWith>folderdeleted.html</ReplaceKeyWith>
+    </Redirect>
+    </RoutingRule>
+""",
+   'AmazonExample2+HttpRedirectCode=TMPL': \
+"""
+    <RoutingRule>
+    <Condition>
+       <KeyPrefixEquals>images/</KeyPrefixEquals>
+    </Condition>
+    <Redirect>
+      <HttpRedirectCode>{HttpRedirectCode}</HttpRedirectCode>
+      <ReplaceKeyWith>folderdeleted.html</ReplaceKeyWith>
+    </Redirect>
+    </RoutingRule>
+""",
+   'AmazonExample3': \
+"""
+    <RoutingRule>
+    <Condition>
+      <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
+    </Condition>
+    <Redirect>
+      <HostName>ec2-11-22-333-44.compute-1.amazonaws.com</HostName>
+      <ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
+    </Redirect>
+    </RoutingRule>
+""",
+   'AmazonExample3+KeyPrefixEquals': \
+"""
+    <RoutingRule>
+    <Condition>
+      <KeyPrefixEquals>images/</KeyPrefixEquals>
+      <HttpErrorCodeReturnedEquals>404</HttpErrorCodeReturnedEquals>
+    </Condition>
+    <Redirect>
+      <HostName>ec2-11-22-333-44.compute-1.amazonaws.com</HostName>
+      <ReplaceKeyPrefixWith>report-404/</ReplaceKeyPrefixWith>
+    </Redirect>
+    </RoutingRule>
+""",
+}
+
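+# tag each non-empty rule fragment with its dict key (as an XML comment) so
+# failing configurations are identifiable in debug output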
+for k in list(ROUTING_RULES.keys()):
+    if len(ROUTING_RULES[k]) > 0:
+        ROUTING_RULES[k] = "<!-- %s -->\n%s" % (k, ROUTING_RULES[k])
+
+ROUTING_RULES_TESTS = [
+  dict(xml=dict(RoutingRules=ROUTING_RULES['empty']), url='', location=None, code=200),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['empty']), url='/', location=None, code=200),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['empty']), url='/x', location=None, code=404),
+
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1']), url='/', location=None, code=200),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1']), url='/x', location=None, code=404),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1']), url='/docs/', location=dict(proto='http',bucket='{bucket_name}',path='/documents/'), code=301),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1']), url='/docs/x', location=dict(proto='http',bucket='{bucket_name}',path='/documents/x'), code=301),
+
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https']), url='/', location=None, code=200),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https']), url='/x', location=None, code=404),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https']), url='/docs/', location=dict(proto='https',bucket='{bucket_name}',path='/documents/'), code=301),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https']), url='/docs/x', location=dict(proto='https',bucket='{bucket_name}',path='/documents/x'), code=301),
+
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https+Hostname=xyzzy']), url='/', location=None, code=200),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https+Hostname=xyzzy']), url='/x', location=None, code=404),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https+Hostname=xyzzy']), url='/docs/', location=dict(proto='https',hostname='xyzzy',path='/documents/'), code=301),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=https+Hostname=xyzzy']), url='/docs/x', location=dict(proto='https',hostname='xyzzy',path='/documents/x'), code=301),
+
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample2']), url='/images/', location=dict(proto='http',bucket='{bucket_name}',path='/folderdeleted.html'), code=301),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample2']), url='/images/x', location=dict(proto='http',bucket='{bucket_name}',path='/folderdeleted.html'), code=301),
+
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample3']), url='/x', location=dict(proto='http',hostname='ec2-11-22-333-44.compute-1.amazonaws.com',path='/report-404/x'), code=301),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample3']), url='/images/x', location=dict(proto='http',hostname='ec2-11-22-333-44.compute-1.amazonaws.com',path='/report-404/images/x'), code=301),
+
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample3+KeyPrefixEquals']), url='/x', location=None, code=404),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample3+KeyPrefixEquals']), url='/images/x', location=dict(proto='http',hostname='ec2-11-22-333-44.compute-1.amazonaws.com',path='/report-404/x'), code=301),
+]
+
+ROUTING_ERROR_PROTOCOL = dict(code=400, reason='Bad Request', errorcode='InvalidRequest', bodyregex=r'Invalid protocol, protocol can be http or https. If not defined the protocol will be selected automatically.')
+
+ROUTING_RULES_TESTS_ERRORS = [ # TODO: Unused!
+  # Invalid protocol, protocol can be http or https. If not defined the protocol will be selected automatically.
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=http2']), url='/', location=None, code=400, error=ROUTING_ERROR_PROTOCOL),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=http2']), url='/x', location=None, code=400, error=ROUTING_ERROR_PROTOCOL),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=http2']), url='/docs/', location=None, code=400, error=ROUTING_ERROR_PROTOCOL),
+  dict(xml=dict(RoutingRules=ROUTING_RULES['AmazonExample1+Protocol=http2']), url='/docs/x', location=None, code=400, error=ROUTING_ERROR_PROTOCOL),
+]
+
+VALID_AMZ_REDIRECT = set([301,302,303,304,305,307,308])
+
+# generate a pair of redirect tests (with and without a key suffix) for every valid code
+for redirect_code in VALID_AMZ_REDIRECT:
+    rules = ROUTING_RULES['AmazonExample2+HttpRedirectCode=TMPL'].format(HttpRedirectCode=redirect_code)
+    result = redirect_code
+    ROUTING_RULES_TESTS.append(
+        dict(xml=dict(RoutingRules=rules), url='/images/', location=dict(proto='http',bucket='{bucket_name}',path='/folderdeleted.html'), code=result)
+    )
+    ROUTING_RULES_TESTS.append(
+        dict(xml=dict(RoutingRules=rules), url='/images/x', location=dict(proto='http',bucket='{bucket_name}',path='/folderdeleted.html'), code=result)
+    )
+
+# TODO:
+# codes other than those in VALID_AMZ_REDIRECT
+# give an error of 'The provided HTTP redirect code (314) is not valid. Valid codes are 3XX except 300.' when setting the website config;
+# we should check that ceph can return that error too
+
+def routing_setup():
+  check_can_test_website()
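+  # build a public bucket pre-populated with index/error documents and a
+  # debug object; every created object is tracked in kwargs['obj'] so
+  # routing_teardown can delete them in reverse order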
+  kwargs = {'obj':[]}
+  bucket = get_new_bucket()
+  kwargs['bucket'] = bucket
+  kwargs['obj'].append(bucket)
+  #f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'])
+  f = _test_website_prep(bucket, '')
+  kwargs.update(f)
+  bucket.set_canned_acl('public-read')
+  
+  k = bucket.new_key('debug-ws.xml')
+  kwargs['obj'].append(k)
+  k.set_contents_from_string('', policy='public-read')
+
+  k = bucket.new_key(f['IndexDocument_Suffix'])
+  kwargs['obj'].append(k)
+  s = choose_bucket_prefix(template=INDEXDOC_TEMPLATE, max_len=64)
+  k.set_contents_from_string(s)
+  k.set_canned_acl('public-read')
+
+  k = bucket.new_key(f['ErrorDocument_Key'])
+  kwargs['obj'].append(k)
+  s = choose_bucket_prefix(template=ERRORDOC_TEMPLATE, max_len=64)
+  k.set_contents_from_string(s)
+  k.set_canned_acl('public-read')
+
+  #time.sleep(1)
+  while bucket.get_key(f['ErrorDocument_Key']) is None:
+      time.sleep(SLEEP_INTERVAL)
+
+  return kwargs
+
+def routing_teardown(**kwargs):
+  for o in reversed(kwargs['obj']):
+    print('Deleting', str(o))
+    o.delete()
+
+@common.with_setup_kwargs(setup=routing_setup, teardown=routing_teardown)
+#@timed(10)
+def routing_check(*args, **kwargs):
+    bucket = kwargs['bucket']
+    args = args[0]
+    #print(args)
+    pprint(args)
+    xml_fields = kwargs.copy()
+    xml_fields.update(args['xml'])
+
+    k = bucket.get_key('debug-ws.xml')
+    k.set_contents_from_string(str(args)+str(kwargs), policy='public-read')
+
+    pprint(xml_fields)
+    f = _test_website_prep(bucket, WEBSITE_CONFIGS_XMLFRAG['IndexDocErrorDoc'], hardcoded_fields=xml_fields)
+    #print(f)
+    config_xmlcmp = bucket.get_website_configuration_xml()
+    config_xmlcmp = common.normalize_xml(config_xmlcmp, pretty_print=True) # For us to read
+    res = _website_request(bucket.name, args['url'])
+    print(config_xmlcmp)
+    new_url = args['location']
+    if new_url is not None:
+        new_url = get_website_url(**new_url)
+        new_url = new_url.format(bucket_name=bucket.name)
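+    # dispatch on the expected status class: 2xx must return a non-empty
+    # body, 3xx must carry the computed Location, 4xx+ must render an error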
+    if args['code'] >= 200 and args['code'] < 300:
+        #body = res.read()
+        #print(body)
+        #eq(body, args['content'], 'default content should match index.html set content')
+        ok(int(res.getheader('Content-Length', -1)) > 0)
+    elif args['code'] >= 300 and args['code'] < 400:
+        _website_expected_redirect_response(res, args['code'], IGNORE_FIELD, new_url)
+    elif args['code'] >= 400:
+        _website_expected_error_response(res, bucket.name, args['code'], IGNORE_FIELD, IGNORE_FIELD)
+    else:
+        assert False, 'unhandled expected status code %r' % (args['code'],)
+
+@attr('s3website_RoutingRules')
+@attr('s3website')
+@attr('fails_on_dbstore')
+@nose.with_setup(setup=check_can_test_website, teardown=common.teardown)
+def test_routing_generator():
+    for t in ROUTING_RULES_TESTS:
+        if 'xml' in t and 'RoutingRules' in t['xml'] and len(t['xml']['RoutingRules']) > 0:
+            t['xml']['RoutingRules'] = common.trim_xml(t['xml']['RoutingRules'])
+        yield routing_check, t
diff --git a/s3tests/functional/test_utils.py b/s3tests/functional/test_utils.py
new file mode 100644 (file)
index 0000000..59c3c74
--- /dev/null
@@ -0,0 +1,11 @@
+from nose.tools import eq_ as eq
+
+from . import utils
+
+def test_generate():
+    FIVE_MB = 5 * 1024 * 1024
+    eq(len(''.join(utils.generate_random(0))), 0)
+    eq(len(''.join(utils.generate_random(1))), 1)
+    eq(len(''.join(utils.generate_random(FIVE_MB - 1))), FIVE_MB - 1)
+    eq(len(''.join(utils.generate_random(FIVE_MB))), FIVE_MB)
+    eq(len(''.join(utils.generate_random(FIVE_MB + 1))), FIVE_MB + 1)
diff --git a/s3tests/functional/utils.py b/s3tests/functional/utils.py
new file mode 100644 (file)
index 0000000..85bcaf7
--- /dev/null
@@ -0,0 +1,63 @@
+import random
+import requests
+import string
+import time
+
+from nose.tools import eq_ as eq
+
+def assert_raises(excClass, callableObj, *args, **kwargs):
+    """
+    Like unittest.TestCase.assertRaises, but returns the exception.
+    """
+    try:
+        callableObj(*args, **kwargs)
+    except excClass as e:
+        return e
+    else:
+        if hasattr(excClass, '__name__'):
+            excName = excClass.__name__
+        else:
+            excName = str(excClass)
+        raise AssertionError("%s not raised" % excName)
+
+def generate_random(size, part_size=5*1024*1024):
+    """
+    Generate the specified number random data.
+    (actually each MB is a repetition of the first KB)
+    """
+    chunk = 1024
+    allowed = string.ascii_letters
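+    # build one random 1 KB chunk per part and repeat it to fill the part;
+    # this is much cheaper than generating every byte independently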
+    for x in range(0, size, part_size):
+        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
+        s = ''
+        left = size - x
+        this_part_size = min(left, part_size)
+        for y in range(this_part_size // chunk):
+            s = s + strpart
+        s = s + strpart[:(this_part_size % chunk)]
+        yield s
+
+# syncs all the regions except for the one passed in
+def region_sync_meta(targets, region):
+    for (k, r) in targets.items():
+        if r == region:
+            continue
+        conf = r.conf
+        if conf.sync_agent_addr:
+            ret = requests.post('http://{addr}:{port}/metadata/incremental'.format(addr = conf.sync_agent_addr, port = conf.sync_agent_port))
+            eq(ret.status_code, 200)
+        if conf.sync_meta_wait:
+            time.sleep(conf.sync_meta_wait)
+
+
+def get_grantee(policy, permission):
+    '''
+    Given an object/bucket policy, extract the grantee with the required permission
+    '''
+
+    for g in policy.acl.grants:
+        if g.permission == permission:
+            return g.id
diff --git a/s3tests_boto3/__init__.py b/s3tests_boto3/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/s3tests_boto3/common.py b/s3tests_boto3/common.py
new file mode 100644 (file)
index 0000000..987ec6b
--- /dev/null
@@ -0,0 +1,301 @@
+import boto.s3.connection
+import munch
+import itertools
+import os
+import random
+import string
+import yaml
+import re
+from lxml import etree
+
+from doctest import Example
+from lxml.doctestcompare import LXMLOutputChecker
+
+s3 = munch.Munch()
+config = munch.Munch()
+prefix = ''
+
+bucket_counter = itertools.count(1)
+key_counter = itertools.count(1)
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Use template and feed it more and more random filler, until it's
+    as long as possible but still below max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
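+# Illustrative example: choose_bucket_prefix('test-{random}-', max_len=30)
+# yields something like 'test-a1b2c3d4e5f6g7h8i9j0k1-' (at most 30 characters).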
+
+def nuke_bucket(bucket):
+    try:
+        bucket.set_canned_acl('private')
+        # TODO: deleted_cnt and the while loop is a work around for rgw
+        # not sending the
+        deleted_cnt = 1
+        while deleted_cnt:
+            deleted_cnt = 0
+            for key in bucket.list():
+                print('Cleaning bucket {bucket} key {key}'.format(
+                    bucket=bucket,
+                    key=key,
+                    ))
+                key.set_canned_acl('private')
+                key.delete()
+                deleted_cnt += 1
+        bucket.delete()
+    except boto.exception.S3ResponseError as e:
+        # TODO workaround for buggy rgw that fails to send
+        # error_code, remove
+        if (e.status == 403
+            and e.error_code is None
+            and e.body == ''):
+            e.error_code = 'AccessDenied'
+        if e.error_code != 'AccessDenied':
+            print('GOT UNWANTED ERROR', e.error_code)
+            raise
+        # seems like we're not the owner of the bucket; ignore
+        pass
+
+def nuke_prefixed_buckets():
+    for name, conn in list(s3.items()):
+        print('Cleaning buckets from connection {name}'.format(name=name))
+        for bucket in conn.get_all_buckets():
+            if bucket.name.startswith(prefix):
+                print('Cleaning bucket {bucket}'.format(bucket=bucket))
+                nuke_bucket(bucket)
+
+    print('Done with cleanup of test buckets.')
+
+def read_config(fp):
+    config = munch.Munch()
+    g = yaml.safe_load_all(fp)
+    for new in g:
+        config.update(munch.munchify(new))
+    return config
+
+def connect(conf):
+    mapping = dict(
+        port='port',
+        host='host',
+        is_secure='is_secure',
+        access_key='aws_access_key_id',
+        secret_key='aws_secret_access_key',
+        )
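+    # translate config keys into S3Connection keyword arguments, silently
+    # dropping any keys S3Connection does not understand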
+    kwargs = dict((mapping[k],v) for (k,v) in conf.items() if k in mapping)
+    #process calling_format argument
+    calling_formats = dict(
+        ordinary=boto.s3.connection.OrdinaryCallingFormat(),
+        subdomain=boto.s3.connection.SubdomainCallingFormat(),
+        vhost=boto.s3.connection.VHostCallingFormat(),
+        )
+    kwargs['calling_format'] = calling_formats['ordinary']
+    if 'calling_format' in conf:
+        raw_calling_format = conf['calling_format']
+        try:
+            kwargs['calling_format'] = calling_formats[raw_calling_format]
+        except KeyError:
+            raise RuntimeError(
+                'calling_format unknown: %r' % raw_calling_format
+                )
+    # TODO test vhost calling format
+    conn = boto.s3.connection.S3Connection(**kwargs)
+    return conn
+
+def setup():
+    global s3, config, prefix
+    s3.clear()
+    config.clear()
+
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    with open(path) as f:
+        config.update(read_config(f))
+
+    # These 3 should always be present.
+    if 's3' not in config:
+        raise RuntimeError('Your config file is missing the s3 section!')
+    if 'defaults' not in config.s3:
+        raise RuntimeError('Your config file is missing the s3.defaults section!')
+    if 'fixtures' not in config:
+        raise RuntimeError('Your config file is missing the fixtures section!')
+
+    template = config.fixtures.get('bucket prefix', 'test-{random}-')
+    prefix = choose_bucket_prefix(template=template)
+    if prefix == '':
+        raise RuntimeError("Empty Prefix! Aborting!")
+
+    defaults = config.s3.defaults
+    for section in list(config.s3.keys()):
+        if section == 'defaults':
+            continue
+
+        conf = {}
+        conf.update(defaults)
+        conf.update(config.s3[section])
+        conn = connect(conf)
+        s3[section] = conn
+
+    # WARNING! we actively delete all buckets we see with the prefix
+    # we've chosen! Choose your prefix with care, and don't reuse
+    # credentials!
+
+    # We also assume nobody else is going to use buckets with that
+    # prefix. This is racy but given enough randomness, should not
+    # really fail.
+    nuke_prefixed_buckets()
+
+def get_new_bucket(connection=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if connection is None:
+        connection = s3.main
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    # the only way for this to fail with a pre-existing bucket is if
+    # someone raced us between setup nuke_prefixed_buckets and here;
+    # ignore that as astronomically unlikely
+    bucket = connection.create_bucket(name)
+    return bucket
+
+def teardown():
+    nuke_prefixed_buckets()
+
+def with_setup_kwargs(setup, teardown=None):
+    """Decorator to add setup and/or teardown methods to a test function::
+
+      @with_setup_kwargs(setup, teardown)
+      def test_something():
+          " ... "
+
+    The setup function should return a dict of kwargs, which will be passed
+    to the test function and to the teardown function.
+
+    Note that `with_setup_kwargs` is useful *only* for test functions, not for test
+    methods or inside of TestCase subclasses.
+    """
+    def decorate(func):
+        kwargs = {}
+
+        def test_wrapped(*args, **kwargs2):
+            k2 = kwargs.copy()
+            k2.update(kwargs2)
+            k2['testname'] = func.__name__
+            func(*args, **k2)
+
+        test_wrapped.__name__ = func.__name__
+
+        def setup_wrapped():
+            k = setup()
+            kwargs.update(k)
+            if hasattr(func, 'setup'):
+                func.setup()
+        test_wrapped.setup = setup_wrapped
+
+        if teardown:
+            def teardown_wrapped():
+                if hasattr(func, 'teardown'):
+                    func.teardown()
+                teardown(**kwargs)
+
+            test_wrapped.teardown = teardown_wrapped
+        else:
+            if hasattr(func, 'teardown'):
+                test_wrapped.teardown = func.teardown  # assign the function, do not call it
+        return test_wrapped
+    return decorate
+
+# Demo case for the above, when you run test_gen():
+# _test_gen will run twice,
+# with the following stderr printing
+# setup_func {'b': 2}
+# testcase ('1',) {'b': 2, 'testname': '_test_gen'}
+# teardown_func {'b': 2}
+# setup_func {'b': 2}
+# testcase () {'b': 2, 'testname': '_test_gen'}
+# teardown_func {'b': 2}
+# 
+#def setup_func():
+#    kwargs = {'b': 2}
+#    print("setup_func", kwargs, file=sys.stderr)
+#    return kwargs
+#
+#def teardown_func(**kwargs):
+#    print("teardown_func", kwargs, file=sys.stderr)
+#
+#@with_setup_kwargs(setup=setup_func, teardown=teardown_func)
+#def _test_gen(*args, **kwargs):
+#    print("testcase", args, kwargs, file=sys.stderr)
+#
+#def test_gen():
+#    yield _test_gen, '1'
+#    yield _test_gen
+
+def trim_xml(xml_str):
+    p = etree.XMLParser(remove_blank_text=True)
+    elem = etree.XML(xml_str, parser=p)
+    return etree.tostring(elem)
+
+def normalize_xml(xml, pretty_print=True):
+    if xml is None:
+        return xml
+
+    root = etree.fromstring(xml.encode(encoding='ascii'))
+
+    for element in root.iter('*'):
+        if element.text is not None and not element.text.strip():
+            element.text = None
+        if element.text is not None:
+            element.text = element.text.strip().replace("\n", "").replace("\r", "")
+        if element.tail is not None and not element.tail.strip():
+            element.tail = None
+        if element.tail is not None:
+            element.tail = element.tail.strip().replace("\n", "").replace("\r", "")
+
+    # sort child elements so that ordering differences don't matter
+    for parent in root.xpath('//*[./*]'): # search for parent elements
+        parent[:] = sorted(parent, key=lambda x: x.tag)
+
+    # decode to str so the substitutions below can use string patterns
+    xmlstr = etree.tostring(root, encoding="utf-8", xml_declaration=True, pretty_print=pretty_print).decode('utf-8')
+    # there are two different DTD URIs
+    xmlstr = re.sub(r'xmlns="[^"]+"', 'xmlns="s3"', xmlstr)
+    xmlstr = re.sub(r'xmlns=\'[^\']+\'', 'xmlns="s3"', xmlstr)
+    for uri in ['http://doc.s3.amazonaws.com/doc/2006-03-01/', 'http://s3.amazonaws.com/doc/2006-03-01/']:
+        xmlstr = xmlstr.replace(uri, 'URI-DTD')
+    #xmlstr = re.sub(r'>\s+', '>', xmlstr, count=0, flags=re.MULTILINE)
+    return xmlstr
+
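+# With this normalization, two documents that differ only in child order,
+# insignificant whitespace, or which of the two S3 namespace URIs they use
+# should serialize identically (illustrative sketch):
+#
+#   a = normalize_xml('<r xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><b/><a/></r>')
+#   b = normalize_xml('<r xmlns="http://doc.s3.amazonaws.com/doc/2006-03-01/"><a/> <b/></r>')
+#   assert a == b
+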
+def assert_xml_equal(got, want):
+    assert want is not None, 'Wanted XML cannot be None'
+    if got is None:
+        raise AssertionError('XML input to validate was None')
+    checker = LXMLOutputChecker()
+    if not checker.check_output(want, got, 0):
+        message = checker.output_difference(Example("", want), got, 0)
+        raise AssertionError(message)
diff --git a/s3tests_boto3/functional/__init__.py b/s3tests_boto3/functional/__init__.py
new file mode 100644 (file)
index 0000000..4d03d65
--- /dev/null
@@ -0,0 +1,721 @@
+import boto3
+from botocore import UNSIGNED
+from botocore.client import Config
+from botocore.exceptions import ClientError
+from botocore.handlers import disable_signing
+import configparser
+import datetime
+import time
+import os
+import munch
+import random
+import string
+import itertools
+import urllib3
+import re
+
+config = munch.Munch()
+
+# this will be assigned by setup()
+prefix = None
+
+def get_prefix():
+    assert prefix is not None
+    return prefix
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Fill the template with random filler, trimming it until the result is
+    as long as possible while still within max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
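+# For example (the random suffix below is made up):
+#
+#   choose_bucket_prefix('test-{random}-', max_len=30)
+#   # -> 'test-7f3k0q2n9x1c5b8m4z6v0-'  (filled, then trimmed to <= 30 chars)
+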
+def get_buckets_list(client=None, prefix=None):
+    if client == None:
+        client = get_client()
+    if prefix == None:
+        prefix = get_prefix()
+    response = client.list_buckets()
+    bucket_dicts = response['Buckets']
+    buckets_list = []
+    for bucket in bucket_dicts:
+        if prefix in bucket['Name']:
+            buckets_list.append(bucket['Name'])
+
+    return buckets_list
+
+def get_objects_list(bucket, client=None, prefix=None):
+    if client == None:
+        client = get_client()
+
+    if prefix == None:
+        response = client.list_objects(Bucket=bucket)
+    else:
+        response = client.list_objects(Bucket=bucket, Prefix=prefix)
+    objects_list = []
+
+    if 'Contents' in response:
+        contents = response['Contents']
+        for obj in contents:
+            objects_list.append(obj['Key'])
+
+    return objects_list
+
+# generator function that returns object listings in batches, where each
+# batch is a list of dicts compatible with delete_objects()
+def list_versions(client, bucket, batch_size):
+    key_marker = ''
+    version_marker = ''
+    truncated = True
+    while truncated:
+        listing = client.list_object_versions(
+                Bucket=bucket,
+                KeyMarker=key_marker,
+                VersionIdMarker=version_marker,
+                MaxKeys=batch_size)
+
+        key_marker = listing.get('NextKeyMarker')
+        version_marker = listing.get('NextVersionIdMarker')
+        truncated = listing['IsTruncated']
+
+        objs = listing.get('Versions', []) + listing.get('DeleteMarkers', [])
+        if len(objs):
+            yield [{'Key': o['Key'], 'VersionId': o['VersionId']} for o in objs]
+
+def nuke_bucket(client, bucket):
+    batch_size = 128
+    max_retain_date = None
+
+    # list and delete objects in batches
+    for objects in list_versions(client, bucket, batch_size):
+        delete = client.delete_objects(Bucket=bucket,
+                Delete={'Objects': objects, 'Quiet': True},
+                BypassGovernanceRetention=True)
+
+        # check for object locks on 403 AccessDenied errors
+        for err in delete.get('Errors', []):
+            if err.get('Code') != 'AccessDenied':
+                continue
+            try:
+                res = client.get_object_retention(Bucket=bucket,
+                        Key=err['Key'], VersionId=err['VersionId'])
+                retain_date = res['Retention']['RetainUntilDate']
+                if not max_retain_date or max_retain_date < retain_date:
+                    max_retain_date = retain_date
+            except ClientError:
+                pass
+
+    if max_retain_date:
+        # wait out the retention period (up to 60 seconds)
+        now = datetime.datetime.now(max_retain_date.tzinfo)
+        if max_retain_date > now:
+            delta = max_retain_date - now
+            if delta.total_seconds() > 60:
+                raise RuntimeError('bucket {} still has objects \
+locked for {} more seconds, not waiting for \
+bucket cleanup'.format(bucket, delta.total_seconds()))
+            print('nuke_bucket', bucket, 'waiting', delta.total_seconds(),
+                    'seconds for object locks to expire')
+            time.sleep(delta.total_seconds())
+
+        for objects in list_versions(client, bucket, batch_size):
+            client.delete_objects(Bucket=bucket,
+                    Delete={'Objects': objects, 'Quiet': True},
+                    BypassGovernanceRetention=True)
+
+    client.delete_bucket(Bucket=bucket)
+
+def nuke_prefixed_buckets(prefix, client=None):
+    if client == None:
+        client = get_client()
+
+    buckets = get_buckets_list(client, prefix)
+
+    err = None
+    for bucket_name in buckets:
+        try:
+            nuke_bucket(client, bucket_name)
+        except Exception as e:
+            # Don't raise while cleaning up: keep nuking the remaining
+            # buckets so they aren't leaked. Remember the error so it can
+            # be surfaced to the user once the loop is done.
+            err = e
+    if err:
+        raise err
+
+    print('Done with cleanup of buckets in tests.')
+
+def configured_storage_classes():
+    sc = ['STANDARD']
+
+    # split on any run of non-word characters
+    extra_sc = re.split(r"\W+", config.storage_classes)
+
+    for item in extra_sc:
+        if item != 'STANDARD':
+            sc.append(item)
+
+    sc = [i for i in sc if i]
+    print("storage classes configured: " + str(sc))
+
+    return sc
+
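+# For example, storage_classes = "LUKEWARM, FROZEN" in the config (class
+# names are hypothetical) yields ['STANDARD', 'LUKEWARM', 'FROZEN'].
+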
+def setup():
+    cfg = configparser.RawConfigParser()
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    cfg.read(path)
+
+    if not cfg.defaults():
+        raise RuntimeError('Your config file is missing the DEFAULT section!')
+    if not cfg.has_section("s3 main"):
+        raise RuntimeError('Your config file is missing the "s3 main" section!')
+    if not cfg.has_section("s3 alt"):
+        raise RuntimeError('Your config file is missing the "s3 alt" section!')
+    if not cfg.has_section("s3 tenant"):
+        raise RuntimeError('Your config file is missing the "s3 tenant" section!')
+
+    global prefix
+
+    defaults = cfg.defaults()
+
+    # vars from the DEFAULT section
+    config.default_host = defaults.get("host")
+    config.default_port = int(defaults.get("port"))
+    config.default_is_secure = cfg.getboolean('DEFAULT', "is_secure")
+
+    proto = 'https' if config.default_is_secure else 'http'
+    config.default_endpoint = "%s://%s:%d" % (proto, config.default_host, config.default_port)
+
+    try:
+        config.default_ssl_verify = cfg.getboolean('DEFAULT', "ssl_verify")
+    except configparser.NoOptionError:
+        config.default_ssl_verify = False
+
+    # Disable InsecureRequestWarning reported by urllib3 when ssl_verify is False
+    if not config.default_ssl_verify:
+        urllib3.disable_warnings()
+
+    # vars from the main section
+    config.main_access_key = cfg.get('s3 main',"access_key")
+    config.main_secret_key = cfg.get('s3 main',"secret_key")
+    config.main_display_name = cfg.get('s3 main',"display_name")
+    config.main_user_id = cfg.get('s3 main',"user_id")
+    config.main_email = cfg.get('s3 main',"email")
+    try:
+        config.main_kms_keyid = cfg.get('s3 main',"kms_keyid")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.main_kms_keyid = 'testkey-1'
+
+    try:
+        config.main_kms_keyid2 = cfg.get('s3 main',"kms_keyid2")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.main_kms_keyid2 = 'testkey-2'
+
+    try:
+        config.main_api_name = cfg.get('s3 main',"api_name")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.main_api_name = ""
+        pass
+
+    try:
+        config.storage_classes = cfg.get('s3 main',"storage_classes")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.storage_classes = ""
+        pass
+
+    try:
+        config.lc_debug_interval = int(cfg.get('s3 main',"lc_debug_interval"))
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.lc_debug_interval = 10
+
+    config.alt_access_key = cfg.get('s3 alt',"access_key")
+    config.alt_secret_key = cfg.get('s3 alt',"secret_key")
+    config.alt_display_name = cfg.get('s3 alt',"display_name")
+    config.alt_user_id = cfg.get('s3 alt',"user_id")
+    config.alt_email = cfg.get('s3 alt',"email")
+
+    config.tenant_access_key = cfg.get('s3 tenant',"access_key")
+    config.tenant_secret_key = cfg.get('s3 tenant',"secret_key")
+    config.tenant_display_name = cfg.get('s3 tenant',"display_name")
+    config.tenant_user_id = cfg.get('s3 tenant',"user_id")
+    config.tenant_email = cfg.get('s3 tenant',"email")
+
+    # vars from the fixtures section
+    try:
+        template = cfg.get('fixtures', "bucket prefix")
+    except configparser.NoOptionError:
+        template = 'test-{random}-'
+    prefix = choose_bucket_prefix(template=template)
+
+    alt_client = get_alt_client()
+    tenant_client = get_tenant_client()
+    nuke_prefixed_buckets(prefix=prefix)
+    nuke_prefixed_buckets(prefix=prefix, client=alt_client)
+    nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
+
+    if cfg.has_section("s3 cloud"):
+        get_cloud_config(cfg)
+    else:
+        config.cloud_storage_class = None
+
+
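+# setup() expects an ini file of roughly this shape (values illustrative;
+# s3tests.conf.SAMPLE is the authoritative template):
+#
+#   [DEFAULT]
+#   host = localhost
+#   port = 8000
+#   is_secure = False
+#   ssl_verify = False
+#
+#   [s3 main]
+#   access_key = ...
+#   secret_key = ...
+#   display_name = ...
+#   user_id = ...
+#   email = ...
+#
+#   [s3 alt]
+#   ... same keys as [s3 main] ...
+#
+#   [s3 tenant]
+#   ... same keys as [s3 main] ...
+#
+#   [fixtures]
+#   bucket prefix = yournamehere-{random}-
+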
+def teardown():
+    alt_client = get_alt_client()
+    tenant_client = get_tenant_client()
+    nuke_prefixed_buckets(prefix=prefix)
+    nuke_prefixed_buckets(prefix=prefix, client=alt_client)
+    nuke_prefixed_buckets(prefix=prefix, client=tenant_client)
+    try:
+        iam_client = get_iam_client()
+        list_roles_resp = iam_client.list_roles()
+        for role in list_roles_resp['Roles']:
+            list_policies_resp = iam_client.list_role_policies(RoleName=role['RoleName'])
+            for policy in list_policies_resp['PolicyNames']:
+                del_policy_resp = iam_client.delete_role_policy(
+                                         RoleName=role['RoleName'],
+                                         PolicyName=policy
+                                        )
+            del_role_resp = iam_client.delete_role(RoleName=role['RoleName'])
+        list_oidc_resp = iam_client.list_open_id_connect_providers()
+        for oidcprovider in list_oidc_resp['OpenIDConnectProviderList']:
+            del_oidc_resp = iam_client.delete_open_id_connect_provider(
+                        OpenIDConnectProviderArn=oidcprovider['Arn']
+                    )
+    except Exception:
+        # IAM cleanup is best-effort; the iam section may not be configured
+        pass
+
+def check_webidentity():
+    cfg = configparser.RawConfigParser()
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    cfg.read(path)
+    if not cfg.has_section("webidentity"):
+        raise RuntimeError('Your config file is missing the "webidentity" section!')
+
+    config.webidentity_thumbprint = cfg.get('webidentity', "thumbprint")
+    config.webidentity_aud = cfg.get('webidentity', "aud")
+    config.webidentity_token = cfg.get('webidentity', "token")
+    config.webidentity_realm = cfg.get('webidentity', "KC_REALM")
+    config.webidentity_sub = cfg.get('webidentity', "sub")
+    config.webidentity_azp = cfg.get('webidentity', "azp")
+    config.webidentity_user_token = cfg.get('webidentity', "user_token")
+
+def get_cloud_config(cfg):
+    config.cloud_host = cfg.get('s3 cloud',"host")
+    config.cloud_port = int(cfg.get('s3 cloud',"port"))
+    config.cloud_is_secure = cfg.getboolean('s3 cloud', "is_secure")
+
+    proto = 'https' if config.cloud_is_secure else 'http'
+    config.cloud_endpoint = "%s://%s:%d" % (proto, config.cloud_host, config.cloud_port)
+
+    config.cloud_access_key = cfg.get('s3 cloud',"access_key")
+    config.cloud_secret_key = cfg.get('s3 cloud',"secret_key")
+
+    try:
+        config.cloud_storage_class = cfg.get('s3 cloud', "cloud_storage_class")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_storage_class = None
+    
+    try:
+        config.cloud_retain_head_object = cfg.get('s3 cloud',"retain_head_object")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_retain_head_object = None
+
+    try:
+        config.cloud_target_path = cfg.get('s3 cloud',"target_path")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_target_path = None
+
+    try:
+        config.cloud_target_storage_class = cfg.get('s3 cloud',"target_storage_class")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_target_storage_class = 'STANDARD'
+
+    try:
+        config.cloud_regular_storage_class = cfg.get('s3 cloud', "storage_class")
+    except (configparser.NoSectionError, configparser.NoOptionError):
+        config.cloud_regular_storage_class = None
+
+
+def get_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
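+# A custom botocore Config can be passed in to exercise other client
+# behaviors, e.g. path-style addressing (a real botocore option; this
+# particular override is just an illustration):
+#
+#   client = get_client(Config(signature_version='s3v4',
+#                              s3={'addressing_style': 'path'}))
+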
+def get_v2_client():
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=Config(signature_version='s3'))
+    return client
+
+def get_sts_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='sts',
+                        aws_access_key_id=config.alt_access_key,
+                        aws_secret_access_key=config.alt_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        region_name='',
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+def get_iam_client(client_config=None):
+    cfg = configparser.RawConfigParser()
+    try:
+        path = os.environ['S3TEST_CONF']
+    except KeyError:
+        raise RuntimeError(
+            'To run tests, point environment '
+            + 'variable S3TEST_CONF to a config file.',
+            )
+    cfg.read(path)
+    if not cfg.has_section("iam"):
+        raise RuntimeError('Your config file is missing the "iam" section!')
+
+    config.iam_access_key = cfg.get('iam',"access_key")
+    config.iam_secret_key = cfg.get('iam',"secret_key")
+    config.iam_display_name = cfg.get('iam',"display_name")
+    config.iam_user_id = cfg.get('iam',"user_id")
+    config.iam_email = cfg.get('iam',"email")    
+
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+    
+    client = boto3.client(service_name='iam',
+                        aws_access_key_id=config.iam_access_key,
+                        aws_secret_access_key=config.iam_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        region_name='',
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+def get_iam_s3client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+    client = boto3.client(service_name='s3',
+                          aws_access_key_id=get_iam_access_key(),
+                          aws_secret_access_key=get_iam_secret_key(),
+                          endpoint_url=config.default_endpoint,
+                          use_ssl=config.default_is_secure,
+                          verify=config.default_ssl_verify,
+                          config=client_config)
+    return client
+
+def get_alt_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.alt_access_key,
+                        aws_secret_access_key=config.alt_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+def get_cloud_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.cloud_access_key,
+                        aws_secret_access_key=config.cloud_secret_key,
+                        endpoint_url=config.cloud_endpoint,
+                        use_ssl=config.cloud_is_secure,
+                        config=client_config)
+    return client
+
+def get_tenant_client(client_config=None):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=config.tenant_access_key,
+                        aws_secret_access_key=config.tenant_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+def get_tenant_iam_client():
+
+    client = boto3.client(service_name='iam',
+                          region_name='us-east-1',
+                          aws_access_key_id=config.tenant_access_key,
+                          aws_secret_access_key=config.tenant_secret_key,
+                          endpoint_url=config.default_endpoint,
+                          verify=config.default_ssl_verify,
+                          use_ssl=config.default_is_secure)
+    return client
+
+def get_alt_iam_client():
+
+    client = boto3.client(service_name='iam',
+                          region_name='',
+                          aws_access_key_id=config.alt_access_key,
+                          aws_secret_access_key=config.alt_secret_key,
+                          endpoint_url=config.default_endpoint,
+                          verify=config.default_ssl_verify,
+                          use_ssl=config.default_is_secure)
+    return client
+
+def get_unauthenticated_client():
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id='',
+                        aws_secret_access_key='',
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=Config(signature_version=UNSIGNED))
+    return client
+
+def get_bad_auth_client(aws_access_key_id='badauth'):
+    client = boto3.client(service_name='s3',
+                        aws_access_key_id=aws_access_key_id,
+                        aws_secret_access_key='roflmao',
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=Config(signature_version='s3v4'))
+    return client
+
+def get_svc_client(client_config=None, svc='s3'):
+    if client_config == None:
+        client_config = Config(signature_version='s3v4')
+
+    client = boto3.client(service_name=svc,
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify,
+                        config=client_config)
+    return client
+
+bucket_counter = itertools.count(1)
+
+def get_new_bucket_name():
+    """
+    Get a bucket name that probably does not exist.
+
+    We make every attempt to use a unique random prefix, so if a
+    bucket by this name happens to exist, it's ok if tests give
+    false negatives.
+    """
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    return name
+
+def get_new_bucket_resource(name=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    s3 = boto3.resource('s3',
+                        aws_access_key_id=config.main_access_key,
+                        aws_secret_access_key=config.main_secret_key,
+                        endpoint_url=config.default_endpoint,
+                        use_ssl=config.default_is_secure,
+                        verify=config.default_ssl_verify)
+    if name is None:
+        name = get_new_bucket_name()
+    bucket = s3.Bucket(name)
+    bucket_location = bucket.create()
+    return bucket
+
+def get_new_bucket(client=None, name=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    if client is None:
+        client = get_client()
+    if name is None:
+        name = get_new_bucket_name()
+
+    client.create_bucket(Bucket=name)
+    return name
+
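+# Typical test usage (illustrative key/body values):
+#
+#   client = get_client()
+#   bucket_name = get_new_bucket(client)
+#   client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+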
+def get_parameter_name():
+    parameter_name=""
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+    while rand:
+        parameter_name = '{random}'.format(random=rand)
+        if len(parameter_name) <= 10:
+            return parameter_name
+        rand = rand[:-1]
+    return parameter_name
+
+def get_sts_user_id():
+    return config.alt_user_id
+
+def get_config_is_secure():
+    return config.default_is_secure
+
+def get_config_host():
+    return config.default_host
+
+def get_config_port():
+    return config.default_port
+
+def get_config_endpoint():
+    return config.default_endpoint
+
+def get_config_ssl_verify():
+    return config.default_ssl_verify
+
+def get_main_aws_access_key():
+    return config.main_access_key
+
+def get_main_aws_secret_key():
+    return config.main_secret_key
+
+def get_main_display_name():
+    return config.main_display_name
+
+def get_main_user_id():
+    return config.main_user_id
+
+def get_main_email():
+    return config.main_email
+
+def get_main_api_name():
+    return config.main_api_name
+
+def get_main_kms_keyid():
+    return config.main_kms_keyid
+
+def get_secondary_kms_keyid():
+    return config.main_kms_keyid2
+
+def get_alt_aws_access_key():
+    return config.alt_access_key
+
+def get_alt_aws_secret_key():
+    return config.alt_secret_key
+
+def get_alt_display_name():
+    return config.alt_display_name
+
+def get_alt_user_id():
+    return config.alt_user_id
+
+def get_alt_email():
+    return config.alt_email
+
+def get_tenant_aws_access_key():
+    return config.tenant_access_key
+
+def get_tenant_aws_secret_key():
+    return config.tenant_secret_key
+
+def get_tenant_display_name():
+    return config.tenant_display_name
+
+def get_tenant_user_id():
+    return config.tenant_user_id
+
+def get_tenant_email():
+    return config.tenant_email
+
+def get_thumbprint():
+    return config.webidentity_thumbprint
+
+def get_aud():
+    return config.webidentity_aud
+
+def get_sub():
+    return config.webidentity_sub
+
+def get_azp():
+    return config.webidentity_azp
+
+def get_token():
+    return config.webidentity_token
+
+def get_realm_name():
+    return config.webidentity_realm
+
+def get_iam_access_key():
+    return config.iam_access_key
+
+def get_iam_secret_key():
+    return config.iam_secret_key
+
+def get_user_token():
+    return config.webidentity_user_token
+
+def get_cloud_storage_class():
+    return config.cloud_storage_class
+
+def get_cloud_retain_head_object():
+    return config.cloud_retain_head_object
+
+def get_cloud_regular_storage_class():
+    return config.cloud_regular_storage_class
+
+def get_cloud_target_path():
+    return config.cloud_target_path
+
+def get_cloud_target_storage_class():
+    return config.cloud_target_storage_class
+
+def get_lc_debug_interval():
+    return config.lc_debug_interval
diff --git a/s3tests_boto3/functional/policy.py b/s3tests_boto3/functional/policy.py
new file mode 100644 (file)
index 0000000..aae5454
--- /dev/null
@@ -0,0 +1,46 @@
+import json
+
+class Statement(object):
+    def __init__(self, action, resource, principal={"AWS": "*"}, effect="Allow", condition=None):
+        self.principal = principal
+        self.action = action
+        self.resource = resource
+        self.condition = condition
+        self.effect = effect
+
+    def to_dict(self):
+        d = { "Action" : self.action,
+              "Principal" : self.principal,
+              "Effect" : self.effect,
+              "Resource" : self.resource
+        }
+
+        if self.condition is not None:
+            d["Condition"] = self.condition
+
+        return d
+
+class Policy(object):
+    def __init__(self):
+        self.statements = []
+
+    def add_statement(self, s):
+        self.statements.append(s)
+        return self
+
+    def to_json(self):
+        policy_dict = {
+            "Version" : "2012-10-17",
+            "Statement":
+            [s.to_dict() for s in self.statements]
+        }
+
+        return json.dumps(policy_dict)
+
+def make_json_policy(action, resource, principal={"AWS": "*"}, conditions=None):
+    """
+    Helper function to make single statement policies
+    """
+    s = Statement(action, resource, principal, condition=conditions)
+    p = Policy()
+    return p.add_statement(s).to_json()
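+
+# For example (the bucket name is illustrative), build a public-read object
+# policy and hand the resulting JSON to put_bucket_policy():
+#
+#   policy = make_json_policy("s3:GetObject", "arn:aws:s3:::mybucket/*")
+#   client.put_bucket_policy(Bucket='mybucket', Policy=policy)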
diff --git a/s3tests_boto3/functional/rgw_interactive.py b/s3tests_boto3/functional/rgw_interactive.py
new file mode 100644 (file)
index 0000000..873a145
--- /dev/null
@@ -0,0 +1,92 @@
+#!/usr/bin/python
+import boto3
+import os
+import random
+import string
+import itertools
+
+host = "localhost"
+port = 8000
+
+## AWS access key
+access_key = "0555b35654ad1656d804"
+
+## AWS secret key
+secret_key = "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="
+
+prefix = "YOURNAMEHERE-1234-"
+
+endpoint_url = "http://%s:%d" % (host, port)
+
+client = boto3.client(service_name='s3',
+                    aws_access_key_id=access_key,
+                    aws_secret_access_key=secret_key,
+                    endpoint_url=endpoint_url,
+                    use_ssl=False,
+                    verify=False)
+
+s3 = boto3.resource('s3', 
+                    use_ssl=False,
+                    verify=False,
+                    endpoint_url=endpoint_url, 
+                    aws_access_key_id=access_key,
+                    aws_secret_access_key=secret_key)
+
+def choose_bucket_prefix(template, max_len=30):
+    """
+    Choose a prefix for our test buckets, so they're easy to identify.
+
+    Fill the template with random filler, trimming it until the result is
+    as long as possible while still within max_len.
+    """
+    rand = ''.join(
+        random.choice(string.ascii_lowercase + string.digits)
+        for c in range(255)
+        )
+
+    while rand:
+        s = template.format(random=rand)
+        if len(s) <= max_len:
+            return s
+        rand = rand[:-1]
+
+    raise RuntimeError(
+        'Bucket prefix template is impossible to fulfill: {template!r}'.format(
+            template=template,
+            ),
+        )
+
+bucket_counter = itertools.count(1)
+
+def get_new_bucket_name():
+    """
+    Get a bucket name that probably does not exist.
+
+    We make every attempt to use a unique random prefix, so if a
+    bucket by this name happens to exist, it's ok if tests give
+    false negatives.
+    """
+    name = '{prefix}{num}'.format(
+        prefix=prefix,
+        num=next(bucket_counter),
+        )
+    return name
+
+def get_new_bucket(session=boto3, name=None, headers=None):
+    """
+    Get a bucket that exists and is empty.
+
+    Always recreates a bucket from scratch. This is useful to also
+    reset ACLs and such.
+    """
+    s3 = session.resource('s3', 
+                        use_ssl=False,
+                        verify=False,
+                        endpoint_url=endpoint_url, 
+                        aws_access_key_id=access_key,
+                        aws_secret_access_key=secret_key)
+    if name is None:
+        name = get_new_bucket_name()
+    bucket = s3.Bucket(name)
+    bucket_location = bucket.create()
+    return bucket
diff --git a/s3tests_boto3/functional/test_headers.py b/s3tests_boto3/functional/test_headers.py
new file mode 100644 (file)
index 0000000..04b2757
--- /dev/null
@@ -0,0 +1,771 @@
+import boto3
+from nose.tools import eq_ as eq
+from nose.plugins.attrib import attr
+import nose
+from botocore.exceptions import ClientError
+from email.utils import formatdate
+
+from .utils import assert_raises
+from .utils import _get_status_and_error_code
+from .utils import _get_status
+
+from . import (
+    get_client,
+    get_v2_client,
+    get_new_bucket,
+    get_new_bucket_name,
+    )
+
+def _add_header_create_object(headers, client=None):
+    """ Create a new bucket, add an object w/header customizations
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # pass in custom headers before PutObject call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutObject', add_headers)
+    client.put_object(Bucket=bucket_name, Key=key_name)
+
+    return bucket_name, key_name
+
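+# These helpers rely on botocore's event system: a handler registered for
+# 'before-call.s3.PutObject' is invoked with the serialized request, and
+# kwargs['params']['headers'] is the plain dict of HTTP headers about to be
+# sent, so mutating it injects or drops raw headers that the regular client
+# API would not allow.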
+
+def _add_header_create_bad_object(headers, client=None):
+    """ Create a new bucket, add an object with a header. This should cause a failure 
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # pass in custom headers before PutObject call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutObject', add_headers)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
+
+    return e
+
+
+def _remove_header_create_object(remove, client=None):
+    """ Create a new bucket, add an object without a header
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # remove custom headers before PutObject call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.PutObject', remove_header)
+    client.put_object(Bucket=bucket_name, Key=key_name)
+
+    return bucket_name, key_name
+
+def _remove_header_create_bad_object(remove, client=None):
+    """ Create a new bucket, add an object without a header. This should cause a failure
+    """
+    bucket_name = get_new_bucket()
+    if client == None:
+        client = get_client()
+    key_name = 'foo'
+
+    # remove custom headers before PutObject call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.PutObject', remove_header)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, Body='bar')
+
+    return e
+
+
+def _add_header_create_bucket(headers, client=None):
+    """ Create a new bucket, w/header customizations
+    """
+    bucket_name = get_new_bucket_name()
+    if client == None:
+        client = get_client()
+
+    # pass in custom headers before CreateBucket call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
+    client.create_bucket(Bucket=bucket_name)
+
+    return bucket_name
+
+
+def _add_header_create_bad_bucket(headers=None, client=None):
+    """ Create a new bucket, w/header customizations that should cause a failure 
+    """
+    bucket_name = get_new_bucket_name()
+    if client == None:
+        client = get_client()
+
+    # pass in custom headers before CreateBucket call
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+
+    return e
+
+
+def _remove_header_create_bucket(remove, client=None):
+    """ Create a new bucket, without a header
+    """
+    bucket_name = get_new_bucket_name()
+    if client == None:
+        client = get_client()
+
+    # remove custom headers before CreateBucket call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
+    client.create_bucket(Bucket=bucket_name)
+
+    return bucket_name
+
+def _remove_header_create_bad_bucket(remove, client=None):
+    """ Create a new bucket, without a header. This should cause a failure
+    """
+    bucket_name = get_new_bucket_name()
+    if client == None:
+        client = get_client()
+
+    # remove custom headers before CreateBucket call
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.CreateBucket', remove_header)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+
+    return e
+
+def tag(*tags):
+    def wrap(func):
+        for tag in tags:
+            setattr(func, tag, True)
+        return func
+    return wrap
+
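+# tag() stamps attributes onto the test function, much like
+# nose.plugins.attrib.attr, so suites can be filtered from the command line,
+# e.g. (illustrative invocation):
+#
+#   nosetests -a 'auth_common' s3tests_boto3.functional.test_headers
+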
+#
+# common tests
+#
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid MD5')
+@attr(assertion='fails 400')
+def test_object_create_bad_md5_invalid_short():
+    e = _add_header_create_bad_object({'Content-MD5':'YWJyYWNhZGFicmE='})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidDigest')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/mismatched MD5')
+@attr(assertion='fails 400')
+def test_object_create_bad_md5_bad():
+    e = _add_header_create_bad_object({'Content-MD5':'rL0Y20xC+Fzt72VPzMSk2A=='})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'BadDigest')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty MD5')
+@attr(assertion='fails 400')
+def test_object_create_bad_md5_empty():
+    e = _add_header_create_bad_object({'Content-MD5':''})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidDigest')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no MD5 header')
+@attr(assertion='succeeds')
+def test_object_create_bad_md5_none():
+    bucket_name, key_name = _remove_header_create_object('Content-MD5')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/Expect 200')
+@attr(assertion='garbage, but S3 succeeds!')
+def test_object_create_bad_expect_mismatch():
+    bucket_name, key_name = _add_header_create_object({'Expect': 200})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty expect')
+@attr(assertion='succeeds ... should it?')
+def test_object_create_bad_expect_empty():
+    bucket_name, key_name = _add_header_create_object({'Expect': ''})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no expect')
+@attr(assertion='succeeds')
+def test_object_create_bad_expect_none():
+    bucket_name, key_name = _remove_header_create_object('Expect')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty content length')
+@attr(assertion='fails 400')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@attr('fails_on_rgw')
+def test_object_create_bad_contentlength_empty():
+    e = _add_header_create_bad_object({'Content-Length':''})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/negative content length')
+@attr(assertion='fails 400')
+@attr('fails_on_mod_proxy_fcgi')
+def test_object_create_bad_contentlength_negative():
+    client = get_client()
+    bucket_name = get_new_bucket()
+    key_name = 'foo'
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key_name, ContentLength=-1)
+    status = _get_status(e.response)
+    eq(status, 400)
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no content length')
+@attr(assertion='fails 411')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@attr('fails_on_rgw')
+def test_object_create_bad_contentlength_none():
+    remove = 'Content-Length'
+    e = _remove_header_create_bad_object(remove)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 411)
+    eq(error_code, 'MissingContentLength')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/content type text/plain')
+@attr(assertion='succeeds')
+def test_object_create_bad_contenttype_invalid():
+    bucket_name, key_name = _add_header_create_object({'Content-Type': 'text/plain'})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty content type')
+@attr(assertion='succeeds')
+def test_object_create_bad_contenttype_empty():
+    client = get_client()
+    key_name = 'foo'
+    bucket_name = get_new_bucket()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar', ContentType='')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no content type')
+@attr(assertion='succeeds')
+def test_object_create_bad_contenttype_none():
+    bucket_name = get_new_bucket()
+    key_name = 'foo'
+    client = get_client()
+    # as long as ContentType isn't specified in put_object, it isn't included in the request
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty authorization')
+@attr(assertion='fails 403')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
+@attr('fails_on_rgw')
+def test_object_create_bad_authorization_empty():
+    e = _add_header_create_bad_object({'Authorization': ''})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date and x-amz-date')
+@attr(assertion='succeeds')
+# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, instead of only 'X-Amz-Date'
+@attr('fails_on_rgw')
+def test_object_create_date_and_amz_date():
+    date = formatdate(usegmt=True)
+    bucket_name, key_name = _add_header_create_object({'Date': date, 'X-Amz-Date': date})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/x-amz-date and no date')
+@attr(assertion='succeeds')
+# TODO: remove 'fails_on_rgw' once we have learned how to pass both the 'Date' and 'X-Amz-Date' headers during signing, instead of only 'X-Amz-Date'
+@attr('fails_on_rgw')
+def test_object_create_amz_date_and_no_date():
+    date = formatdate(usegmt=True)
+    bucket_name, key_name = _add_header_create_object({'Date': '', 'X-Amz-Date': date})
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+# the teardown is really messed up here. check it out
+@tag('auth_common')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no authorization')
+@attr(assertion='fails 403')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the authorization header
+@attr('fails_on_rgw')
+def test_object_create_bad_authorization_none():
+    e = _remove_header_create_bad_object('Authorization')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no content length')
+@attr(assertion='succeeds')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@attr('fails_on_rgw')
+def test_bucket_create_contentlength_none():
+    remove = 'Content-Length'
+    _remove_header_create_bucket(remove)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='acls')
+@attr(operation='set w/no content length')
+@attr(assertion='succeeds')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@attr('fails_on_rgw')
+def test_object_acl_create_contentlength_none():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    remove = 'Content-Length'
+    def remove_header(**kwargs):
+        if (remove in kwargs['params']['headers']):
+            del kwargs['params']['headers'][remove]
+
+    client.meta.events.register('before-call.s3.PutObjectAcl', remove_header)
+    client.put_object_acl(Bucket=bucket_name, Key='foo', ACL='public-read')
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='acls')
+@attr(operation='set w/invalid permission')
+@attr(assertion='fails 400')
+def test_bucket_put_bad_canned_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    headers = {'x-amz-acl': 'public-ready'}
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutBucketAcl', add_headers)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
+    status = _get_status(e.response)
+    eq(status, 400)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/expect 200')
+@attr(assertion='garbage, but S3 succeeds!')
+def test_bucket_create_bad_expect_mismatch():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    headers = {'Expect': 200}
+    add_headers = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.CreateBucket', add_headers)
+    client.create_bucket(Bucket=bucket_name)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/expect empty')
+@attr(assertion='garbage, but S3 succeeds!')
+def test_bucket_create_bad_expect_empty():
+    headers = {'Expect': ''}
+    _add_header_create_bucket(headers)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty content length')
+@attr(assertion='fails 400')
+# TODO: The request isn't even making it to the RGW past the frontend
+# This test had 'fails_on_rgw' before the move to boto3
+@attr('fails_on_rgw')
+def test_bucket_create_bad_contentlength_empty():
+    headers = {'Content-Length': ''}
+    e = _add_header_create_bad_bucket(headers)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/negative content length')
+@attr(assertion='fails 400')
+@attr('fails_on_mod_proxy_fcgi')
+def test_bucket_create_bad_contentlength_negative():
+    headers = {'Content-Length': '-1'}
+    e = _add_header_create_bad_bucket(headers)
+    status = _get_status(e.response)
+    eq(status, 400)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no content length')
+@attr(assertion='succeeds')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the content-length header
+@attr('fails_on_rgw')
+def test_bucket_create_bad_contentlength_none():
+    remove = 'Content-Length'
+    _remove_header_create_bucket(remove)
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty authorization')
+@attr(assertion='fails 403')
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@attr('fails_on_rgw')
+def test_bucket_create_bad_authorization_empty():
+    headers = {'Authorization': ''}
+    e = _add_header_create_bad_bucket(headers)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_common')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no authorization')
+@attr(assertion='fails 403')
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@attr('fails_on_rgw')
+def test_bucket_create_bad_authorization_none():
+    e = _remove_header_create_bad_bucket('Authorization')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid MD5')
+@attr(assertion='fails 400')
+def test_object_create_bad_md5_invalid_garbage_aws2():
+    v2_client = get_v2_client()
+    headers = {'Content-MD5': 'AWS HAHAHA'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidDigest')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/content length too short')
+@attr(assertion='fails 400')
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the Content-Length header
+@attr('fails_on_rgw')
+def test_object_create_bad_contentlength_mismatch_below_aws2():
+    v2_client = get_v2_client()
+    content = 'bar'
+    length = len(content) - 1
+    headers = {'Content-Length': str(length)}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'BadDigest')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/incorrect authorization')
+@attr(assertion='fails 403')
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@attr('fails_on_rgw')
+def test_object_create_bad_authorization_incorrect_aws2():
+    v2_client = get_v2_client()
+    headers = {'Authorization': 'AWS AKIAIGR7ZNNBHC5BKSUB:FWeDfwojDSdS2Ztmpfeubhd9isU='}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'InvalidDigest')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid authorization')
+@attr(assertion='fails 400')
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@attr('fails_on_rgw')
+def test_object_create_bad_authorization_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'Authorization': 'AWS HAHAHA'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty user agent')
+@attr(assertion='succeeds')
+def test_object_create_bad_ua_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'User-Agent': ''}
+    bucket_name, key_name = _add_header_create_object(headers, v2_client)
+    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no user agent')
+@attr(assertion='succeeds')
+def test_object_create_bad_ua_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'User-Agent'
+    bucket_name, key_name = _remove_header_create_object(remove, v2_client)
+    v2_client.put_object(Bucket=bucket_name, Key=key_name, Body='bar')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/invalid date')
+@attr(assertion='fails 403')
+def test_object_create_bad_date_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Bad Date'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/empty date')
+@attr(assertion='fails 403')
+def test_object_create_bad_date_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': ''}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/no date')
+@attr(assertion='fails 403')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
+@attr('fails_on_rgw')
+def test_object_create_bad_date_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'x-amz-date'
+    e = _remove_header_create_bad_object(remove, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date in past')
+@attr(assertion='fails 403')
+def test_object_create_bad_date_before_today_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'RequestTimeTooSkewed')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date before epoch')
+@attr(assertion='fails 403')
+def test_object_create_bad_date_before_epoch_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create w/date after 9999')
+@attr(assertion='fails 403')
+def test_object_create_bad_date_after_end_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 9999 21:53:04 GMT'}
+    e = _add_header_create_bad_object(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'RequestTimeTooSkewed')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/invalid authorization')
+@attr(assertion='fails 400')
+# TODO: remove 'fails_on_rgw' once we have learned how to manipulate the authorization header
+@attr('fails_on_rgw')
+def test_bucket_create_bad_authorization_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'Authorization': 'AWS HAHAHA'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty user agent')
+@attr(assertion='succeeds')
+def test_bucket_create_bad_ua_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'User-Agent': ''}
+    _add_header_create_bucket(headers, v2_client)
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no user agent')
+@attr(assertion='succeeds')
+def test_bucket_create_bad_ua_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'User-Agent'
+    _remove_header_create_bucket(remove, v2_client)
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/invalid date')
+@attr(assertion='fails 403')
+def test_bucket_create_bad_date_invalid_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Bad Date'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/empty date')
+@attr(assertion='fails 403')
+def test_bucket_create_bad_date_empty_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': ''}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/no date')
+@attr(assertion='fails 403')
+# TODO: remove 'fails_on_rgw' once we have learned how to remove the date header
+@attr('fails_on_rgw')
+def test_bucket_create_bad_date_none_aws2():
+    v2_client = get_v2_client()
+    remove = 'x-amz-date'
+    e = _remove_header_create_bad_bucket(remove, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/date in past')
+@attr(assertion='fails 403')
+def test_bucket_create_bad_date_before_today_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 2010 21:53:04 GMT'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'RequestTimeTooSkewed')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/date in future')
+@attr(assertion='fails 403')
+def test_bucket_create_bad_date_after_today_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 2030 21:53:04 GMT'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'RequestTimeTooSkewed')
+
+@tag('auth_aws2')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/date before epoch')
+@attr(assertion='fails 403')
+def test_bucket_create_bad_date_before_epoch_aws2():
+    v2_client = get_v2_client()
+    headers = {'x-amz-date': 'Tue, 07 Jul 1950 21:53:04 GMT'}
+    e = _add_header_create_bad_bucket(headers, v2_client)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
diff --git a/s3tests_boto3/functional/test_iam.py b/s3tests_boto3/functional/test_iam.py
new file mode 100644 (file)
index 0000000..bf7236f
--- /dev/null
@@ -0,0 +1,970 @@
+import json
+
+from botocore.exceptions import ClientError
+from nose.plugins.attrib import attr
+from nose.tools import eq_ as eq
+
+from s3tests_boto3.functional.utils import assert_raises
+from s3tests_boto3.functional.test_s3 import _multipart_upload
+from . import (
+    get_alt_client,
+    get_iam_client,
+    get_new_bucket,
+    get_iam_s3client,
+    get_alt_iam_client,
+    get_alt_user_id,
+)
+from .utils import _get_status, _get_status_and_error_code
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify Put User Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_put_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.delete_user_policy(PolicyName='AllAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify Put User Policy with invalid user')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_put_user_policy_invalid_user():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    eq(status, 404)
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify Put User Policy using parameter value outside limit')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_put_user_policy_parameter_limit():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [{
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}] * 1000
+         }
+    )
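+    # Assumption: 'AllAccessPolicy' * 10 (150 characters) exceeds IAM's
+    # 128-character policy name limit, and 1000 copies of the statement
+    # exceed the inline policy document size limit, hence the expected 400.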
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy' * 10, UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 400)
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify Put User Policy using invalid policy document elements')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_rgw')
+def test_put_user_policy_invalid_element():
+    client = get_iam_client()
+
+    # With Version other than 2012-10-17
+    policy_document = json.dumps(
+        {"Version": "2010-10-17",
+         "Statement": [{
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}]
+         }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 400)
+
+    # With no Statement
+    policy_document = json.dumps(
+        {
+            "Version": "2012-10-17",
+        }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 400)
+
+    # with same Sid for 2 statements
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [
+             {"Sid": "98AB54CF",
+              "Effect": "Allow",
+              "Action": "*",
+              "Resource": "*"},
+             {"Sid": "98AB54CF",
+              "Effect": "Allow",
+              "Action": "*",
+              "Resource": "*"}]
+         }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 400)
+
+    # with Principal
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [{
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*",
+             "Principal": "arn:aws:iam:::username"}]
+         }
+    )
+    e = assert_raises(ClientError, client.put_user_policy, PolicyDocument=policy_document,
+                      PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 400)
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify Put a policy that already exists')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_put_existing_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}
+         }
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                           UserName=get_alt_user_id())
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify List User policies')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_list_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}
+         }
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.list_user_policies(UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify List User policies with invalid user')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_list_user_policy_invalid_user():
+    client = get_iam_client()
+    e = assert_raises(ClientError, client.list_user_policies, UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    eq(status, 404)
+
+
+@attr(resource='user-policy')
+@attr(method='get')
+@attr(operation='Verify Get User policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_get_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.get_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.delete_user_policy(PolicyName='AllAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='get')
+@attr(operation='Verify Get User Policy with invalid user')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_get_user_policy_invalid_user():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy',
+                      UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    eq(status, 404)
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@attr(resource='user-policy')
+@attr(method='get')
+@attr(operation='Verify Get User Policy with invalid policy name')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_rgw')
+def test_get_user_policy_invalid_policy_name():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                           UserName=get_alt_user_id())
+    e = assert_raises(ClientError, client.get_user_policy, PolicyName='non-existing-policy-name',
+                      UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 404)
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+
+
+@attr(resource='user-policy')
+@attr(method='get')
+@attr(operation='Verify Get Deleted User Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_rgw')
+def test_get_deleted_user_policy():
+    client = get_iam_client()
+
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document, PolicyName='AllAccessPolicy',
+                           UserName=get_alt_user_id())
+    client.delete_user_policy(PolicyName='AllAccessPolicy', UserName=get_alt_user_id())
+    e = assert_raises(ClientError, client.get_user_policy, PolicyName='AllAccessPolicy',
+                      UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 404)
+
+
+@attr(resource='user-policy')
+@attr(method='get')
+@attr(operation='Verify Get a policy from multiple policies for a user')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_get_user_policy_from_multiple_policies():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy1',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy2',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.get_user_policy(PolicyName='AllowAccessPolicy2',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy1',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy2',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='delete')
+@attr(operation='Verify Delete User Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_delete_user_policy():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='delete')
+@attr(operation='Verify Delete User Policy with invalid user')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_delete_user_policy_invalid_user():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    e = assert_raises(ClientError, client.delete_user_policy, PolicyName='AllAccessPolicy',
+                      UserName="some-non-existing-user-id")
+    status = _get_status(e.response)
+    eq(status, 404)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='delete')
+@attr(operation='Verify Delete User Policy with invalid policy name')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_delete_user_policy_invalid_policy_name():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    e = assert_raises(ClientError, client.delete_user_policy, PolicyName='non-existing-policy-name',
+                      UserName=get_alt_user_id())
+    status = _get_status(e.response)
+    eq(status, 404)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='delete')
+@attr(operation='Verify Delete multiple User policies for a user')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_delete_user_policy_from_multiple_policies():
+    client = get_iam_client()
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": "*",
+             "Resource": "*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy1',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy2',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy3',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy1',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy2',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.get_user_policy(PolicyName='AllowAccessPolicy3',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy3',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Allow Bucket Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_allow_bucket_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+    s3_client_iam.put_object(Bucket=bucket, Key='foo', Body='bar')
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:ListBucket", "s3:DeleteBucket"],
+             "Resource": f"arn:aws:s3:::{bucket}"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = s3_client_alt.list_objects(Bucket=bucket)
+    object_found = False
+    for object_received in response['Contents']:
+        if "foo" == object_received['Key']:
+            object_found = True
+            break
+    if not object_found:
+        raise AssertionError("Object is not listed")
+
+    response = s3_client_iam.delete_object(Bucket=bucket, Key='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    response = s3_client_alt.delete_bucket(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    response = s3_client_iam.list_buckets()
+    for bucket_info in response['Buckets']:
+        if bucket == bucket_info['Name']:
+            raise AssertionError("deleted bucket is still listed")
+
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Deny Bucket Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_dbstore')
+def test_deny_bucket_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client = get_alt_client()
+    bucket = get_new_bucket(client=s3_client)
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Deny",
+             "Action": ["s3:ListAllMyBuckets", "s3:DeleteBucket"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+
+    response = client.put_user_policy(PolicyDocument=policy_document_deny,
+                                      PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    # list_buckets() takes no Bucket parameter; the Deny on
+    # s3:ListAllMyBuckets is what should make this call fail.
+    e = assert_raises(ClientError, s3_client.list_buckets)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    e = assert_raises(ClientError, s3_client.delete_bucket, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = s3_client.delete_bucket(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Allow Object Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_allow_object_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
+             "Resource": f"arn:aws:s3:::{bucket}/*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    s3_client_alt.put_object(Bucket=bucket, Key='foo', Body='bar')
+    response = s3_client_alt.get_object(Bucket=bucket, Key='foo')
+    body = response['Body'].read()
+    if isinstance(body, bytes):
+        body = body.decode()
+    eq(body, "bar")
+    response = s3_client_alt.delete_object(Bucket=bucket, Key='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    e = assert_raises(ClientError, s3_client_iam.get_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+    response = s3_client_iam.delete_bucket(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Deny Object Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_dbstore')
+def test_deny_object_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    bucket = get_new_bucket(client=s3_client_alt)
+    s3_client_alt.put_object(Bucket=bucket, Key='foo', Body='bar')
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [{
+             "Effect": "Deny",
+             "Action": ["s3:PutObject", "s3:GetObject", "s3:DeleteObject"],
+             "Resource": f"arn:aws:s3:::{bucket}/*"}, {
+             "Effect": "Allow",
+             "Action": ["s3:DeleteBucket"],
+             "Resource": f"arn:aws:s3:::{bucket}"}]}
+    )
+    client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy',
+                           UserName=get_alt_user_id())
+
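+    # An explicit Deny overrides the alt user's implicit access to the
+    # bucket it owns, so its own object operations should now fail.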
+    e = assert_raises(ClientError, s3_client_alt.put_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    e = assert_raises(ClientError, s3_client_alt.get_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    e = assert_raises(ClientError, s3_client_alt.delete_object, Bucket=bucket, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Allow Multipart Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_allow_multipart_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:ListBucketMultipartUploads", "s3:AbortMultipartUpload"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document_allow,
+                                      PolicyName='AllowAccessPolicy', UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    key = "mymultipart"
+    mb = 1024 * 1024
+
+    (upload_id, _, _) = _multipart_upload(client=s3_client_iam, bucket_name=bucket, key=key,
+                                          size=5 * mb)
+    response = s3_client_alt.list_multipart_uploads(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = s3_client_alt.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    response = s3_client_iam.delete_bucket(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Deny Multipart Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_dbstore')
+def test_deny_multipart_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client = get_alt_client()
+    bucket = get_new_bucket(client=s3_client)
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Deny",
+             "Action": ["s3:ListBucketMultipartUploads", "s3:AbortMultipartUpload"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+    response = client.put_user_policy(PolicyDocument=policy_document_deny,
+                                      PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    key = "mymultipart"
+    mb = 1024 * 1024
+
+    (upload_id, _, _) = _multipart_upload(client=s3_client, bucket_name=bucket, key=key,
+                                          size=5 * mb)
+
+    e = assert_raises(ClientError, s3_client.list_multipart_uploads, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    e = assert_raises(ClientError, s3_client.abort_multipart_upload, Bucket=bucket,
+                      Key=key, UploadId=upload_id)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    response = s3_client.delete_bucket(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Allow Tagging Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_dbstore')
+def test_allow_tagging_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client_alt = get_alt_client()
+    s3_client_iam = get_iam_s3client()
+    bucket = get_new_bucket(client=s3_client_iam)
+
+    policy_document_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Allow",
+             "Action": ["s3:PutBucketTagging", "s3:GetBucketTagging",
+                        "s3:PutObjectTagging", "s3:GetObjectTagging"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document_allow, PolicyName='AllowAccessPolicy',
+                           UserName=get_alt_user_id())
+    tags = {'TagSet': [{'Key': 'Hello', 'Value': 'World'}, ]}
+
+    response = s3_client_alt.put_bucket_tagging(Bucket=bucket, Tagging=tags)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = s3_client_alt.get_bucket_tagging(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(response['TagSet'][0]['Key'], 'Hello')
+    eq(response['TagSet'][0]['Value'], 'World')
+
+    obj_key = 'obj'
+    response = s3_client_iam.put_object(Bucket=bucket, Key=obj_key, Body='obj_body')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = s3_client_alt.put_object_tagging(Bucket=bucket, Key=obj_key, Tagging=tags)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = s3_client_alt.get_object_tagging(Bucket=bucket, Key=obj_key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(response['TagSet'], tags['TagSet'])
+
+    response = s3_client_iam.delete_object(Bucket=bucket, Key=obj_key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    response = s3_client_iam.delete_bucket(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='s3 Actions')
+@attr(operation='Verify Deny Tagging Actions in user Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_dbstore')
+def test_deny_tagging_actions_in_user_policy():
+    client = get_iam_client()
+    s3_client = get_alt_client()
+    bucket = get_new_bucket(client=s3_client)
+
+    policy_document_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {
+             "Effect": "Deny",
+             "Action": ["s3:PutBucketTagging", "s3:GetBucketTagging",
+                        "s3:PutObjectTagging", "s3:DeleteObjectTagging"],
+             "Resource": "arn:aws:s3:::*"}}
+    )
+    client.put_user_policy(PolicyDocument=policy_document_deny, PolicyName='DenyAccessPolicy',
+                           UserName=get_alt_user_id())
+    tags = {'TagSet': [{'Key': 'Hello', 'Value': 'World'}, ]}
+
+    e = assert_raises(ClientError, s3_client.put_bucket_tagging, Bucket=bucket, Tagging=tags)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    e = assert_raises(ClientError, s3_client.get_bucket_tagging, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    obj_key = 'obj'
+    response = s3_client.put_object(Bucket=bucket, Key=obj_key, Body='obj_body')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    e = assert_raises(ClientError, s3_client.put_object_tagging, Bucket=bucket, Key=obj_key,
+                      Tagging=tags)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    e = assert_raises(ClientError, s3_client.delete_object_tagging, Bucket=bucket, Key=obj_key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    response = s3_client.delete_object(Bucket=bucket, Key=obj_key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    response = s3_client.delete_bucket(Bucket=bucket)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify conflicting user policy statements')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_dbstore')
+def test_verify_conflicting_user_policy_statements():
+    s3client = get_alt_client()
+    bucket = get_new_bucket(client=s3client)
+    policy_document = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": [
+             {"Sid": "98AB54CG",
+              "Effect": "Allow",
+              "Action": "s3:ListBucket",
+              "Resource": f"arn:aws:s3:::{bucket}"},
+             {"Sid": "98AB54CA",
+              "Effect": "Deny",
+              "Action": "s3:ListBucket",
+              "Resource": f"arn:aws:s3:::{bucket}"}
+         ]}
+    )
+    client = get_iam_client()
+    response = client.put_user_policy(PolicyDocument=policy_document, PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
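+    # Within a single policy, an explicit Deny wins over an Allow for the
+    # same action, so the listing must fail.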
+    e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(method='put')
+@attr(operation='Verify conflicting user policies')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+@attr('fails_on_dbstore')
+def test_verify_conflicting_user_policies():
+    s3client = get_alt_client()
+    bucket = get_new_bucket(client=s3client)
+    policy_allow = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {"Sid": "98AB54CG",
+                       "Effect": "Allow",
+                       "Action": "s3:ListBucket",
+                       "Resource": f"arn:aws:s3:::{bucket}"}}
+    )
+    policy_deny = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {"Sid": "98AB54CGZ",
+                       "Effect": "Deny",
+                       "Action": "s3:ListBucket",
+                       "Resource": f"arn:aws:s3:::{bucket}"}}
+    )
+    client = get_iam_client()
+    response = client.put_user_policy(PolicyDocument=policy_allow, PolicyName='AllowAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.put_user_policy(PolicyDocument=policy_deny, PolicyName='DenyAccessPolicy',
+                                      UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
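+    # Policies are evaluated together, so the Deny in one attached policy
+    # overrides the Allow in the other.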
+    e = assert_raises(ClientError, s3client.list_objects, Bucket=bucket)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    response = client.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.delete_user_policy(PolicyName='DenyAccessPolicy',
+                                         UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='user-policy')
+@attr(operation='Verify Allow Actions for IAM user policies')
+@attr(assertion='succeeds')
+@attr('user-policy')
+@attr('test_of_iam')
+def test_verify_allow_iam_actions():
+    policy1 = json.dumps(
+        {"Version": "2012-10-17",
+         "Statement": {"Sid": "98AB54CGA",
+                       "Effect": "Allow",
+                       "Action": ["iam:PutUserPolicy", "iam:GetUserPolicy",
+                                  "iam:ListUserPolicies", "iam:DeleteUserPolicy"],
+                       "Resource": f"arn:aws:iam:::user/{get_alt_user_id()}"}}
+    )
+    client1 = get_iam_client()
+    iam_client_alt = get_alt_iam_client()
+
+    response = client1.put_user_policy(PolicyDocument=policy1, PolicyName='AllowAccessPolicy',
+                                       UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = iam_client_alt.get_user_policy(PolicyName='AllowAccessPolicy',
+                                              UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = iam_client_alt.list_user_policies(UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = iam_client_alt.delete_user_policy(PolicyName='AllowAccessPolicy',
+                                                 UserName=get_alt_user_id())
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
diff --git a/s3tests_boto3/functional/test_s3.py b/s3tests_boto3/functional/test_s3.py
new file mode 100644 (file)
index 0000000..20ae4f1
--- /dev/null
@@ -0,0 +1,14927 @@
+import boto3
+import botocore.session
+from botocore.exceptions import ClientError
+from botocore.exceptions import ParamValidationError
+from nose.tools import eq_ as eq
+from nose.plugins.attrib import attr
+from nose.plugins.skip import SkipTest
+import isodate
+import email.utils
+import datetime
+import threading
+import re
+import pytz
+from collections import OrderedDict
+import requests
+import json
+import base64
+import hmac
+import hashlib
+import xml.etree.ElementTree as ET
+import time
+import operator
+import nose
+import os
+import string
+import random
+import socket
+import dateutil.parser
+import ssl
+from collections import namedtuple
+from collections import defaultdict
+from io import StringIO
+
+from email.header import decode_header
+
+from .utils import assert_raises
+from .utils import generate_random
+from .utils import _get_status_and_error_code
+from .utils import _get_status
+
+from .policy import Policy, Statement, make_json_policy
+
+from . import (
+    get_client,
+    get_prefix,
+    get_unauthenticated_client,
+    get_bad_auth_client,
+    get_v2_client,
+    get_new_bucket,
+    get_new_bucket_name,
+    get_new_bucket_resource,
+    get_config_is_secure,
+    get_config_host,
+    get_config_port,
+    get_config_endpoint,
+    get_config_ssl_verify,
+    get_main_aws_access_key,
+    get_main_aws_secret_key,
+    get_main_display_name,
+    get_main_user_id,
+    get_main_email,
+    get_main_api_name,
+    get_alt_aws_access_key,
+    get_alt_aws_secret_key,
+    get_alt_display_name,
+    get_alt_user_id,
+    get_alt_email,
+    get_alt_client,
+    get_tenant_client,
+    get_tenant_iam_client,
+    get_tenant_user_id,
+    get_buckets_list,
+    get_objects_list,
+    get_main_kms_keyid,
+    get_secondary_kms_keyid,
+    get_svc_client,
+    get_cloud_storage_class,
+    get_cloud_retain_head_object,
+    get_cloud_regular_storage_class,
+    get_cloud_target_path,
+    get_cloud_target_storage_class,
+    get_cloud_client,
+    nuke_prefixed_buckets,
+    configured_storage_classes,
+    get_lc_debug_interval,
+    )
+
+
+def _bucket_is_empty(bucket):
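+    """Return True if the given bucket resource contains no objects."""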
+    is_empty = True
+    for obj in bucket.objects.all():
+        is_empty = False
+        break
+    return is_empty
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty buckets return no contents')
+def test_bucket_list_empty():
+    bucket = get_new_bucket_resource()
+    is_empty = _bucket_is_empty(bucket)
+    eq(is_empty, True)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='distinct buckets have different contents')
+@attr('list-objects-v2')
+def test_bucket_list_distinct():
+    bucket1 = get_new_bucket_resource()
+    bucket2 = get_new_bucket_resource()
+    obj = bucket1.put_object(Body='str', Key='asdf')
+    is_empty = _bucket_is_empty(bucket2)
+    eq(is_empty, True)
+
+def _create_objects(bucket=None, bucket_name=None, keys=None):
+    """
+    Populate a (specified or new) bucket with objects with
+    specified names (and contents identical to their names).
+    """
+    if keys is None:
+        keys = []
+    if bucket_name is None:
+        bucket_name = get_new_bucket_name()
+    if bucket is None:
+        bucket = get_new_bucket_resource(name=bucket_name)
+
+    for key in keys:
+        obj = bucket.put_object(Body=key, Key=key)
+
+    return bucket_name
+
+def _get_keys(response):
+    """
+    Return a list of strings that are the keys from a client.list_objects() response.
+    """
+    keys = []
+    if 'Contents' in response:
+        objects_list = response['Contents']
+        keys = [obj['Key'] for obj in objects_list]
+    return keys
+
+def _get_prefixes(response):
+    """
+    Return a list of strings that are the prefixes from a client.list_objects() response.
+    """
+    prefixes = []
+    if 'CommonPrefixes' in response:
+        prefix_list = response['CommonPrefixes']
+        prefixes = [prefix['Prefix'] for prefix in prefix_list]
+    return prefixes
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='pagination w/max_keys=2, no marker')
+@attr('fails_on_dbstore')
+def test_bucket_list_many():
+    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=2)
+    keys = _get_keys(response)
+    eq(len(keys), 2)
+    eq(keys, ['bar', 'baz'])
+    eq(response['IsTruncated'], True)
+
+    response = client.list_objects(Bucket=bucket_name, Marker='baz', MaxKeys=2)
+    keys = _get_keys(response)
+    eq(len(keys), 1)
+    eq(response['IsTruncated'], False)
+    eq(keys, ['foo'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='pagination w/max_keys=2, no marker')
+@attr('list-objects-v2')
+@attr('fails_on_dbstore')
+def test_bucket_listv2_many():
+    bucket_name = _create_objects(keys=['foo', 'bar', 'baz'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=2)
+    keys = _get_keys(response)
+    eq(len(keys), 2)
+    eq(keys, ['bar', 'baz'])
+    eq(response['IsTruncated'], True)
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='baz', MaxKeys=2)
+    keys = _get_keys(response)
+    eq(len(keys), 1)
+    eq(response['IsTruncated'], False)
+    eq(keys, ['foo'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='keycount in listobjectsv2')
+@attr('list-objects-v2')
+def test_basic_key_count():
+    client = get_client()
+    bucket_name = get_new_bucket_name()
+    client.create_bucket(Bucket=bucket_name)
+    for j in range(5):
+        client.put_object(Bucket=bucket_name, Key=str(j))
+    response1 = client.list_objects_v2(Bucket=bucket_name)
+    eq(response1['KeyCount'], 5)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefixes in multi-component object names')
+def test_bucket_list_delimiter_basic():
+    bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
+    eq(response['Delimiter'], '/')
+    keys = _get_keys(response)
+    eq(keys, ['asdf'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    eq(prefixes, ['foo/', 'quux/'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefixes in multi-component object names')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_basic():
+    bucket_name = _create_objects(keys=['foo/bar', 'foo/bar/xyzzy', 'quux/thud', 'asdf'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
+    eq(response['Delimiter'], '/')
+    keys = _get_keys(response)
+    eq(keys, ['asdf'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    eq(prefixes, ['foo/', 'quux/'])
+    eq(response['KeyCount'], len(prefixes) + len(keys))
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='test url encoding')
+@attr('list-objects-v2')
+def test_bucket_listv2_encoding_basic():
+    bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', EncodingType='url')
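+    # With EncodingType='url' the returned keys and prefixes are
+    # percent-encoded: '+' becomes '%2B' and ' ' becomes '%20'.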
+    eq(response['Delimiter'], '/')
+    keys = _get_keys(response)
+    eq(keys, ['asdf%2Bb'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 3)
+    eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='test url encoding')
+@attr('list-objects')
+def test_bucket_list_encoding_basic():
+    bucket_name = _create_objects(keys=['foo+1/bar', 'foo/bar/xyzzy', 'quux ab/thud', 'asdf+b'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/', EncodingType='url')
+    eq(response['Delimiter'], '/')
+    keys = _get_keys(response)
+    eq(keys, ['asdf%2Bb'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 3)
+    eq(prefixes, ['foo%2B1/', 'foo/', 'quux%20ab/'])
+
+
+def validate_bucket_list(bucket_name, prefix, delimiter, marker, max_keys,
+                         is_truncated, check_objs, check_prefixes, next_marker):
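+    """
+    Fetch a single page of list_objects() results with the given prefix,
+    delimiter, marker and max-keys; assert the page's keys and common
+    prefixes, and return NextMarker for resuming the listing.
+    """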
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter=delimiter, Marker=marker, MaxKeys=max_keys, Prefix=prefix)
+    eq(response['IsTruncated'], is_truncated)
+    if 'NextMarker' not in response:
+        response['NextMarker'] = None
+    eq(response['NextMarker'], next_marker)
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+
+    eq(len(keys), len(check_objs))
+    eq(len(prefixes), len(check_prefixes))
+    eq(keys, check_objs)
+    eq(prefixes, check_prefixes)
+
+    return response['NextMarker']
+
+def validate_bucket_listv2(bucket_name, prefix, delimiter, continuation_token, max_keys,
+                         is_truncated, check_objs, check_prefixes, last=False):
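+    """
+    ListObjectsV2 counterpart of validate_bucket_list(): pages with
+    ContinuationToken when one is given (otherwise an empty StartAfter),
+    asserts the page contents, and returns NextContinuationToken.
+    """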
+    client = get_client()
+
+    params = dict(Bucket=bucket_name, Delimiter=delimiter, MaxKeys=max_keys, Prefix=prefix)
+    if continuation_token is not None:
+        params['ContinuationToken'] = continuation_token
+    else:
+        params['StartAfter'] = ''
+    response = client.list_objects_v2(**params)
+    eq(response['IsTruncated'], is_truncated)
+    if 'NextContinuationToken' not in response:
+        response['NextContinuationToken'] = None
+    if last:
+        eq(response['NextContinuationToken'], None)
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+
+    eq(len(keys), len(check_objs))
+    eq(len(prefixes), len(check_prefixes))
+    eq(keys, check_objs)
+    eq(prefixes, check_prefixes)
+
+    return response['NextContinuationToken']
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefixes in multi-component object names')
+@attr('fails_on_dbstore')
+def test_bucket_list_delimiter_prefix():
+    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
+
+    delim = '/'
+    marker = ''
+    prefix = ''
+
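+    # Walk the listing one entry per page: each call returns either a key or
+    # a rolled-up common prefix, and NextMarker resumes where the last page
+    # stopped.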
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['asdf'], [], 'asdf')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['boo/'], 'boo/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['cquux/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['asdf'], ['boo/'], 'boo/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['cquux/'], None)
+
+    prefix = 'boo/'
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['boo/bar'], [], 'boo/bar')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['boo/baz/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['boo/bar'], ['boo/baz/'], None)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefixes in multi-component object names')
+@attr('list-objects-v2')
+@attr('fails_on_dbstore')
+def test_bucket_listv2_delimiter_prefix():
+    bucket_name = _create_objects(keys=['asdf', 'boo/bar', 'boo/baz/xyzzy', 'cquux/thud', 'cquux/bla'])
+
+    delim = '/'
+    continuation_token = ''
+    prefix = ''
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['asdf'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, True, [], ['boo/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['cquux/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['asdf'], ['boo/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 2, False, [], ['cquux/'], last=True)
+
+    prefix = 'boo/'
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['boo/bar'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['boo/baz/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['boo/bar'], ['boo/baz/'], last=True)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefix and delimiter handling when object ends with delimiter')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_prefix_ends_with_delimiter():
+    bucket_name = _create_objects(keys=['asdf/'])
+    validate_bucket_listv2(bucket_name, 'asdf/', '/', None, 1000, False, ['asdf/'], [], last=True)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefix and delimiter handling when object ends with delimiter')
+def test_bucket_list_delimiter_prefix_ends_with_delimiter():
+    bucket_name = _create_objects(keys=['asdf/'])
+    validate_bucket_list(bucket_name, 'asdf/', '/', '', 1000, False, ['asdf/'], [], None)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-slash delimiter characters')
+def test_bucket_list_delimiter_alt():
+    bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='a')
+    eq(response['Delimiter'], 'a')
+
+    keys = _get_keys(response)
+    # foo contains no 'a' and so is a complete key
+    eq(keys, ['foo'])
+
+    # bar, baz, and cab should be broken up by the 'a' delimiters
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    eq(prefixes, ['ba', 'ca'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='non-slash delimiter characters')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_alt():
+    bucket_name = _create_objects(keys=['bar', 'baz', 'cab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a')
+    eq(response['Delimiter'], 'a')
+
+    keys = _get_keys(response)
+    # foo contains no 'a' and so is a complete key
+    eq(keys, ['foo'])
+
+    # bar, baz, and cab should be broken up by the 'a' delimiters
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    eq(prefixes, ['ba', 'ca'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefixes starting with underscore')
+@attr('fails_on_dbstore')
+def test_bucket_list_delimiter_prefix_underscore():
+    bucket_name = _create_objects(keys=['_obj1_','_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
+
+    delim = '/'
+    marker = ''
+    prefix = ''
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_obj1_'], [], '_obj1_')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, True, [], ['_under1/'], '_under1/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under2/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, True, ['_obj1_'], ['_under1/'], '_under1/')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 2, False, [], ['_under2/'], None)
+
+    prefix = '_under1/'
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 1, True, ['_under1/bar'], [], '_under1/bar')
+    marker = validate_bucket_list(bucket_name, prefix, delim, marker, 1, False, [], ['_under1/baz/'], None)
+
+    marker = validate_bucket_list(bucket_name, prefix, delim, '', 2, False, ['_under1/bar'], ['_under1/baz/'], None)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='prefixes starting with underscore')
+@attr('list-objects-v2')
+@attr('fails_on_dbstore')
+def test_bucket_listv2_delimiter_prefix_underscore():
+    bucket_name = _create_objects(keys=['_obj1_', '_under1/bar', '_under1/baz/xyzzy', '_under2/thud', '_under2/bla'])
+
+    delim = '/'
+    continuation_token = ''
+    prefix = ''
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_obj1_'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, True, [], ['_under1/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['_under2/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, True, ['_obj1_'], ['_under1/'])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 2, False, [], ['_under2/'], last=True)
+
+    prefix = '_under1/'
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 1, True, ['_under1/bar'], [])
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, continuation_token, 1, False, [], ['_under1/baz/'], last=True)
+
+    continuation_token = validate_bucket_listv2(bucket_name, prefix, delim, None, 2, False, ['_under1/bar'], ['_under1/baz/'], last=True)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='percentage delimiter characters')
+def test_bucket_list_delimiter_percentage():
+    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='%')
+    eq(response['Delimiter'], '%')
+    keys = _get_keys(response)
+    # foo contains no '%' and so is a complete key
+    eq(keys, ['foo'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
+    eq(prefixes, ['b%', 'c%'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='percentage delimiter characters')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_percentage():
+    bucket_name = _create_objects(keys=['b%ar', 'b%az', 'c%ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='%')
+    eq(response['Delimiter'], '%')
+    keys = _get_keys(response)
+    # foo contains no '%' and so is a complete key
+    eq(keys, ['foo'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    # b%ar, b%az, and c%ab should be broken up by the '%' delimiter
+    eq(prefixes, ['b%', 'c%'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='whitespace delimiter characters')
+def test_bucket_list_delimiter_whitespace():
+    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter=' ')
+    eq(response['Delimiter'], ' ')
+    keys = _get_keys(response)
+    # foo contains no ' ' and so is a complete key
+    eq(keys, ['foo'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
+    eq(prefixes, ['b ', 'c '])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='whitespace delimiter characters')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_whitespace():
+    bucket_name = _create_objects(keys=['b ar', 'b az', 'c ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter=' ')
+    eq(response['Delimiter'], ' ')
+    keys = _get_keys(response)
+    # foo contains no ' ' and so is a complete key
+    eq(keys, ['foo'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    # 'b ar', 'b az', and 'c ab' should be broken up by the ' ' delimiter
+    eq(prefixes, ['b ', 'c '])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='dot delimiter characters')
+def test_bucket_list_delimiter_dot():
+    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='.')
+    eq(response['Delimiter'], '.')
+    keys = _get_keys(response)
+    # foo contains no '.' and so is a complete key
+    eq(keys, ['foo'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
+    eq(prefixes, ['b.', 'c.'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='dot delimiter characters')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_dot():
+    bucket_name = _create_objects(keys=['b.ar', 'b.az', 'c.ab', 'foo'])
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='.')
+    eq(response['Delimiter'], '.')
+    keys = _get_keys(response)
+    # foo contains no '.' and so is a complete key
+    eq(keys, ['foo'])
+
+    prefixes = _get_prefixes(response)
+    eq(len(prefixes), 2)
+    # b.ar, b.az, and c.ab should be broken up by the '.' delimiter
+    eq(prefixes, ['b.', 'c.'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='non-printable delimiter can be specified')
+def test_bucket_list_delimiter_unreadable():
+    key_names=['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='\x0a')
+    eq(response['Delimiter'], '\x0a')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='non-printable delimiter can be specified')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_unreadable():
+    key_names=['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='\x0a')
+    eq(response['Delimiter'], '\x0a')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='empty delimiter can be specified')
+def test_bucket_list_delimiter_empty():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='')
+    # putting an empty value into Delimiter will not return a value in the response
+    eq('Delimiter' in response, False)
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='empty delimiter can be specified')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_empty():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='')
+    # putting an empty value into Delimiter will not return a value in the response
+    eq('Delimiter' in response, False)
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='unspecified delimiter defaults to none')
+def test_bucket_list_delimiter_none():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name)
+    # when no Delimiter is specified, none is returned in the response
+    eq('Delimiter' in response, False)
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='unspecified delimiter defaults to none')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_none():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name)
+    # when no Delimiter is specified, none is returned in the response
+    eq('Delimiter' in response, False)
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr('list-objects-v2')
+def test_bucket_listv2_fetchowner_notempty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=True)
+    objs_list = response['Contents']
+    eq('Owner' in objs_list[0], True)
+
+@attr('list-objects-v2')
+def test_bucket_listv2_fetchowner_defaultempty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name)
+    objs_list = response['Contents']
+    eq('Owner' in objs_list[0], False)
+
+@attr('list-objects-v2')
+def test_bucket_listv2_fetchowner_empty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, FetchOwner=False)
+    objs_list = response['Contents']
+    eq('Owner' in objs_list[0], False)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='unused delimiter is not found')
+def test_bucket_list_delimiter_not_exist():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
+    # the delimiter is echoed back even when no keys contain it
+    eq(response['Delimiter'], '/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(assertion='unused delimiter is not found')
+@attr('list-objects-v2')
+def test_bucket_listv2_delimiter_not_exist():
+    key_names = ['bar', 'baz', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/')
+    # the delimiter is echoed back even when no keys contain it
+    eq(response['Delimiter'], '/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list')
+@attr(assertion='list with delimiter not skip special keys')
+@attr('fails_on_dbstore')
+def test_bucket_list_delimiter_not_skip_special():
+    key_names = ['0/'] + ['0/%s' % i for i in range(1000, 1999)]
+    key_names2 = ['1999', '1999#', '1999+', '2000']
+    key_names += key_names2
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/')
+    eq(response['Delimiter'], '/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names2)
+    eq(prefixes, ['0/'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix')
+@attr(assertion='returns only objects under prefix')
+def test_bucket_list_prefix_basic():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='foo/')
+    eq(response['Prefix'], 'foo/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['foo/bar', 'foo/baz'])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix with list-objects-v2')
+@attr(assertion='returns only objects under prefix')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_basic():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='foo/')
+    eq(response['Prefix'], 'foo/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['foo/bar', 'foo/baz'])
+    eq(prefixes, [])
+
+# just testing that we can do the delimiter and prefix logic on non-slashes
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix')
+@attr(assertion='prefixes w/o delimiters')
+def test_bucket_list_prefix_alt():
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='ba')
+    eq(response['Prefix'], 'ba')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['bar', 'baz'])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix with list-objects-v2')
+@attr(assertion='prefixes w/o delimiters')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_alt():
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='ba')
+    eq(response['Prefix'], 'ba')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['bar', 'baz'])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix')
+@attr(assertion='empty prefix returns everything')
+def test_bucket_list_prefix_empty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='')
+    eq(response['Prefix'], '')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix with list-objects-v2')
+@attr(assertion='empty prefix returns everything')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_empty():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
+    eq(response['Prefix'], '')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix')
+@attr(assertion='unspecified prefix returns everything')
+def test_bucket_list_prefix_none():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='')
+    eq(response['Prefix'], '')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix with list-objects-v2')
+@attr(assertion='unspecified prefix returns everything')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_none():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='')
+    eq(response['Prefix'], '')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, key_names)
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix')
+@attr(assertion='nonexistent prefix returns nothing')
+def test_bucket_list_prefix_not_exist():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='d')
+    eq(response['Prefix'], 'd')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix with list-objects-v2')
+@attr(assertion='nonexistent prefix returns nothing')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_not_exist():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='d')
+    eq(response['Prefix'], 'd')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix')
+@attr(assertion='non-printable prefix can be specified')
+def test_bucket_list_prefix_unreadable():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Prefix='\x0a')
+    eq(response['Prefix'], '\x0a')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix with list-objects-v2')
+@attr(assertion='non-printable prefix can be specified')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_unreadable():
+    key_names = ['foo/bar', 'foo/baz', 'quux']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Prefix='\x0a')
+    eq(response['Prefix'], '\x0a')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix w/delimiter')
+@attr(assertion='returns only objects directly under prefix')
+def test_bucket_list_prefix_delimiter_basic():
+    key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
+    eq(response['Prefix'], 'foo/')
+    eq(response['Delimiter'], '/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['foo/bar'])
+    eq(prefixes, ['foo/baz/'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list-objects-v2 under prefix w/delimiter')
+@attr(assertion='returns only objects directly under prefix')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_delimiter_basic():
+    key_names = ['foo/bar', 'foo/baz/xyzzy', 'quux/thud', 'asdf']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='/', Prefix='foo/')
+    eq(response['Prefix'], 'foo/')
+    eq(response['Delimiter'], '/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['foo/bar'])
+    eq(prefixes, ['foo/baz/'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix w/delimiter')
+@attr(assertion='non-slash delimiters')
+def test_bucket_list_prefix_delimiter_alt():
+    key_names = ['bar', 'bazar', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='a', Prefix='ba')
+    eq(response['Prefix'], 'ba')
+    eq(response['Delimiter'], 'a')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['bar'])
+    eq(prefixes, ['baza'])
+
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_delimiter_alt():
+    key_names = ['bar', 'bazar', 'cab', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='a', Prefix='ba')
+    eq(response['Prefix'], 'ba')
+    eq(response['Delimiter'], 'a')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['bar'])
+    eq(prefixes, ['baza'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix w/delimiter')
+@attr(assertion='finds nothing w/unmatched prefix')
+def test_bucket_list_prefix_delimiter_prefix_not_exist():
+    key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='d', Prefix='/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list-objects-v2 under prefix w/delimiter')
+@attr(assertion='finds nothing w/unmatched prefix')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_delimiter_prefix_not_exist():
+    key_names = ['b/a/r', 'b/a/c', 'b/a/g', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='d', Prefix='/')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix w/delimiter')
+@attr(assertion='over-ridden slash ceases to be a delimiter')
+def test_bucket_list_prefix_delimiter_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='b')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['b/a/c', 'b/a/g', 'b/a/r'])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list-objects-v2 under prefix w/delimiter')
+@attr(assertion='over-ridden slash ceases to be a delimiter')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_delimiter_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='b')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, ['b/a/c', 'b/a/g', 'b/a/r'])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list under prefix w/delimiter')
+@attr(assertion='finds nothing w/unmatched prefix and delimiter')
+def test_bucket_list_prefix_delimiter_prefix_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Delimiter='z', Prefix='y')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list-objects-v2 under prefix w/delimiter')
+@attr(assertion='finds nothing w/unmatched prefix and delimiter')
+@attr('list-objects-v2')
+def test_bucket_listv2_prefix_delimiter_prefix_delimiter_not_exist():
+    key_names = ['b/a/c', 'b/a/g', 'b/a/r', 'g']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, Delimiter='z', Prefix='y')
+
+    keys = _get_keys(response)
+    prefixes = _get_prefixes(response)
+    eq(keys, [])
+    eq(prefixes, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='pagination w/max_keys=1, marker')
+@attr('fails_on_dbstore')
+def test_bucket_list_maxkeys_one():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=1)
+    eq(response['IsTruncated'], True)
+
+    keys = _get_keys(response)
+    eq(keys, key_names[0:1])
+
+    response = client.list_objects(Bucket=bucket_name, Marker=key_names[0])
+    eq(response['IsTruncated'], False)
+
+    keys = _get_keys(response)
+    eq(keys, key_names[1:])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='pagination w/max_keys=1, marker')
+@attr('list-objects-v2')
+@attr('fails_on_dbstore')
+def test_bucket_listv2_maxkeys_one():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
+    eq(response['IsTruncated'], True)
+
+    keys = _get_keys(response)
+    eq(keys, key_names[0:1])
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter=key_names[0])
+    eq(response['IsTruncated'], False)
+
+    keys = _get_keys(response)
+    eq(keys, key_names[1:])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='pagination w/max_keys=0')
+def test_bucket_list_maxkeys_zero():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=0)
+
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='pagination w/max_keys=0')
+@attr('list-objects-v2')
+def test_bucket_listv2_maxkeys_zero():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=0)
+
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='pagination w/o max_keys')
+def test_bucket_list_maxkeys_none():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name)
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, key_names)
+    eq(response['MaxKeys'], 1000)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='pagination w/o max_keys')
+@attr('list-objects-v2')
+def test_bucket_listv2_maxkeys_none():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name)
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, key_names)
+    eq(response['MaxKeys'], 1000)
+
+def get_http_response_body(**kwargs):
+    global http_response_body
+    http_response_body = kwargs['http_response'].__dict__['_content']
+
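+# A minimal sketch (illustrative only) of the botocore event-hook mechanism
+# used by several tests below: handlers registered on client.meta.events
+# fire around each API call and receive the request parameters or the raw
+# http response. The handler and bucket names here are hypothetical.
+#
+#   client = get_client()
+#   def log_url(**kwargs):
+#       print(kwargs['params']['url'])
+#   client.meta.events.register('before-call.s3.ListObjects', log_url)
+#   client.list_objects(Bucket='some-bucket')  # log_url prints the URL first
+#   client.meta.events.unregister('before-call.s3.ListObjects', log_url)
+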
+def parseXmlToJson(xml):
+    response = {}
+
+    for child in list(xml):
+        if len(list(child)) > 0:
+            response[child.tag] = parseXmlToJson(child)
+        else:
+            response[child.tag] = child.text or ''
+
+    return response
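+
+# e.g. parseXmlToJson(ET.fromstring(
+#     '<Usage><Summary><QuotaMaxBuckets>1000</QuotaMaxBuckets></Summary></Usage>'))
+# returns {'Summary': {'QuotaMaxBuckets': '1000'}}: nested elements become
+# nested dicts, leaf text stays a string, repeated sibling tags overwrite
+# one another, and the root tag itself is dropped. (The <Usage> root here
+# is only an illustration.)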
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get usage by client')
+@attr(assertion='account usage api')
+@attr('fails_on_aws') # the usage query string is a non-standard extension
+def test_account_usage():
+    # boto3.set_stream_logger(name='botocore')
+    client = get_client()
+    # adds the usage query parameter to the request url
+    def add_usage(**kwargs):
+        kwargs['params']['url'] += "?usage"
+    client.meta.events.register('before-call.s3.ListBuckets', add_usage)
+    client.meta.events.register('after-call.s3.ListBuckets', get_http_response_body)
+    client.list_buckets()
+    xml = ET.fromstring(http_response_body.decode('utf-8'))
+    parsed = parseXmlToJson(xml)
+    summary = parsed['Summary']
+    eq(summary['QuotaMaxBytes'], '-1')
+    eq(summary['QuotaMaxBuckets'], '1000')
+    eq(summary['QuotaMaxObjCount'], '-1')
+    eq(summary['QuotaMaxBytesPerBucket'], '-1')
+    eq(summary['QuotaMaxObjCountPerBucket'], '-1')
+
+@attr(resource='bucket')
+@attr(method='head')
+@attr(operation='get usage by client')
+@attr(assertion='account usage by head bucket')
+@attr('fails_on_aws') # the X-RGW-* usage headers are a non-standard extension
+@attr('fails_on_dbstore')
+def test_head_bucket_usage():
+    # boto3.set_stream_logger(name='botocore')
+    client = get_client()
+    bucket_name = _create_objects(keys=['foo'])
+    # capture the raw http response so the X-RGW-* headers can be checked
+    client.meta.events.register('after-call.s3.HeadBucket', get_http_response)
+    client.head_bucket(Bucket=bucket_name)
+    hdrs = http_response['headers']
+    eq(hdrs['X-RGW-Object-Count'], '1')
+    eq(hdrs['X-RGW-Bytes-Used'], '3')
+    eq(hdrs['X-RGW-Quota-User-Size'], '-1')
+    eq(hdrs['X-RGW-Quota-User-Objects'], '-1')
+    eq(hdrs['X-RGW-Quota-Max-Buckets'], '1000')
+    eq(hdrs['X-RGW-Quota-Bucket-Size'], '-1')
+    eq(hdrs['X-RGW-Quota-Bucket-Objects'], '-1')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='bucket list unordered')
+@attr('fails_on_aws') # allow-unordered is a non-standard extension
+@attr('fails_on_dbstore')
+def test_bucket_list_unordered():
+    # boto3.set_stream_logger(name='botocore')
+    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
+               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
+               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
+               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
+               'xix', 'yak', 'zoo']
+    bucket_name = _create_objects(keys=keys_in)
+    client = get_client()
+
+    # adds the unordered query parameter
+    def add_unordered(**kwargs):
+        kwargs['params']['url'] += "&allow-unordered=true"
+    client.meta.events.register('before-call.s3.ListObjects', add_unordered)
+
+    # test simple retrieval
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=1000)
+    unordered_keys_out = _get_keys(response)
+    eq(len(keys_in), len(unordered_keys_out))
+    eq(sorted(keys_in), sorted(unordered_keys_out))
+
+    # test retrieval with prefix
+    response = client.list_objects(Bucket=bucket_name,
+                                   MaxKeys=1000,
+                                   Prefix="abc/")
+    unordered_keys_out = _get_keys(response)
+    eq(5, len(unordered_keys_out))
+
+    # test incremental retrieval with marker
+    response = client.list_objects(Bucket=bucket_name, MaxKeys=6)
+    unordered_keys_out = _get_keys(response)
+    eq(6, len(unordered_keys_out))
+
+    # now get the next bunch
+    response = client.list_objects(Bucket=bucket_name,
+                                   MaxKeys=6,
+                                   Marker=unordered_keys_out[-1])
+    unordered_keys_out2 = _get_keys(response)
+    eq(6, len(unordered_keys_out2))
+
+    # make sure there's no overlap between the incremental retrievals
+    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
+    eq(0, len(intersect))
+
+    # verify that unordered used with delimiter results in error
+    e = assert_raises(ClientError,
+                      client.list_objects, Bucket=bucket_name, Delimiter="/")
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='bucket list unordered')
+@attr('fails_on_aws') # allow-unordered is a non-standard extension
+@attr('list-objects-v2')
+@attr('fails_on_dbstore')
+def test_bucket_listv2_unordered():
+    # boto3.set_stream_logger(name='botocore')
+    keys_in = ['ado', 'bot', 'cob', 'dog', 'emu', 'fez', 'gnu', 'hex',
+               'abc/ink', 'abc/jet', 'abc/kin', 'abc/lax', 'abc/mux',
+               'def/nim', 'def/owl', 'def/pie', 'def/qed', 'def/rye',
+               'ghi/sew', 'ghi/tor', 'ghi/uke', 'ghi/via', 'ghi/wit',
+               'xix', 'yak', 'zoo']
+    bucket_name = _create_objects(keys=keys_in)
+    client = get_client()
+
+    # adds the unordered query parameter
+    def add_unordered(**kwargs):
+        kwargs['params']['url'] += "&allow-unordered=true"
+    client.meta.events.register('before-call.s3.ListObjectsV2', add_unordered)
+
+    # test simple retrieval
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1000)
+    unordered_keys_out = _get_keys(response)
+    eq(len(keys_in), len(unordered_keys_out))
+    eq(sorted(keys_in), sorted(unordered_keys_out))
+
+    # test retrieval with prefix
+    response = client.list_objects_v2(Bucket=bucket_name,
+                                      MaxKeys=1000,
+                                      Prefix="abc/")
+    unordered_keys_out = _get_keys(response)
+    eq(5, len(unordered_keys_out))
+
+    # test incremental retrieval with startafter
+    response = client.list_objects_v2(Bucket=bucket_name, MaxKeys=6)
+    unordered_keys_out = _get_keys(response)
+    eq(6, len(unordered_keys_out))
+
+    # now get the next bunch
+    response = client.list_objects_v2(Bucket=bucket_name,
+                                      MaxKeys=6,
+                                      StartAfter=unordered_keys_out[-1])
+    unordered_keys_out2 = _get_keys(response)
+    eq(6, len(unordered_keys_out2))
+
+    # make sure there's no overlap between the incremental retrievals
+    intersect = set(unordered_keys_out).intersection(unordered_keys_out2)
+    eq(0, len(intersect))
+
+    # verify that unordered used with delimiter results in error
+    e = assert_raises(ClientError,
+                      client.list_objects_v2, Bucket=bucket_name, Delimiter="/")
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='invalid max_keys')
+def test_bucket_list_maxkeys_invalid():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    # adds invalid max keys to url
+    # before list_objects is called
+    def add_invalid_maxkeys(**kwargs):
+        kwargs['params']['url'] += "&max-keys=blah"
+    client.meta.events.register('before-call.s3.ListObjects', add_invalid_maxkeys)
+
+    e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='no pagination, no marker')
+def test_bucket_list_marker_none():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name)
+    eq(response['Marker'], '')
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='no pagination, empty marker')
+def test_bucket_list_marker_empty():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='')
+    eq(response['Marker'], '')
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, key_names)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='no pagination, empty continuationtoken')
+@attr('list-objects-v2')
+def test_bucket_listv2_continuationtoken_empty():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, ContinuationToken='')
+    eq(response['ContinuationToken'], '')
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, key_names)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list keys with list-objects-v2')
+@attr(assertion='no pagination, non-empty continuationtoken')
+@attr('list-objects-v2')
+def test_bucket_listv2_continuationtoken():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response1 = client.list_objects_v2(Bucket=bucket_name, MaxKeys=1)
+    next_continuation_token = response1['NextContinuationToken']
+
+    response2 = client.list_objects_v2(Bucket=bucket_name, ContinuationToken=next_continuation_token)
+    eq(response2['ContinuationToken'], next_continuation_token)
+    eq(response2['IsTruncated'], False)
+    key_names2 = ['baz', 'foo', 'quxx']
+    keys = _get_keys(response2)
+    eq(keys, key_names2)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list keys with list-objects-v2')
+@attr(assertion='no pagination, non-empty continuationtoken and startafter')
+@attr('list-objects-v2')
+@attr('fails_on_dbstore')
+def test_bucket_listv2_both_continuationtoken_startafter():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response1 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', MaxKeys=1)
+    next_continuation_token = response1['NextContinuationToken']
+
+    response2 = client.list_objects_v2(Bucket=bucket_name, StartAfter='bar', ContinuationToken=next_continuation_token)
+    eq(response2['ContinuationToken'], next_continuation_token)
+    eq(response2['StartAfter'], 'bar')
+    eq(response2['IsTruncated'], False)
+    key_names2 = ['foo', 'quxx']
+    keys = _get_keys(response2)
+    eq(keys, key_names2)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='non-printing marker')
+def test_bucket_list_marker_unreadable():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='\x0a')
+    eq(response['Marker'], '\x0a')
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, key_names)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='non-printing startafter')
+@attr('list-objects-v2')
+def test_bucket_listv2_startafter_unreadable():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='\x0a')
+    eq(response['StartAfter'], '\x0a')
+    eq(response['IsTruncated'], False)
+    keys = _get_keys(response)
+    eq(keys, key_names)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='marker not-in-list')
+def test_bucket_list_marker_not_in_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='blah')
+    eq(response['Marker'], 'blah')
+    keys = _get_keys(response)
+    eq(keys, ['foo', 'quxx'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='startafter not-in-list')
+@attr('list-objects-v2')
+def test_bucket_listv2_startafter_not_in_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='blah')
+    eq(response['StartAfter'], 'blah')
+    keys = _get_keys(response)
+    eq(keys, ['foo', 'quxx'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys')
+@attr(assertion='marker after list')
+def test_bucket_list_marker_after_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects(Bucket=bucket_name, Marker='zzz')
+    eq(response['Marker'], 'zzz')
+    keys = _get_keys(response)
+    eq(response['IsTruncated'], False)
+    eq(keys, [])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all keys with list-objects-v2')
+@attr(assertion='startafter after list')
+@attr('list-objects-v2')
+def test_bucket_listv2_startafter_after_list():
+    key_names = ['bar', 'baz', 'foo', 'quxx']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    response = client.list_objects_v2(Bucket=bucket_name, StartAfter='zzz')
+    eq(response['StartAfter'], 'zzz')
+    keys = _get_keys(response)
+    eq(response['IsTruncated'], False)
+    eq(keys, [])
+
+def _compare_dates(datetime1, datetime2):
+    """
+    zeroes out the microseconds of datetime1, then compares it to datetime2
+    """
+    # both times are in datetime format but datetime1 has
+    # microseconds and datetime2 does not
+    datetime1 = datetime1.replace(microsecond=0)
+    eq(datetime1, datetime2)
+
+@attr(resource='object')
+@attr(method='head')
+@attr(operation='compare w/bucket list')
+@attr(assertion='return same metadata')
+@attr('fails_on_dbstore')
+def test_bucket_list_return_data():
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    data = {}
+    for key_name in key_names:
+        obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
+        acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
+        data.update({
+            key_name: {
+                'DisplayName': acl_response['Owner']['DisplayName'],
+                'ID': acl_response['Owner']['ID'],
+                'ETag': obj_response['ETag'],
+                'LastModified': obj_response['LastModified'],
+                'ContentLength': obj_response['ContentLength'],
+                }
+            })
+
+    response = client.list_objects(Bucket=bucket_name)
+    objs_list = response['Contents']
+    for obj in objs_list:
+        key_name = obj['Key']
+        key_data = data[key_name]
+        eq(obj['ETag'],key_data['ETag'])
+        eq(obj['Size'],key_data['ContentLength'])
+        eq(obj['Owner']['DisplayName'],key_data['DisplayName'])
+        eq(obj['Owner']['ID'],key_data['ID'])
+        _compare_dates(obj['LastModified'],key_data['LastModified'])
+
+
+@attr(resource='object')
+@attr(method='head')
+@attr(operation='compare w/bucket list when bucket versioning is configured')
+@attr(assertion='return same metadata')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_bucket_list_return_data_versioning():
+    bucket_name = get_new_bucket()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    key_names = ['bar', 'baz', 'foo']
+    bucket_name = _create_objects(bucket_name=bucket_name,keys=key_names)
+
+    client = get_client()
+    data = {}
+
+    for key_name in key_names:
+        obj_response = client.head_object(Bucket=bucket_name, Key=key_name)
+        acl_response = client.get_object_acl(Bucket=bucket_name, Key=key_name)
+        data.update({
+            key_name: {
+                'ID': acl_response['Owner']['ID'],
+                'DisplayName': acl_response['Owner']['DisplayName'],
+                'ETag': obj_response['ETag'],
+                'LastModified': obj_response['LastModified'],
+                'ContentLength': obj_response['ContentLength'],
+                'VersionId': obj_response['VersionId']
+                }
+            })
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    objs_list = response['Versions']
+
+    for obj in objs_list:
+        key_name = obj['Key']
+        key_data = data[key_name]
+        eq(obj['Owner']['DisplayName'],key_data['DisplayName'])
+        eq(obj['ETag'],key_data['ETag'])
+        eq(obj['Size'],key_data['ContentLength'])
+        eq(obj['Owner']['ID'],key_data['ID'])
+        eq(obj['VersionId'], key_data['VersionId'])
+        _compare_dates(obj['LastModified'],key_data['LastModified'])
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all objects (anonymous)')
+@attr(assertion='succeeds')
+def test_bucket_list_objects_anonymous():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    unauthenticated_client.list_objects(Bucket=bucket_name)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all objects (anonymous) with list-objects-v2')
+@attr(assertion='succeeds')
+@attr('list-objects-v2')
+def test_bucket_listv2_objects_anonymous():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    unauthenticated_client.list_objects_v2(Bucket=bucket_name)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all objects (anonymous)')
+@attr(assertion='fails')
+def test_bucket_list_objects_anonymous_fail():
+    bucket_name = get_new_bucket()
+
+    unauthenticated_client = get_unauthenticated_client()
+    e = assert_raises(ClientError, unauthenticated_client.list_objects, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all objects (anonymous) with list-objects-v2')
+@attr(assertion='fails')
+@attr('list-objects-v2')
+def test_bucket_listv2_objects_anonymous_fail():
+    bucket_name = get_new_bucket()
+
+    unauthenticated_client = get_unauthenticated_client()
+    e = assert_raises(ClientError, unauthenticated_client.list_objects_v2, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='non-existent bucket')
+@attr(assertion='fails 404')
+def test_bucket_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.list_objects, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='non-existent bucket with list-objects-v2')
+@attr(assertion='fails 404')
+@attr('list-objects-v2')
+def test_bucketv2_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.list_objects_v2, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='non-existent bucket')
+@attr(assertion='fails 404')
+def test_bucket_delete_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='non-empty bucket')
+@attr(assertion='fails 409')
+def test_bucket_delete_nonempty():
+    key_names = ['foo']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 409)
+    eq(error_code, 'BucketNotEmpty')
+
+def _do_set_bucket_canned_acl(client, bucket_name, canned_acl, i, results):
+    try:
+        client.put_bucket_acl(ACL=canned_acl, Bucket=bucket_name)
+        results[i] = True
+    except Exception:
+        results[i] = False
+
+def _do_set_bucket_canned_acl_concurrent(client, bucket_name, canned_acl, num, results):
+    t = []
+    for i in range(num):
+        thr = threading.Thread(target = _do_set_bucket_canned_acl, args=(client, bucket_name, canned_acl, i, results))
+        thr.start()
+        t.append(thr)
+    return t
+
+def _do_wait_completion(t):
+    for thr in t:
+        thr.join()
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='concurrent set of acls on a bucket')
+@attr(assertion='works')
+def test_bucket_concurrent_set_canned_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    num_threads = 50 # boto2 retry defaults to 5 so we need a thread to fail at least 5 times
+                     # this seems like a large enough number to get through retry (if bug
+                     # exists)
+    results = [None] * num_threads
+
+    t = _do_set_bucket_canned_acl_concurrent(client, bucket_name, 'public-read', num_threads, results)
+    _do_wait_completion(t)
+
+    for r in results:
+        eq(r, True)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='non-existent bucket')
+@attr(assertion='fails 404')
+def test_object_write_to_nonexist_bucket():
+    bucket_name = 'whatchutalkinboutwillis'
+    client = get_client()
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+
+@attr(resource='bucket')
+@attr(method='del')
+@attr(operation='deleted bucket')
+@attr(assertion='fails 404')
+def test_bucket_create_delete():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.delete_bucket(Bucket=bucket_name)
+
+    e = assert_raises(ClientError, client.delete_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='read contents that were never written')
+@attr(assertion='fails 404')
+def test_object_read_not_exist():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+
+http_response = None
+
+def get_http_response(**kwargs):
+    global http_response
+    http_response = kwargs['http_response'].__dict__
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='read contents that were never written to raise one error response')
+@attr(assertion='RequestId appears in the error response')
+@attr('fails_on_dbstore')
+def test_object_requestid_matches_header_on_error():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # get http response after failed request
+    client.meta.events.register('after-call.s3.GetObject', get_http_response)
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='bar')
+
+    response_body = http_response['_content']
+    resp_body_xml = ET.fromstring(response_body)
+    request_id = resp_body_xml.find('.//RequestId').text
+
+    assert request_id is not None
+    eq(request_id, e.response['ResponseMetadata']['RequestId'])
+
+def _make_objs_dict(key_names):
+    objs_list = []
+    for key in key_names:
+        obj_dict = {'Key': key}
+        objs_list.append(obj_dict)
+    objs_dict = {'Objects': objs_list}
+    return objs_dict
+
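+# The dict built by _make_objs_dict() matches the Delete request shape that
+# delete_objects() expects. A hedged usage sketch (Quiet is a real field of
+# the S3 Delete structure that suppresses per-key success entries):
+#
+#   delete = _make_objs_dict(key_names=['key0', 'key1'])
+#   delete['Quiet'] = True
+#   client.delete_objects(Bucket=bucket_name, Delete=delete)
+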
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='delete multiple objects')
+@attr(assertion='deletes multiple objects with a single call')
+def test_multi_object_delete():
+    key_names = ['key0', 'key1', 'key2']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+    response = client.list_objects(Bucket=bucket_name)
+    eq(len(response['Contents']), 3)
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+
+    eq(len(response['Deleted']), 3)
+    assert 'Errors' not in response
+    response = client.list_objects(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+    eq(len(response['Deleted']), 3)
+    assert 'Errors' not in response
+    response = client.list_objects(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='delete multiple objects with list-objects-v2')
+@attr(assertion='deletes multiple objects with a single call')
+@attr('list-objects-v2')
+def test_multi_objectv2_delete():
+    key_names = ['key0', 'key1', 'key2']
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+    response = client.list_objects_v2(Bucket=bucket_name)
+    eq(len(response['Contents']), 3)
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+
+    eq(len(response['Deleted']), 3)
+    assert 'Errors' not in response
+    response = client.list_objects_v2(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+    response = client.delete_objects(Bucket=bucket_name, Delete=objs_dict)
+    eq(len(response['Deleted']), 3)
+    assert 'Errors' not in response
+    response = client.list_objects_v2(Bucket=bucket_name)
+    assert 'Contents' not in response
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='delete multiple objects has upper limit of 1000 keys')
+@attr(assertion='fails 400')
+def test_multi_object_delete_key_limit():
+    key_names = [f"key-{i}" for i in range(1001)]
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    paginator = client.get_paginator('list_objects')
+    pages = paginator.paginate(Bucket=bucket_name)
+    num_keys = 0
+    for page in pages:
+        num_keys += len(page['Contents'])
+    eq(num_keys, 1001)
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    e = assert_raises(ClientError, client.delete_objects, Bucket=bucket_name, Delete=objs_dict)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='delete multiple objects has upper limit of 1000 keys with list-objects-v2')
+@attr(assertion='fails 400')
+def test_multi_objectv2_delete_key_limit():
+    key_names = [f"key-{i}" for i in range(1001)]
+    bucket_name = _create_objects(keys=key_names)
+    client = get_client()
+
+    paginator = client.get_paginator('list_objects_v2')
+    pages = paginator.paginate(Bucket=bucket_name)
+    numKeys = 0
+    for page in pages:
+        numKeys += len(page['Contents'])
+    eq(numKeys, 1001)
+
+    objs_dict = _make_objs_dict(key_names=key_names)
+    e = assert_raises(ClientError, client.delete_objects, Bucket=bucket_name, Delete=objs_dict)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write zero-byte key')
+@attr(assertion='correct content length')
+def test_object_head_zero_bytes():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='')
+
+    response = client.head_object(Bucket=bucket_name, Key='foo')
+    eq(response['ContentLength'], 0)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write key')
+@attr(assertion='correct etag')
+def test_object_write_check_etag():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(response['ETag'], '"37b51d194a7513e45b56f6524f2d51f2"')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write key')
+@attr(assertion='correct cache control header')
+def test_object_write_cache_control():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    cache_control = 'public, max-age=14400'
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', CacheControl=cache_control)
+
+    response = client.head_object(Bucket=bucket_name, Key='foo')
+    eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], cache_control)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write key')
+@attr(assertion='correct expires header')
+def test_object_write_expires():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Expires=expires)
+
+    response = client.head_object(Bucket=bucket_name, Key='foo')
+    _compare_dates(expires, response['Expires'])
+
+def _get_body(response):
+    body = response['Body']
+    got = body.read()
+    if type(got) is bytes:
+        got = got.decode()
+    return got
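+
+# boto3 returns the object payload as a StreamingBody whose read() yields
+# bytes; _get_body decodes that to str so tests can compare against string
+# literals, e.g. _get_body(client.get_object(Bucket=b, Key='foo')) == 'bar'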
+
+@attr(resource='object')
+@attr(method='all')
+@attr(operation='complete object life cycle')
+@attr(assertion='read back what we wrote and rewrote')
+def test_object_write_read_update_read_delete():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # Write
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    # Read
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+    # Update
+    client.put_object(Bucket=bucket_name, Key='foo', Body='soup')
+    # Read
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'soup')
+    # Delete
+    client.delete_object(Bucket=bucket_name, Key='foo')
+
+def _set_get_metadata(metadata, bucket_name=None):
+    """
+    create a new bucket or use an existing one,
+    create an object in that bucket,
+    set its meta1 property to a specified value,
+    and then re-read and return that property
+    """
+    if bucket_name is None:
+        bucket_name = get_new_bucket()
+
+    client = get_client()
+    metadata_dict = {'meta1': metadata}
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    return response['Metadata']['meta1']
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write/re-read')
+@attr(assertion='reread what we wrote')
+def test_object_set_get_metadata_none_to_good():
+    got = _set_get_metadata('mymeta')
+    eq(got, 'mymeta')
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write/re-read')
+@attr(assertion='write empty value, returns empty value')
+def test_object_set_get_metadata_none_to_empty():
+    got = _set_get_metadata('')
+    eq(got, '')
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write/re-write')
+@attr(assertion='empty value replaces old')
+def test_object_set_get_metadata_overwrite_to_empty():
+    bucket_name = get_new_bucket()
+    got = _set_get_metadata('oldmeta', bucket_name)
+    eq(got, 'oldmeta')
+    got = _set_get_metadata('', bucket_name)
+    eq(got, '')
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write/re-write')
+@attr(assertion='UTF-8 values passed through')
+# TODO: the decoding of this unicode metadata is not happening properly for unknown reasons
+@attr('fails_on_rgw')
+def test_object_set_get_unicode_metadata():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
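+    # use a boto3 before-call event hook to inject the raw metadata header,
+    # bypassing the SDK's own header handling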
+    def set_unicode_metadata(**kwargs):
+        kwargs['params']['headers']['x-amz-meta-meta1'] = u"Hello World\xe9"
+
+    client.meta.events.register('before-call.s3.PutObject', set_unicode_metadata)
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    got = response['Metadata']['meta1']
+    eq(got, u"Hello World\xe9")
+
+def _set_get_metadata_unreadable(metadata, bucket_name=None):
+    """
+    set and then read back a meta-data value (which presumably
+    includes some interesting characters), and return a list
+    containing the stored value AND the encoding with which it
+    was returned.
+
+    This should return a 400 bad request because the webserver
+    rejects the request.
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    metadata_dict = {'meta1': metadata}
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='bar', Metadata=metadata_dict)
+    return e
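+
+# e.g. _set_get_metadata_unreadable('\x04mymeta') is expected to raise
+# ClientError, since HTTP header values cannot carry raw control characters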
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write/re-write')
+@attr(assertion='non-UTF-8 values detected, but rejected by webserver')
+@attr('fails_strict_rfc2616')
+@attr(assertion='fails 400')
+def test_object_set_get_non_utf8_metadata():
+    metadata = '\x04mymeta'
+    e = _set_get_metadata_unreadable(metadata)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status in (400, 403)
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write')
+@attr(assertion='non-printing prefixes rejected by webserver')
+@attr('fails_strict_rfc2616')
+@attr(assertion='fails 400')
+def test_object_set_get_metadata_empty_to_unreadable_prefix():
+    metadata = '\x04w'
+    e = _set_get_metadata_unreadable(metadata)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status in (400, 403)
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write')
+@attr(assertion='non-printing suffixes rejected by webserver')
+@attr('fails_strict_rfc2616')
+@attr(assertion='fails 400')
+def test_object_set_get_metadata_empty_to_unreadable_suffix():
+    metadata = 'h\x04'
+    e = _set_get_metadata_unreadable(metadata)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status in (400, 403)
+
+@attr(resource='object.metadata')
+@attr(method='put')
+@attr(operation='metadata write')
+@attr(assertion='non-printing infixes rejected by webserver')
+@attr('fails_strict_rfc2616')
+@attr(assertion='fails 400')
+def test_object_set_get_metadata_empty_to_unreadable_infix():
+    metadata = 'h\x04w'
+    e = _set_get_metadata_unreadable(metadata)
+    status, error_code = _get_status_and_error_code(e.response)
+    assert status in (400, 403)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='data re-write')
+@attr(assertion='replaces previous metadata')
+def test_object_metadata_replaced_on_put():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    metadata_dict = {'meta1': 'bar'}
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar', Metadata=metadata_dict)
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    got = response['Metadata']
+    eq(got, {})
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='data write from file (w/100-Continue)')
+@attr(assertion='succeeds and returns written data')
+def test_object_write_file():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data_str = 'bar'
+    data = bytes(data_str, 'utf-8')
+    client.put_object(Bucket=bucket_name, Key='foo', Body=data)
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+def _get_post_url(bucket_name):
+    endpoint = get_config_endpoint()
+    return '{endpoint}/{bucket_name}'.format(endpoint=endpoint, bucket_name=bucket_name)
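+
+# e.g. an endpoint of 'https://s3.example.com' and bucket 'mybucket' yield
+# 'https://s3.example.com/mybucket'; the endpoint value here is illustrative
+# and comes from the test configuration at runtime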
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='anonymous browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+def test_post_object_anonymous_request():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    url = _get_post_url(bucket_name)
+    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+def test_post_object_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    eq(body, 'bar')
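+
+# The policy-signing steps above recur in every browser-based POST test that
+# follows: the policy document is JSON-encoded, base64-encoded, and signed
+# with HMAC-SHA1 using the account's secret key. As an illustrative sketch
+# only (the tests below keep the inline form), that flow could be factored
+# into a helper; the name _sign_post_policy is hypothetical and not used
+# elsewhere in this suite:
+def _sign_post_policy(policy_document, aws_secret_access_key):
+    # JSON-encode the policy, then base64 it: this becomes the 'policy' form field
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    policy = base64.b64encode(bytes(json_policy_document, 'utf-8'))
+    # sign the base64 policy with HMAC-SHA1: this becomes the 'signature' field
+    digest = hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest()
+    signature = base64.b64encode(digest)
+    return policy, signature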
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request, no content-type header')
+@attr(assertion='succeeds and returns written data')
+def test_post_object_authenticated_no_content_type():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key="foo.txt")
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request, bad access key')
+@attr(assertion='fails')
+def test_post_object_authenticated_request_bad_access_key():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , 'foo'),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 403)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='anonymous browser based upload via POST request')
+@attr(assertion='succeeds with status 201')
+def test_post_object_set_success_code():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
+    ("success_action_status" , "201"),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 201)
+    message = ET.fromstring(r.content).find('Key')
+    eq(message.text,'foo.txt')
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='anonymous browser based upload via POST request')
+@attr(assertion='succeeds with status 204')
+def test_post_object_set_invalid_success_code():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\
+    ("success_action_status" , "404"),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    content = r.content.decode()
+    eq(content,'')
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+def test_post_object_upload_larger_than_chunk():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 5*1024*1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    foo_string = 'foo' * 1024*1024
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', foo_string)])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    eq(body, foo_string)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+def test_post_object_set_key_from_filename():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "${filename}"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('foo.txt', 'bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds with status 204')
+def test_post_object_ignored_header():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),("x-ignore-foo" , "bar"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds with status 204')
+def test_post_object_case_insensitive_condition_fields():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bUcKeT": bucket_name},\
+    ["StArTs-WiTh", "$KeY", "foo"],\
+    {"AcL": "private"},\
+    ["StArTs-WiTh", "$CoNtEnT-TyPe", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("kEy" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("aCl" , "private"),("signature" , signature),("pOLICy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds with escaped leading $ and returns written data')
+def test_post_object_escaped_field_values():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key='\$foo.txt')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds and returns redirect url')
+def test_post_object_success_redirect_action():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    url = _get_post_url(bucket_name)
+    redirect_url = _get_post_url(bucket_name)
+
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["eq", "$success_action_redirect", redirect_url],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),("success_action_redirect" , redirect_url),\
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 200)
+    url = r.url
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    eq(url,
+    '{rurl}?bucket={bucket}&key={key}&etag=%22{etag}%22'.format(rurl = redirect_url,\
+    bucket = bucket_name, key = 'foo.txt', etag = response['ETag'].strip('"')))
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with invalid signature error')
+def test_post_object_invalid_signature():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())[::-1]
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 403)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with access key does not exist error')
+def test_post_object_invalid_access_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id[::-1]),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 403)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with invalid expiration error')
+def test_post_object_invalid_date_format():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": str(expires),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "\$foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with missing key error')
+def test_post_object_no_key_specified():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with missing signature error')
+def test_post_object_missing_signature():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with extra input fields policy error')
+def test_post_object_missing_policy_condition():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    ["starts-with", "$key", "\$foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 403)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds using starts-with restriction on metadata header')
+def test_post_object_user_specified_header():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ["starts-with", "$x-amz-meta-foo",  "bar"]
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    eq(response['Metadata']['foo'], 'barclamp')
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with policy condition failed error due to missing field in POST request')
+def test_post_object_request_missing_policy_specified_field():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ["starts-with", "$x-amz-meta-foo",  "bar"]
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 403)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with conditions must be list error')
+def test_post_object_condition_is_case_sensitive():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "CONDITIONS": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with expiration must be string error')
+def test_post_object_expires_is_case_sensitive():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"EXPIRATION": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with policy expired error')
+def test_post_object_expired_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=-6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key", "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 403)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails using equality restriction on metadata header')
+def test_post_object_invalid_request_field_value():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ["eq", "$x-amz-meta-foo",  ""]
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('x-amz-meta-foo' , 'barclamp'),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 403)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with policy missing expiration error')
+def test_post_object_missing_expires_condition():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 1024],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with policy missing conditions error')
+def test_post_object_missing_conditions_list():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ")}
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with allowable upload size exceeded error')
+def test_post_object_upload_size_limit_exceeded():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0, 0],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with invalid content length error')
+def test_post_object_missing_content_length_argument():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 0],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with invalid JSON error')
+def test_post_object_invalid_content_length_argument():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", -1, 0],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='fails with upload size less than minimum allowable error')
+def test_post_object_upload_size_below_minimum():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["content-length-range", 512, 1000],\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='empty conditions return appropriate error response')
+def test_post_object_empty_conditions():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    { }\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 400)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-Match: the latest ETag')
+@attr(assertion='succeeds')
+def test_get_object_ifmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    etag = response['ETag']
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfMatch=etag)
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-Match: bogus ETag')
+@attr(assertion='fails 412')
+def test_get_object_ifmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfMatch='"ABCORZ"')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-None-Match: the latest ETag')
+@attr(assertion='fails 304')
+def test_get_object_ifnonematch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    etag = response['ETag']
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfNoneMatch=etag)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 304)
+    eq(e.response['Error']['Message'], 'Not Modified')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-None-Match: bogus ETag')
+@attr(assertion='succeeds')
+def test_get_object_ifnonematch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfNoneMatch='ABCORZ')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-Modified-Since: before')
+@attr(assertion='succeeds')
+def test_get_object_ifmodifiedsince_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfModifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-Modified-Since: after')
+@attr(assertion='fails 304')
+@attr('fails_on_dbstore')
+def test_get_object_ifmodifiedsince_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    last_modified = str(response['LastModified'])
+
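+    # LastModified is a tz-aware datetime (e.g. '... 19:43:31+00:00'); strip
+    # the offset, re-parse, add one second, and reformat as an HTTP-date for
+    # the If-Modified-Since header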
+    last_modified = last_modified.split('+')[0]
+    mtime = datetime.datetime.strptime(last_modified, '%Y-%m-%d %H:%M:%S')
+
+    after = mtime + datetime.timedelta(seconds=1)
+    after_str = time.strftime("%a, %d %b %Y %H:%M:%S GMT", after.timetuple())
+
+    time.sleep(1)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfModifiedSince=after_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 304)
+    eq(e.response['Error']['Message'], 'Not Modified')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-Unmodified-Since: before')
+@attr(assertion='fails 412')
+@attr('fails_on_dbstore')
+def test_get_object_ifunmodifiedsince_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 1994 19:43:31 GMT')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='get w/ If-Unmodified-Since: after')
+@attr(assertion='succeeds')
+def test_get_object_ifunmodifiedsince_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo', IfUnmodifiedSince='Sat, 29 Oct 2100 19:43:31 GMT')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='data re-write w/ If-Match: the latest ETag')
+@attr(assertion='replaces previous data and metadata')
+@attr('fails_on_aws')
+def test_put_object_ifmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    etag = response['ETag'].replace('"', '')
+
+    # pass in custom header 'If-Match' before PutObject call
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'zar')
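+
+# boto3's put_object() does not expose conditional-request headers such as
+# If-Match, so the tests below inject them with botocore's 'before-call'
+# event hook, as in the test above. A hedged sketch of that pattern as a
+# reusable helper (the name is illustrative and not part of this suite):
+def _register_extra_put_headers(client, headers):
+    # merge the extra raw headers into every outgoing PutObject request
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)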
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='data re-write w/ If-Match: bogus ETag')
+@attr(assertion='fails 412')
+@attr('fails_on_dbstore')
+def test_put_object_ifmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    # pass in custom header 'If-Match' before PutObject call
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '"ABCORZ"'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='overwrite existing object w/ If-Match: *')
+@attr(assertion='replaces previous data and metadata')
+@attr('fails_on_aws')
+def test_put_object_ifmatch_overwrite_existed_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'zar')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='overwrite non-existing object w/ If-Match: *')
+@attr(assertion='fails 412')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_put_object_ifmatch_nonexisted_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='overwrite existing object w/ If-None-Match: outdated ETag')
+@attr(assertion='replaces previous data and metadata')
+@attr('fails_on_aws')
+def test_put_object_ifnonmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': 'ABCORZ'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    response = client.put_object(Bucket=bucket_name,Key='foo', Body='zar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'zar')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='overwrite existing object w/ If-None-Match: the latest ETag')
+@attr(assertion='fails 412')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_put_object_ifnonmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    etag = response['ETag'].replace('"', '')
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': etag}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='overwrite non-existing object w/ If-None-Match: *')
+@attr(assertion='succeeds')
+@attr('fails_on_aws')
+def test_put_object_ifnonmatch_nonexisted_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='overwrite existing object w/ If-None-Match: *')
+@attr(assertion='fails 412')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_put_object_ifnonmatch_overwrite_existed_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-None-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo', Body='zar')
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+def _setup_bucket_object_acl(bucket_acl, object_acl):
+    """
+    Create a new bucket with the specified bucket ACL, add a 'foo' key
+    with the specified object ACL, and return the bucket name.
+    """
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
+    client.put_object(ACL=object_acl, Bucket=bucket_name, Key='foo')
+
+    return bucket_name
+
+def _setup_bucket_acl(bucket_acl=None):
+    """
+    set up a new bucket with the specified ACL and return its name
+    """
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL=bucket_acl, Bucket=bucket_name)
+
+    return bucket_name
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='publicly readable bucket')
+@attr(assertion='bucket is readable')
+def test_object_raw_get():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='deleted object and bucket')
+@attr(assertion='fails 404')
+def test_object_raw_get_bucket_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+    client.delete_bucket(Bucket=bucket_name)
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='object')
+@attr(method='delete')
+@attr(operation='deleted object and bucket')
+@attr(assertion='fails 404')
+def test_object_delete_key_bucket_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+    client.delete_bucket(Bucket=bucket_name)
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.delete_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='deleted object')
+@attr(assertion='fails 404')
+def test_object_raw_get_object_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+
+@attr(resource='bucket')
+@attr(method='head')
+@attr(operation='head bucket')
+@attr(assertion='succeeds')
+def test_bucket_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.head_bucket(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='head')
+@attr(operation='non-existent bucket')
+@attr(assertion='fails 404')
+def test_bucket_head_notexist():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    e = assert_raises(ClientError, client.head_bucket, Bucket=bucket_name)
+
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    # n.b., RGW does not send a response document for this operation,
+    # which seems consistent with
+    # https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
+    #eq(error_code, 'NoSuchKey')
+
+@attr('fails_on_aws')
+@attr(resource='bucket')
+@attr(method='head')
+@attr(operation='read bucket extended information')
+@attr(assertion='extended information is getting updated')
+@attr('fails_on_dbstore')
+def test_bucket_head_extended():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
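+    # x-rgw-object-count and x-rgw-bytes-used are RGW-specific extension
+    # headers (hence the fails_on_aws tag); a new bucket should report zero
+    # for both before any objects are written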
+    response = client.head_bucket(Bucket=bucket_name)
+    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 0)
+    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 0)
+
+    _create_objects(bucket_name=bucket_name, keys=['foo','bar','baz'])
+    response = client.head_bucket(Bucket=bucket_name)
+
+    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-object-count']), 3)
+    eq(int(response['ResponseMetadata']['HTTPHeaders']['x-rgw-bytes-used']), 9)
+
+@attr(resource='bucket.acl')
+@attr(method='get')
+@attr(operation='unauthenticated on private bucket')
+@attr(assertion='succeeds')
+def test_object_raw_get_bucket_acl():
+    bucket_name = _setup_bucket_object_acl('private', 'public-read')
+
+    unauthenticated_client = get_unauthenticated_client()
+    response = unauthenticated_client.get_object(Bucket=bucket_name, Key='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object.acl')
+@attr(method='get')
+@attr(operation='unauthenticated on private object')
+@attr(assertion='fails 403')
+def test_object_raw_get_object_acl():
+    bucket_name = _setup_bucket_object_acl('public-read', 'private')
+
+    unauthenticated_client = get_unauthenticated_client()
+    e = assert_raises(ClientError, unauthenticated_client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='authenticated on public bucket/object')
+@attr(assertion='succeeds')
+def test_object_raw_authenticated():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='authenticated on private bucket/private object with modified response headers')
+@attr(assertion='succeeds')
+def test_object_raw_response_headers():
+    bucket_name = _setup_bucket_object_acl('private', 'private')
+
+    client = get_client()
+
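+    # the Response* parameters are sent as response-* query-string overrides
+    # (response-content-type, response-cache-control, ...) asking the server
+    # to rewrite the matching response headers for this GET only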
+    response = client.get_object(Bucket=bucket_name, Key='foo', ResponseCacheControl='no-cache', ResponseContentDisposition='bla', ResponseContentEncoding='aaa', ResponseContentLanguage='esperanto', ResponseContentType='foo/bar', ResponseExpires='123')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], 'foo/bar')
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-disposition'], 'bla')
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-language'], 'esperanto')
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-encoding'], 'aaa')
+    eq(response['ResponseMetadata']['HTTPHeaders']['cache-control'], 'no-cache')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='authenticated on private bucket/public object')
+@attr(assertion='succeeds')
+def test_object_raw_authenticated_bucket_acl():
+    bucket_name = _setup_bucket_object_acl('private', 'public-read')
+
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='authenticated on public bucket/private object')
+@attr(assertion='succeeds')
+def test_object_raw_authenticated_object_acl():
+    bucket_name = _setup_bucket_object_acl('public-read', 'private')
+
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='authenticated on deleted object and bucket')
+@attr(assertion='fails 404')
+def test_object_raw_authenticated_bucket_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+    client.delete_bucket(Bucket=bucket_name)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='authenticated on deleted object')
+@attr(assertion='fails 404')
+def test_object_raw_authenticated_object_gone():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+
+    client.delete_object(Bucket=bucket_name, Key='foo')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='x-amz-expires check not expired')
+@attr(assertion='succeeds')
+def test_object_raw_get_x_amz_expires_not_expired():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=100000, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    eq(res['status_code'], 200)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='check x-amz-expires value out of range zero')
+@attr(assertion='fails 403')
+def test_object_raw_get_x_amz_expires_out_range_zero():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=0, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    eq(res['status_code'], 403)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='check x-amz-expires value out of max range')
+@attr(assertion='fails 403')
+def test_object_raw_get_x_amz_expires_out_max_range():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=609901, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    eq(res['status_code'], 403)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='check x-amz-expires value out of positive range')
+@attr(assertion='fails 403')
+def test_object_raw_get_x_amz_expires_out_positive_range():
+    bucket_name = _setup_bucket_object_acl('public-read', 'public-read')
+    client = get_client()
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+
+    url = client.generate_presigned_url(ClientMethod='get_object', Params=params, ExpiresIn=-7, HttpMethod='GET')
+
+    res = requests.get(url, verify=get_config_ssl_verify()).__dict__
+    eq(res['status_code'], 403)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='unauthenticated, no object acls')
+@attr(assertion='fails 403')
+def test_object_anon_put():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo')
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    e = assert_raises(ClientError, unauthenticated_client.put_object, Bucket=bucket_name, Key='foo', Body='foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='unauthenticated, publicly writable object')
+@attr(assertion='succeeds')
+def test_object_anon_put_write_access():
+    bucket_name = _setup_bucket_acl('public-read-write')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo')
+
+    unauthenticated_client = get_unauthenticated_client()
+
+    response = unauthenticated_client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='authenticated, no object acls')
+@attr(assertion='succeeds')
+def test_object_put_authenticated():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.put_object(Bucket=bucket_name, Key='foo', Body='foo')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='authenticated, expired presigned URL')
+@attr(assertion='fails 403')
+def test_object_raw_put_authenticated_expired():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo')
+
+    params = {'Bucket': bucket_name, 'Key': 'foo'}
+    url = client.generate_presigned_url(ClientMethod='put_object', Params=params, ExpiresIn=-1000, HttpMethod='PUT')
+
+    # generate_presigned_url() Params does not accept a 'Body', so the body is passed to requests.put() instead
+    res = requests.put(url, data="foo", verify=get_config_ssl_verify()).__dict__
+    eq(res['status_code'], 403)
+
+def check_bad_bucket_name(bucket_name):
+    """
+    Attempt to create a bucket with a specified name, and confirm
+    that the request fails because of an invalid bucket name.
+    """
+    client = get_client()
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidBucketName')
+
+
+# AWS does not enforce all documented bucket restrictions.
+# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
+@attr('fails_on_aws')
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='name begins with underscore')
+@attr(assertion='fails with subdomain: 400')
+def test_bucket_create_naming_bad_starts_nonalpha():
+    bucket_name = get_new_bucket_name()
+    check_bad_bucket_name('_' + bucket_name)
+
+def check_invalid_bucketname(invalid_name):
+    """
+    Send a CreateBucket request with an invalid bucket name, rewriting
+    the request URL to bypass the ParamValidationError that botocore
+    would raise if the invalid name were passed in normally.
+    Returns the status and error code from the failure.
+    """
+    client = get_client()
+    valid_bucket_name = get_new_bucket_name()
+    def replace_bucketname_from_url(**kwargs):
+        url = kwargs['params']['url']
+        new_url = url.replace(valid_bucket_name, invalid_name)
+        kwargs['params']['url'] = new_url
+    client.meta.events.register('before-call.s3.CreateBucket', replace_bucketname_from_url)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=invalid_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    return (status, error_code)
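+
+# Unlike check_bad_bucket_name() above, which sends the name through the
+# normal client path, check_invalid_bucketname() rewrites the request URL
+# after botocore's parameter validation has already run, so names botocore
+# itself would reject (e.g. 'foo_bar') still reach the server. Typical
+# usage, as in the dns-naming tests below:
+#
+#     status, error_code = check_invalid_bucketname('foo_bar')
+#     eq(status, 400)
+#     eq(error_code, 'InvalidBucketName')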
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='short (one character) name')
+@attr(assertion='fails 400')
+def test_bucket_create_naming_bad_short_one():
+    check_bad_bucket_name('a')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='short (two character) name')
+@attr(assertion='fails 400')
+def test_bucket_create_naming_bad_short_two():
+    check_bad_bucket_name('aa')
+
+def check_good_bucket_name(name, _prefix=None):
+    """
+    Attempt to create a bucket with a specified name
+    and (specified or default) prefix, asserting that
+    the creation succeeds.
+    """
+    # tests using this with the default prefix must *not* rely on
+    # being able to set the initial character, or exceed the max len
+
+    # tests using this with a custom prefix are responsible for doing
+    # their own setup/teardown nukes, with their custom prefix; this
+    # should be very rare
+    if _prefix is None:
+        _prefix = get_prefix()
+    bucket_name = '{prefix}{name}'.format(
+            prefix=_prefix,
+            name=name,
+            )
+    client = get_client()
+    response = client.create_bucket(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+def _test_bucket_create_naming_good_long(length):
+    """
+    Attempt to create a bucket whose name (including the
+    prefix) is of a specified length.
+    """
+    # tests using this with the default prefix must *not* rely on
+    # being able to set the initial character, or exceed the max len
+
+    # tests using this with a custom prefix are responsible for doing
+    # their own setup/teardown nukes, with their custom prefix; this
+    # should be very rare
+    prefix = get_new_bucket_name()
+    assert len(prefix) < 63
+    num = length - len(prefix)
+    name = num * 'a'
+
+    bucket_name = '{prefix}{name}'.format(
+            prefix=prefix,
+            name=name,
+            )
+    client = get_client()
+    response = client.create_bucket(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/60 byte name')
+@attr(assertion='fails with subdomain')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_good_long_60():
+    _test_bucket_create_naming_good_long(60)
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/61 byte name')
+@attr(assertion='fails with subdomain')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_good_long_61():
+    _test_bucket_create_naming_good_long(61)
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/62 byte name')
+@attr(assertion='fails with subdomain')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_good_long_62():
+    _test_bucket_create_naming_good_long(62)
+
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/63 byte name')
+@attr(assertion='fails with subdomain')
+def test_bucket_create_naming_good_long_63():
+    _test_bucket_create_naming_good_long(63)
+
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list w/61 byte name')
+@attr(assertion='fails with subdomain')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_list_long_name():
+    prefix = get_new_bucket_name()
+    length = 61
+    num = length - len(prefix)
+    name = num * 'a'
+
+    bucket_name = '{prefix}{name}'.format(
+            prefix=prefix,
+            name=name,
+            )
+    bucket = get_new_bucket_resource(name=bucket_name)
+    is_empty = _bucket_is_empty(bucket)
+    eq(is_empty, True)
+
+# AWS does not enforce all documented bucket restrictions.
+# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/index.html?BucketRestrictions.html
+@attr('fails_on_aws')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/ip address for name')
+@attr(assertion='fails on aws')
+def test_bucket_create_naming_bad_ip():
+    check_bad_bucket_name('192.168.5.123')
+
+# test_bucket_create_naming_dns_* are valid but not recommended
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/underscore in name')
+@attr(assertion='fails')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_underscore():
+    invalid_bucketname = 'foo_bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    eq(status, 400)
+    eq(error_code, 'InvalidBucketName')
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/100 byte name')
+@attr(assertion='fails with subdomain')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+def test_bucket_create_naming_dns_long():
+    prefix = get_prefix()
+    assert len(prefix) < 50
+    num = 63 - len(prefix)
+    check_good_bucket_name(num * 'a')
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/dash at end of name')
+@attr(assertion='fails')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dash_at_end():
+    invalid_bucketname = 'foo-'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    eq(status, 400)
+    eq(error_code, 'InvalidBucketName')
+
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/.. in name')
+@attr(assertion='fails')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dot_dot():
+    invalid_bucketname = 'foo..bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    eq(status, 400)
+    eq(error_code, 'InvalidBucketName')
+
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/.- in name')
+@attr(assertion='fails')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dot_dash():
+    invalid_bucketname = 'foo.-bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    eq(status, 400)
+    eq(error_code, 'InvalidBucketName')
+
+
+# Breaks DNS with SubdomainCallingFormat
+@attr('fails_with_subdomain')
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create w/-. in name')
+@attr(assertion='fails')
+@attr('fails_on_aws') # <Error><Code>InvalidBucketName</Code><Message>The specified bucket is not valid.</Message>...</Error>
+# Should now pass on AWS even though it has 'fails_on_aws' attr.
+def test_bucket_create_naming_dns_dash_dot():
+    invalid_bucketname = 'foo-.bar'
+    status, error_code = check_invalid_bucketname(invalid_bucketname)
+    eq(status, 400)
+    eq(error_code, 'InvalidBucketName')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='re-create')
+def test_bucket_create_exists():
+    # aws-s3 default region allows recreation of buckets
+    # but all other regions fail with BucketAlreadyOwnedByYou.
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name)
+    try:
+        response = client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        status, error_code = _get_status_and_error_code(e.response)
+        eq(status, 409)
+        eq(error_code, 'BucketAlreadyOwnedByYou')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get location')
+@attr('fails_on_dbstore')
+def test_bucket_get_location():
+    location_constraint = get_main_api_name()
+    if not location_constraint:
+        raise SkipTest
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': location_constraint})
+
+    response = client.get_bucket_location(Bucket=bucket_name)
+    if location_constraint == "":
+        location_constraint = None
+    eq(response['LocationConstraint'], location_constraint)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='re-create by non-owner')
+@attr(assertion='fails 409')
+@attr('fails_on_dbstore')
+def test_bucket_create_exists_nonowner():
+    # Bucket names are shared across a global namespace, so no two
+    # users can create a bucket with the same name.
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    alt_client = get_alt_client()
+
+    client.create_bucket(Bucket=bucket_name)
+    e = assert_raises(ClientError, alt_client.create_bucket, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 409)
+    eq(error_code, 'BucketAlreadyExists')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='re-create with existing acl')
+@attr(assertion='fails 409')
+@attr('fails_on_dbstore')
+def test_bucket_recreate_overwrite_acl():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name, ACL='public-read')
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 409)
+    eq(error_code, 'BucketAlreadyExists')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='re-create with new acl')
+@attr(assertion='fails 409')
+@attr('fails_on_dbstore')
+def test_bucket_recreate_new_acl():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    client.create_bucket(Bucket=bucket_name)
+    e = assert_raises(ClientError, client.create_bucket, Bucket=bucket_name, ACL='public-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 409)
+    eq(error_code, 'BucketAlreadyExists')
+
+def check_access_denied(fn, *args, **kwargs):
+    e = assert_raises(ClientError, fn, *args, **kwargs)
+    status = _get_status(e.response)
+    eq(status, 403)
+
+
+def check_grants(got, want):
+    """
+    Check that grants list in got matches the dictionaries in want,
+    in any order.
+    """
+    eq(len(got), len(want))
+
+    # grant order in 'got' is not guaranteed; when DisplayName is present,
+    # sort both lists by it so the comparison is order-independent
+    if got[0]["Grantee"].get("DisplayName"):
+        got.sort(key=lambda x: x["Grantee"].get("DisplayName"))
+        want.sort(key=lambda x: x["DisplayName"])
+
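+    # pop each expected field out of the received grant; anything left in
+    # the Grantee dict after the pops is a field the test did not expect,
+    # which the final eq(g, {'Grantee': {}}) below will catch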
+    for g, w in zip(got, want):
+        w = dict(w)
+        g = dict(g)
+        eq(g.pop('Permission', None), w['Permission'])
+        eq(g['Grantee'].pop('DisplayName', None), w['DisplayName'])
+        eq(g['Grantee'].pop('ID', None), w['ID'])
+        eq(g['Grantee'].pop('Type', None), w['Type'])
+        eq(g['Grantee'].pop('URI', None), w['URI'])
+        eq(g['Grantee'].pop('EmailAddress', None), w['EmailAddress'])
+        eq(g, {'Grantee': {}})
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='default acl')
+@attr(assertion='read back expected defaults')
+def test_bucket_acl_default():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    eq(response['Owner']['DisplayName'], display_name)
+    eq(response['Owner']['ID'], user_id)
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='public-read acl')
+@attr(assertion='read back expected defaults')
+@attr('fails_on_aws') # <Error><Code>IllegalLocationConstraintException</Code><Message>The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.</Message>
+def test_bucket_acl_canned_during_create():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='acl: public-read,private')
+@attr(assertion='read back expected values')
+def test_bucket_acl_canned():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    client.put_bucket_acl(ACL='private', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='bucket.acls')
+@attr(method='put')
+@attr(operation='acl: public-read-write')
+@attr(assertion='read back expected values')
+def test_bucket_acl_canned_publicreadwrite():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='acl: authenticated-read')
+@attr(assertion='read back expected values')
+def test_bucket_acl_canned_authenticatedread():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(ACL='authenticated-read', Bucket=bucket_name)
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object.acls')
+@attr(method='get')
+@attr(operation='default acl')
+@attr(assertion='read back expected defaults')
+def test_object_acl_default():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='acl public-read')
+@attr(assertion='read back expected values')
+def test_object_acl_canned_during_create():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='acl public-read,private')
+@attr(assertion='read back expected values')
+def test_object_acl_canned():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # Since it defaults to private, set it public-read first
+    client.put_object(ACL='public-read', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    # Then back to private.
+    client.put_object_acl(ACL='private',Bucket=bucket_name, Key='foo')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+    grants = response['Grants']
+
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='acl public-read-write')
+@attr(assertion='read back expected values')
+def test_object_acl_canned_publicreadwrite():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(ACL='public-read-write', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='acl authenticated-read')
+@attr(assertion='read back expected values')
+def test_object_acl_canned_authenticatedread():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(ACL='authenticated-read', Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AuthenticatedUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='acl bucket-owner-read')
+@attr(assertion='read back expected values')
+def test_object_acl_canned_bucketownerread():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
+    bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
+    bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
+
+    alt_client.put_object(ACL='bucket-owner-read', Bucket=bucket_name, Key='foo')
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    alt_display_name = get_alt_display_name()
+    alt_user_id = get_alt_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='READ',
+                ID=bucket_owner_id,
+                DisplayName=bucket_owner_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='acl bucket-owner-full-control')
+@attr(assertion='read back expected values')
+def test_object_acl_canned_bucketownerfullcontrol():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    bucket_acl_response = main_client.get_bucket_acl(Bucket=bucket_name)
+    bucket_owner_id = bucket_acl_response['Grants'][2]['Grantee']['ID']
+    bucket_owner_display_name = bucket_acl_response['Grants'][2]['Grantee']['DisplayName']
+
+    alt_client.put_object(ACL='bucket-owner-full-control', Bucket=bucket_name, Key='foo')
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    alt_display_name = get_alt_display_name()
+    alt_user_id = get_alt_user_id()
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=bucket_owner_id,
+                DisplayName=bucket_owner_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='set write-acp')
+@attr(assertion='does not modify owner')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
+def test_object_acl_full_control_verify_owner():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
+    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
+
+    grant = { 'Grants': [{'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'READ_ACP'}], 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
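+    # the alt user now holds FULL_CONTROL, so it can rewrite the ACL itself;
+    # the object's recorded owner must still be the main user afterwards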
+    alt_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grant)
+
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='foo')
+    eq(response['Owner']['ID'], main_user_id)
+
+def add_obj_user_grant(bucket_name, key, grant):
+    """
+    Append a grant to the object's existing grants and return an
+    AccessControlPolicy dict suitable for put_object_acl, for an object
+    owned by the main user (not the alt user).
+    A grant is a dictionary of the form:
+    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
+
+    """
+    client = get_client()
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key)
+
+    grants = response['Grants']
+    grants.append(grant)
+
+    grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
+    return grant
+
+@attr(resource='object.acls')
+@attr(method='put')
+@attr(operation='set write-acp')
+@attr(assertion='does not modify other attributes')
+def test_object_acl_full_control_verify_attributes():
+    bucket_name = get_new_bucket_name()
+    main_client = get_client()
+    alt_client = get_alt_client()
+
+    main_client.create_bucket(Bucket=bucket_name, ACL='public-read-write')
+
+    header = {'x-amz-foo': 'bar'}
+    # lambda to add any header
+    add_header = (lambda **kwargs: kwargs['params']['headers'].update(header))
+
+    main_client.meta.events.register('before-call.s3.PutObject', add_header)
+    main_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = main_client.get_object(Bucket=bucket_name, Key='foo')
+    content_type = response['ContentType']
+    etag = response['ETag']
+
+    alt_user_id = get_alt_user_id()
+
+    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
+
+    grants = add_obj_user_grant(bucket_name, 'foo', grant)
+
+    main_client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=grants)
+
+    response = main_client.get_object(Bucket=bucket_name, Key='foo')
+    eq(content_type, response['ContentType'])
+    eq(etag, response['ETag'])
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='set acl private')
+@attr(assertion='a private object can be set to private')
+def test_bucket_acl_canned_private_to_private():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.put_bucket_acl(Bucket=bucket_name, ACL='private')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+def add_bucket_user_grant(bucket_name, grant):
+    """
+    Append a grant to the bucket's existing grants and return an
+    AccessControlPolicy dict suitable for put_bucket_acl, for a bucket
+    owned by the main user (not the alt user).
+    A grant is a dictionary of the form:
+    {u'Grantee': {u'Type': 'type', u'DisplayName': 'name', u'ID': 'id'}, u'Permission': 'PERM'}
+    """
+    client = get_client()
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    grants.append(grant)
+
+    grant = {'Grants': grants, 'Owner': {'DisplayName': main_display_name, 'ID': main_user_id}}
+
+    return grant
+
+def _check_object_acl(permission):
+    """
+    Sets the permission on an object then checks to see
+    if it was set
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+
+    policy = {}
+    policy['Owner'] = response['Owner']
+    policy['Grants'] = response['Grants']
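+    # rewrite the first grant (the owner's initial FULL_CONTROL) to carry the requested permission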
+    policy['Grants'][0]['Permission'] = permission
+
+    client.put_object_acl(Bucket=bucket_name, Key='foo', AccessControlPolicy=policy)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo')
+    grants = response['Grants']
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission=permission,
+                ID=main_user_id,
+                DisplayName=main_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set acl FULL_CONTROL')
+@attr(assertion='reads back correctly')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
+def test_object_acl():
+    _check_object_acl('FULL_CONTROL')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set acl WRITE')
+@attr(assertion='reads back correctly')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
+def test_object_acl_write():
+    _check_object_acl('WRITE')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set acl WRITE_ACP')
+@attr(assertion='reads back correctly')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
+def test_object_acl_writeacp():
+    _check_object_acl('WRITE_ACP')
+
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set acl READ')
+@attr(assertion='reads back correctly')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
+def test_object_acl_read():
+    _check_object_acl('READ')
+
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set acl READ_ACP')
+@attr(assertion='reads back correctly')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
+def test_object_acl_readacp():
+    _check_object_acl('READ_ACP')
+
+
+def _bucket_acl_grant_userid(permission):
+    """
+    create a new bucket, grant a specific user the specified
+    permission, read back the acl and verify correct setting
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': permission}
+
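+    # merge the new grant into the bucket's existing grants so the owner keeps FULL_CONTROL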
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission=permission,
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=main_user_id,
+                DisplayName=main_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    return bucket_name
+
+def _check_bucket_acl_grant_can_read(bucket_name):
+    """
+    verify ability to read the specified bucket
+    """
+    alt_client = get_alt_client()
+    alt_client.head_bucket(Bucket=bucket_name)
+
+def _check_bucket_acl_grant_cant_read(bucket_name):
+    """
+    verify inability to read the specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.head_bucket, Bucket=bucket_name)
+
+def _check_bucket_acl_grant_can_readacp(bucket_name):
+    """
+    verify ability to read acls on specified bucket
+    """
+    alt_client = get_alt_client()
+    alt_client.get_bucket_acl(Bucket=bucket_name)
+
+def _check_bucket_acl_grant_cant_readacp(bucket_name):
+    """
+    verify inability to read acls on specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.get_bucket_acl, Bucket=bucket_name)
+
+def _check_bucket_acl_grant_can_write(bucket_name):
+    """
+    verify ability to write the specified bucket
+    """
+    alt_client = get_alt_client()
+    alt_client.put_object(Bucket=bucket_name, Key='foo-write', Body='bar')
+
+def _check_bucket_acl_grant_cant_write(bucket_name):
+    """
+    verify inability to write the specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key='foo-write', Body='bar')
+
+def _check_bucket_acl_grant_can_writeacp(bucket_name):
+    """
+    verify ability to set acls on the specified bucket
+    """
+    alt_client = get_alt_client()
+    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+def _check_bucket_acl_grant_cant_writeacp(bucket_name):
+    """
+    verify inability to set acls on the specified bucket
+    """
+    alt_client = get_alt_client()
+    check_access_denied(alt_client.put_bucket_acl, Bucket=bucket_name, ACL='public-read')
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='set acl w/userid FULL_CONTROL')
+@attr(assertion='can read/write data/acls')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${USER}</ArgumentValue>
+def test_bucket_acl_grant_userid_fullcontrol():
+    bucket_name = _bucket_acl_grant_userid('FULL_CONTROL')
+
+    # alt user can read
+    _check_bucket_acl_grant_can_read(bucket_name)
+    # can read acl
+    _check_bucket_acl_grant_can_readacp(bucket_name)
+    # can write
+    _check_bucket_acl_grant_can_write(bucket_name)
+    # can write acl
+    _check_bucket_acl_grant_can_writeacp(bucket_name)
+
+    client = get_client()
+
+    bucket_acl_response = client.get_bucket_acl(Bucket=bucket_name)
+    owner_id = bucket_acl_response['Owner']['ID']
+    owner_display_name = bucket_acl_response['Owner']['DisplayName']
+
+    main_display_name = get_main_display_name()
+    main_user_id = get_main_user_id()
+
+    eq(owner_id, main_user_id)
+    eq(owner_display_name, main_display_name)
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='set acl w/userid READ')
+@attr(assertion='can read data, no other r/w')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
+def test_bucket_acl_grant_userid_read():
+    bucket_name = _bucket_acl_grant_userid('READ')
+
+    # alt user can read
+    _check_bucket_acl_grant_can_read(bucket_name)
+    # can't read acl
+    _check_bucket_acl_grant_cant_readacp(bucket_name)
+    # can't write
+    _check_bucket_acl_grant_cant_write(bucket_name)
+    # can't write acl
+    _check_bucket_acl_grant_cant_writeacp(bucket_name)
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='set acl w/userid READ_ACP')
+@attr(assertion='can read acl, no other r/w')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
+def test_bucket_acl_grant_userid_readacp():
+    bucket_name = _bucket_acl_grant_userid('READ_ACP')
+
+    # alt user can't read
+    _check_bucket_acl_grant_cant_read(bucket_name)
+    # can read acl
+    _check_bucket_acl_grant_can_readacp(bucket_name)
+    # can't write
+    _check_bucket_acl_grant_cant_write(bucket_name)
+    # can't write acl
+    _check_bucket_acl_grant_cant_writeacp(bucket_name)
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='set acl w/userid WRITE')
+@attr(assertion='can write data, no other r/w')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
+def test_bucket_acl_grant_userid_write():
+    bucket_name = _bucket_acl_grant_userid('WRITE')
+
+    # alt user can't read
+    _check_bucket_acl_grant_cant_read(bucket_name)
+    # can't read acl
+    _check_bucket_acl_grant_cant_readacp(bucket_name)
+    # can write
+    _check_bucket_acl_grant_can_write(bucket_name)
+    # can't write acl
+    _check_bucket_acl_grant_cant_writeacp(bucket_name)
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='set acl w/userid WRITE_ACP')
+@attr(assertion='can write acls, no other r/w')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
+def test_bucket_acl_grant_userid_writeacp():
+    bucket_name = _bucket_acl_grant_userid('WRITE_ACP')
+
+    # alt user can't read
+    _check_bucket_acl_grant_cant_read(bucket_name)
+    # can't read acl
+    _check_bucket_acl_grant_cant_readacp(bucket_name)
+    # can't write
+    _check_bucket_acl_grant_cant_write(bucket_name)
+    # can write acl
+    _check_bucket_acl_grant_can_writeacp(bucket_name)
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='set acl w/invalid userid')
+@attr(assertion='fails 400')
+def test_bucket_acl_grant_nonexist_user():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    bad_user_id = '_foo'
+
+    grant = {'Grantee': {'ID': bad_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='revoke all ACLs')
+@attr(assertion='can: read obj, get/set bucket acl, cannot write objs')
+def test_bucket_acl_no_grants():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_bucket_acl(Bucket=bucket_name)
+    old_grants = response['Grants']
+    policy = {}
+    policy['Owner'] = response['Owner']
+    # clear grants
+    policy['Grants'] = []
+
+    # remove read/write permission
+    response = client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
+
+    # can read
+    client.get_object(Bucket=bucket_name, Key='foo')
+
+    # can't write
+    check_access_denied(client.put_object, Bucket=bucket_name, Key='baz', Body='a')
+
+    # TODO: fix this test once a fix is in for the same issues as in
+    # test_access_bucket_private_object_private
+    client2 = get_client()
+    # owner can read acl
+    client2.get_bucket_acl(Bucket=bucket_name)
+
+    # owner can write acl
+    client2.put_bucket_acl(Bucket=bucket_name, ACL='private')
+
+    # set policy back to original so that bucket can be cleaned up
+    policy['Grants'] = old_grants
+    client2.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
+
+def _get_acl_header(user_id=None, perms=None):
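+    # each grant header is a (name, value) tuple, e.g. ('x-amz-grant-read', 'id=<user id>')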
+    all_perms = ["read", "write", "read-acp", "write-acp", "full-control"]
+    headers = []
+
+    if user_id is None:
+        user_id = get_alt_user_id()
+
+    if perms is None:
+        perms = all_perms
+
+    for perm in perms:
+        header = ("x-amz-grant-{perm}".format(perm=perm), "id={uid}".format(uid=user_id))
+        headers.append(header)
+
+    return headers
+
+@attr(resource='object')
+@attr(method='PUT')
+@attr(operation='add all grants to user through headers')
+@attr(assertion='adds all grants individually to second user')
+@attr('fails_on_dho')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
+def test_object_header_acl_grants():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    headers = _get_acl_header()
+
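+    # splice the raw x-amz-grant-* headers into the request just before it is signed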
+    def add_headers_before_sign(**kwargs):
+        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
+        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
+
+    client.meta.events.register('before-sign.s3.PutObject', add_headers_before_sign)
+
+    client.put_object(Bucket=bucket_name, Key='foo_key', Body='bar')
+
+    response = client.get_object_acl(Bucket=bucket_name, Key='foo_key')
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='READ_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+@attr(resource='bucket')
+@attr(method='PUT')
+@attr(operation='add all grants to user through headers')
+@attr(assertion='adds all grants individually to second user')
+@attr('fails_on_dho')
+@attr('fails_on_aws') #  <Error><Code>InvalidArgument</Code><Message>Invalid id</Message><ArgumentName>CanonicalUser/ID</ArgumentName><ArgumentValue>${ALTUSER}</ArgumentValue>
+def test_bucket_header_acl_grants():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+
+    headers = _get_acl_header()
+
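+    # as above, splice the grant headers into the raw CreateBucket request before signing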
+    def add_headers_before_sign(**kwargs):
+        updated_headers = (kwargs['request'].__dict__['headers'].__dict__['_headers'] + headers)
+        kwargs['request'].__dict__['headers'].__dict__['_headers'] = updated_headers
+
+    client.meta.events.register('before-sign.s3.CreateBucket', add_headers_before_sign)
+
+    client.create_bucket(Bucket=bucket_name)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='READ_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='WRITE_ACP',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    alt_client = get_alt_client()
+
+    alt_client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    # set bucket acl to public-read-write so that teardown can work
+    alt_client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
+
+
+# This test will fail on DH Objects. DHO allows multiple users with one account, which
+# would violate the uniqueness requirement of a user's email. As such, DHO users are
+# created without an email.
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='add second FULL_CONTROL user')
+@attr(assertion='works for S3, fails for DHO')
+@attr('fails_on_aws') #  <Error><Code>AmbiguousGrantByEmailAddress</Code><Message>The e-mail address you provided is associated with more than one account. Please retry your request using a different identification method or after resolving the ambiguity.</Message>
+def test_bucket_acl_grant_email():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    alt_user_id = get_alt_user_id()
+    alt_display_name = get_alt_display_name()
+    alt_email_address = get_alt_email()
+
+    main_user_id = get_main_user_id()
+    main_display_name = get_main_display_name()
+
+    grant = {'Grantee': {'EmailAddress': alt_email_address, 'Type': 'AmazonCustomerByEmail' }, 'Permission': 'FULL_CONTROL'}
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=alt_user_id,
+                DisplayName=alt_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=main_user_id,
+                DisplayName=main_display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+        ]
+    )
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='add acl for nonexistent email address')
+@attr(assertion='fails 400')
+def test_bucket_acl_grant_email_not_exist():
+    # behavior not documented by amazon
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    NONEXISTENT_EMAIL = 'doesnotexist@dreamhost.com.invalid'
+    grant = {'Grantee': {'EmailAddress': NONEXISTENT_EMAIL, 'Type': 'AmazonCustomerByEmail'}, 'Permission': 'FULL_CONTROL'}
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name, AccessControlPolicy=grant)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'UnresolvableGrantByEmailAddress')
+
+@attr(resource='bucket')
+@attr(method='ACLs')
+@attr(operation='revoke all ACLs')
+@attr(assertion='acls read back as empty')
+def test_bucket_acl_revoke_all():
+    # revoke all access, including the owner's access
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+    response = client.get_bucket_acl(Bucket=bucket_name)
+    old_grants = response['Grants']
+    policy = {}
+    policy['Owner'] = response['Owner']
+    # clear grants
+    policy['Grants'] = []
+
+    # remove read/write permission for everyone
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
+
+    response = client.get_bucket_acl(Bucket=bucket_name)
+
+    eq(len(response['Grants']), 0)
+
+    # set policy back to original so that bucket can be cleaned up
+    policy['Grants'] = old_grants
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=policy)
+
+# TODO rgw log_bucket.set_as_logging_target() gives 403 Forbidden
+# http://tracker.newdream.net/issues/984
+@attr(resource='bucket.log')
+@attr(method='put')
+@attr(operation='set/enable/disable logging target')
+@attr(assertion='operations succeed')
+@attr('fails_on_rgw')
+def test_logging_toggle():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    main_display_name = get_main_display_name()
+    main_user_id = get_main_user_id()
+
+    status = {'LoggingEnabled': {'TargetBucket': bucket_name, 'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}], 'TargetPrefix': 'foologgingprefix'}}
+
+    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
+    client.get_bucket_logging(Bucket=bucket_name)
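+    # attempt to disable logging again with an empty LoggingEnabled element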
+    status = {'LoggingEnabled': {}}
+    client.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus=status)
+    # NOTE: this does not actually test whether or not logging works
+
+def _setup_access(bucket_acl, object_acl):
+    """
+    Simple test fixture: create a bucket with given ACL, with objects:
+    - a: owning user, given ACL
+    - a2: same object accessed by some other user
+    - b: owning user, default ACL in bucket w/given ACL
+    - b2: same object accessed by some other user
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key1 = 'foo'
+    key2 = 'bar'
+    newkey = 'new'
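+    # key1 receives the explicit object ACL ("a"/"a2"), key2 keeps the bucket
+    # default ACL ("b"/"b2"), and newkey is only used to probe write access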
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL=bucket_acl)
+    client.put_object(Bucket=bucket_name, Key=key1, Body='foocontent')
+    client.put_object_acl(Bucket=bucket_name, Key=key1, ACL=object_acl)
+    client.put_object(Bucket=bucket_name, Key=key2, Body='barcontent')
+
+    return bucket_name, key1, key2, newkey
+
+def get_bucket_key_names(bucket_name):
+    objs_list = get_objects_list(bucket_name)
+    return frozenset(objs_list)
+
+def list_bucket_storage_class(client, bucket_name):
+    result = defaultdict(list)
+    response = client.list_object_versions(Bucket=bucket_name)
+    for k in response['Versions']:
+        result[k['StorageClass']].append(k)
+
+    return result
+
+def list_bucket_versions(client, bucket_name):
+    result = defaultdict(list)
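+    # list_object_versions returns the bucket name in 'Name', so all versions land under a single key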
+    response = client.list_object_versions(Bucket=bucket_name)
+    for k in response['Versions']:
+        result[response['Name']].append(k)
+
+    return result
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: private/private')
+@attr(assertion='public has no access to bucket or objects')
+def test_access_bucket_private_object_private():
+    # all the test_access_* tests follow this template
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
+
+    alt_client = get_alt_client()
+    # acled object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    # default object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    # bucket read fail
+    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
+
+    # acled object write fail
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
+    # NOTE: The above put causes the connection to go bad, so the client can't be used
+    # anymore. This can be solved either by:
+    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
+    # 2) getting a new client, hence the creation of alt_client{2,3} for the tests below
+    # TODO: test this from another host and on AWS; report it to Amazon if the findings are identical
+
+    alt_client2 = get_alt_client()
+    # default object write fail
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+    # bucket write fail
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: private/private with list-objects-v2')
+@attr(assertion='public has no access to bucket or objects')
+@attr('list-objects-v2')
+def test_access_bucket_private_objectv2_private():
+    # all the test_access_* tests follow this template
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='private')
+
+    alt_client = get_alt_client()
+    # acled object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    # default object read fail
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    # bucket read fail
+    check_access_denied(alt_client.list_objects_v2, Bucket=bucket_name)
+
+    # acled object write fail
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
+    # NOTE: The above put causes the connection to go bad, so the client can't be used
+    # anymore. This can be solved either by:
+    # 1) putting an empty string ('') in the 'Body' field of those put_object calls
+    # 2) getting a new client, hence the creation of alt_client{2,3} for the tests below
+    # TODO: test this from another host and on AWS; report it to Amazon if the findings are identical
+
+    alt_client2 = get_alt_client()
+    # default object write fail
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+    # bucket write fail
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: private/public-read')
+@attr(assertion='public can only read readable object')
+def test_access_bucket_private_object_publicread():
+
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read, b gets default (private)
+    eq(body, 'foocontent')
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: private/public-read with list-objects-v2')
+@attr(assertion='public can only read readable object')
+@attr('list-objects-v2')
+def test_access_bucket_private_objectv2_publicread():
+
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read, b gets default (private)
+    eq(body, 'foocontent')
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: private/public-read-write')
+@attr(assertion='public can only read the readable object')
+def test_access_bucket_private_object_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read-only ... because it is in a private bucket
+    # b gets default (private)
+    eq(body, 'foocontent')
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: private/public-read-write with list-objects-v2')
+@attr(assertion='public can only read the readable object')
+@attr('list-objects-v2')
+def test_access_bucket_private_objectv2_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='private', object_acl='public-read-write')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read-only ... because it is in a private bucket
+    # b gets default (private)
+    eq(body, 'foocontent')
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+    check_access_denied(alt_client3.list_objects_v2, Bucket=bucket_name)
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: public-read/private')
+@attr(assertion='public can only list the bucket')
+def test_access_bucket_publicread_object_private():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='private')
+    alt_client = get_alt_client()
+
+    # a should be private, b gets default (private)
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='barcontent')
+
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
+
+    eq(objs, ['bar', 'foo'])
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: public-read/public-read')
+@attr(assertion='public can read readable objects and list bucket')
+def test_access_bucket_publicread_object_publicread():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read')
+    alt_client = get_alt_client()
+
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    # a should be public-read, b gets default (private)
+    body = _get_body(response)
+    eq(body, 'foocontent')
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
+
+    eq(objs, ['bar', 'foo'])
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: public-read/public-read-write')
+@attr(assertion='public can read readable objects and list bucket')
+def test_access_bucket_publicread_object_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read', object_acl='public-read-write')
+    alt_client = get_alt_client()
+
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+
+    # a should be public-read-only ... because it is in a r/o bucket
+    # b gets default (private)
+    eq(body, 'foocontent')
+
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1, Body='foooverwrite')
+
+    alt_client2 = get_alt_client()
+    check_access_denied(alt_client2.get_object, Bucket=bucket_name, Key=key2)
+    check_access_denied(alt_client2.put_object, Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    alt_client3 = get_alt_client()
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client3)
+
+    eq(objs, ['bar', 'foo'])
+    check_access_denied(alt_client3.put_object, Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: public-read-write/private')
+@attr(assertion='private objects cannot be read, but can be overwritten')
+def test_access_bucket_publicreadwrite_object_private():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='private')
+    alt_client = get_alt_client()
+
+    # a should be private, b gets default (private)
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key1)
+    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
+
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client)
+    eq(objs, ['bar', 'foo'])
+    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: public-read-write/public-read')
+@attr(assertion='private objects cannot be read, but can be overwritten')
+def test_access_bucket_publicreadwrite_object_publicread():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read')
+    alt_client = get_alt_client()
+
+    # a should be public-read, b gets default (private)
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+
+    body = _get_body(response)
+    eq(body, 'foocontent')
+    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='barcontent')
+
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
+
+    objs = get_objects_list(bucket=bucket_name, client=alt_client)
+    eq(objs, ['bar', 'foo'])
+    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='object')
+@attr(method='ACLs')
+@attr(operation='set bucket/object acls: public-read-write/public-read-write')
+@attr(assertion='private objects cannot be read, but can be overwritten')
+def test_access_bucket_publicreadwrite_object_publicreadwrite():
+    bucket_name, key1, key2, newkey = _setup_access(bucket_acl='public-read-write', object_acl='public-read-write')
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key=key1)
+    body = _get_body(response)
+
+    # a should be public-read-write, b gets default (private)
+    eq(body, 'foocontent')
+    alt_client.put_object(Bucket=bucket_name, Key=key1, Body='foooverwrite')
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key=key2)
+    alt_client.put_object(Bucket=bucket_name, Key=key2, Body='baroverwrite')
+    objs = get_objects_list(bucket=bucket_name, client=alt_client)
+    eq(objs, ['bar', 'foo'])
+    alt_client.put_object(Bucket=bucket_name, Key=newkey, Body='newcontent')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all buckets')
+@attr(assertion='returns all expected buckets')
+def test_buckets_create_then_list():
+    client = get_client()
+    bucket_names = []
+    for i in range(5):
+        bucket_name = get_new_bucket_name()
+        bucket_names.append(bucket_name)
+
+    for name in bucket_names:
+        client.create_bucket(Bucket=name)
+
+    buckets_list = get_buckets_list()
+
+    for name in bucket_names:
+        if name not in buckets_list:
+            raise RuntimeError("S3 implementation's GET on Service did not return bucket we created: %r" % name)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all buckets')
+@attr(assertion='all buckets have a sane creation time')
+def test_buckets_list_ctime():
+    # check that creation times are within a day
+    before = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(days=1)
+
+    client = get_client()
+    for i in range(5):
+        client.create_bucket(Bucket=get_new_bucket_name())
+
+    response = client.list_buckets()
+    for bucket in response['Buckets']:
+        ctime = bucket['CreationDate']
+        assert before <= ctime, '%r > %r' % (before, ctime)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all buckets (anonymous)')
+@attr(assertion='succeeds')
+@attr('fails_on_aws')
+def test_list_buckets_anonymous():
+    # Get a connection with bad authorization, then change it to be our new Anonymous auth mechanism,
+    # emulating standard HTTP access.
+    #
+    # While it may have been possible to use httplib directly, doing it this way also allows us to
+    # vary the calling format in testing.
+    unauthenticated_client = get_unauthenticated_client()
+    response = unauthenticated_client.list_buckets()
+    eq(len(response['Buckets']), 0)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all buckets (bad auth)')
+@attr(assertion='fails 403')
+def test_list_buckets_invalid_auth():
+    bad_auth_client = get_bad_auth_client()
+    e = assert_raises(ClientError, bad_auth_client.list_buckets)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'InvalidAccessKeyId')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list all buckets (bad auth)')
+@attr(assertion='fails 403')
+def test_list_buckets_bad_auth():
+    main_access_key = get_main_aws_access_key()
+    bad_auth_client = get_bad_auth_client(aws_access_key_id=main_access_key)
+    e = assert_raises(ClientError, bad_auth_client.list_buckets)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'SignatureDoesNotMatch')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create bucket')
+@attr(assertion='name starts with alphabetic works')
+# this test goes outside the user-configured prefix because it needs to
+# control the initial character of the bucket name
+@nose.with_setup(
+    setup=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
+    teardown=lambda: nuke_prefixed_buckets(prefix='a'+get_prefix()),
+    )
+def test_bucket_create_naming_good_starts_alpha():
+    check_good_bucket_name('foo', _prefix='a'+get_prefix())
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create bucket')
+@attr(assertion='name starts with numeric works')
+# this test goes outside the user-configured prefix because it needs to
+# control the initial character of the bucket name
+@nose.with_setup(
+    setup=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
+    teardown=lambda: nuke_prefixed_buckets(prefix='0'+get_prefix()),
+    )
+def test_bucket_create_naming_good_starts_digit():
+    check_good_bucket_name('foo', _prefix='0'+get_prefix())
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create bucket')
+@attr(assertion='name containing dot works')
+def test_bucket_create_naming_good_contains_period():
+    check_good_bucket_name('aaa.111')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create bucket')
+@attr(assertion='name containing hyphen works')
+def test_bucket_create_naming_good_contains_hyphen():
+    check_good_bucket_name('aaa-111')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='create bucket with objects and recreate it')
+@attr(assertion='bucket recreation not overriding index')
+def test_bucket_recreate_not_overriding():
+    key_names = ['mykey1', 'mykey2']
+    bucket_name = _create_objects(keys=key_names)
+
+    objs_list = get_objects_list(bucket_name)
+    eq(key_names, objs_list)
+
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+
+    objs_list = get_objects_list(bucket_name)
+    eq(key_names, objs_list)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='create and list objects with special names')
+@attr(assertion='special names work')
+@attr('fails_on_dbstore')
+def test_bucket_create_special_key_names():
+    key_names = [
+        ' ',
+        '"',
+        '$',
+        '%',
+        '&',
+        '\'',
+        '<',
+        '>',
+        '_',
+        '_ ',
+        '_ _',
+        '__',
+    ]
+
+    bucket_name = _create_objects(keys=key_names)
+
+    objs_list = get_objects_list(bucket_name)
+    eq(key_names, objs_list)
+
+    client = get_client()
+
+    for name in key_names:
+        eq((name in objs_list), True)
+        response = client.get_object(Bucket=bucket_name, Key=name)
+        body = _get_body(response)
+        eq(name, body)
+        client.put_object_acl(Bucket=bucket_name, Key=name, ACL='private')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='create and list objects with underscore as prefix, list using prefix')
+@attr(assertion='listing works correctly')
+def test_bucket_list_special_prefix():
+    key_names = ['_bla/1', '_bla/2', '_bla/3', '_bla/4', 'abcd']
+    bucket_name = _create_objects(keys=key_names)
+
+    objs_list = get_objects_list(bucket_name)
+
+    eq(len(objs_list), 5)
+
+    objs_list = get_objects_list(bucket_name, prefix='_bla/')
+    eq(len(objs_list), 4)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy zero sized object in same bucket')
+@attr(assertion='works')
+@attr('fails_on_dbstore')
+def test_object_copy_zero_size():
+    key = 'foo123bar'
+    bucket_name = _create_objects(keys=[key])
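+    # overwrite the object with a zero-length body before copying it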
+    fp_a = FakeWriteFile(0, '')
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key, Body=fp_a)
+
+    copy_source = {'Bucket': bucket_name, 'Key': key}
+
+    client.copy(copy_source, bucket_name, 'bar321foo')
+    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+    eq(response['ContentLength'], 0)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy 16mb object in same bucket')
+@attr(assertion='works')
+@attr('fails_on_dbstore')
+def test_object_copy_16m():
+    bucket_name = get_new_bucket()
+    key1 = 'obj1'
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=key1, Body=bytearray(16*1024*1024))
+
+    copy_source = {'Bucket': bucket_name, 'Key': key1}
+    key2 = 'obj2'
+    client.copy_object(Bucket=bucket_name, Key=key2, CopySource=copy_source)
+    response = client.get_object(Bucket=bucket_name, Key=key2)
+    eq(response['ContentLength'], 16*1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object in same bucket')
+@attr(assertion='works')
+@attr('fails_on_dbstore')
+def test_object_copy_same_bucket():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+
+    client.copy(copy_source, bucket_name, 'bar321foo')
+
+    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+    body = _get_body(response)
+    eq('foo', body)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object with content-type')
+@attr(assertion='works')
+@attr('fails_on_dbstore')
+def test_object_copy_verify_contenttype():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    content_type = 'text/bla'
+    client.put_object(Bucket=bucket_name, ContentType=content_type, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+
+    client.copy(copy_source, bucket_name, 'bar321foo')
+
+    response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+    body = _get_body(response)
+    eq('foo', body)
+    response_content_type = response['ContentType']
+    eq(response_content_type, content_type)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object to itself')
+@attr(assertion='fails')
+def test_object_copy_to_itself():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+
+    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'foo123bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidRequest')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='modify object metadata by copying')
+@attr(assertion='fails')
+@attr('fails_on_dbstore')
+def test_object_copy_to_itself_with_metadata():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    metadata = {'foo': 'bar'}
+
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
+    response = client.get_object(Bucket=bucket_name, Key='foo123bar')
+    eq(response['Metadata'], metadata)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object from different bucket')
+@attr(assertion='works')
+@attr('fails_on_dbstore')
+def test_object_copy_diff_bucket():
+    bucket_name1 = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+
+    client = get_client()
+    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
+
+    client.copy(copy_source, bucket_name2, 'bar321foo')
+
+    response = client.get_object(Bucket=bucket_name2, Key='bar321foo')
+    body = _get_body(response)
+    eq('foo', body)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy to an inaccessible bucket')
+@attr(assertion='fails w/AttributeError')
+def test_object_copy_not_owned_bucket():
+    client = get_client()
+    alt_client = get_alt_client()
+    bucket_name1 = get_new_bucket_name()
+    bucket_name2 = get_new_bucket_name()
+    client.create_bucket(Bucket=bucket_name1)
+    alt_client.create_bucket(Bucket=bucket_name2)
+
+    client.put_object(Bucket=bucket_name1, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name1, 'Key': 'foo123bar'}
+
+    e = assert_raises(ClientError, alt_client.copy, copy_source, bucket_name2, 'bar321foo')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy a non-owned object in a non-owned bucket, but with perms')
+@attr(assertion='works')
+def test_object_copy_not_owned_object_bucket():
+    client = get_client()
+    alt_client = get_alt_client()
+    bucket_name = get_new_bucket_name()
+    client.create_bucket(Bucket=bucket_name)
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    alt_user_id = get_alt_user_id()
+
+    grant = {'Grantee': {'ID': alt_user_id, 'Type': 'CanonicalUser' }, 'Permission': 'FULL_CONTROL'}
+    grants = add_obj_user_grant(bucket_name, 'foo123bar', grant)
+    client.put_object_acl(Bucket=bucket_name, Key='foo123bar', AccessControlPolicy=grants)
+
+    grant = add_bucket_user_grant(bucket_name, grant)
+    client.put_bucket_acl(Bucket=bucket_name, AccessControlPolicy=grant)
+
+    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    alt_client.copy(copy_source, bucket_name, 'bar321foo')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object and change acl')
+@attr(assertion='works')
+@attr('fails_on_dbstore')
+def test_object_copy_canned_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    alt_client = get_alt_client()
+    client.put_object(Bucket=bucket_name, Key='foo123bar', Body='foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', ACL='public-read')
+    # check ACL is applied by doing GET from another user
+    alt_client.get_object(Bucket=bucket_name, Key='bar321foo')
+
+    metadata = {'abc': 'def'}
+    copy_source = {'Bucket': bucket_name, 'Key': 'bar321foo'}
+    client.copy_object(ACL='public-read', Bucket=bucket_name, CopySource=copy_source, Key='foo123bar', Metadata=metadata, MetadataDirective='REPLACE')
+
+    # check ACL is applied by doing GET from another user
+    alt_client.get_object(Bucket=bucket_name, Key='foo123bar')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object and retain metadata')
+@attr('fails_on_dbstore')
+def test_object_copy_retaining_metadata():
+    for size in [3, 1024 * 1024]:
+        bucket_name = get_new_bucket()
+        client = get_client()
+        content_type = 'audio/ogg'
+
+        metadata = {'key1': 'value1', 'key2': 'value2'}
+        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
+
+        copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo')
+
+        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+        eq(content_type, response['ContentType'])
+        eq(metadata, response['Metadata'])
+        body = _get_body(response)
+        eq(size, response['ContentLength'])
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object and replace metadata')
+@attr('fails_on_dbstore')
+def test_object_copy_replacing_metadata():
+    for size in [3, 1024 * 1024]:
+        bucket_name = get_new_bucket()
+        client = get_client()
+        content_type = 'audio/ogg'
+
+        metadata = {'key1': 'value1', 'key2': 'value2'}
+        client.put_object(Bucket=bucket_name, Key='foo123bar', Metadata=metadata, ContentType=content_type, Body=bytearray(size))
+
+        metadata = {'key3': 'value3', 'key2': 'value2'}
+        content_type = 'audio/mpeg'
+
+        copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='bar321foo', Metadata=metadata, MetadataDirective='REPLACE', ContentType=content_type)
+
+        response = client.get_object(Bucket=bucket_name, Key='bar321foo')
+        eq(content_type, response['ContentType'])
+        eq(metadata, response['Metadata'])
+        eq(size, response['ContentLength'])
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy from non-existent bucket')
+def test_object_copy_bucket_not_found():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    copy_source = {'Bucket': bucket_name + "-fake", 'Key': 'foo123bar'}
+    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
+    status = _get_status(e.response)
+    eq(status, 404)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy from non-existent object')
+def test_object_copy_key_not_found():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'foo123bar'}
+    e = assert_raises(ClientError, client.copy, copy_source, bucket_name, 'bar321foo')
+    status = _get_status(e.response)
+    eq(status, 404)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object to/from versioned bucket')
+@attr(assertion='works')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_object_copy_versioned_bucket():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    size = 1*5
+    data = bytearray(size)
+    data_str = data.decode()
+    key1 = 'foo123bar'
+    client.put_object(Bucket=bucket_name, Key=key1, Body=data)
+
+    response = client.get_object(Bucket=bucket_name, Key=key1)
+    version_id = response['VersionId']
+
+    # copy object in the same bucket
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key2 = 'bar321foo'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
+    response = client.get_object(Bucket=bucket_name, Key=key2)
+    body = _get_body(response)
+    eq(data_str, body)
+    eq(size, response['ContentLength'])
+
+    # second copy
+    version_id2 = response['VersionId']
+    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
+    key3 = 'bar321foo2'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
+    response = client.get_object(Bucket=bucket_name, Key=key3)
+    body = _get_body(response)
+    eq(data_str, body)
+    eq(size, response['ContentLength'])
+
+    # copy to another versioned bucket
+    bucket_name2 = get_new_bucket()
+    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key4 = 'bar321foo3'
+    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
+    response = client.get_object(Bucket=bucket_name2, Key=key4)
+    body = _get_body(response)
+    eq(data_str, body)
+    eq(size, response['ContentLength'])
+
+    # copy to another non versioned bucket
+    bucket_name3 = get_new_bucket()
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key5 = 'bar321foo4'
+    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
+    response = client.get_object(Bucket=bucket_name3, Key=key5)
+    body = _get_body(response)
+    eq(data_str, body)
+    eq(size, response['ContentLength'])
+
+    # copy from a non versioned bucket
+    copy_source = {'Bucket': bucket_name3, 'Key': key5}
+    key6 = 'foo123bar2'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key6)
+    response = client.get_object(Bucket=bucket_name, Key=key6)
+    body = _get_body(response)
+    eq(data_str, body)
+    eq(size, response['ContentLength'])
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='copy object to/from versioned bucket with url-encoded name')
+@attr(assertion='works')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_object_copy_versioned_url_encoding():
+    bucket = get_new_bucket_resource()
+    check_configure_versioning_retry(bucket.name, "Enabled", "Enabled")
+    src_key = 'foo?bar'
+    src = bucket.put_object(Key=src_key)
+    src.load() # HEAD request tests that the key exists
+
+    # copy object in the same bucket
+    dst_key = 'bar&foo'
+    dst = bucket.Object(dst_key)
+    dst.copy_from(CopySource={'Bucket': src.bucket_name, 'Key': src.key, 'VersionId': src.version_id})
+    dst.load() # HEAD request tests that the key exists
+
+def generate_random(size, part_size=5*1024*1024):
+    """
+    Generate the specified number of bytes of random data.
+    (actually each part repeats the same random 1 KB string)
+    """
+    chunk = 1024
+    allowed = string.ascii_letters
+    for x in range(0, size, part_size):
+        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
+        s = ''
+        left = size - x
+        this_part_size = min(left, part_size)
+        for y in range(this_part_size // chunk):
+            s = s + strpart
+        if this_part_size > len(s):
+            s = s + strpart[0:this_part_size - len(s)]
+        yield s
+        if x == size:
+            return
+
+def _multipart_upload(bucket_name, key, size, part_size=5*1024*1024, client=None, content_type=None, metadata=None, resend_parts=[]):
+    """
+    generate a multi-part upload for a random file of specifed size,
+    if requested, generate a list of the parts
+    return the upload descriptor
+    """
+    if client == None:
+        client = get_client()
+
+
+    if content_type == None and metadata == None:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    else:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata, ContentType=content_type)
+
+    upload_id = response['UploadId']
+    s = ''
+    parts = []
+    for i, part in enumerate(generate_random(size, part_size)):
+        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
+        part_num = i+1
+        s += part
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
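+        # optionally re-send this part to simulate a client retry; the last copy written should win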
+        if i in resend_parts:
+            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+
+    return (upload_id, s, parts)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='test copy object of a multipart upload')
+@attr(assertion='successful')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_object_copy_versioning_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key1 = "srcmultipart"
+    key1_metadata = {'foo': 'bar'}
+    content_type = 'text/bla'
+    objlen = 30 * 1024 * 1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen, content_type=content_type, metadata=key1_metadata)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=bucket_name, Key=key1)
+    key1_size = response['ContentLength']
+    version_id = response['VersionId']
+
+    # copy object in the same bucket
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key2 = 'dstmultipart'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key2)
+    response = client.get_object(Bucket=bucket_name, Key=key2)
+    version_id2 = response['VersionId']
+    body = _get_body(response)
+    eq(data, body)
+    eq(key1_size, response['ContentLength'])
+    eq(key1_metadata, response['Metadata'])
+    eq(content_type, response['ContentType'])
+
+    # second copy
+    copy_source = {'Bucket': bucket_name, 'Key': key2, 'VersionId': version_id2}
+    key3 = 'dstmultipart2'
+    client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=key3)
+    response = client.get_object(Bucket=bucket_name, Key=key3)
+    body = _get_body(response)
+    eq(data, body)
+    eq(key1_size, response['ContentLength'])
+    eq(key1_metadata, response['Metadata'])
+    eq(content_type, response['ContentType'])
+
+    # copy to another versioned bucket
+    bucket_name2 = get_new_bucket()
+    check_configure_versioning_retry(bucket_name2, "Enabled", "Enabled")
+
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key4 = 'dstmultipart3'
+    client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key=key4)
+    response = client.get_object(Bucket=bucket_name2, Key=key4)
+    body = _get_body(response)
+    eq(data, body)
+    eq(key1_size, response['ContentLength'])
+    eq(key1_metadata, response['Metadata'])
+    eq(content_type, response['ContentType'])
+
+    # copy to another non versioned bucket
+    bucket_name3 = get_new_bucket()
+    copy_source = {'Bucket': bucket_name, 'Key': key1, 'VersionId': version_id}
+    key5 = 'dstmultipart4'
+    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key5)
+    response = client.get_object(Bucket=bucket_name3, Key=key5)
+    body = _get_body(response)
+    eq(data, body)
+    eq(key1_size, response['ContentLength'])
+    eq(key1_metadata, response['Metadata'])
+    eq(content_type, response['ContentType'])
+
+    # copy from a non versioned bucket
+    copy_source = {'Bucket': bucket_name3, 'Key': key5}
+    key6 = 'dstmultipart5'
+    client.copy_object(Bucket=bucket_name3, CopySource=copy_source, Key=key6)
+    response = client.get_object(Bucket=bucket_name3, Key=key6)
+    body = _get_body(response)
+    eq(data, body)
+    eq(key1_size, response['ContentLength'])
+    eq(key1_metadata, response['Metadata'])
+    eq(content_type, response['ContentType'])
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart upload without parts')
+def test_multipart_upload_empty():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key1 = "mymultipart"
+    objlen = 0
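+    # a zero-byte upload yields no parts; completing with an empty part list is rejected as MalformedXML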
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key1, UploadId=upload_id)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart uploads with single small part')
+@attr('fails_on_dbstore')
+def test_multipart_upload_small():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key1 = "mymultipart"
+    objlen = 1
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key1, size=objlen)
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    response = client.get_object(Bucket=bucket_name, Key=key1)
+    eq(response['ContentLength'], objlen)
+    # check extra client.complete_multipart_upload
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+def _create_key_with_random_content(keyname, size=7*1024*1024, bucket_name=None, client=None):
+    if bucket_name is None:
+        bucket_name = get_new_bucket()
+
+    if client is None:
+        client = get_client()
+
+    data_str = str(next(generate_random(size, size)))
+    data = bytes(data_str, 'utf-8')
+    client.put_object(Bucket=bucket_name, Key=keyname, Body=data)
+
+    return bucket_name
+
+def _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, client=None, part_size=5*1024*1024, version_id=None):
+    if client is None:
+        client = get_client()
+
+    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
+    upload_id = response['UploadId']
+
+    if version_id is None:
+        copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    else:
+        copy_source = {'Bucket': src_bucket_name, 'Key': src_key, 'VersionId': version_id}
+
+    parts = []
+
+    i = 0
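+    # CopySourceRange offsets are inclusive, so each part covers
+    # [start_offset, start_offset + part_size - 1], clamped to the source size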
+    for start_offset in range(0, size, part_size):
+        end_offset = min(start_offset + part_size - 1, size - 1)
+        part_num = i+1
+        copy_source_range = 'bytes={start}-{end}'.format(start=start_offset, end=end_offset)
+        response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id, CopySourceRange=copy_source_range)
+        parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
+        i = i+1
+
+    return (upload_id, parts)
+
+def _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=None):
+    client = get_client()
+
+    if version_id is None:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key)
+    else:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key, VersionId=version_id)
+    src_size = response['ContentLength']
+
+    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+    dest_size = response['ContentLength']
+    dest_data = _get_body(response)
+    assert src_size >= dest_size
+
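+    # range-read just the first dest_size bytes of the source (Range is inclusive)
+    # and compare them with the copied data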
+    r = 'bytes={s}-{e}'.format(s=0, e=dest_size-1)
+    if version_id is None:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r)
+    else:
+        response = client.get_object(Bucket=src_bucket_name, Key=src_key, Range=r, VersionId=version_id)
+    src_data = _get_body(response)
+    eq(src_data, dest_data)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart copies with single small part')
+@attr('fails_on_dbstore')
+def test_multipart_copy_small():
+    src_key = 'foo'
+    src_bucket_name = _create_key_with_random_content(src_key)
+
+    dest_bucket_name = get_new_bucket()
+    dest_key = "mymultipart"
+    size = 1
+    client = get_client()
+
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+    eq(size, response['ContentLength'])
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart copies with an invalid range')
+def test_multipart_copy_invalid_range():
+    client = get_client()
+    src_key = 'source'
+    src_bucket_name = _create_key_with_random_content(src_key, size=5)
+
+    response = client.create_multipart_upload(Bucket=src_bucket_name, Key='dest')
+    upload_id = response['UploadId']
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    copy_source_range = 'bytes={start}-{end}'.format(start=0, end=21)
+
+    e = assert_raises(ClientError, client.upload_part_copy, Bucket=src_bucket_name, Key='dest', UploadId=upload_id, CopySource=copy_source, CopySourceRange=copy_source_range, PartNumber=1)
+    status, error_code = _get_status_and_error_code(e.response)
+    valid_status = [400, 416]
+    if status not in valid_status:
+        raise AssertionError("Invalid response " + str(status))
+    eq(error_code, 'InvalidRange')
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart copy with an improperly formatted range')
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40795 is resolved
+@attr('fails_on_rgw')
+def test_multipart_copy_improper_range():
+    client = get_client()
+    src_key = 'source'
+    src_bucket_name = _create_key_with_random_content(src_key, size=5)
+
+    response = client.create_multipart_upload(
+        Bucket=src_bucket_name, Key='dest')
+    upload_id = response['UploadId']
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    test_ranges = ['{start}-{end}'.format(start=0, end=2),
+                   'bytes={start}'.format(start=0),
+                   'bytes=hello-world',
+                   'bytes=0-bar',
+                   'bytes=hello-',
+                   'bytes=0-2,3-5']
+
+    for test_range in test_ranges:
+        e = assert_raises(ClientError, client.upload_part_copy,
+                          Bucket=src_bucket_name, Key='dest',
+                          UploadId=upload_id,
+                          CopySource=copy_source,
+                          CopySourceRange=test_range,
+                          PartNumber=1)
+        status, error_code = _get_status_and_error_code(e.response)
+        eq(status, 400)
+        eq(error_code, 'InvalidArgument')
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart copies without x-amz-copy-source-range')
+def test_multipart_copy_without_range():
+    client = get_client()
+    src_key = 'source'
+    src_bucket_name = _create_key_with_random_content(src_key, size=10)
+    dest_bucket_name = get_new_bucket_name()
+    get_new_bucket(name=dest_bucket_name)
+    dest_key = "mymultipartcopy"
+
+    response = client.create_multipart_upload(Bucket=dest_bucket_name, Key=dest_key)
+    upload_id = response['UploadId']
+    parts = []
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': src_key}
+    part_num = 1
+    copy_source_range = 'bytes={start}-{end}'.format(start=0, end=9)
+
+    response = client.upload_part_copy(Bucket=dest_bucket_name, Key=dest_key, CopySource=copy_source, PartNumber=part_num, UploadId=upload_id)
+
+    parts.append({'ETag': response['CopyPartResult']['ETag'], 'PartNumber': part_num})
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+    eq(response['ContentLength'], 10)
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart copies with single small part')
+@attr('fails_on_dbstore')
+def test_multipart_copy_special_names():
+    src_bucket_name = get_new_bucket()
+
+    dest_bucket_name = get_new_bucket()
+
+    dest_key = "mymultipart"
+    size = 1
+    client = get_client()
+
+    for src_key in (' ', '_', '__', '?versionId'):
+        _create_key_with_random_content(src_key, bucket_name=src_bucket_name)
+        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+        eq(size, response['ContentLength'])
+        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+def _check_content_using_range(key, bucket_name, data, step):
+    client = get_client()
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    size = response['ContentLength']
+
+    for ofs in range(0, size, step):
+        toread = size - ofs
+        if toread > step:
+            toread = step
+        end = ofs + toread - 1
+        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
+        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
+        eq(response['ContentLength'], toread)
+        body = _get_body(response)
+        eq(body, data[ofs:end+1])
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='complete multi-part upload')
+@attr(assertion='successful')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_multipart_upload():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    content_type='text/bla'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    client = get_client()
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    # check extra client.complete_multipart_upload
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
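+    # RGW reports bucket usage via the x-rgw-bytes-used and x-rgw-object-count
+    # response headers; defaulting to the expected values keeps the check a
+    # no-op on backends that omit them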
+    response = client.head_bucket(Bucket=bucket_name)
+    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
+    eq(rgw_bytes_used, objlen)
+
+    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
+    eq(rgw_object_count, 1)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    eq(response['ContentType'], content_type)
+    eq(response['Metadata'], metadata)
+    body = _get_body(response)
+    eq(len(body), response['ContentLength'])
+    eq(body, data)
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+def check_versioning(bucket_name, status):
+    client = get_client()
+
+    try:
+        response = client.get_bucket_versioning(Bucket=bucket_name)
+        eq(response['Status'], status)
+    except KeyError:
+        eq(status, None)
+
+# Amazon is eventually consistent; retry for a few seconds until the read matches
+def check_configure_versioning_retry(bucket_name, status, expected_string):
+    client = get_client()
+    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': status})
+
+    read_status = None
+
+    for i in range(5):
+        try:
+            response = client.get_bucket_versioning(Bucket=bucket_name)
+            read_status = response['Status']
+        except KeyError:
+            read_status = None
+
+        if expected_string == read_status:
+            break
+
+        time.sleep(1)
+
+    eq(expected_string, read_status)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check multipart copies of versioned objects')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_multipart_copy_versioned():
+    src_bucket_name = get_new_bucket()
+    dest_bucket_name = get_new_bucket()
+
+    dest_key = "mymultipart"
+    check_versioning(src_bucket_name, None)
+
+    src_key = 'foo'
+    check_configure_versioning_retry(src_bucket_name, "Enabled", "Enabled")
+
+    size = 15 * 1024 * 1024
+    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
+    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
+    _create_key_with_random_content(src_key, size=size, bucket_name=src_bucket_name)
+
+    version_id = []
+    client = get_client()
+    response = client.list_object_versions(Bucket=src_bucket_name)
+    for ver in response['Versions']:
+        version_id.append(ver['VersionId'])
+
+    for vid in version_id:
+        (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size, version_id=vid)
+        response = client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+        response = client.get_object(Bucket=dest_bucket_name, Key=dest_key)
+        eq(size, response['ContentLength'])
+        _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name, version_id=vid)
+
+def _check_upload_multipart_resend(bucket_name, key, objlen, resend_parts):
+    content_type = 'text/bla'
+    metadata = {'foo': 'bar'}
+    client = get_client()
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, content_type=content_type, metadata=metadata, resend_parts=resend_parts)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    eq(response['ContentType'], content_type)
+    eq(response['Metadata'], metadata)
+    body = _get_body(response)
+    eq(len(body), response['ContentLength'])
+    eq(body, data)
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='complete multi-part upload')
+@attr(assertion='successful')
+@attr('fails_on_dbstore')
+def test_multipart_upload_resend_part():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    objlen = 30 * 1024 * 1024
+
+    _check_upload_multipart_resend(bucket_name, key, objlen, [0])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [1])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [2])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [1,2])
+    _check_upload_multipart_resend(bucket_name, key, objlen, [0,1,2,3,4,5])
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='complete multiple multi-part upload with different sizes')
+@attr(assertion='successful')
+def test_multipart_upload_multiple_sizes():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    client = get_client()
+
+    objlen = 5*1024*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 5*1024*1024+100*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 5*1024*1024+600*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 10*1024*1024+100*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 10*1024*1024+600*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    objlen = 10*1024*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+@attr(assertion='successful')
+@attr('fails_on_dbstore')
+def test_multipart_copy_multiple_sizes():
+    src_key = 'foo'
+    src_bucket_name = _create_key_with_random_content(src_key, 12*1024*1024)
+
+    dest_bucket_name = get_new_bucket()
+    dest_key="mymultipart"
+    client = get_client()
+
+    size = 5*1024*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 5*1024*1024+100*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 5*1024*1024+600*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 10*1024*1024+100*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 10*1024*1024+600*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+    size = 10*1024*1024
+    (upload_id, parts) = _multipart_copy(src_bucket_name, src_key, dest_bucket_name, dest_key, size)
+    client.complete_multipart_upload(Bucket=dest_bucket_name, Key=dest_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    _check_key_content(src_key, src_bucket_name, dest_key, dest_bucket_name)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check failure on multiple multi-part upload with size too small')
+@attr(assertion='fails 400')
+def test_multipart_upload_size_too_small():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    client = get_client()
+
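+    # with 10 KiB parts, every non-final part is below the 5 MiB minimum part
+    # size, so completing the upload must fail with EntityTooSmall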
+    size = 100*1024
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=size, part_size=10*1024)
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'EntityTooSmall')
+
+def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
+    return ''.join(random.choice(chars) for _ in range(size))
+
+def _do_test_multipart_upload_contents(bucket_name, key, num_parts):
+    payload=gen_rand_string(5)*1024*1024
+    client = get_client()
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+
+    for part_num in range(0, num_parts):
+        part = bytes(payload, 'utf-8')
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=part)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
+
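+    # the final part may be smaller than the 5 MiB minimum; S3 enforces the
+    # minimum only on non-final parts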
+    last_payload = '123'*1024*1024
+    last_part = bytes(last_payload, 'utf-8')
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=num_parts+1, Body=last_part)
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': num_parts+1})
+
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    test_string = _get_body(response)
+
+    all_payload = payload*num_parts + last_payload
+
+    assert test_string == all_payload
+
+    return all_payload
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='check contents of multi-part upload')
+@attr(assertion='successful')
+@attr('fails_on_dbstore')
+def test_multipart_upload_contents():
+    bucket_name = get_new_bucket()
+    _do_test_multipart_upload_contents(bucket_name, 'mymultipart', 3)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multi-part upload overwrites existing key')
+@attr(assertion='successful')
+def test_multipart_upload_overwrite_existing_object():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'mymultipart'
+    payload='12345'*1024*1024
+    num_parts=2
+    client.put_object(Bucket=bucket_name, Key=key, Body=payload)
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+
+    for part_num in range(0, num_parts):
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num+1, Body=payload)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num+1})
+
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    test_string = _get_body(response)
+
+    assert test_string == payload*num_parts
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='abort multi-part upload')
+@attr(assertion='successful')
+def test_abort_multipart_upload():
+    bucket_name = get_new_bucket()
+    key="mymultipart"
+    objlen = 10 * 1024 * 1024
+    client = get_client()
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen)
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id)
+
+    response = client.head_bucket(Bucket=bucket_name)
+    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', 0))
+    eq(rgw_bytes_used, 0)
+
+    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 0))
+    eq(rgw_object_count, 0)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='abort non-existent multi-part upload')
+@attr(assertion='fails 404')
+def test_abort_multipart_upload_not_found():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    client.put_object(Bucket=bucket_name, Key=key)
+
+    e = assert_raises(ClientError, client.abort_multipart_upload, Bucket=bucket_name, Key=key, UploadId='56788')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchUpload')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='concurrent multi-part uploads')
+@attr(assertion='successful')
+@attr('fails_on_dbstore')
+def test_list_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    mb = 1024 * 1024
+
+    upload_ids = []
+    (upload_id1, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=5*mb)
+    upload_ids.append(upload_id1)
+    (upload_id2, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=6*mb)
+    upload_ids.append(upload_id2)
+
+    key2="mymultipart2"
+    (upload_id3, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key2, size=5*mb)
+    upload_ids.append(upload_id3)
+
+    response = client.list_multipart_uploads(Bucket=bucket_name)
+    uploads = response['Uploads']
+    resp_uploadids = []
+
+    for upload in uploads:
+        resp_uploadids.append(upload['UploadId'])
+
+    for upload_id in upload_ids:
+        eq(True, upload_id in resp_uploadids)
+
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id1)
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id2)
+    client.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload_id3)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='list multipart uploads with different owners')
+@attr(assertion='successful')
+@attr('fails_on_dbstore')
+def test_list_multipart_upload_owner():
+    bucket_name = get_new_bucket()
+
+    client1 = get_client()
+    user1 = get_main_user_id()
+    name1 = get_main_display_name()
+
+    client2 = get_alt_client()
+    user2 = get_alt_user_id()
+    name2 = get_alt_display_name()
+
+    # add bucket acl for public read/write access
+    client1.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
+
+    key1 = 'multipart1'
+    key2 = 'multipart2'
+    upload1 = client1.create_multipart_upload(Bucket=bucket_name, Key=key1)['UploadId']
+    try:
+        upload2 = client2.create_multipart_upload(Bucket=bucket_name, Key=key2)['UploadId']
+        try:
+            # match fields of an Upload from ListMultipartUploadsResult
+            def match(upload, key, uploadid, userid, username):
+                eq(upload['Key'], key)
+                eq(upload['UploadId'], uploadid)
+                eq(upload['Initiator']['ID'], userid)
+                eq(upload['Initiator']['DisplayName'], username)
+                eq(upload['Owner']['ID'], userid)
+                eq(upload['Owner']['DisplayName'], username)
+
+            # list uploads with client1
+            uploads1 = client1.list_multipart_uploads(Bucket=bucket_name)['Uploads']
+            eq(len(uploads1), 2)
+            match(uploads1[0], key1, upload1, user1, name1)
+            match(uploads1[1], key2, upload2, user2, name2)
+
+            # list uploads with client2
+            uploads2 = client2.list_multipart_uploads(Bucket=bucket_name)['Uploads']
+            eq(len(uploads2), 2)
+            match(uploads2[0], key1, upload1, user1, name1)
+            match(uploads2[1], key2, upload2, user2, name2)
+        finally:
+            client2.abort_multipart_upload(Bucket=bucket_name, Key=key2, UploadId=upload2)
+    finally:
+        client1.abort_multipart_upload(Bucket=bucket_name, Key=key1, UploadId=upload1)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multi-part upload with missing part')
+def test_multipart_upload_missing_part():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    size = 1
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
+    # the part went up as PartNumber 1; reference PartNumber 9999 so the completion names a part that was never uploaded
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 9999})
+
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidPart')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multi-part upload with incorrect ETag')
+def test_multipart_upload_incorrect_etag():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key="mymultipart"
+    size = 1
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    upload_id = response['UploadId']
+
+    parts = []
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=1, Body=bytes('\x00', 'utf-8'))
+    # the real ETag would be "93b885adfe0da089cdf634904fd59f71"; supply a bogus one instead
+    parts.append({'ETag': "ffffffffffffffffffffffffffffffff", 'PartNumber': 1})
+
+    e = assert_raises(ClientError, client.complete_multipart_upload, Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidPart')
+
+def _simple_http_req_100_cont(host, port, is_secure, method, resource):
+    """
+    Send the specified request w/expect 100-continue
+    and await confirmation.
+    """
+    req_str = '{method} {resource} HTTP/1.1\r\nHost: {host}\r\nAccept-Encoding: identity\r\nContent-Length: 123\r\nExpect: 100-continue\r\n\r\n'.format(
+            method=method,
+            resource=resource,
+            host=host,
+            )
+
+    req = bytes(req_str, 'utf-8')
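+    # only the headers go on the wire; the advertised 123-byte body is never
+    # sent, so the interim status line shows whether the server would accept
+    # the upload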
+
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    if is_secure:
+        s = ssl.wrap_socket(s)
+    s.settimeout(5)
+    s.connect((host, port))
+    s.send(req)
+
+    try:
+        data = s.recv(1024)
+    except socket.error as msg:
+        # re-raise so we fail here rather than on the unbound 'data' below
+        print('got response: ', msg)
+        print('most likely server doesn\'t support 100-continue')
+        raise
+
+    s.close()
+    data_str = data.decode()
+    status_line = data_str.split(' ')
+
+    assert status_line[0].startswith('HTTP')
+
+    return status_line[1]
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='w/expect continue')
+@attr(assertion='succeeds if object is public-read-write')
+@attr('100_continue')
+@attr('fails_on_mod_proxy_fcgi')
+def test_100_continue():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    objname='testobj'
+    resource = '/{bucket}/{obj}'.format(bucket=bucket_name, obj=objname)
+
+    host = get_config_host()
+    port = get_config_port()
+    is_secure = get_config_is_secure()
+
+    # NOTE: this test still needs to be exercised with is_secure = True
+    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
+    eq(status, '403')
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read-write')
+
+    status = _simple_http_req_100_cont(host, port, is_secure, 'PUT', resource)
+    eq(status, '100')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set cors')
+@attr(assertion='succeeds')
+@attr('cors')
+def test_set_cors():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    allowed_methods = ['GET', 'PUT']
+    allowed_origins = ['*.get', '*.put']
+
+    cors_config = {
+        'CORSRules': [
+            {'AllowedMethods': allowed_methods,
+             'AllowedOrigins': allowed_origins,
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    eq(status, 404)
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+    response = client.get_bucket_cors(Bucket=bucket_name)
+    eq(response['CORSRules'][0]['AllowedMethods'], allowed_methods)
+    eq(response['CORSRules'][0]['AllowedOrigins'], allowed_origins)
+
+    client.delete_bucket_cors(Bucket=bucket_name)
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    eq(status, 404)
+
+def _cors_request_and_check(func, url, headers, expect_status, expect_allow_origin, expect_allow_methods):
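+    # issue a raw HTTP request against the bucket endpoint and assert on both
+    # the status code and the CORS response headers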
+    r = func(url, headers=headers, verify=get_config_ssl_verify())
+    eq(r.status_code, expect_status)
+
+    assert r.headers.get('access-control-allow-origin', None) == expect_allow_origin
+    assert r.headers.get('access-control-allow-methods', None) == expect_allow_methods
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='check cors response when origin header set')
+@attr(assertion='returning cors header')
+@attr('cors')
+def test_cors_origin_response():
+    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
+    client = get_client()
+
+    cors_config = {
+        'CORSRules': [
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['*suffix'],
+            },
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['start*end'],
+            },
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['prefix*'],
+            },
+            {'AllowedMethods': ['PUT'],
+             'AllowedOrigins': ['*.put'],
+            }
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    eq(status, 404)
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+
+    time.sleep(3)
+
+    url = _get_post_url(bucket_name)
+
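+    # AllowedOrigins accepts a single '*' wildcard per rule: '*suffix' matches
+    # any origin ending in 'suffix', 'prefix*' any origin starting with
+    # 'prefix', and 'start*end' any origin with that prefix and suffix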
+    _cors_request_and_check(requests.get, url, None, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix'}, 200, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'foo.bar'}, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'foo.suffix.get'}, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'startend'}, 200, 'startend', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'start1end'}, 200, 'start1end', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'start12end'}, 200, 'start12end', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': '0start12end'}, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'prefix'}, 200, 'prefix', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'prefix.suffix'}, 200, 'prefix.suffix', 'GET')
+    _cors_request_and_check(requests.get, url, {'Origin': 'bla.prefix'}, 200, None, None)
+
+    obj_url = '{u}/{o}'.format(u=url, o='bar')
+    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
+                                                    'content-length': '0'}, 403, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'PUT',
+                                                    'content-length': '0'}, 403, None, None)
+
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'DELETE',
+                                                    'content-length': '0'}, 403, None, None)
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.suffix', 'content-length': '0'}, 403, None, None)
+
+    _cors_request_and_check(requests.put, obj_url, {'Origin': 'foo.put', 'content-length': '0'}, 403, 'foo.put', 'PUT')
+
+    _cors_request_and_check(requests.get, obj_url, {'Origin': 'foo.suffix'}, 404, 'foo.suffix', 'GET')
+
+    _cors_request_and_check(requests.options, url, None, 400, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix'}, 400, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'bla'}, 400, None, None)
+    _cors_request_and_check(requests.options, obj_url, {'Origin': 'foo.suffix', 'Access-Control-Request-Method': 'GET',
+                                                    'content-length': '0'}, 200, 'foo.suffix', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.bar', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.suffix.get', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'startend', 'Access-Control-Request-Method': 'GET'}, 200, 'startend', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'start1end', 'Access-Control-Request-Method': 'GET'}, 200, 'start1end', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'start12end', 'Access-Control-Request-Method': 'GET'}, 200, 'start12end', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': '0start12end', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'prefix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'prefix.suffix', 'Access-Control-Request-Method': 'GET'}, 200, 'prefix.suffix', 'GET')
+    _cors_request_and_check(requests.options, url, {'Origin': 'bla.prefix', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'GET'}, 403, None, None)
+    _cors_request_and_check(requests.options, url, {'Origin': 'foo.put', 'Access-Control-Request-Method': 'PUT'}, 200, 'foo.put', 'PUT')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='check cors response when origin is set to wildcard')
+@attr(assertion='returning cors header')
+@attr('cors')
+def test_cors_origin_wildcard():
+    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
+    client = get_client()
+
+    cors_config = {
+        'CORSRules': [
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['*'],
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    eq(status, 404)
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+
+    time.sleep(3)
+
+    url = _get_post_url(bucket_name)
+
+    _cors_request_and_check(requests.get, url, None, 200, None, None)
+    _cors_request_and_check(requests.get, url, {'Origin': 'example.origin'}, 200, '*', 'GET')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='check cors response when Access-Control-Request-Headers is set in an OPTIONS request')
+@attr(assertion='returning cors header')
+@attr('cors')
+def test_cors_header_option():
+    bucket_name = _setup_bucket_acl(bucket_acl='public-read')
+    client = get_client()
+
+    cors_config = {
+        'CORSRules': [
+            {'AllowedMethods': ['GET'],
+             'AllowedOrigins': ['*'],
+             'ExposeHeaders': ['x-amz-meta-header1'],
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_cors, Bucket=bucket_name)
+    status = _get_status(e.response)
+    eq(status, 404)
+
+    client.put_bucket_cors(Bucket=bucket_name, CORSConfiguration=cors_config)
+
+    time.sleep(3)
+
+    url = _get_post_url(bucket_name)
+    obj_url = '{u}/{o}'.format(u=url, o='bar')
+
+    _cors_request_and_check(requests.options, obj_url, {'Origin': 'example.origin','Access-Control-Request-Headers':'x-amz-meta-header2','Access-Control-Request-Method':'GET'}, 403, None, None)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='put tags')
+@attr(assertion='succeeds')
+@attr('tagging')
+def test_set_bucket_tagging():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    tags={
+        'TagSet': [
+            {
+                'Key': 'Hello',
+                'Value': 'World'
+            },
+        ]
+    }
+
+    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchTagSet')
+
+    client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
+
+    response = client.get_bucket_tagging(Bucket=bucket_name)
+    eq(len(response['TagSet']), 1)
+    eq(response['TagSet'][0]['Key'], 'Hello')
+    eq(response['TagSet'][0]['Value'], 'World')
+
+    response = client.delete_bucket_tagging(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    e = assert_raises(ClientError, client.get_bucket_tagging, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchTagSet')
+
+
+class FakeFile(object):
+    """
+    file-like object that simulates seek and tell over a single repeated character
+    """
+    def __init__(self, char='A', interrupt=None):
+        self.offset = 0
+        self.char = bytes(char, 'utf-8')
+        self.interrupt = interrupt
+
+    def seek(self, offset, whence=os.SEEK_SET):
+        if whence == os.SEEK_SET:
+            self.offset = offset
+        elif whence == os.SEEK_END:
+            self.offset = self.size + offset
+        elif whence == os.SEEK_CUR:
+            self.offset += offset
+
+    def tell(self):
+        return self.offset
+
+class FakeWriteFile(FakeFile):
+    """
+    file that simulates interruptible reads of constant data
+    """
+    def __init__(self, size, char='A', interrupt=None):
+        FakeFile.__init__(self, char, interrupt)
+        self.size = size
+
+    def read(self, size=-1):
+        if size < 0:
+            size = self.size - self.offset
+        count = min(size, self.size - self.offset)
+        self.offset += count
+
+        # Sneaky! fire the interrupt just before returning the final chunk
+        if self.interrupt is not None and self.offset == self.size and count > 0:
+            self.interrupt()
+
+        return self.char*count
+
+class FakeReadFile(FakeFile):
+    """
+    file that simulates writes, interrupting after the first write
+    """
+    def __init__(self, size, char='A', interrupt=None):
+        FakeFile.__init__(self, char, interrupt)
+        self.interrupted = False
+        self.size = 0
+        self.expected_size = size
+
+    def write(self, chars):
+        eq(chars, self.char*len(chars))
+        self.offset += len(chars)
+        self.size += len(chars)
+
+        # Sneaky! fire the interrupt once, on the first write that lands data
+        if not self.interrupted and self.interrupt is not None \
+                and self.offset > 0:
+            self.interrupt()
+            self.interrupted = True
+
+    def close(self):
+        eq(self.size, self.expected_size)
+
+class FakeFileVerifier(object):
+    """
+    file that verifies expected data has been written
+    """
+    def __init__(self, char=None):
+        self.char = char
+        self.size = 0
+
+    def write(self, data):
+        size = len(data)
+        if self.char is None:
+            self.char = data[0]
+        self.size += size
+        eq(data.decode(), self.char*size)
+
+def _verify_atomic_key_data(bucket_name, key, size=-1, char=None):
+    """
+    Make sure file is of the expected size and (simulated) content
+    """
+    fp_verify = FakeFileVerifier(char)
+    client = get_client()
+    client.download_fileobj(bucket_name, key, fp_verify)
+    if size >= 0:
+        eq(fp_verify.size, size)
+
+def _test_atomic_read(file_size):
+    """
+    Write an object of A's.
+    While reading it back, overwrite the object with B's mid-download.
+    Re-read the contents, and confirm we get B's
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    fp_a = FakeWriteFile(file_size, 'A')
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_a)
+
+    fp_b = FakeWriteFile(file_size, 'B')
+    fp_a2 = FakeReadFile(file_size, 'A',
+        lambda: client.put_object(Bucket=bucket_name, Key='testobj', Body=fp_b)
+        )
+
+    read_client = get_client()
+
+    read_client.download_fileobj(bucket_name, 'testobj', fp_a2)
+    fp_a2.close()
+
+    _verify_atomic_key_data(bucket_name, 'testobj', file_size, 'B')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='read atomicity')
+@attr(assertion='1MB successful')
+def test_atomic_read_1mb():
+    _test_atomic_read(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='read atomicity')
+@attr(assertion='4MB successful')
+def test_atomic_read_4mb():
+    _test_atomic_read(1024*1024*4)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='read atomicity')
+@attr(assertion='8MB successful')
+def test_atomic_read_8mb():
+    _test_atomic_read(1024*1024*8)
+
+def _test_atomic_write(file_size):
+    """
+    Write an object of A's and verify the contents are all A's.
+    Overwrite it with B's; before the overwrite completes,
+    verify the content is still all A's.
+    Re-read the contents, and confirm we get B's
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    objname = 'testobj'
+
+    # create <file_size> file of A's
+    fp_a = FakeWriteFile(file_size, 'A')
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+
+    # verify A's
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+
+    # create <file_size> file of B's
+    # but try to verify the file before we finish writing all the B's
+    fp_b = FakeWriteFile(file_size, 'B',
+        lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+        )
+
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    # verify B's
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write atomicity')
+@attr(assertion='1MB successful')
+def test_atomic_write_1mb():
+    _test_atomic_write(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write atomicity')
+@attr(assertion='4MB successful')
+def test_atomic_write_4mb():
+    _test_atomic_write(1024*1024*4)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write atomicity')
+@attr(assertion='8MB successful')
+def test_atomic_write_8mb():
+    _test_atomic_write(1024*1024*8)
+
+def _test_atomic_dual_write(file_size):
+    """
+    create an object with two sessions writing different contents;
+    confirm that the final object is entirely one or the other
+    """
+    bucket_name = get_new_bucket()
+    objname = 'testobj'
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key=objname)
+
+    # write <file_size> file of B's
+    # but before we're done, try to write all A's
+    fp_a = FakeWriteFile(file_size, 'A')
+
+    def rewind_put_fp_a():
+        fp_a.seek(0)
+        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+
+    fp_b = FakeWriteFile(file_size, 'B', rewind_put_fp_a)
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    # verify the file
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write one or the other')
+@attr(assertion='1MB successful')
+def test_atomic_dual_write_1mb():
+    _test_atomic_dual_write(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write one or the other')
+@attr(assertion='4MB successful')
+def test_atomic_dual_write_4mb():
+    _test_atomic_dual_write(1024*1024*4)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write one or the other')
+@attr(assertion='8MB successful')
+def test_atomic_dual_write_8mb():
+    _test_atomic_dual_write(1024*1024*8)
+
+def _test_atomic_conditional_write(file_size):
+    """
+    Write an object of A's and verify the contents are all A's.
+    Overwrite it with B's under an 'If-Match: *' condition;
+    before the overwrite completes, verify the content is still all A's.
+    Re-read the contents, and confirm we get B's
+    """
+    bucket_name = get_new_bucket()
+    objname = 'testobj'
+    client = get_client()
+
+    # create <file_size> file of A's
+    fp_a = FakeWriteFile(file_size, 'A')
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+
+    fp_b = FakeWriteFile(file_size, 'B',
+        lambda: _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+        )
+
+    # create <file_size> file of B's
+    # but try to verify the file before we finish writing all the B's
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': '*'}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    # verify B's
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write atomicity')
+@attr(assertion='1MB successful')
+@attr('fails_on_aws')
+def test_atomic_conditional_write_1mb():
+    _test_atomic_conditional_write(1024*1024)
+
+def _test_atomic_dual_conditional_write(file_size):
+    """
+    create an object with two sessions writing different contents under an
+    If-Match condition; confirm the losing write fails its precondition
+    """
+    bucket_name = get_new_bucket()
+    objname = 'testobj'
+    client = get_client()
+
+    fp_a = FakeWriteFile(file_size, 'A')
+    response = client.put_object(Bucket=bucket_name, Key=objname, Body=fp_a)
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'A')
+    etag_fp_a = response['ETag'].replace('"', '')
+
+    # write <file_size> file of C's
+    # but before we're done, try to write all B's
+    fp_b = FakeWriteFile(file_size, 'B')
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({'If-Match': etag_fp_a}))
+    client.meta.events.register('before-call.s3.PutObject', lf)
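+    # while fp_c is mid-upload, rewind_put_fp_b overwrites the object with
+    # B's (still matching fp_a's etag at that point); fp_c's own PutObject
+    # then fails its If-Match check with 412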
+    def rewind_put_fp_b():
+        fp_b.seek(0)
+        client.put_object(Bucket=bucket_name, Key=objname, Body=fp_b)
+
+    fp_c = FakeWriteFile(file_size, 'C', rewind_put_fp_b)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_c)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+    # verify the file
+    _verify_atomic_key_data(bucket_name, objname, file_size, 'B')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write one or the other')
+@attr(assertion='1MB successful')
+@attr('fails_on_aws')
+# TODO: test not passing with SSL, fix this
+@attr('fails_on_rgw')
+def test_atomic_dual_conditional_write_1mb():
+    _test_atomic_dual_conditional_write(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write file in deleted bucket')
+@attr(assertion='fail 404')
+@attr('fails_on_aws')
+# TODO: test not passing with SSL, fix this
+@attr('fails_on_rgw')
+def test_atomic_write_bucket_gone():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    def remove_bucket():
+        client.delete_bucket(Bucket=bucket_name)
+
+    objname = 'foo'
+    fp_a = FakeWriteFile(1024*1024, 'A', remove_bucket)
+
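+    # the callback deletes the bucket partway through the upload, so the
+    # PutObject must fail with 404 NoSuchBucket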
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=objname, Body=fp_a)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchBucket')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='begin to overwrite file with multipart upload then abort')
+@attr(assertion='read back original key contents')
+def test_atomic_multipart_upload_write():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key='foo')
+    upload_id = response['UploadId']
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    client.abort_multipart_upload(Bucket=bucket_name, Key='foo', UploadId=upload_id)
+
+    response = client.get_object(Bucket=bucket_name, Key='foo')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+class Counter:
+    def __init__(self, default_val):
+        self.val = default_val
+
+    def inc(self):
+        self.val = self.val + 1
+
+class ActionOnCount:
+    def __init__(self, trigger_count, action):
+        self.count = 0
+        self.trigger_count = trigger_count
+        self.action = action
+        self.result = 0
+
+    def trigger(self):
+        self.count = self.count + 1
+
+        if self.count == self.trigger_count:
+            self.result = self.action()
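+
+# hypothetical usage sketch: fire the action on the 3rd trigger
+#     action = ActionOnCount(3, some_callable)
+#     for _ in range(5):
+#         action.trigger()   # some_callable runs exactly once, on call 3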
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multipart check for two writes of the same part, first write finishes last')
+@attr(assertion='object contains correct content')
+def test_multipart_resend_first_finishes_last():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key_name = "mymultipart"
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
+    upload_id = response['UploadId']
+
+    #file_size = 8*1024*1024
+    file_size = 8
+
+    counter = Counter(0)
+    # upload_part may read the body more than once: once to calculate the
+    # md5 and again to send the data. We want to interject only on the
+    # final read, but we can't be sure how many reads there will be, so
+    # do a dry run first and count them
+
+    fp_dry_run = FakeWriteFile(file_size, 'C',
+        lambda: counter.inc()
+        )
+
+    parts = []
+
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_dry_run)
+
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    client.delete_object(Bucket=bucket_name, Key=key_name)
+
+    # clear parts
+    parts[:] = []
+
+    # ok, now for the actual test
+    fp_b = FakeWriteFile(file_size, 'B')
+    def upload_fp_b():
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, Body=fp_b, PartNumber=1)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
+
+    action = ActionOnCount(counter.val, lambda: upload_fp_b())
+
+    response = client.create_multipart_upload(Bucket=bucket_name, Key=key_name)
+    upload_id = response['UploadId']
+
+    fp_a = FakeWriteFile(file_size, 'A',
+        lambda: action.trigger()
+        )
+
+    response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key_name, PartNumber=1, Body=fp_a)
+
+    parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': 1})
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key_name, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    _verify_atomic_key_data(bucket_name, key_name, file_size, 'A')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='range')
+@attr(assertion='returns correct data, 206')
+@attr('fails_on_dbstore')
+def test_ranged_request_response_code():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-7')
+
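+    # HTTP byte ranges are inclusive on both ends, so bytes=4-7 maps to
+    # the Python slice content[4:8]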
+    fetched_content = _get_body(response)
+    eq(fetched_content, content[4:8])
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-7/11')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
+
+def _generate_random_string(size):
+    return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='range')
+@attr(assertion='returns correct data, 206')
+@attr('fails_on_dbstore')
+def test_ranged_big_request_response_code():
+    content = _generate_random_string(8*1024*1024)
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=3145728-5242880')
+
+    fetched_content = _get_body(response)
+    eq(fetched_content, content[3145728:5242881])
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 3145728-5242880/8388608')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='range')
+@attr(assertion='returns correct data, 206')
+@attr('fails_on_dbstore')
+def test_ranged_request_skip_leading_bytes_response_code():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=4-')
+
+    fetched_content = _get_body(response)
+    eq(fetched_content, content[4:])
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='range')
+@attr(assertion='returns correct data, 206')
+@attr('fails_on_dbstore')
+def test_ranged_request_return_trailing_bytes_response_code():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+    response = client.get_object(Bucket=bucket_name, Key='testobj', Range='bytes=-7')
+
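+    # a suffix range of bytes=-7 on an 11-byte object returns the last 7
+    # bytes, i.e. bytes 4-10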
+    fetched_content = _get_body(response)
+    eq(fetched_content, content[-7:])
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-range'], 'bytes 4-10/11')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 206)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='range')
+@attr(assertion='returns invalid range, 416')
+def test_ranged_request_invalid_range():
+    content = 'testcontent'
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+
+    # test invalid range
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 416)
+    eq(error_code, 'InvalidRange')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='range')
+@attr(assertion='returns invalid range, 416')
+def test_ranged_request_empty_object():
+    content = ''
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=content)
+
+    # test invalid range
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='testobj', Range='bytes=40-50')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 416)
+    eq(error_code, 'InvalidRange')
+
+@attr(resource='bucket')
+@attr(method='create')
+@attr(operation='create versioned bucket')
+@attr(assertion='can create and suspend bucket versioning')
+def test_versioning_bucket_create_suspend():
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+def check_obj_content(client, bucket_name, key, version_id, content):
+    response = client.get_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+    if content is not None:
+        body = _get_body(response)
+        eq(body, content)
+    else:
+        eq(response['DeleteMarker'], True)
+
+def check_obj_versions(client, bucket_name, key, version_ids, contents):
+    # check that the listed versions match the expected ids and contents
+    response = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    # list_object_versions returns versions newest-first; version_ids and
+    # contents are oldest-first, so reverse before comparing
+    versions.reverse()
+
+    for i, version in enumerate(versions):
+        eq(version['VersionId'], version_ids[i])
+        eq(version['Key'], key)
+        check_obj_content(client, bucket_name, key, version['VersionId'], contents[i])
+
+def create_multiple_versions(client, bucket_name, key, num_versions, version_ids = None, contents = None, check_versions = True):
+    contents = contents or []
+    version_ids = version_ids or []
+
+    for i in range(num_versions):
+        body = 'content-{i}'.format(i=i)
+        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+        version_id = response['VersionId']
+
+        contents.append(body)
+        version_ids.append(version_id)
+
+#    if check_versions:
+#        check_obj_versions(client, bucket_name, key, version_ids, contents)
+
+    return (version_ids, contents)
+
+def remove_obj_version(client, bucket_name, key, version_ids, contents, index):
+    eq(len(version_ids), len(contents))
+    index = index % len(version_ids)
+    rm_version_id = version_ids.pop(index)
+    rm_content = contents.pop(index)
+
+    check_obj_content(client, bucket_name, key, rm_version_id, rm_content)
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=rm_version_id)
+
+    if len(version_ids) != 0:
+        check_obj_versions(client, bucket_name, key, version_ids, contents)
+
+def clean_up_bucket(client, bucket_name, key, version_ids):
+    for version_id in version_ids:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    client.delete_bucket(Bucket=bucket_name)
+
+def _do_test_create_remove_versions(client, bucket_name, key, num_versions, remove_start_idx, idx_inc):
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    idx = remove_start_idx
+
+    for j in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+        idx += idx_inc
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    # all versions were removed above, so none should remain
+    eq(('Versions' in response), False)
+
+
+@attr(resource='object')
+@attr(method='create')
+@attr(operation='create and remove versioned object')
+@attr(assertion='can create access and remove appropriate versions')
+@attr('versioning')
+def test_versioning_obj_create_read_remove():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
+    key = 'testobj'
+    num_versions = 5
+
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, -1, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 0, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 1, 0)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 4, -1)
+    _do_test_create_remove_versions(client, bucket_name, key, num_versions, 3, 3)
+
+@attr(resource='object')
+@attr(method='create')
+@attr(operation='create and remove versioned object and head')
+@attr(assertion='can create access and remove appropriate versions')
+@attr('versioning')
+def test_versioning_obj_create_read_remove_head():
+    bucket_name = get_new_bucket()
+
+    client = get_client()
+    client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'MFADelete': 'Disabled', 'Status': 'Enabled'})
+    key = 'testobj'
+    num_versions = 5
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    # removes old head object, checks new one
+    removed_version_id = version_ids.pop()
+    contents.pop()
+    num_versions = num_versions-1
+
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=removed_version_id)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    eq(body, contents[-1])
+
+    # add a delete marker
+    response = client.delete_object(Bucket=bucket_name, Key=key)
+    eq(response['DeleteMarker'], True)
+
+    delete_marker_version_id = response['VersionId']
+    version_ids.append(delete_marker_version_id)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(len(response['Versions']), num_versions)
+    eq(len(response['DeleteMarkers']), 1)
+    eq(response['DeleteMarkers'][0]['VersionId'], delete_marker_version_id)
+
+    clean_up_bucket(client, bucket_name, key, version_ids)
+
+@attr(resource='object')
+@attr(method='create')
+@attr(operation='create object, then switch to versioning')
+@attr(assertion='behaves correctly')
+@attr('versioning')
+def test_versioning_obj_plain_null_version_removal():
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    client = get_client()
+    key = 'testobjfoo'
+    content = 'fooz'
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+
+@attr(resource='object')
+@attr(method='create')
+@attr(operation='create object, then switch to versioning')
+@attr(assertion='behaves correctly')
+@attr('versioning')
+def test_versioning_obj_plain_null_version_overwrite():
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    client = get_client()
+    key = 'testobjfoo'
+    content = 'fooz'
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    content2 = 'zzz'
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    eq(body, content2)
+
+    version_id = response['VersionId']
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    eq(body, content)
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+
+@attr(resource='object')
+@attr(method='create')
+@attr(operation='create object, then switch to versioning')
+@attr(assertion='behaves correctly')
+@attr('versioning')
+def test_versioning_obj_plain_null_version_overwrite_suspended():
+    bucket_name = get_new_bucket()
+    check_versioning(bucket_name, None)
+
+    client = get_client()
+    key = 'testobjbar'
+    content = 'foooz'
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+    content2 = 'zzz'
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=content2)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    eq(body, content2)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    # original object with 'null' version id still counts as a version
+    eq(len(response['Versions']), 1)
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId='null')
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'NoSuchKey')
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+
+def delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents):
+    client.delete_object(Bucket=bucket_name, Key=key)
+
+    # clear out the old 'null' version from the lists, since the delete
+    # marker replaces it; pop in place so callers see the mutation
+    eq(len(version_ids), len(contents))
+    i = 0
+    while i < len(version_ids):
+        if version_ids[i] == 'null':
+            version_ids.pop(i)
+            contents.pop(i)
+        else:
+            i += 1
+
+    return (version_ids, contents)
+
+def overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, content):
+    client.put_object(Bucket=bucket_name, Key=key, Body=content)
+
+    # clear out the old 'null' version from the lists, since this write
+    # overwrites it; pop in place so callers see the mutation
+    eq(len(version_ids), len(contents))
+    i = 0
+    while i < len(version_ids):
+        if version_ids[i] == 'null':
+            version_ids.pop(i)
+            contents.pop(i)
+        else:
+            i += 1
+
+    # add new content with 'null' version id to the end
+    contents.append(content)
+    version_ids.append('null')
+
+    return (version_ids, contents)
+
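+# under suspended versioning, a plain write or delete operates on a single
+# 'null' version: an overwrite replaces it, and a delete replaces it with a
+# 'null' delete marker; pre-existing real versions are left intact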
+
+@attr(resource='object')
+@attr(method='create')
+@attr(operation='suspend versioned bucket')
+@attr(assertion='suspended versioning behaves correctly')
+@attr('versioning')
+def test_versioning_obj_suspend_versions():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 5
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+
+    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 1')
+    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 2')
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+    overwrite_suspended_versioning_obj(client, bucket_name, key, version_ids, contents, 'null content 3')
+    delete_suspended_versioning_obj(client, bucket_name, key, version_ids, contents)
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, 3, version_ids, contents)
+    num_versions += 3
+
+    for idx in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+    eq(len(version_ids), 0)
+    eq(len(version_ids), len(contents))
+
+@attr(resource='object')
+@attr(method='remove')
+@attr(operation='create and remove versions')
+@attr(assertion='everything works')
+@attr('versioning')
+def test_versioning_obj_create_versions_remove_all():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 10
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+    for idx in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+    eq(len(version_ids), 0)
+    eq(len(version_ids), len(contents))
+
+@attr(resource='object')
+@attr(method='remove')
+@attr(operation='create and remove versions')
+@attr(assertion='everything works')
+@attr('versioning')
+def test_versioning_obj_create_versions_remove_special_names():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    keys = ['_testobj', '_', ':', ' ']
+    num_versions = 10
+
+    for key in keys:
+        (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+        for idx in range(num_versions):
+            remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+        eq(len(version_ids), 0)
+        eq(len(version_ids), len(contents))
+
+@attr(resource='object')
+@attr(method='multipart')
+@attr(operation='create and test multipart object')
+@attr(assertion='everything works')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_versioning_obj_create_overwrite_multipart():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 3
+    contents = []
+    version_ids = []
+
+    for i in range(num_versions):
+        ret = _do_test_multipart_upload_contents(bucket_name, key, 3)
+        contents.append(ret)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    for version in response['Versions']:
+        version_ids.append(version['VersionId'])
+
+    version_ids.reverse()
+    check_obj_versions(client, bucket_name, key, version_ids, contents)
+
+    for idx in range(num_versions):
+        remove_obj_version(client, bucket_name, key, version_ids, contents, idx)
+
+    eq(len(version_ids), 0)
+    eq(len(version_ids), len(contents))
+
+@attr(resource='object')
+@attr(method='multipart')
+@attr(operation='list versioned objects')
+@attr(assertion='everything works')
+@attr('versioning')
+def test_versioning_obj_list_marker():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    key2 = 'testobj-1'
+    num_versions = 5
+
+    contents = []
+    version_ids = []
+    contents2 = []
+    version_ids2 = []
+
+    # for key #1
+    for i in range(num_versions):
+        body = 'content-{i}'.format(i=i)
+        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+        version_id = response['VersionId']
+
+        contents.append(body)
+        version_ids.append(version_id)
+
+    # for key #2
+    for i in range(num_versions):
+        body = 'content-{i}'.format(i=i)
+        response = client.put_object(Bucket=bucket_name, Key=key2, Body=body)
+        version_id = response['VersionId']
+
+        contents2.append(body)
+        version_ids2.append(version_id)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    # list_object_versions returns versions newest-first; version_ids and
+    # contents are oldest-first, so reverse before comparing
+    versions.reverse()
+
+    # after the reverse, key2's 5 versions come first (oldest-first),
+    # followed by key's 5 versions
+    for i in range(5):
+        version = versions[i]
+        eq(version['VersionId'], version_ids2[i])
+        eq(version['Key'], key2)
+        check_obj_content(client, bucket_name, key2, version['VersionId'], contents2[i])
+
+    # then the first 5 created (key)
+    for j in range(5):
+        version = versions[j + 5]
+        eq(version['VersionId'], version_ids[j])
+        eq(version['Key'], key)
+        check_obj_content(client, bucket_name, key, version['VersionId'], contents[j])
+
+@attr(resource='object')
+@attr(method='multipart')
+@attr(operation='create and test versioned object copying')
+@attr(assertion='everything works')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_versioning_copy_obj_version():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'testobj'
+    num_versions = 3
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    for i in range(num_versions):
+        new_key_name = 'key_{i}'.format(i=i)
+        copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
+        client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key=new_key_name)
+        response = client.get_object(Bucket=bucket_name, Key=new_key_name)
+        body = _get_body(response)
+        eq(body, contents[i])
+
+    another_bucket_name = get_new_bucket()
+
+    for i in range(num_versions):
+        new_key_name = 'key_{i}'.format(i=i)
+        copy_source = {'Bucket': bucket_name, 'Key': key, 'VersionId': version_ids[i]}
+        client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
+        response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
+        body = _get_body(response)
+        eq(body, contents[i])
+
+    new_key_name = 'new_key'
+    copy_source = {'Bucket': bucket_name, 'Key': key}
+    client.copy_object(Bucket=another_bucket_name, CopySource=copy_source, Key=new_key_name)
+
+    response = client.get_object(Bucket=another_bucket_name, Key=new_key_name)
+    body = _get_body(response)
+    eq(body, contents[-1])
+
+@attr(resource='object')
+@attr(method='delete')
+@attr(operation='delete multiple versions')
+@attr(assertion='deletes multiple versions of an object with a single call')
+@attr('versioning')
+def test_versioning_multi_object_delete():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'key'
+    num_versions = 2
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    versions.reverse()
+
+    for version in versions:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+
+    # now remove again, should all succeed due to idempotency
+    for version in versions:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+
+@attr(resource='object')
+@attr(method='delete')
+@attr(operation='delete multiple versions')
+@attr(assertion='deletes multiple versions of an object and delete marker with a single call')
+@attr('versioning')
+def test_versioning_multi_object_delete_with_marker():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'key'
+    num_versions = 2
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    client.delete_object(Bucket=bucket_name, Key=key)
+    response = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    delete_markers = response['DeleteMarkers']
+
+    version_ids.append(delete_markers[0]['VersionId'])
+    eq(len(version_ids), 3)
+    eq(len(delete_markers), 1)
+
+    for version in versions:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
+
+    for delete_marker in delete_markers:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=delete_marker['VersionId'])
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+    eq(('DeleteMarkers' in response), False)
+
+    # now remove everything again; all deletes should succeed due to idempotency
+    for version in versions:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=version['VersionId'])
+
+    for delete_marker in delete_markers:
+        client.delete_object(Bucket=bucket_name, Key=key, VersionId=delete_marker['VersionId'])
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+    eq(('DeleteMarkers' in response), False)
+
+@attr(resource='object')
+@attr(method='delete')
+@attr(operation='multi delete create marker')
+@attr(assertion='returns correct marker version id')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_versioning_multi_object_delete_with_marker_create():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'key'
+
+    response = client.delete_object(Bucket=bucket_name, Key=key)
+    delete_marker_version_id = response['VersionId']
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    delete_markers = response['DeleteMarkers']
+
+    eq(len(delete_markers), 1)
+    eq(delete_marker_version_id, delete_markers[0]['VersionId'])
+    eq(key, delete_markers[0]['Key'])
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='change acl on an object version changes specific version')
+@attr(assertion='works')
+@attr('versioning')
+def test_versioned_object_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'xyz'
+    num_versions = 3
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    version_id = version_ids[1]
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    eq(response['Owner']['DisplayName'], display_name)
+    eq(response['Owner']['ID'], user_id)
+
+    grants = response['Grants']
+    default_policy = [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ]
+
+    check_grants(grants, default_policy)
+
+    client.put_object_acl(ACL='public-read', Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+    client.put_object(Bucket=bucket_name, Key=key)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key)
+    grants = response['Grants']
+    check_grants(grants, default_policy)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='change acl on an object with no version specified changes latest version')
+@attr(assertion='works')
+@attr('versioning')
+@attr('fails_on_dbstore')
+def test_versioned_object_acl_no_version_specified():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'xyz'
+    num_versions = 3
+
+    (version_ids, contents) = create_multiple_versions(client, bucket_name, key, num_versions)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    version_id = response['VersionId']
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+    display_name = get_main_display_name()
+    user_id = get_main_user_id()
+
+    eq(response['Owner']['DisplayName'], display_name)
+    eq(response['Owner']['ID'], user_id)
+
+    grants = response['Grants']
+    default_policy = [
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ]
+
+    check_grants(grants, default_policy)
+
+    client.put_object_acl(ACL='public-read', Bucket=bucket_name, Key=key)
+
+    response = client.get_object_acl(Bucket=bucket_name, Key=key, VersionId=version_id)
+    grants = response['Grants']
+    check_grants(
+        grants,
+        [
+            dict(
+                Permission='READ',
+                ID=None,
+                DisplayName=None,
+                URI='http://acs.amazonaws.com/groups/global/AllUsers',
+                EmailAddress=None,
+                Type='Group',
+                ),
+            dict(
+                Permission='FULL_CONTROL',
+                ID=user_id,
+                DisplayName=display_name,
+                URI=None,
+                EmailAddress=None,
+                Type='CanonicalUser',
+                ),
+            ],
+        )
+
+def _do_create_object(client, bucket_name, key, i):
+    body = 'data {i}'.format(i=i)
+    client.put_object(Bucket=bucket_name, Key=key, Body=body)
+
+def _do_remove_ver(client, bucket_name, key, version_id):
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id)
+
+def _do_create_versioned_obj_concurrent(client, bucket_name, key, num):
+    t = []
+    for i in range(num):
+        thr = threading.Thread(target = _do_create_object, args=(client, bucket_name, key, i))
+        thr.start()
+        t.append(thr)
+    return t
+
+def _do_clear_versioned_bucket_concurrent(client, bucket_name):
+    t = []
+    response = client.list_object_versions(Bucket=bucket_name)
+    for version in response.get('Versions', []):
+        thr = threading.Thread(target = _do_remove_ver, args=(client, bucket_name, version['Key'], version['VersionId']))
+        thr.start()
+        t.append(thr)
+    return t
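+
+# the two helpers above return their threads unjoined; callers join them
+# via _do_wait_completion before checking the bucket state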
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='concurrent creation of objects, concurrent removal')
+@attr(assertion='works')
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/39142 is resolved
+@attr('fails_on_rgw')
+@attr('versioning')
+def test_versioned_concurrent_object_create_concurrent_remove():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'myobj'
+    num_versions = 5
+
+    for i in range(5):
+        t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
+        _do_wait_completion(t)
+
+        response = client.list_object_versions(Bucket=bucket_name)
+        versions = response['Versions']
+
+        eq(len(versions), num_versions)
+
+        t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
+        _do_wait_completion(t)
+
+        response = client.list_object_versions(Bucket=bucket_name)
+        eq(('Versions' in response), False)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='concurrent creation and removal of objects')
+@attr(assertion='works')
+@attr('versioning')
+def test_versioned_concurrent_object_create_and_remove():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    key = 'myobj'
+    num_versions = 3
+
+    all_threads = []
+
+    for i in range(3):
+        t = _do_create_versioned_obj_concurrent(client, bucket_name, key, num_versions)
+        all_threads.append(t)
+
+        t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
+        all_threads.append(t)
+
+    for t in all_threads:
+        _do_wait_completion(t)
+
+    t = _do_clear_versioned_bucket_concurrent(client, bucket_name)
+    _do_wait_completion(t)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    eq(('Versions' in response), False)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config')
+@attr('lifecycle')
+def test_lifecycle_set():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Disabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get lifecycle config')
+@attr('lifecycle')
+def test_lifecycle_get():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'test1/', 'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'ID': 'test2/', 'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
+    eq(response['Rules'], rules)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get lifecycle config no id')
+@attr('lifecycle')
+def test_lifecycle_get_no_id():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    rules=[{'Expiration': {'Days': 31}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'Expiration': {'Days': 120}, 'Prefix': 'test2/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
+    current_lc = response['Rules']
+
+    Rule = namedtuple('Rule',['prefix','status','days'])
+    rules = {'rule1' : Rule('test1/','Enabled',31),
+             'rule2' : Rule('test2/','Enabled',120)}
+
+    for lc_rule in current_lc:
+        if lc_rule['Prefix'] == rules['rule1'].prefix:
+            eq(lc_rule['Expiration']['Days'], rules['rule1'].days)
+            eq(lc_rule['Status'], rules['rule1'].status)
+            assert 'ID' in lc_rule
+        elif lc_rule['Prefix'] == rules['rule2'].prefix:
+            eq(lc_rule['Expiration']['Days'], rules['rule2'].days)
+            eq(lc_rule['Status'], rules['rule2'].status)
+            assert 'ID' in lc_rule
+        else:
+            # neither of the rules we supplied was returned; something is wrong
+            assert False, "unexpected lifecycle rule prefix: %s" % lc_rule['Prefix']
+
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
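+# For example, with a 10-second debug interval a rule with 'Days': 1 fires
+# roughly 10 seconds after the objects are uploaded, so sleeping
+# 3*lc_interval below covers rule1 while rule2 ('Days': 5) has not fired yet.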
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_expiration():
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.list_objects(Bucket=bucket_name)
+    init_objects = response['Contents']
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    time.sleep(lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    keep2_objects = response['Contents']
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire3_objects = response['Contents']
+
+    eq(len(init_objects), 6)
+    eq(len(expire1_objects), 4)
+    eq(len(keep2_objects), 4)
+    eq(len(expire3_objects), 2)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration with list-objects-v2')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('list-objects-v2')
+@attr('fails_on_dbstore')
+def test_lifecyclev2_expiration():
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'expire1/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Days': 5}, 'Prefix': 'expire3/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    init_objects = response['Contents']
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    time.sleep(lc_interval)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    keep2_objects = response['Contents']
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects_v2(Bucket=bucket_name)
+    expire3_objects = response['Contents']
+
+    eq(len(init_objects), 6)
+    eq(len(expire1_objects), 4)
+    eq(len(keep2_objects), 4)
+    eq(len(expire3_objects), 2)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration on versioning enabled bucket')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+def test_lifecycle_expiration_versioning_enabled():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    create_multiple_versions(client, bucket_name, "test1/a", 1)
+    client.delete_object(Bucket=bucket_name, Key="test1/a")
+
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    delete_markers = response['DeleteMarkers']
+    eq(len(versions), 1)
+    eq(len(delete_markers), 1)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration with 1 tag')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+def test_lifecycle_expiration_tags1():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    tom_key = 'days1/tom'
+    tom_tagset = {'TagSet':
+                  [{'Key': 'tom', 'Value': 'sawyer'}]}
+
+    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
+                                         Tagging=tom_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    lifecycle_config = {
+        'Rules': [
+            {
+                'Expiration': {
+                    'Days': 1,
+                },
+                'ID': 'rule_tag1',
+                'Filter': {
+                    'Prefix': 'days1/',
+                    'Tag': {
+                        'Key': 'tom',
+                        'Value': 'sawyer'
+                    },
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+
+    # the tagged object should have expired; list the bucket to verify
+    response = client.list_objects(Bucket=bucket_name)
+    expire_objects = response.get('Contents', [])
+
+    eq(len(expire_objects), 0)
+
+# factor out common setup code
+def setup_lifecycle_tags2(client, bucket_name):
+    tom_key = 'days1/tom'
+    tom_tagset = {'TagSet':
+                  [{'Key': 'tom', 'Value': 'sawyer'}]}
+
+    client.put_object(Bucket=bucket_name, Key=tom_key, Body='tom_body')
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=tom_key,
+                                         Tagging=tom_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    huck_key = 'days1/huck'
+    huck_tagset = {
+        'TagSet':
+        [{'Key': 'tom', 'Value': 'sawyer'},
+         {'Key': 'huck', 'Value': 'finn'}]}
+
+    client.put_object(Bucket=bucket_name, Key=huck_key, Body='huck_body')
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=huck_key,
+                                         Tagging=huck_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    lifecycle_config = {
+        'Rules': [
+            {
+                'Expiration': {
+                    'Days': 1,
+                },
+                'ID': 'rule_tag1',
+                'Filter': {
+                    'Prefix': 'days1/',
+                    'Tag': {
+                        'Key': 'tom',
+                        'Value': 'sawyer'
+                    },
+                    'And': {
+                        'Prefix': 'days1',
+                        'Tags': [
+                            {
+                                'Key': 'huck',
+                                'Value': 'finn'
+                            },
+                        ]
+                    }
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    return response
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration with 2 tags')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_expiration_tags2():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = setup_lifecycle_tags2(client, bucket_name)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    eq(len(expire1_objects), 1)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration with versioning and 2 tags')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_expiration_versioned_tags2():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # mix in versioning
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    response = setup_lifecycle_tags2(client, bucket_name)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire1_objects = response['Contents']
+
+    eq(len(expire1_objects), 1)
+
+# setup for a scenario based on Vidushi Mishra's reproducer in rhbz#1877737
+def setup_lifecycle_noncur_tags(client, bucket_name, days):
+
+    # first create and tag the objects (10 versions of 1)
+    key = "myobject_"
+    tagset = {'TagSet':
+              [{'Key': 'vidushi', 'Value': 'mishra'}]}
+
+    for ix in range(10):
+        body = "%s v%d" % (key, ix)
+        response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+        eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+        response = client.put_object_tagging(Bucket=bucket_name, Key=key,
+                                             Tagging=tagset)
+        eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    lifecycle_config = {
+        'Rules': [
+            {
+                'NoncurrentVersionExpiration': {
+                    'NoncurrentDays': days,
+                },
+                'ID': 'rule_tag1',
+                'Filter': {
+                    'Prefix': '',
+                    'Tag': {
+                        'Key': 'vidushi',
+                        'Value': 'mishra'
+                    },
+                },
+                'Status': 'Enabled',
+            },
+        ]
+    }
+
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle_config)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    return response
+
+def verify_lifecycle_expiration_noncur_tags(client, bucket_name, secs):
+    time.sleep(secs)
+    try:
+        response = client.list_object_versions(Bucket=bucket_name)
+        objs_list = response['Versions']
+    except KeyError:
+        objs_list = []
+    return len(objs_list)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle noncurrent expiration with 1 tag filter')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_expiration_noncur_tags1():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    # create 10 object versions (9 noncurrent) and a tag-filter
+    # noncurrent version expiration at 4 "days"
+    response = setup_lifecycle_noncur_tags(client, bucket_name, 4)
+
+    lc_interval = get_lc_debug_interval()
+
+    num_objs = verify_lifecycle_expiration_noncur_tags(
+        client, bucket_name, 2*lc_interval)
+
+    # after 2 lc intervals, all 10 versions should still exist
+    eq(num_objs, 10)
+
+    num_objs = verify_lifecycle_expiration_noncur_tags(
+        client, bucket_name, 5*lc_interval)
+
+    # after 5 more lc intervals (past the 4-"day" threshold), only the current version should remain
+    eq(num_objs, 1)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='id too long in lifecycle rule')
+@attr('lifecycle')
+@attr(assertion='fails 400')
+def test_lifecycle_id_too_long():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 256*'a', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='same id')
+@attr('lifecycle')
+@attr(assertion='fails 400')
+def test_lifecycle_same_id():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 1}, 'Prefix': 'test1/', 'Status':'Enabled'},
+           {'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test2/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='invalid status in lifecycle rule')
+@attr('lifecycle')
+@attr(assertion='fails 400')
+def test_lifecycle_invalid_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'disabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+    rules=[{'ID': 'rule1', 'Expiration': {'Days': 2}, 'Prefix': 'test1/', 'Status':'invalid'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with expiration date')
+@attr('lifecycle')
+def test_lifecycle_set_date():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2017-09-27'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with not iso8601 date')
+@attr('lifecycle')
+@attr(assertion='fails 400')
+def test_lifecycle_set_invalid_date():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '20200101'}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration with date')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_expiration_date():
+    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2015-01-01'}, 'Prefix': 'past/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'Expiration': {'Date': '2030-01-01'}, 'Prefix': 'future/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    response = client.list_objects(Bucket=bucket_name)
+    init_objects = response['Contents']
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(3*lc_interval)
+    response = client.list_objects(Bucket=bucket_name)
+    expire_objects = response['Contents']
+
+    eq(len(init_objects), 2)
+    eq(len(expire_objects), 1)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration days 0')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+def test_lifecycle_expiration_days0():
+    bucket_name = _create_objects(keys=['days0/foo', 'days0/bar'])
+    client = get_client()
+
+    rules=[{'Expiration': {'Days': 0}, 'ID': 'rule1', 'Prefix': 'days0/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+
+    # days: 0 is legal in a transition rule, but not legal in an
+    # expiration rule
+    response_code = ""
+    try:
+        response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    except botocore.exceptions.ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    eq(response_code, 'InvalidArgument')
+
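+# Illustrative sketch (not part of the original suite): the same 'Days': 0
+# that is rejected in an Expiration action is accepted in a Transition
+# action. The storage class here is an assumption; substitute one reported
+# by configured_storage_classes().
+def _sketch_lifecycle_transition_days0(client, bucket_name, storage_class):
+    rules = [{'ID': 'rule1', 'Prefix': 'days0/', 'Status': 'Enabled',
+              'Transitions': [{'Days': 0, 'StorageClass': storage_class}]}]
+    lifecycle = {'Rules': rules}
+    # expected to succeed with HTTP 200, unlike the expiration case above
+    return client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)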
+
+def setup_lifecycle_expiration(client, bucket_name, rule_id, delta_days,
+                               rule_prefix):
+    rules=[{'ID': rule_id,
+            'Expiration': {'Days': delta_days}, 'Prefix': rule_prefix,
+            'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    key = rule_prefix + 'foo'
+    body = 'bar'
+    response = client.put_object(Bucket=bucket_name, Key=key, Body=body)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    return response
+
+def check_lifecycle_expiration_header(response, start_time, rule_id,
+                                      delta_days):
+    expr_exists = ('x-amz-expiration' in response['ResponseMetadata']['HTTPHeaders'])
+    if (not expr_exists):
+        return False
+    expr_hdr = response['ResponseMetadata']['HTTPHeaders']['x-amz-expiration']
+
+    m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', expr_hdr)
+    if m is None:
+        return False
+
+    expiration = dateutil.parser.parse(m.group(1))
+    days_to_expire = ((expiration.replace(tzinfo=None) - start_time).days == delta_days)
+    rule_eq_id = (m.group(2) == rule_id)
+
+    return days_to_expire and rule_eq_id
+
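+# The x-amz-expiration header parsed above looks like (values illustrative):
+#   expiry-date="Sat, 01 Jan 2022 00:00:00 GMT", rule-id="rule1"
+# A minimal sketch of the same parse, returning (expiry_date, rule_id):
+def _sketch_parse_expiration_header(expr_hdr):
+    m = re.search(r'expiry-date="(.+)", rule-id="(.+)"', expr_hdr)
+    if m is None:
+        return None
+    return (dateutil.parser.parse(m.group(1)), m.group(2))
+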
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle expiration header put')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+def test_lifecycle_expiration_header_put():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    now = datetime.datetime.now(None)
+    response = setup_lifecycle_expiration(
+        client, bucket_name, 'rule1', 1, 'days1/')
+    eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
+
+@attr(resource='bucket')
+@attr(method='head')
+@attr(operation='test lifecycle expiration header head')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_dbstore')
+def test_lifecycle_expiration_header_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    now = datetime.datetime.now(None)
+    response = setup_lifecycle_expiration(
+        client, bucket_name, 'rule1', 1, 'days1/')
+
+    key = 'days1/' + 'foo'
+
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(check_lifecycle_expiration_header(response, now, 'rule1', 1), True)
+
+@attr(resource='bucket')
+@attr(method='head')
+@attr(operation='test lifecycle expiration header head with tags')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_dbstore')
+def test_lifecycle_expiration_header_tags_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    lifecycle={
+        "Rules": [
+        {
+            "Filter": {
+                "Tag": {"Key": "key1", "Value": "tag1"}
+            },
+            "Status": "Enabled",
+            "Expiration": {
+                "Days": 1
+            },
+            "ID": "rule1"
+            },
+        ]
+    }
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    key1 = "obj_key1"
+    body1 = "obj_key1_body"
+    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
+          {'Key': 'key5','Value': 'tag5'}]}
+    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
+
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key1)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), True)
+
+    # test that the header is not returned when it should not be
+    lifecycle={
+        "Rules": [
+        {
+            "Filter": {
+                "Tag": {"Key": "key2", "Value": "tag1"}
+            },
+            "Status": "Enabled",
+            "Expiration": {
+                "Days": 1
+            },
+            "ID": "rule1"
+            },
+        ]
+    }
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key1)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
+
+@attr(resource='bucket')
+@attr(method='head')
+@attr(operation='test lifecycle expiration header head with tags and And')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+def test_lifecycle_expiration_header_and_tags_head():
+    now = datetime.datetime.now(None)
+    bucket_name = get_new_bucket()
+    client = get_client()
+    lifecycle={
+        "Rules": [
+        {
+            "Filter": {
+                "And": {
+                    "Tags": [
+                        {
+                            "Key": "key1",
+                            "Value": "tag1"
+                        },
+                        {
+                            "Key": "key5",
+                            "Value": "tag6"
+                        }
+                    ]
+                }
+            },
+            "Status": "Enabled",
+            "Expiration": {
+                "Days": 1
+            },
+            "ID": "rule1"
+            },
+        ]
+    }
+    response = client.put_bucket_lifecycle_configuration(
+        Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    key1 = "obj_key1"
+    body1 = "obj_key1_body"
+    tags1={'TagSet': [{'Key': 'key1', 'Value': 'tag1'},
+          {'Key': 'key5','Value': 'tag5'}]}
+    response = client.put_object(Bucket=bucket_name, Key=key1, Body=body1)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key1,Tagging=tags1)
+
+    # stat the object, check header
+    response = client.head_object(Bucket=bucket_name, Key=key1)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(check_lifecycle_expiration_header(response, datetime.datetime.now(None), 'rule1', 1), False)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with noncurrent version expiration')
+@attr('lifecycle')
+def test_lifecycle_set_noncurrent():
+    bucket_name = _create_objects(keys=['past/foo', 'future/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'past/', 'Status':'Enabled'},
+           {'ID': 'rule2', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}, 'Prefix': 'future/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle non-current version expiration')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_noncur_expiration():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    create_multiple_versions(client, bucket_name, "test1/a", 3)
+    # not checking the object contents on the second run, because the function doesn't support multiple checks
+    create_multiple_versions(client, bucket_name, "test2/abc", 3, check_versions=False)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    init_versions = response['Versions']
+
+    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 2}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    expire_versions = response['Versions']
+    eq(len(init_versions), 6)
+    eq(len(expire_versions), 4)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with delete marker expiration')
+@attr('lifecycle')
+def test_lifecycle_set_deletemarker():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with Filter')
+@attr('lifecycle')
+def test_lifecycle_set_filter():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {'Prefix': 'foo'}, 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with empty Filter')
+@attr('lifecycle')
+def test_lifecycle_set_empty_filter():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Filter': {}, 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle delete marker expiration')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_deletemarker_expiration():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    create_multiple_versions(client, bucket_name, "test1/a", 1)
+    create_multiple_versions(client, bucket_name, "test2/abc", 1, check_versions=False)
+    client.delete_object(Bucket=bucket_name, Key="test1/a")
+    client.delete_object(Bucket=bucket_name, Key="test2/abc")
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    init_versions = response['Versions']
+    deleted_versions = response['DeleteMarkers']
+    total_init_versions = init_versions + deleted_versions
+
+    rules=[{'ID': 'rule1', 'NoncurrentVersionExpiration': {'NoncurrentDays': 1}, 'Expiration': {'ExpiredObjectDeleteMarker': True}, 'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(7*lc_interval)
+
+    response = client.list_object_versions(Bucket=bucket_name)
+    init_versions = response['Versions']
+    deleted_versions = response['DeleteMarkers']
+    total_expire_versions = init_versions + deleted_versions
+
+    eq(len(total_init_versions), 4)
+    eq(len(total_expire_versions), 2)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with multipart expiration')
+@attr('lifecycle')
+def test_lifecycle_set_multipart():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules = [
+        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
+         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
+        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled',
+         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 3}}
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle multipart expiration')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_multipart_expiration():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    key_names = ['test1/a', 'test2/']
+    upload_ids = []
+
+    for key in key_names:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+        upload_ids.append(response['UploadId'])
+
+    response = client.list_multipart_uploads(Bucket=bucket_name)
+    init_uploads = response['Uploads']
+
+    rules = [
+        {'ID': 'rule1', 'Prefix': 'test1/', 'Status': 'Enabled',
+         'AbortIncompleteMultipartUpload': {'DaysAfterInitiation': 2}},
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
+
+    response = client.list_multipart_uploads(Bucket=bucket_name)
+    expired_uploads = response['Uploads']
+    eq(len(init_uploads), 2)
+    eq(len(expired_uploads), 1)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle transition config with a non-ISO 8601 date')
+@attr('lifecycle')
+@attr(assertion='fails 400')
+def test_lifecycle_transition_set_invalid_date():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Expiration': {'Date': '2023-09-27'},'Transitions': [{'Date': '20220927','StorageClass': 'GLACIER'}],'Prefix': 'test1/', 'Status':'Enabled'}]
+    lifecycle = {'Rules': rules}
+    e = assert_raises(ClientError, client.put_bucket_lifecycle_configuration, Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+def _test_encryption_sse_customer_write(file_size):
+    """
+    Write a file of A's of the given size with SSE-C headers, read it back
+    with the same headers, and confirm the contents round-trip.
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'testobj'
+    data = 'A'*file_size
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    eq(body, data)
+
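+# The lambda idiom above relies on botocore's event system: a handler
+# registered for 'before-call.s3.<Operation>' may mutate the raw HTTP
+# headers just before the request is signed and sent. A minimal sketch of
+# the idiom (key material is the same test vector used above):
+def _sketch_register_sse_c_headers(client, operation='PutObject'):
+    headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+    }
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    client.meta.events.register('before-call.s3.' + operation, lf)
+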
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle transition')
+@attr('lifecycle')
+@attr('lifecycle_transition')
+@attr('fails_on_aws')
+def test_lifecycle_transition():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        raise SkipTest
+
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
+           {'ID': 'rule2', 'Transitions': [{'Days': 6, 'StorageClass': sc[2]}], 'Prefix': 'expire3/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    eq(len(init_keys), 6)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire1_keys['STANDARD']), 4)
+    eq(len(expire1_keys[sc[1]]), 2)
+    eq(len(expire1_keys[sc[2]]), 0)
+
+    # Wait for next expiration cycle
+    time.sleep(lc_interval)
+    keep2_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(keep2_keys['STANDARD']), 4)
+    eq(len(keep2_keys[sc[1]]), 2)
+    eq(len(keep2_keys[sc[2]]), 0)
+
+    # Wait for final expiration cycle
+    time.sleep(5*lc_interval)
+    expire3_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire3_keys['STANDARD']), 2)
+    eq(len(expire3_keys[sc[1]]), 2)
+    eq(len(expire3_keys[sc[2]]), 2)
+
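+# A sketch of the sleep arithmetic used by these transition tests: each
+# lifecycle "day" is one debug interval (get_lc_debug_interval(), assumed to
+# be 10 seconds on a test cluster), plus a couple of extra cycles of fudge.
+def _sketch_lc_wait_seconds(days, lc_interval, fudge_cycles=2):
+    # e.g. days=1, lc_interval=10 -> sleep ~30s before checking storage class
+    return (days + fudge_cycles) * lc_interval
+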
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle transition with multiple transitions in a single rule')
+@attr('lifecycle')
+@attr('lifecycle_transition')
+@attr('fails_on_aws')
+def test_lifecycle_transition_single_rule_multi_trans():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        raise SkipTest
+
+    bucket_name = _create_objects(keys=['expire1/foo', 'expire1/bar', 'keep2/foo',
+                                        'keep2/bar', 'expire3/foo', 'expire3/bar'])
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc[1]}, {'Days': 7, 'StorageClass': sc[2]}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    eq(len(init_keys), 6)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(5*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire1_keys['STANDARD']), 4)
+    eq(len(expire1_keys[sc[1]]), 2)
+    eq(len(expire1_keys[sc[2]]), 0)
+
+    # Wait for next expiration cycle
+    time.sleep(lc_interval)
+    keep2_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(keep2_keys['STANDARD']), 4)
+    eq(len(keep2_keys[sc[1]]), 2)
+    eq(len(keep2_keys[sc[2]]), 0)
+
+    # Wait for final expiration cycle
+    time.sleep(6*lc_interval)
+    expire3_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire3_keys['STANDARD']), 4)
+    eq(len(expire3_keys[sc[1]]), 0)
+    eq(len(expire3_keys[sc[2]]), 2)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='set lifecycle config with noncurrent version transition')
+@attr('lifecycle')
+@attr('lifecycle_transition')
+def test_lifecycle_set_noncurrent_transition():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        raise SkipTest
+
+    bucket = get_new_bucket()
+    client = get_client()
+    rules = [
+        {
+            'ID': 'rule1',
+            'Prefix': 'test1/',
+            'Status': 'Enabled',
+            'NoncurrentVersionTransitions': [
+                {
+                    'NoncurrentDays': 2,
+                    'StorageClass': sc[1]
+                },
+                {
+                    'NoncurrentDays': 4,
+                    'StorageClass': sc[2]
+                }
+            ],
+            'NoncurrentVersionExpiration': {
+                'NoncurrentDays': 6
+            }
+        },
+        {'ID': 'rule2', 'Prefix': 'test2/', 'Status': 'Disabled', 'NoncurrentVersionExpiration': {'NoncurrentDays': 3}}
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle non-current version transition')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('lifecycle_transition')
+@attr('fails_on_aws')
+def test_lifecycle_noncur_transition():
+    sc = configured_storage_classes()
+    if len(sc) < 3:
+        raise SkipTest
+
+    bucket = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
+
+    rules = [
+        {
+            'ID': 'rule1',
+            'Prefix': 'test1/',
+            'Status': 'Enabled',
+            'NoncurrentVersionTransitions': [
+                {
+                    'NoncurrentDays': 1,
+                    'StorageClass': sc[1]
+                },
+                {
+                    'NoncurrentDays': 5,
+                    'StorageClass': sc[2]
+                }
+            ],
+            'NoncurrentVersionExpiration': {
+                'NoncurrentDays': 9
+            }
+        }
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    create_multiple_versions(client, bucket, "test1/a", 3)
+    create_multiple_versions(client, bucket, "test1/b", 3)
+
+    init_keys = list_bucket_storage_class(client, bucket)
+    eq(len(init_keys['STANDARD']), 6)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    eq(len(expire1_keys['STANDARD']), 2)
+    eq(len(expire1_keys[sc[1]]), 4)
+    eq(len(expire1_keys[sc[2]]), 0)
+
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    eq(len(expire1_keys['STANDARD']), 2)
+    eq(len(expire1_keys[sc[1]]), 0)
+    eq(len(expire1_keys[sc[2]]), 4)
+
+    time.sleep(6*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    eq(len(expire1_keys['STANDARD']), 2)
+    eq(len(expire1_keys[sc[1]]), 0)
+    eq(len(expire1_keys[sc[2]]), 0)
+
+def verify_object(client, bucket, key, content=None, sc=None):
+    response = client.get_object(Bucket=bucket, Key=key)
+
+    if sc is None:
+        sc = 'STANDARD'
+
+    if 'StorageClass' in response:
+        eq(response['StorageClass'], sc)
+    else:  # storage class should be STANDARD
+        eq('STANDARD', sc)
+
+    if content is not None:
+        body = _get_body(response)
+        eq(body, content)
+
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle transition for cloud')
+@attr('lifecycle')
+@attr('lifecycle_transition')
+@attr('cloud_transition')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_cloud_transition():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        raise SkipTest
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
+
+    keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar']
+    bucket_name = _create_objects(keys=keys)
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    eq(len(init_keys), 4)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(10*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire1_keys['STANDARD']), 2)
+
+    if (retain_head_object != None and retain_head_object == "true"):
+        eq(len(expire1_keys[cloud_sc]), 2)
+    else:
+        eq(len(expire1_keys[cloud_sc]), 0)
+
+    time.sleep(2*lc_interval)
+    # Check that the objects were copied to the target path
+    if target_path is None:
+        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
+    prefix = bucket_name + "/"
+
+    cloud_client = get_cloud_client()
+
+    time.sleep(12*lc_interval)
+    expire1_key1_str = prefix + keys[0]
+    verify_object(cloud_client, target_path, expire1_key1_str, keys[0], target_sc)
+
+    expire1_key2_str = prefix + keys[1]
+    verify_object(cloud_client, target_path, expire1_key2_str, keys[1], target_sc)
+
+    # Now verify the object on source rgw
+    src_key = keys[0]
+    if retain_head_object == "true":
+        # verify HEAD response
+        response = client.head_object(Bucket=bucket_name, Key=keys[0])
+        eq(0, response['ContentLength'])
+        eq(cloud_sc, response['StorageClass'])
+
+        # GET should return InvalidObjectState error
+        e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key)
+        status, error_code = _get_status_and_error_code(e.response)
+        eq(status, 403)
+        eq(error_code, 'InvalidObjectState')
+
+        # COPY of object should return InvalidObjectState error
+        copy_source = {'Bucket': bucket_name, 'Key': src_key}
+        e = assert_raises(ClientError, client.copy, CopySource=copy_source, Bucket=bucket_name, Key='copy_obj')
+        status, error_code = _get_status_and_error_code(e.response)
+        eq(status, 403)
+        eq(error_code, 'InvalidObjectState')
+
+        # DELETE should succeed
+        response = client.delete_object(Bucket=bucket_name, Key=src_key)
+        e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=src_key)
+        status, error_code = _get_status_and_error_code(e.response)
+        eq(status, 404)
+        eq(error_code, 'NoSuchKey')
+
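+# When no cloud target path is configured, the tests above fall back to the
+# default bucket name RGW derives on the remote endpoint; a sketch of that
+# naming rule as used here:
+def _sketch_default_cloud_target_path(cloud_storage_class):
+    # e.g. 'CLOUDTIER' -> 'rgwx-default-cloudtier-cloud-bucket'
+    return "rgwx-default-" + cloud_storage_class.lower() + "-cloud-bucket"
+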
+# Similar to 'test_lifecycle_transition' but for cloud transition
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle transition for cloud')
+@attr('lifecycle')
+@attr('lifecycle_transition')
+@attr('cloud_transition')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_cloud_multiple_transition():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        raise SkipTest
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
+
+    sc1 = get_cloud_regular_storage_class()
+
+    if sc1 is None:
+        raise SkipTest
+
+    sc = ['STANDARD', sc1, cloud_sc]
+
+    keys=['expire1/foo', 'expire1/bar', 'keep2/foo', 'keep2/bar']
+    bucket_name = _create_objects(keys=keys)
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': sc1}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
+           {'ID': 'rule2', 'Transitions': [{'Days': 5, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'},
+           {'ID': 'rule3', 'Expiration': {'Days': 9}, 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+    lifecycle = {'Rules': rules}
+    client.put_bucket_lifecycle_configuration(Bucket=bucket_name, LifecycleConfiguration=lifecycle)
+
+    # Get list of all keys
+    response = client.list_objects(Bucket=bucket_name)
+    init_keys = _get_keys(response)
+    eq(len(init_keys), 4)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire1_keys['STANDARD']), 2)
+    eq(len(expire1_keys[sc[1]]), 2)
+    eq(len(expire1_keys[sc[2]]), 0)
+
+    # Wait for next expiration cycle
+    time.sleep(7*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire1_keys['STANDARD']), 2)
+    eq(len(expire1_keys[sc[1]]), 0)
+
+    if (retain_head_object != None and retain_head_object == "true"):
+        eq(len(expire1_keys[sc[2]]), 2)
+    else:
+        eq(len(expire1_keys[sc[2]]), 0)
+
+    # Wait for final expiration cycle
+    time.sleep(12*lc_interval)
+    expire3_keys = list_bucket_storage_class(client, bucket_name)
+    eq(len(expire3_keys['STANDARD']), 2)
+    eq(len(expire3_keys[sc[1]]), 0)
+    eq(len(expire3_keys[sc[2]]), 0)
+
+# Noncurrent objects for cloud transition
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle non-current version expiration on cloud transition')
+@attr('lifecycle')
+@attr('lifecycle_expiration')
+@attr('lifecycle_transition')
+@attr('cloud_transition')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_noncur_cloud_transition():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        raise SkipTest
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
+
+    sc1 = get_cloud_regular_storage_class()
+
+    if sc1 is None:
+        raise SkipTest
+
+    sc = ['STANDARD', sc1, cloud_sc]
+
+    bucket = get_new_bucket()
+    client = get_client()
+    check_configure_versioning_retry(bucket, "Enabled", "Enabled")
+
+    rules = [
+        {
+            'ID': 'rule1',
+            'Prefix': 'test1/',
+            'Status': 'Enabled',
+            'NoncurrentVersionTransitions': [
+                {
+                    'NoncurrentDays': 1,
+                    'StorageClass': sc[1]
+                },
+                {
+                    'NoncurrentDays': 5,
+                    'StorageClass': sc[2]
+                }
+            ],
+        }
+    ]
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    keys = ['test1/a', 'test1/b']
+
+    for k in keys:
+        create_multiple_versions(client, bucket, k, 3)
+
+    init_keys = list_bucket_storage_class(client, bucket)
+    eq(len(init_keys['STANDARD']), 6)
+
+    response = client.list_object_versions(Bucket=bucket)
+
+    lc_interval = get_lc_debug_interval()
+
+    time.sleep(4*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    eq(len(expire1_keys['STANDARD']), 2)
+    eq(len(expire1_keys[sc[1]]), 4)
+    eq(len(expire1_keys[sc[2]]), 0)
+
+    time.sleep(10*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    eq(len(expire1_keys['STANDARD']), 2)
+    eq(len(expire1_keys[sc[1]]), 0)
+
+    if (retain_head_object == None or retain_head_object == "false"):
+        eq(len(expire1_keys[sc[2]]), 0)
+    else:
+        eq(len(expire1_keys[sc[2]]), 4)
+
+    # check that the versioned objects exist on the cloud endpoint
+    if target_path is None:
+        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
+    prefix = bucket + "/"
+
+    cloud_client = get_cloud_client()
+
+    time.sleep(lc_interval)
+    result = list_bucket_versions(client, bucket)
+
+    for src_key in keys:
+        for k in result[src_key]:
+            expire1_key1_str = prefix + src_key + "-" + k['VersionId']
+            verify_object(cloud_client, target_path, expire1_key1_str, None, target_sc)
+
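+# Noncurrent versions transitioned to the cloud are looked up above under
+# '<bucket>/<key>-<version-id>' on the remote endpoint; a sketch of the
+# expected remote key:
+def _sketch_cloud_versioned_key(bucket, key, version_id):
+    return bucket + "/" + key + "-" + version_id
+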
+# The test harness for lifecycle is configured to treat days as 10 second intervals.
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='test lifecycle transition for cloud')
+@attr('lifecycle')
+@attr('lifecycle_transition')
+@attr('cloud_transition')
+@attr('fails_on_aws')
+@attr('fails_on_dbstore')
+def test_lifecycle_cloud_transition_large_obj():
+    cloud_sc = get_cloud_storage_class()
+    if cloud_sc is None:
+        raise SkipTest
+
+    retain_head_object = get_cloud_retain_head_object()
+    target_path = get_cloud_target_path()
+    target_sc = get_cloud_target_storage_class()
+
+    bucket = get_new_bucket()
+    client = get_client()
+    rules=[{'ID': 'rule1', 'Transitions': [{'Days': 1, 'StorageClass': cloud_sc}], 'Prefix': 'expire1/', 'Status': 'Enabled'}]
+
+    keys = ['keep/multi', 'expire1/multi']
+    size = 9*1024*1024
+    data = 'A'*size
+
+    for k in keys:
+        client.put_object(Bucket=bucket, Body=data, Key=k)
+        verify_object(client, bucket, k, data)
+
+    lifecycle = {'Rules': rules}
+    response = client.put_bucket_lifecycle_configuration(Bucket=bucket, LifecycleConfiguration=lifecycle)
+
+    lc_interval = get_lc_debug_interval()
+
+    # Wait for first expiration (plus fudge to handle the timer window)
+    time.sleep(8*lc_interval)
+    expire1_keys = list_bucket_storage_class(client, bucket)
+    eq(len(expire1_keys['STANDARD']), 1)
+
+    if retain_head_object == "true":
+        eq(len(expire1_keys[cloud_sc]), 1)
+    else:
+        eq(len(expire1_keys[cloud_sc]), 0)
+
+    # Check that the objects were copied to the target path
+    if target_path is None:
+        target_path = "rgwx-default-" + cloud_sc.lower() + "-cloud-bucket"
+    prefix = bucket + "/"
+
+    # multipart upload takes time
+    time.sleep(12*lc_interval)
+    cloud_client = get_cloud_client()
+
+    expire1_key1_str = prefix + keys[1]
+    verify_object(cloud_client, target_path, expire1_key1_str, data, target_sc)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-C encrypted transfer 1 byte')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encrypted_transfer_1b():
+    _test_encryption_sse_customer_write(1)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-C encrypted transfer 1KB')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encrypted_transfer_1kb():
+    _test_encryption_sse_customer_write(1024)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-C encrypted transfer 1MB')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encrypted_transfer_1MB():
+    _test_encryption_sse_customer_write(1024*1024)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-C encrypted transfer 13 bytes')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encrypted_transfer_13b():
+    _test_encryption_sse_customer_write(13)
+
+
+@attr(resource='object')
+@attr(method='head')
+@attr(operation='Test HEAD on an SSE-C encrypted object')
+@attr(assertion='success')
+@attr('encryption')
+def test_encryption_sse_c_method_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*1000
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.HeadObject', lf)
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write encrypted with SSE-C and read without SSE-C')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_encryption_sse_c_present():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*1000
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write encrypted with SSE-C but read with other key')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_encryption_sse_c_other_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers_A = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+    sse_client_headers_B = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
+        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_A))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers_B))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write encrypted with SSE-C, but md5 is bad')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_encryption_sse_c_invalid_md5():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
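+# The customer-key-md5 header must be the base64-encoded MD5 digest of the
+# *raw* key bytes (not of the base64 text), which is why the all-'A' value
+# above is rejected. A sketch of computing a matching digest:
+def _sketch_sse_c_key_md5(key_b64):
+    import base64, hashlib
+    raw = base64.b64decode(key_b64)
+    # for the test key this yields 'DWygnHRtgiJ77HCm+1rvHw=='
+    return base64.b64encode(hashlib.md5(raw).digest()).decode('ascii')
+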
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write encrypted with SSE-C, but dont provide MD5')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_encryption_sse_c_no_md5():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='declare SSE-C but do not provide key')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_encryption_sse_c_no_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Do not declare SSE-C but provide key and MD5')
+@attr(assertion='operation successful, no encryption')
+@attr('encryption')
+def test_encryption_key_no_sse_c():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    data = 'A'*100
+    key = 'testobj'
+    sse_client_headers = {
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+def _multipart_upload_enc(client, bucket_name, key, size, part_size, init_headers, part_headers, metadata, resend_parts):
+    """
+    generate a multi-part upload for a random file of specifed size,
+    if requested, generate a list of the parts
+    return the upload descriptor
+    """
+    if client == None:
+        client = get_client()
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(init_headers))
+    client.meta.events.register('before-call.s3.CreateMultipartUpload', lf)
+    if metadata == None:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key)
+    else:
+        response = client.create_multipart_upload(Bucket=bucket_name, Key=key, Metadata=metadata)
+
+    upload_id = response['UploadId']
+    s = ''
+    parts = []
+    for i, part in enumerate(generate_random(size, part_size)):
+        # part_num is necessary because PartNumber for upload_part and in parts must start at 1 and i starts at 0
+        part_num = i+1
+        s += part
+        lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
+        client.meta.events.register('before-call.s3.UploadPart', lf)
+        response = client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+        parts.append({'ETag': response['ETag'].strip('"'), 'PartNumber': part_num})
+        if i in resend_parts:
+            lf = (lambda **kwargs: kwargs['params']['headers'].update(part_headers))
+            client.meta.events.register('before-call.s3.UploadPart', lf)
+            client.upload_part(UploadId=upload_id, Bucket=bucket_name, Key=key, PartNumber=part_num, Body=part)
+
+    return (upload_id, s, parts)
+
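+# Typical use of the helper above (a sketch, not a collected test): upload
+# two 5 MiB parts with SSE-C headers on init and parts, then complete with
+# the same headers, as the tests below do.
+def _sketch_multipart_enc_roundtrip(client, bucket_name, key, enc_headers):
+    (upload_id, data, parts) = _multipart_upload_enc(
+        client, bucket_name, key, size=10*1024*1024, part_size=5*1024*1024,
+        init_headers=enc_headers, part_headers=enc_headers,
+        metadata=None, resend_parts=[])
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key,
+        UploadId=upload_id, MultipartUpload={'Parts': parts})
+    return data
+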
+def _check_content_using_range_enc(client, bucket_name, key, data, step, enc_headers=None):
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    size = response['ContentLength']
+    for ofs in range(0, size, step):
+        toread = size - ofs
+        if toread > step:
+            toread = step
+        end = ofs + toread - 1
+        lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+        client.meta.events.register('before-call.s3.GetObject', lf)
+        r = 'bytes={s}-{e}'.format(s=ofs, e=end)
+        response = client.get_object(Bucket=bucket_name, Key=key, Range=r)
+        read_range = response['ContentLength']
+        body = _get_body(response)
+        eq(read_range, toread)
+        eq(body, data[ofs:end+1])
+
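+# Range semantics used above: 'bytes={start}-{end}' is inclusive on both
+# ends, so a step of 1000 reads offsets 0-999, 1000-1999, and so on. A
+# sketch of the header for one window:
+def _sketch_range_header(ofs, step, size):
+    end = min(ofs + step, size) - 1
+    return 'bytes={s}-{e}'.format(s=ofs, e=end)
+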
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='complete multi-part upload')
+@attr(assertion='successful')
+@attr('encryption')
+@attr('fails_on_aws') # allow-unordered is a non-standard extension
+@attr('fails_on_dbstore')
+def test_encryption_sse_c_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    enc_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.head_bucket(Bucket=bucket_name)
+    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
+    eq(rgw_object_count, 1)
+    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
+    eq(rgw_bytes_used, objlen)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    eq(response['Metadata'], metadata)
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
+
+    body = _get_body(response)
+    eq(body, data)
+    size = response['ContentLength']
+    eq(len(body), size)
+
+    _check_content_using_range_enc(client, bucket_name, key, data, 1000000, enc_headers=enc_headers)
+    _check_content_using_range_enc(client, bucket_name, key, data, 10000000, enc_headers=enc_headers)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multipart upload with bad key for uploading chunks')
+@attr(assertion='fails 400')
+@attr('encryption')
+# TODO: remove this fails_on_rgw when I fix it
+@attr('fails_on_rgw')
+def test_encryption_sse_c_multipart_invalid_chunks_1():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
+        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
+    }
+    resend_parts = []
+
+    e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
+            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multipart upload with bad md5 for chunks')
+@attr(assertion='fails 400')
+@attr('encryption')
+# TODO: remove this fails_on_rgw when I fix it
+@attr('fails_on_rgw')
+def test_encryption_sse_c_multipart_invalid_chunks_2():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'AAAAAAAAAAAAAAAAAAAAAA=='
+    }
+    resend_parts = []
+
+    e = assert_raises(ClientError, _multipart_upload_enc, client=client, bucket_name=bucket_name,
+            key=key, size=objlen, part_size=5*1024*1024, init_headers=init_headers, part_headers=part_headers, metadata=metadata, resend_parts=resend_parts)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='complete multi-part upload and download with bad key')
+@attr(assertion='download fails 400 with the wrong key')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encryption_sse_c_multipart_bad_download():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    put_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==',
+        'Content-Type': content_type
+    }
+    get_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': '6b+WOZ1T3cqZMxgThRcXAQBrS5mXKdDUphvpxptl9/4=',
+        'x-amz-server-side-encryption-customer-key-md5': 'arxBvwY2V4SiOne6yppVPQ=='
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=5*1024*1024, init_headers=put_headers, part_headers=put_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.head_bucket(Bucket=bucket_name)
+    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
+    eq(rgw_object_count, 1)
+    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
+    eq(rgw_bytes_used, objlen)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    eq(response['Metadata'], metadata)
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_encryption_sse_c_post_object_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["starts-with", "$x-amz-server-side-encryption-customer-algorithm", ""], \
+    ["starts-with", "$x-amz-server-side-encryption-customer-key", ""], \
+    ["starts-with", "$x-amz-server-side-encryption-customer-key-md5", ""], \
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('x-amz-server-side-encryption-customer-algorithm', 'AES256'), \
+    ('x-amz-server-side-encryption-customer-key', 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs='), \
+    ('x-amz-server-side-encryption-customer-key-md5', 'DWygnHRtgiJ77HCm+1rvHw=='), \
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+
+    get_headers = {
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(get_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def _test_sse_kms_customer_write(file_size, key_id = 'testkey-1'):
+    """
+    Write a file of A's of the given size with SSE-KMS headers set,
+    read it back, and confirm the contents round-trip intact.
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': key_id
+    }
+    data = 'A'*file_size
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
+
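+    # unlike SSE-C, SSE-KMS objects decrypt transparently on GET, so the
+    # read back needs no extra headers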
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    body = _get_body(response)
+    eq(body, data)
+
+
+@attr(resource='object')
+@attr(method='head')
+@attr(operation='Test that HEAD on an SSE-KMS encrypted object works properly')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_method_head():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+    data = 'A'*1000
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.HeadObject', lf)
+    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='write encrypted with SSE-KMS and read without SSE-KMS')
+@attr(assertion='operation success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_present():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    eq(body, data)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='declare SSE-KMS but do not provide key_id')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_sse_kms_no_key():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
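+    # only the failure itself is asserted; the exact status code is
+    # backend-specific, so it is not checked here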
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Do not declare SSE-KMS but provide key_id')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_sse_kms_not_declared():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-2'
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key, Body=data)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='complete KMS multi-part upload')
+@attr(assertion='successful')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_multipart_upload():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    enc_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'Content-Type': content_type
+    }
+    resend_parts = []
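+    # resend_parts presumably names part numbers that _multipart_upload_enc
+    # uploads twice to exercise retry handling; none are resent here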
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.head_bucket(Bucket=bucket_name)
+    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
+    eq(rgw_object_count, 1)
+    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
+    eq(rgw_bytes_used, objlen)
+
+    # no special headers are needed to read the object back
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    eq(response['Metadata'], metadata)
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
+
+    body = _get_body(response)
+    eq(body, data)
+    size = response['ContentLength']
+    eq(len(body), size)
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multipart KMS upload with bad key_id for uploading chunks')
+@attr(assertion='successful')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_multipart_invalid_chunks_1():
+    kms_keyid = get_main_kms_keyid()
+    kms_keyid2 = get_secondary_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/bla'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid2
+    }
+    resend_parts = []
+
+    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
+            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
+            resend_parts=resend_parts)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='multipart KMS upload with nonexistent key_id for chunks')
+@attr(assertion='successful')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_multipart_invalid_chunks_2():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    init_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'Content-Type': content_type
+    }
+    part_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-not-present'
+    }
+    resend_parts = []
+
+    _multipart_upload_enc(client, bucket_name, key, objlen, part_size=5*1024*1024,
+            init_headers=init_headers, part_headers=part_headers, metadata=metadata,
+            resend_parts=resend_parts)
+
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated KMS browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_post_object_authenticated_request():
+    kms_keyid = get_main_kms_keyid()
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [\
+    {"bucket": bucket_name},\
+    ["starts-with", "$key", "foo"],\
+    {"acl": "private"},\
+    ["starts-with", "$Content-Type", "text/plain"],\
+    ["starts-with", "$x-amz-server-side-encryption", ""], \
+    ["starts-with", "$x-amz-server-side-encryption-aws-kms-key-id", ""], \
+    ["content-length-range", 0, 1024]\
+    ]\
+    }
+
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('x-amz-server-side-encryption', 'aws:kms'), \
+    ('x-amz-server-side-encryption-aws-kms-key-id', kms_keyid), \
+    ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-KMS encrypted transfer 1 byte')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_transfer_1b():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        raise SkipTest
+    _test_sse_kms_customer_write(1, key_id = kms_keyid)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-KMS encrypted transfer 1KB')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_transfer_1kb():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        raise SkipTest
+    _test_sse_kms_customer_write(1024, key_id = kms_keyid)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-KMS encrypted transfer 1MB')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_transfer_1MB():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        raise SkipTest
+    _test_sse_kms_customer_write(1024*1024, key_id = kms_keyid)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test SSE-KMS encrypted transfer 13 bytes')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_transfer_13b():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        raise SkipTest
+    _test_sse_kms_customer_write(13, key_id = kms_keyid)
+
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='write unencrypted and attempt to read with SSE-KMS headers declared')
+@attr(assertion='operation fails')
+@attr('encryption')
+def test_sse_kms_read_declare():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    sse_kms_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1'
+    }
+    data = 'A'*100
+    key = 'testobj'
+
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_kms_client_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy')
+@attr(assertion='succeeds')
+@attr('bucket-policy')
+def test_bucket_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects(Bucket=bucket_name)
+    eq(len(response['Contents']), 1)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy with list-objects-v2')
+@attr(assertion='succeeds')
+@attr('bucket-policy')
+@attr('list-objects-v2')
+def test_bucketv2_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects_v2(Bucket=bucket_name)
+    eq(len(response['Contents']), 1)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy and ACL')
+@attr(assertion='fails')
+@attr('bucket-policy')
+def test_bucket_policy_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document =  json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Deny",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    e = assert_raises(ClientError, alt_client.list_objects, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    client.delete_bucket_policy(Bucket=bucket_name)
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy and ACL with list-objects-v2')
+@attr(assertion='fails')
+@attr('bucket-policy')
+@attr('list-objects-v2')
+def test_bucketv2_policy_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document =  json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Deny",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    e = assert_raises(ClientError, alt_client.list_objects_v2, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    client.delete_bucket_policy(Bucket=bucket_name)
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
+@attr(assertion='succeeds')
+@attr('bucket-policy')
+# TODO: remove this fails_on_rgw when I fix it
+@attr('fails_on_rgw')
+def test_bucket_policy_different_tenant():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3::*:" + bucket_name
+    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    # TODO: figure out how to change the bucketname
+    def change_bucket_name(**kwargs):
+        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
+        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
+        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
+        print(kwargs['request_signer'])
+        print(kwargs)
+
+    #bucket_name = ":" + bucket_name
+    tenant_client = get_tenant_client()
+    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
+    response = tenant_client.list_objects(Bucket=bucket_name)
+    #alt_client = get_alt_client()
+    #response = alt_client.list_objects(Bucket=bucket_name)
+
+    eq(len(response['Contents']), 1)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy for a user belonging to a different tenant')
+@attr(assertion='succeeds')
+@attr('bucket-policy')
+# TODO: remove this fails_on_rgw when I fix it
+@attr('fails_on_rgw')
+@attr('list-objects-v2')
+def test_bucketv2_policy_different_tenant():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+
+    resource1 = "arn:aws:s3::*:" + bucket_name
+    resource2 = "arn:aws:s3::*:" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    # TODO: figure out how to change the bucketname
+    def change_bucket_name(**kwargs):
+        kwargs['params']['url'] = "http://localhost:8000/:{bucket_name}?encoding-type=url".format(bucket_name=bucket_name)
+        kwargs['params']['url_path'] = "/:{bucket_name}".format(bucket_name=bucket_name)
+        kwargs['params']['context']['signing']['bucket'] = ":{bucket_name}".format(bucket_name=bucket_name)
+        print(kwargs['request_signer'])
+        print(kwargs)
+
+    #bucket_name = ":" + bucket_name
+    tenant_client = get_tenant_client()
+    tenant_client.meta.events.register('before-call.s3.ListObjects', change_bucket_name)
+    response = tenant_client.list_objects_v2(Bucket=bucket_name)
+    #alt_client = get_alt_client()
+    #response = alt_client.list_objects_v2(Bucket=bucket_name)
+
+    eq(len(response['Contents']), 1)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy on another bucket')
+@attr(assertion='succeeds')
+@attr('bucket-policy')
+def test_bucket_policy_another_bucket():
+    bucket_name = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    key2 = 'abcd'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+    client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "arn:aws:s3:::*",
+            "arn:aws:s3:::*/*"
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    response = client.get_bucket_policy(Bucket=bucket_name)
+    response_policy = response['Policy']
+
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects(Bucket=bucket_name)
+    eq(len(response['Contents']), 1)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects(Bucket=bucket_name2)
+    eq(len(response['Contents']), 1)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test Bucket Policy on another bucket with list-objects-v2')
+@attr(assertion='succeeds')
+@attr('bucket-policy')
+@attr('list-objects-v2')
+def test_bucketv2_policy_another_bucket():
+    bucket_name = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+    client = get_client()
+    key = 'asdf'
+    key2 = 'abcd'
+    client.put_object(Bucket=bucket_name, Key=key, Body='asdf')
+    client.put_object(Bucket=bucket_name2, Key=key2, Body='abcd')
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "arn:aws:s3:::*",
+            "arn:aws:s3:::*/*"
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    response = client.get_bucket_policy(Bucket=bucket_name)
+    response_policy = response['Policy']
+
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=response_policy)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects_v2(Bucket=bucket_name)
+    eq(len(response['Contents']), 1)
+
+    alt_client = get_alt_client()
+    response = alt_client.list_objects_v2(Bucket=bucket_name2)
+    eq(len(response['Contents']), 1)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put condition operator ending with IfExists')
+@attr('bucket-policy')
+# TODO: remove this fails_on_rgw when I fix it
+@attr('fails_on_rgw')
+def test_bucket_policy_set_condition_operator_end_with_IfExists():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'foo'
+    client.put_object(Bucket=bucket_name, Key=key)
+    policy = '''{
+      "Version":"2012-10-17",
+      "Statement": [{
+        "Sid": "Allow Public Access to All Objects",
+        "Effect": "Allow",
+        "Principal": "*",
+        "Action": "s3:GetObject",
+        "Condition": {
+                    "StringLikeIfExists": {
+                        "aws:Referer": "http://www.example.com/*"
+                    }
+                },
+        "Resource": "arn:aws:s3:::%s/*"
+      }
+     ]
+    }''' % bucket_name
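+    # with the IfExists suffix, the condition also evaluates to true when
+    # aws:Referer is absent from the request, not only when it matches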
+    # boto3.set_stream_logger(name='botocore')
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy)
+
+    request_headers={'referer': 'http://www.example.com/'}
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    request_headers={'referer': 'http://www.example.com/index.html'}
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    # the 'referer' headers need to be removed for this one
+    #response = client.get_object(Bucket=bucket_name, Key=key)
+    #eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    request_headers={'referer': 'http://example.com'}
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(request_headers))
+    client.meta.events.register('before-call.s3.GetObject', lf)
+
+    # TODO: Compare Requests sent in Boto3, Wireshark, RGW Log for both boto and boto3
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    response =  client.get_bucket_policy(Bucket=bucket_name)
+    print(response)
+
+def _create_simple_tagset(count):
+    tagset = []
+    for i in range(count):
+        tagset.append({'Key': str(i), 'Value': str(i)})
+
+    return {'TagSet': tagset}
+
+def _make_random_string(size):
+    return ''.join(random.choice(string.ascii_letters) for _ in range(size))
+
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test Get/PutObjTagging output')
+@attr(assertion='success')
+@attr('tagging')
+@attr('fails_on_dbstore')
+def test_get_obj_tagging():
+    key = 'testputtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(2)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['TagSet'], input_tagset['TagSet'])
+
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test HEAD obj tagging output')
+@attr(assertion='success')
+@attr('tagging')
+def test_get_obj_head_tagging():
+    key = 'testputtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+    count = 2
+
+    input_tagset = _create_simple_tagset(count)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-tagging-count'], str(count))
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test Put max allowed tags')
+@attr(assertion='success')
+@attr('tagging')
+@attr('fails_on_dbstore')
+def test_put_max_tags():
+    key = 'testputmaxtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(10)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['TagSet'], input_tagset['TagSet'])
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test Put more than max allowed tags')
+@attr(assertion='fails')
+@attr('tagging')
+def test_put_excess_tags():
+    key = 'testputmaxtags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(11)
+    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidTag')
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(len(response['TagSet']), 0)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test Put max allowed k-v size')
+@attr(assertion='success')
+@attr('tagging')
+def test_put_max_kvsize_tags():
+    key = 'testputmaxkeysize'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    for i in range(10):
+        k = _make_random_string(128)
+        v = _make_random_string(256)
+        tagset.append({'Key': k, 'Value': v})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    for kv_pair in response['TagSet']:
+        eq((kv_pair in input_tagset['TagSet']), True)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test exceed key size')
+@attr(assertion='fails')
+@attr('tagging')
+def test_put_excess_key_tags():
+    key = 'testputexcesskeytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    for i in range(10):
+        k = _make_random_string(129)
+        v = _make_random_string(256)
+        tagset.append({'Key': k, 'Value': v})
+
+    input_tagset = {'TagSet': tagset}
+
+    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidTag')
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(len(response['TagSet']), 0)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test exceed val size')
+@attr(assertion='fails')
+@attr('tagging')
+def test_put_excess_val_tags():
+    key = 'testputexcesskeytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    for i in range(10):
+        k = _make_random_string(128)
+        v = _make_random_string(257)
+        tagset.append({'Key': k, 'Value': v})
+
+    input_tagset = {'TagSet': tagset}
+
+    e = assert_raises(ClientError, client.put_object_tagging, Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidTag')
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(len(response['TagSet']), 0)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test PUT modifies existing tags')
+@attr(assertion='success')
+@attr('tagging')
+@attr('fails_on_dbstore')
+def test_put_modify_tags():
+    key = 'testputmodifytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    tagset = []
+    tagset.append({'Key': 'key', 'Value': 'val'})
+    tagset.append({'Key': 'key2', 'Value': 'val2'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['TagSet'], input_tagset['TagSet'])
+
+    tagset2 = []
+    tagset2.append({'Key': 'key3', 'Value': 'val3'})
+
+    input_tagset2 = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset2)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['TagSet'], input_tagset2['TagSet'])
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test Delete tags')
+@attr(assertion='success')
+@attr('tagging')
+@attr('fails_on_dbstore')
+def test_put_delete_tags():
+    key = 'testputmodifytags'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    input_tagset = _create_simple_tagset(2)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['TagSet'], input_tagset['TagSet'])
+
+    response = client.delete_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(len(response['TagSet']), 0)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='anonymous browser based upload via POST request')
+@attr('tagging')
+@attr(assertion='succeeds and returns written data')
+@attr('fails_on_dbstore')
+def test_post_object_tags_anonymous_request():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    url = _get_post_url(bucket_name)
+    client.create_bucket(ACL='public-read-write', Bucket=bucket_name)
+
+    key_name = "foo.txt"
+    input_tagset = _create_simple_tagset(2)
+    # xml_input_tagset is the same as input_tagset in xml.
+    # There is no simple way to convert input_tagset to xml like there was in the boto2 tests.
+    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
+
+
+    payload = OrderedDict([
+        ("key" , key_name),
+        ("acl" , "public-read"),
+        ("Content-Type" , "text/plain"),
+        ("tagging", xml_input_tagset),
+        ('file', ('bar')),
+    ])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key=key_name)
+    body = _get_body(response)
+    eq(body, 'bar')
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key_name)
+    eq(response['TagSet'], input_tagset['TagSet'])
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated browser based upload via POST request')
+@attr('tagging')
+@attr(assertion='succeeds and returns written data')
+def test_post_object_tags_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {"expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),\
+    "conditions": [
+    {"bucket": bucket_name},
+        ["starts-with", "$key", "foo"],
+        {"acl": "private"},
+        ["starts-with", "$Content-Type", "text/plain"],
+        ["content-length-range", 0, 1024],
+        ["starts-with", "$tagging", ""]
+    ]}
+
+    # xml_input_tagset is the same as `input_tagset = _create_simple_tagset(2)` in xml.
+    # There is no simple way to convert input_tagset to xml like there was in the boto2 tests.
+    xml_input_tagset = "<Tagging><TagSet><Tag><Key>0</Key><Value>0</Value></Tag><Tag><Key>1</Key><Value>1</Value></Tag></TagSet></Tagging>"
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([
+        ("key", "foo.txt"),
+        ("AWSAccessKeyId", aws_access_key_id),
+        ("acl", "private"), ("signature", signature), ("policy", policy),
+        ("tagging", xml_input_tagset),
+        ("Content-Type", "text/plain"),
+        ('file', ('bar'))])
+
+    r = requests.post(url, files=payload, verify=get_config_ssl_verify())
+    eq(r.status_code, 204)
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test PutObj with tagging headers')
+@attr(assertion='success')
+@attr('tagging')
+@attr('fails_on_dbstore')
+def test_put_obj_with_tags():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'testtagobj1'
+    data = 'A'*100
+
+    tagset = []
+    tagset.append({'Key': 'bar', 'Value': ''})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    put_obj_tag_headers = {
+        'x-amz-tagging' : 'foo=bar&bar'
+    }
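+    # x-amz-tagging carries tags as a query string; 'foo=bar&bar' yields two
+    # tags, with 'bar' taking an empty value, matching tagset above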
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(put_obj_tag_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+    response = client.get_object(Bucket=bucket_name, Key=key)
+    body = _get_body(response)
+    eq(body, data)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    response_tagset = response['TagSet']
+    eq(response_tagset, tagset)
+
+def _make_arn_resource(path="*"):
+    return "arn:aws:s3:::{}".format(path)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test GetObjTagging public read')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_get_tags_acl_public():
+    key = 'testputtagsacl'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
+    policy_document = make_json_policy("s3:GetObjectTagging",
+                                       resource)
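+    # make_json_policy (a helper defined earlier in this module) presumably
+    # wraps the action/resource pair in a single Allow statement with a
+    # public principal, like the hand-written policies above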
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    input_tagset = _create_simple_tagset(10)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    alt_client = get_alt_client()
+
+    response = alt_client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['TagSet'], input_tagset['TagSet'])
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test PutObjTagging public write')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_put_tags_acl_public():
+    key = 'testputtagsacl'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
+    policy_document = make_json_policy("s3:PutObjectTagging",
+                                       resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    input_tagset = _create_simple_tagset(10)
+    alt_client = get_alt_client()
+    response = alt_client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['TagSet'], input_tagset['TagSet'])
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test DeleteObjTagging public')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+def test_delete_tags_obj_public():
+    key = 'testputtagsacl'
+    bucket_name = _create_key_with_random_content(key)
+    client = get_client()
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, key))
+    policy_document = make_json_policy("s3:DeleteObjectTagging",
+                                       resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    input_tagset = _create_simple_tagset(10)
+    response = client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    alt_client = get_alt_client()
+
+    response = alt_client.delete_object_tagging(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    response = client.get_object_tagging(Bucket=bucket_name, Key=key)
+    eq(len(response['TagSet']), 0)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='test whether a correct version-id returned')
+@attr(assertion='version-id is same as bucket list')
+@attr('versioning')
+def test_versioning_bucket_atomic_upload_return_version_id():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'bar'
+
+    # for a versioning-enabled bucket, a non-empty version-id should be returned
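+    # check_configure_versioning_retry presumably applies the versioning
+    # configuration and re-reads the status until it matches, since the
+    # change can take effect asynchronously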
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+    response = client.put_object(Bucket=bucket_name, Key=key)
+    version_id = response['VersionId']
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    for version in versions:
+        eq(version['VersionId'], version_id)
+
+
+    # for a versioning-default bucket, no version-id should be returned.
+    bucket_name = get_new_bucket()
+    key = 'baz'
+    response = client.put_object(Bucket=bucket_name, Key=key)
+    eq(('VersionId' in response), False)
+
+    # for a versioning-suspended bucket, no version-id should be returned.
+    bucket_name = get_new_bucket()
+    key = 'baz'
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+    response = client.put_object(Bucket=bucket_name, Key=key)
+    eq(('VersionId' in response), False)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='test whether a correct version-id returned')
+@attr(assertion='version-id is same as bucket list')
+@attr('versioning')
+def test_versioning_bucket_multipart_upload_return_version_id():
+    content_type='text/bla'
+    objlen = 30 * 1024 * 1024
+
+    bucket_name = get_new_bucket()
+    client = get_client()
+    key = 'bar'
+    metadata={'foo': 'baz'}
+
+    # for a versioning-enabled bucket, a non-empty version-id should be returned
+    check_configure_versioning_retry(bucket_name, "Enabled", "Enabled")
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    version_id = response['VersionId']
+
+    response  = client.list_object_versions(Bucket=bucket_name)
+    versions = response['Versions']
+    for version in versions:
+        eq(version['VersionId'], version_id)
+
+    # for a versioning-default bucket, no version-id should be returned.
+    bucket_name = get_new_bucket()
+    key = 'baz'
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    eq(('VersionId' in response), False)
+
+    # for a versioning-suspended bucket, no version-id should be returned
+    bucket_name = get_new_bucket()
+    key = 'foo'
+    check_configure_versioning_retry(bucket_name, "Suspended", "Suspended")
+
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client, content_type=content_type, metadata=metadata)
+
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    eq(('VersionId' in response), False)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test ExistingObjectTag conditional on get object')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_get_obj_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
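+    # s3:ExistingObjectTag/<key> is evaluated against tags already present on
+    # the object, so only 'publictag' (security=public) satisfies the policy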
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    tagset3 = []
+    tagset3.append({'Key': 'security1', 'Value': 'public'})
+
+    input_tagset = {'TagSet': tagset3}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    alt_client = get_alt_client()
+    response = alt_client.get_object(Bucket=bucket_name, Key='publictag')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='privatetag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='invalidtag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test ExistingObjectTag conditional on get object tagging')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_get_obj_tagging_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObjectTagging",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    tagset3 = []
+    tagset3.append({'Key': 'security1', 'Value': 'public'})
+
+    input_tagset = {'TagSet': tagset3}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    alt_client = get_alt_client()
+    response = alt_client.get_object_tagging(Bucket=bucket_name, Key='publictag')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    # A get object itself should fail since we allowed only GetObjectTagging
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test ExistingObjectTag conditional on put object tagging')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_put_obj_tagging_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:PutObjectTagging",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    alt_client = get_alt_client()
+    # PUT requests with object tagging are a bit weird: if you overwrite the
+    # tag that the policy condition requires to exist, subsequent put
+    # tagging requests from this user will fail
+
+    testtagset1 = []
+    testtagset1.append({'Key': 'security', 'Value': 'public'})
+    testtagset1.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': testtagset1}
+
+    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    testtagset2 = []
+    testtagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': testtagset2}
+
+    response = alt_client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    # Now try putting the original tags again, this should fail
+    input_tagset = {'TagSet': testtagset1}
+
+    e = assert_raises(ClientError, alt_client.put_object_tagging, Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test copy-source conditional on put obj')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_put_obj_copy_source():
+    bucket_name = _create_objects(keys=['public/foo', 'public/bar', 'private/foo'])
+    client = get_client()
+
+    src_resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       src_resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    bucket_name2 = get_new_bucket()
+
+    tag_conditional = {"StringLike": {
+        "s3:x-amz-copy-source" : bucket_name + "/public/*"
+    }}
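+    # s3:x-amz-copy-source matches the copy source path, so only copies out
+    # of bucket_name's public/ prefix are allowed into bucket_name2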
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
+    policy_document = make_json_policy("s3:PutObject",
+                                       resource,
+                                       conditions=tag_conditional)
+
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    copy_source = {'Bucket': bucket_name, 'Key': 'public/foo'}
+
+    alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo')
+
+    # This is possible because we are still the owner, see the grants with
+    # policy on how to do this right
+    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo')
+    body = _get_body(response)
+    eq(body, 'public/foo')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'public/bar'}
+    alt_client.copy_object(Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
+
+    response = alt_client.get_object(Bucket=bucket_name2, Key='new_foo2')
+    body = _get_body(response)
+    eq(body, 'public/bar')
+
+    copy_source = {'Bucket': bucket_name, 'Key': 'private/foo'}
+    check_access_denied(alt_client.copy_object, Bucket=bucket_name2, CopySource=copy_source, Key='new_foo2')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test copy-source conditional on put obj')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_put_obj_copy_source_meta():
+    src_bucket_name = _create_objects(keys=['public/foo', 'public/bar'])
+    client = get_client()
+
+    src_resource = _make_arn_resource("{}/{}".format(src_bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       src_resource)
+
+    client.put_bucket_policy(Bucket=src_bucket_name, Policy=policy_document)
+
+    bucket_name = get_new_bucket()
+
+    tag_conditional = {"StringEquals": {
+        "s3:x-amz-metadata-directive" : "COPY"
+    }}
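+    # only copies carrying x-amz-metadata-directive: COPY (preserve the
+    # source object's metadata) are allowed by this policy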
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:PutObject",
+                                       resource,
+                                       conditions=tag_conditional)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-metadata-directive": "COPY"}))
+    alt_client.meta.events.register('before-call.s3.CopyObject', lf)
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': 'public/foo'}
+    alt_client.copy_object(Bucket=bucket_name, CopySource=copy_source, Key='new_foo')
+
+    # This is possible because we are still the owner, see the grants with
+    # policy on how to do this right
+    response = alt_client.get_object(Bucket=bucket_name, Key='new_foo')
+    body = _get_body(response)
+    eq(body, 'public/foo')
+
+    # remove the x-amz-metadata-directive header
+    def remove_header(**kwargs):
+        if ("x-amz-metadata-directive" in kwargs['params']['headers']):
+            del kwargs['params']['headers']["x-amz-metadata-directive"]
+
+    alt_client.meta.events.register('before-call.s3.CopyObject', remove_header)
+
+    copy_source = {'Bucket': src_bucket_name, 'Key': 'public/bar'}
+    check_access_denied(alt_client.copy_object, Bucket=bucket_name, CopySource=copy_source, Key='new_foo2', Metadata={"foo": "bar"})
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test put obj with canned-acl not to be public')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+def test_bucket_policy_put_obj_acl():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    # An Allow conditional would require at least the presence of an x-amz-acl
+    # attribute; a Deny conditional instead rejects any request that tries to
+    # set a public-read/public-write acl
+    conditional = {"StringLike": {
+        "s3:x-amz-acl" : "public*"
+    }}
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    s1 = Statement("s3:PutObject",resource)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=conditional)
+
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
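+    # for illustration, the generated document is roughly the following,
+    # assuming the Policy/Statement helpers emit a standard 2012-10-17 policy
+    # with a wildcard principal:
+    #   {"Version": "2012-10-17",
+    #    "Statement": [
+    #     {"Effect": "Allow", "Principal": {"AWS": "*"},
+    #      "Action": "s3:PutObject", "Resource": "arn:aws:s3:::<bucket>/*"},
+    #     {"Effect": "Deny", "Principal": {"AWS": "*"},
+    #      "Action": "s3:PutObject", "Resource": "arn:aws:s3:::<bucket>/*",
+    #      "Condition": {"StringLike": {"s3:x-amz-acl": "public*"}}}]}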
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    key1 = 'private-key'
+
+    # to be really pedantic we could assert that this doesn't raise and flag
+    # a failure explicitly; however, if it does raise, nosetests reports the
+    # test as an ERROR anyway
+    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
+    #response = alt_client.put_object_acl(Bucket=bucket_name, Key=key1, ACL='private')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    key2 = 'public-key'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-acl": "public-read"}))
+    alt_client.meta.events.register('before-call.s3.PutObject', lf)
+
+    e = assert_raises(ClientError, alt_client.put_object, Bucket=bucket_name, Key=key2, Body=key2)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test put obj with amz-grant back to bucket-owner')
+@attr(assertion='success')
+@attr('bucket-policy')
+def test_bucket_policy_put_obj_grant():
+
+    bucket_name = get_new_bucket()
+    bucket_name2 = get_new_bucket()
+    client = get_client()
+
+    # Normally the owner of a key is its uploader. For the first bucket we
+    # explicitly require that the bucket owner is granted full control over
+    # any object uploaded by any user; on the second bucket no such policy is
+    # enforced, so the uploader retains ownership
+
+    main_user_id = get_main_user_id()
+    alt_user_id = get_alt_user_id()
+
+    owner_id_str = "id=" + main_user_id
+    s3_conditional = {"StringEquals": {
+        "s3:x-amz-grant-full-control" : owner_id_str
+    }}
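+    # the x-amz-grant-full-control header names grantees as
+    # "id=<canonical-user-id>" (AWS also accepts emailAddress= and uri=
+    # forms); the condition requires exactly this grant to be present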
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:PutObject",
+                                       resource,
+                                       conditions=s3_conditional)
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name2, "*"))
+    policy_document2 = make_json_policy("s3:PutObject", resource)
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    client.put_bucket_policy(Bucket=bucket_name2, Policy=policy_document2)
+
+    alt_client = get_alt_client()
+    key1 = 'key1'
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update({"x-amz-grant-full-control" : owner_id_str}))
+    alt_client.meta.events.register('before-call.s3.PutObject', lf)
+
+    response = alt_client.put_object(Bucket=bucket_name, Key=key1, Body=key1)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    def remove_header(**kwargs):
+        if ("x-amz-grant-full-control" in kwargs['params']['headers']):
+            del kwargs['params']['headers']["x-amz-grant-full-control"]
+
+    alt_client.meta.events.register('before-call.s3.PutObject', remove_header)
+
+    key2 = 'key2'
+    response = alt_client.put_object(Bucket=bucket_name2, Key=key2, Body=key2)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    acl1_response = client.get_object_acl(Bucket=bucket_name, Key=key1)
+
+    # the main user tries to read the acl of the object in the second bucket,
+    # where ownership wasn't transferred to them
+    check_access_denied(client.get_object_acl, Bucket=bucket_name2, Key=key2)
+
+    acl2_response = alt_client.get_object_acl(Bucket=bucket_name2, Key=key2)
+
+    eq(acl1_response['Grants'][0]['Grantee']['ID'], main_user_id)
+    eq(acl2_response['Grants'][0]['Grantee']['ID'], alt_user_id)
+
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj specifying both sse-c and sse-s3')
+@attr(assertion='success')
+@attr('encryption')
+def test_put_obj_enc_conflict_c_s3():
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str ='testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'AES256',
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
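+    # SSE-C supplies the key material itself: -customer-key is the base64 of
+    # a 256-bit key and -customer-key-md5 the base64 MD5 digest of that key;
+    # pairing these with the SSE-S3 header above is the conflict under test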
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj specifying both sse-c and sse-kms')
+@attr(assertion='success')
+@attr('encryption')
+def test_put_obj_enc_conflict_c_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-once'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str ='testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid,
+        'x-amz-server-side-encryption-customer-algorithm': 'AES256',
+        'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=',
+        'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw=='
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj specifying sse-s3 with kms key id')
+@attr(assertion='success')
+@attr('encryption')
+def test_put_obj_enc_conflict_s3_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-once'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str ='testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'AES256',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj specifying invalid algorithm')
+@attr(assertion='success')
+@attr('encryption')
+def test_put_obj_enc_conflict_bad_enc_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-once'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    key1_str ='testobj'
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'aes:kms',    # aes != aws
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key=key1_str)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidArgument')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj requests if not sse-s3: without encryption')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-policy')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_bucket_policy_put_obj_s3_noenc():
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "AES256"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
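+    # the IAM "Null" condition is true when the named key is absent from the
+    # request, so the second statement denies any PutObject that carries no
+    # x-amz-server-side-encryption header at all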
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str ='testobj'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+
+    # doing this here breaks the next request with a 400 (non-sse bug); do it last
+    #check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+    # TODO: why is this a 400 and not passing? it appears boto3 is not parsing the 200 response the rgw sends back properly
+    # DEBUGGING: run with boto2 and compare the requests
+    # DEBUGGING: try running this with v2 auth (figure out why get_v2_client isn't working) to make the requests similar to what boto2 sends
+    # DEBUGGING: try adding other options to put_object to see if that makes the response better
+
+    # first validate that writing a sse-s3 object works
+    response = client.put_object(Bucket=bucket_name, Key=key1_str, ServerSideEncryption='AES256')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+
+    # then validate that a non-encrypted object fails.
+    # (this also breaks the connection--non-sse bug, probably because the server
+    #  errors out before it consumes the data...)
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj requests if not sse-s3: kms')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-policy')
+@attr('sse-s3')
+def test_bucket_policy_put_obj_s3_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-twice'
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "AES256"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str ='testobj'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption': 'aws:kms',
+        'x-amz-server-side-encryption-aws-kms-key-id': kms_keyid
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj requests if not sse-kms: without encryption')
+@attr(assertion='success')
+@attr('encryption')
+@attr('fails_on_dbstore')
+@attr('bucket-policy')
+def test_bucket_policy_put_obj_kms_noenc():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        raise SkipTest
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "aws:kms"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str ='testobj'
+    key2_str ='unicorn'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+    # must do check_access_denied last - otherwise, pending data
+    #  breaks next call...
+    response = client.put_object(Bucket=bucket_name, Key=key1_str,
+         ServerSideEncryption='aws:kms', SSEKMSKeyId=kms_keyid)
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
+
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key2_str, Body=key2_str)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Deny put obj requests if not sse-kms: s3')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-policy')
+def test_bucket_policy_put_obj_kms_s3():
+    bucket_name = get_new_bucket()
+    client = get_v2_client()
+
+    deny_incorrect_algo = {
+        "StringNotEquals": {
+          "s3:x-amz-server-side-encryption": "aws:kms"
+        }
+    }
+
+    deny_unencrypted_obj = {
+        "Null" : {
+          "s3:x-amz-server-side-encryption": "true"
+        }
+    }
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_incorrect_algo)
+    s2 = Statement("s3:PutObject", resource, effect="Deny", condition=deny_unencrypted_obj)
+    policy_document = p.add_statement(s1).add_statement(s2).to_json()
+
+    # boto3.set_stream_logger(name='botocore')
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    key1_str ='testobj'
+
+    #response = client.get_bucket_policy(Bucket=bucket_name)
+    #print response
+
+    sse_client_headers = {
+        'x-amz-server-side-encryption' : 'AES256',
+    }
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_client_headers))
+    client.meta.events.register('before-call.s3.PutObject', lf)
+    check_access_denied(client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='put obj with RequestObjectTag')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+# TODO: remove this fails_on_rgw when I fix it
+@attr('fails_on_rgw')
+def test_bucket_policy_put_obj_request_obj_tag():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:RequestObjectTag/security" : "public"
+    }}
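+    # s3:RequestObjectTag/<key> inspects tags carried on the incoming
+    # request; on PutObject they ride in the x-amz-tagging header as a query
+    # string, e.g. "security=public&foo=bar", which is what gets injected
+    # below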
+
+    p = Policy()
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    s1 = Statement("s3:PutObject", resource, effect="Allow", condition=tag_conditional)
+    policy_document = p.add_statement(s1).to_json()
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+
+    alt_client = get_alt_client()
+    key1_str ='testobj'
+    check_access_denied(alt_client.put_object, Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+    headers = {"x-amz-tagging" : "security=public"}
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(headers))
+    # note: register the hook on the client that actually issues the request
+    # (alt_client), otherwise the tagging header is never sent
+    alt_client.meta.events.register('before-call.s3.PutObject', lf)
+    # TODO: why is this a 400 and not passing
+    alt_client.put_object(Bucket=bucket_name, Key=key1_str, Body=key1_str)
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='Test ExistingObjectTag conditional on get object acl')
+@attr(assertion='success')
+@attr('tagging')
+@attr('bucket-policy')
+@attr('fails_on_dbstore')
+def test_bucket_policy_get_obj_acl_existing_tag():
+    bucket_name = _create_objects(keys=['publictag', 'privatetag', 'invalidtag'])
+    client = get_client()
+
+    tag_conditional = {"StringEquals": {
+        "s3:ExistingObjectTag/security" : "public"
+    }}
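+    # unlike s3:RequestObjectTag, s3:ExistingObjectTag/<key> is evaluated
+    # against the tags already stored on the object being accessed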
+
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObjectAcl",
+                                       resource,
+                                       conditions=tag_conditional)
+
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    tagset = []
+    tagset.append({'Key': 'security', 'Value': 'public'})
+    tagset.append({'Key': 'foo', 'Value': 'bar'})
+
+    input_tagset = {'TagSet': tagset}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='publictag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    tagset2 = []
+    tagset2.append({'Key': 'security', 'Value': 'private'})
+
+    input_tagset = {'TagSet': tagset2}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='privatetag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    tagset3 = []
+    tagset3.append({'Key': 'security1', 'Value': 'public'})
+
+    input_tagset = {'TagSet': tagset3}
+
+    response = client.put_object_tagging(Bucket=bucket_name, Key='invalidtag', Tagging=input_tagset)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    alt_client = get_alt_client()
+    response = alt_client.get_object_acl(Bucket=bucket_name, Key='publictag')
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    # A get object itself should fail since we allowed only GetObjectAcl
+    e = assert_raises(ClientError, alt_client.get_object, Bucket=bucket_name, Key='publictag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='privatetag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, alt_client.get_object_tagging, Bucket=bucket_name, Key='invalidtag')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object lock with default retention')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_lock():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
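+    # boto3 serializes this dict into the ObjectLockConfiguration XML body,
+    # roughly:
+    #   <ObjectLockConfiguration>
+    #     <ObjectLockEnabled>Enabled</ObjectLockEnabled>
+    #     <Rule><DefaultRetention><Mode>GOVERNANCE</Mode><Days>1</Days>
+    #     </DefaultRetention></Rule>
+    #   </ObjectLockConfiguration>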
+    response = client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'COMPLIANCE',
+                    'Years':1
+                }
+            }}
+    response = client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+    response = client.get_bucket_versioning(Bucket=bucket_name)
+    eq(response['Status'], 'Enabled')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object lock with bucket object lock not enabled')
+@attr(assertion='fails')
+@attr('object-lock')
+def test_object_lock_put_obj_lock_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 409)
+    eq(error_code, 'InvalidBucketState')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object lock with days and years')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_lock_with_days_and_years():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1,
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object lock with invalid days')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_lock_invalid_days():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':0
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidRetentionPeriod')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object lock with invalid years')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_lock_invalid_years():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Years':-1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidRetentionPeriod')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object lock with invalid mode')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_lock_invalid_mode():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'abc',
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'governance',
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object lock with invalid status')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_lock_invalid_status():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Disabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Years':1
+                }
+            }}
+    e = assert_raises(ClientError, client.put_object_lock_configuration, Bucket=bucket_name, ObjectLockConfiguration=conf)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test suspend versioning when object lock enabled')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_suspend_versioning():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    e = assert_raises(ClientError, client.put_bucket_versioning, Bucket=bucket_name, VersioningConfiguration={'Status': 'Suspended'})
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 409)
+    eq(error_code, 'InvalidBucketState')
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test get object lock')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_get_obj_lock():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
+    client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    response = client.get_object_lock_configuration(Bucket=bucket_name)
+    eq(response['ObjectLockConfiguration'], conf)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test get object lock with bucket object lock not enabled')
+@attr(assertion='fails')
+@attr('object-lock')
+def test_object_lock_get_obj_lock_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    e = assert_raises(ClientError, client.get_object_lock_configuration, Bucket=bucket_name)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 404)
+    eq(error_code, 'ObjectLockConfigurationNotFoundError')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    response = client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention with bucket object lock not enabled')
+@attr(assertion='fails')
+@attr('object-lock')
+def test_object_lock_put_obj_retention_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidRequest')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention with invalid mode')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_retention_invalid_mode():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'governance', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+    retention = {'Mode':'abc', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test get object retention')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_get_obj_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    eq(response['Retention'], retention)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test object retention date formatting')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_get_obj_retention_iso8601():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    date = datetime.datetime.today() + datetime.timedelta(days=365)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate': date}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    client.meta.events.register('after-call.s3.HeadObject', get_http_response)
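+    # get_http_response is a suite helper hooked to botocore's after-call
+    # event; it stashes the raw HTTP response so the raw header can be
+    # checked below without boto3's response parsing in the way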
+    client.head_object(Bucket=bucket_name, VersionId=version_id, Key=key)
+    retain_date = http_response['headers']['x-amz-object-lock-retain-until-date']
+    isodate.parse_datetime(retain_date)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test get object retention with invalid bucket')
+@attr(assertion='fails')
+@attr('object-lock')
+def test_object_lock_get_obj_retention_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    e = assert_raises(ClientError, client.get_object_retention, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidRequest')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention with version id')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_retention_versionid():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id, Retention=retention)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key, VersionId=version_id)
+    eq(response['Retention'], retention)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention to override default retention')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_retention_override_default_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    conf = {'ObjectLockEnabled':'Enabled',
+            'Rule': {
+                'DefaultRetention':{
+                    'Mode':'GOVERNANCE',
+                    'Days':1
+                }
+            }}
+    client.put_object_lock_configuration(
+        Bucket=bucket_name,
+        ObjectLockConfiguration=conf)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    eq(response['Retention'], retention)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention to increase retention period')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_retention_increase_period():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention1 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention1)
+    retention2 = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention2)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    eq(response['Retention'], retention2)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention to shorten period')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_retention_shorten_period():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put object retention to shorten period with bypass header')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_obj_retention_shorten_period_bypass():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    version_id = response['VersionId']
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,3,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
+    response = client.get_object_retention(Bucket=bucket_name, Key=key)
+    eq(response['Retention'], retention)
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=version_id, BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='Test delete object with retention')
+@attr(assertion='retention period takes effect')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_delete_object_with_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='Test delete object with retention and delete marker')
+@attr(assertion='retention period takes effect')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_delete_object_with_retention_and_marker():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    del_response = client.delete_object(Bucket=bucket_name, Key=key)
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=del_response['VersionId'])
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+@attr(resource='object')
+@attr(method='delete')
+@attr(operation='Test multi-delete object with retention')
+@attr(assertion='retention period takes effect')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_multi_delete_object_with_retention():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key1 = 'file1'
+    key2 = 'file2'
+
+    response1 = client.put_object(Bucket=bucket_name, Body='abc', Key=key1)
+    response2 = client.put_object(Bucket=bucket_name, Body='abc', Key=key2)
+
+    versionId1 = response1['VersionId']
+    versionId2 = response2['VersionId']
+
+    # key1 is under retention, but key2 isn't.
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key1, Retention=retention)
+
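+    # DeleteObjects reports per-key outcomes rather than failing the whole
+    # request: locked keys surface under 'Errors' while the rest land in
+    # 'Deleted'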
+    delete_response = client.delete_objects(
+        Bucket=bucket_name,
+        Delete={
+            'Objects': [
+                {
+                    'Key': key1,
+                    'VersionId': versionId1
+                },
+                {
+                    'Key': key2,
+                    'VersionId': versionId2
+                }
+            ]
+        }
+    )
+
+    eq(len(delete_response['Deleted']), 1)
+    eq(len(delete_response['Errors']), 1)
+
+    failed_object = delete_response['Errors'][0]
+    eq(failed_object['Code'], 'AccessDenied')
+    eq(failed_object['Key'], key1)
+    eq(failed_object['VersionId'], versionId1)
+
+    deleted_object = delete_response['Deleted'][0]
+    eq(deleted_object['Key'], key2)
+    eq(deleted_object['VersionId'], versionId2)
+
+    delete_response = client.delete_objects(
+        Bucket=bucket_name,
+        Delete={
+            'Objects': [
+                {
+                    'Key': key1,
+                    'VersionId': versionId1
+                }
+            ]
+        },
+        BypassGovernanceRetention=True
+    )
+
+    assert( ('Errors' not in delete_response) or (len(delete_response['Errors']) == 0) )
+    eq(len(delete_response['Deleted']), 1)
+    deleted_object = delete_response['Deleted'][0]
+    eq(deleted_object['Key'], key1)
+    eq(deleted_object['VersionId'], versionId1)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put legal hold')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_legal_hold():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
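+    # a legal hold is an independent ON/OFF flag with no expiry date; unlike
+    # retention it stays in force until explicitly set back to OFF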
+    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    response = client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put legal hold with invalid bucket')
+@attr(assertion='fails')
+@attr('object-lock')
+def test_object_lock_put_legal_hold_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
+    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidRequest')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put legal hold with invalid status')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_put_legal_hold_invalid_status():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'abc'}
+    e = assert_raises(ClientError, client.put_object_legal_hold, Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'MalformedXML')
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test get legal hold')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_get_legal_hold():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
+    eq(response['LegalHold'], legal_hold)
+    legal_hold_off = {'Status': 'OFF'}
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold_off)
+    response = client.get_object_legal_hold(Bucket=bucket_name, Key=key)
+    eq(response['LegalHold'], legal_hold_off)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test get legal hold with invalid bucket')
+@attr(assertion='fails')
+@attr('object-lock')
+def test_object_lock_get_legal_hold_invalid_bucket():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    e = assert_raises(ClientError, client.get_object_legal_hold, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(error_code, 'InvalidRequest')
+
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='Test delete object with legal hold on')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_delete_object_with_legal_hold_on():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'ON'})
+    e = assert_raises(ClientError, client.delete_object, Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='Test delete object with legal hold off')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_delete_object_with_legal_hold_off():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    response = client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status': 'OFF'})
+    response = client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'])
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test get object metadata')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_get_obj_metadata():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key)
+    legal_hold = {'Status': 'ON'}
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold=legal_hold)
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':datetime.datetime(2030,1,1,tzinfo=pytz.UTC)}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention)
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    eq(response['ObjectLockMode'], retention['Mode'])
+    eq(response['ObjectLockRetainUntilDate'], retention['RetainUntilDate'])
+    eq(response['ObjectLockLegalHoldStatus'], legal_hold['Status'])
+
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='Test put legal hold and retention when uploading object')
+@attr(assertion='success')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_uploading_obj():
+    bucket_name = get_new_bucket_name()
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    key = 'file1'
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
+                      ObjectLockRetainUntilDate=datetime.datetime(2030,1,1,tzinfo=pytz.UTC), ObjectLockLegalHoldStatus='ON')
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    eq(response['ObjectLockMode'], 'GOVERNANCE')
+    eq(response['ObjectLockRetainUntilDate'], datetime.datetime(2030,1,1,tzinfo=pytz.UTC))
+    eq(response['ObjectLockLegalHoldStatus'], 'ON')
+    client.put_object_legal_hold(Bucket=bucket_name, Key=key, LegalHold={'Status':'OFF'})
+    client.delete_object(Bucket=bucket_name, Key=key, VersionId=response['VersionId'], BypassGovernanceRetention=True)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test changing object retention mode from GOVERNANCE to COMPLIANCE with bypass')
+@attr(assertion='succeeds')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_changing_mode_from_governance_with_bypass():
+    bucket_name = get_new_bucket_name()
+    key = 'file1'
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    # upload object with mode=GOVERNANCE
+    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
+                      ObjectLockRetainUntilDate=retain_until)
+    # change mode to COMPLIANCE
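+    # (GOVERNANCE can be overridden by a caller allowed to use
+    # BypassGovernanceRetention; COMPLIANCE can never be relaxed, which the
+    # following tests exercise)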
+    retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
+    client.put_object_retention(Bucket=bucket_name, Key=key, Retention=retention, BypassGovernanceRetention=True)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test changing object retention mode from GOVERNANCE to COMPLIANCE without bypass')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_changing_mode_from_governance_without_bypass():
+    bucket_name = get_new_bucket_name()
+    key = 'file1'
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    # upload object with mode=GOVERNANCE
+    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='GOVERNANCE',
+                      ObjectLockRetainUntilDate=retain_until)
+    # try to change mode to COMPLIANCE
+    retention = {'Mode':'COMPLIANCE', 'RetainUntilDate':retain_until}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test changing object retention mode from COMPLIANCE to GOVERNANCE')
+@attr(assertion='fails')
+@attr('object-lock')
+@attr('fails_on_dbstore')
+def test_object_lock_changing_mode_from_compliance():
+    bucket_name = get_new_bucket_name()
+    key = 'file1'
+    client = get_client()
+    client.create_bucket(Bucket=bucket_name, ObjectLockEnabledForBucket=True)
+    # upload object with mode=COMPLIANCE
+    retain_until = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=10)
+    client.put_object(Bucket=bucket_name, Body='abc', Key=key, ObjectLockMode='COMPLIANCE',
+                      ObjectLockRetainUntilDate=retain_until)
+    # try to change mode to GOVERNANCE
+    retention = {'Mode':'GOVERNANCE', 'RetainUntilDate':retain_until}
+    e = assert_raises(ClientError, client.put_object_retention, Bucket=bucket_name, Key=key, Retention=retention)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+    eq(error_code, 'AccessDenied')
+
+@attr(resource='object')
+@attr(method='copy')
+@attr(operation='copy w/ x-amz-copy-source-if-match: the latest ETag')
+@attr(assertion='succeeds')
+@attr('fails_on_dbstore')
+def test_copy_object_ifmatch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch=resp['ETag'], Key='bar')
+    response = client.get_object(Bucket=bucket_name, Key='bar')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='copy')
+@attr(operation='copy w/ x-amz-copy-source-if-match: bogus ETag')
+@attr(assertion='fails 412')
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
+@attr('fails_on_rgw')
+def test_copy_object_ifmatch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfMatch='ABCORZ', Key='bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+@attr(resource='object')
+@attr(method='copy')
+@attr(operation='copy w/ x-amz-copy-source-if-none-match: the latest ETag')
+@attr(assertion='fails 412')
+# TODO: remove fails_on_rgw when https://tracker.ceph.com/issues/40808 is resolved
+@attr('fails_on_rgw')
+def test_copy_object_ifnonematch_good():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    e = assert_raises(ClientError, client.copy_object, Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch=resp['ETag'], Key='bar')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 412)
+    eq(error_code, 'PreconditionFailed')
+
+@attr(resource='object')
+@attr(method='copy')
+@attr(operation='copy w/ x-amz-copy-source-if-none-match: bogus ETag')
+@attr(assertion='succeeds')
+@attr('fails_on_dbstore')
+def test_copy_object_ifnonematch_failed():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.put_object(Bucket=bucket_name, Key='foo', Body='bar')
+
+    client.copy_object(Bucket=bucket_name, CopySource=bucket_name+'/foo', CopySourceIfNoneMatch='ABCORZ', Key='bar')
+    response = client.get_object(Bucket=bucket_name, Key='bar')
+    body = _get_body(response)
+    eq(body, 'bar')
+
+@attr(resource='object')
+@attr(method='get')
+@attr(operation='read an invalid key')
+@attr(assertion='fails 400')
+# TODO: results in a 404 instead of 400 on the RGW
+@attr('fails_on_rgw')
+def test_object_read_unreadable():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    e = assert_raises(ClientError, client.get_object, Bucket=bucket_name, Key='\xae\x8a-')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+    eq(e.response['Error']['Message'], 'Couldn\'t parse the specified URI.')
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='Test User Policy')
+@attr(assertion='succeeds')
+@attr('user-policy')
+def test_user_policy():
+    client = get_tenant_iam_client()
+
+    policy_document = json.dumps(
+    {"Version":"2012-10-17",
+     "Statement": {
+         "Effect":"Allow",
+         "Action":"*",
+         "Resource":"*"}}
+    )
+    client.put_user_policy(
+        PolicyDocument=policy_document,
+        PolicyName='AllAccessPolicy',
+        UserName=get_tenant_user_id(),
+    )
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket policy status on a new bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_get_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],False)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket policy status on a public acl bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_get_public_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],True)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket policy status on an authenticated acl bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_get_authpublic_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    client.put_bucket_acl(Bucket=bucket_name, ACL='authenticated-read')
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],True)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket policy status on a public policy bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_get_publicpolicy_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],False)
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],True)
+
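+# hypothetical helper (not used by the tests above) that captures the policy shape
+# these policy_status tests keep rebuilding: an anonymous-principal s3:ListBucket
+# grant over the bucket and its objects, which get_bucket_policy_status reports as
+# public unless a restricting condition is attached.
+def _make_public_list_policy_example(bucket_name):
+    return json.dumps({
+        "Version": "2012-10-17",
+        "Statement": [{
+            "Effect": "Allow",
+            "Principal": {"AWS": "*"},
+            "Action": "s3:ListBucket",
+            "Resource": [
+                "arn:aws:s3:::" + bucket_name,
+                "arn:aws:s3:::" + bucket_name + "/*"
+            ]
+        }]
+    })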
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket policy status on a nonpublic policy bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_get_nonpublicpolicy_acl_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],False)
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "*"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ],
+        "Condition": {
+            "IpAddress":
+            {"aws:SourceIp": "10.0.0.0/32"}
+        }
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],False)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket policy status on a public policy bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_get_nonpublicpolicy_deny_bucket_policy_status():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],False)
+
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "NotPrincipal": {"AWS": "arn:aws:iam::s3tenant1:root"},
+        "Action": "s3:ListBucket",
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ],
+        }]
+     })
+
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    resp = client.get_bucket_policy_status(Bucket=bucket_name)
+    eq(resp['PolicyStatus']['IsPublic'],True)
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get public access block on a bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_get_default_public_block():
+    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    resp = client.get_public_access_block(Bucket=bucket_name)
+    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], False)
+    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], False)
+    eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], False)
+    eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], False)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='put public access block on a bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_put_public_block():
+    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': True,
+                   'IgnorePublicAcls': True,
+                   'BlockPublicPolicy': True,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+
+    resp = client.get_public_access_block(Bucket=bucket_name)
+    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
+    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
+    eq(resp['PublicAccessBlockConfiguration']['IgnorePublicAcls'], access_conf['IgnorePublicAcls'])
+    eq(resp['PublicAccessBlockConfiguration']['RestrictPublicBuckets'], access_conf['RestrictPublicBuckets'])
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='block public canned acls on a bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_block_public_put_bucket_acls():
+    #client = get_svc_client(svc='s3control', client_config=Config(s3={'addressing_style': 'path'}))
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': True,
+                   'IgnorePublicAcls': False,
+                   'BlockPublicPolicy': True,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+
+    resp = client.get_public_access_block(Bucket=bucket_name)
+    eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
+    eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='public-read-write')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, client.put_bucket_acl, Bucket=bucket_name,ACL='authenticated-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='block public acls on canned acls')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_block_public_object_canned_acls():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': True,
+                   'IgnorePublicAcls': False,
+                   'BlockPublicPolicy': False,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+
+    # resp = client.get_public_access_block(Bucket=bucket_name)
+    # eq(resp['PublicAccessBlockConfiguration']['BlockPublicAcls'], access_conf['BlockPublicAcls'])
+    # eq(resp['PublicAccessBlockConfiguration']['BlockPublicPolicy'], access_conf['BlockPublicPolicy'])
+
+    #FIXME: use empty body until #42208
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo1', Body='', ACL='public-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo2', Body='', ACL='public-read-write')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+    e = assert_raises(ClientError, client.put_object, Bucket=bucket_name, Key='foo3', Body='', ACL='authenticated-read')
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 403)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='block public policy on a bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_block_public_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    access_conf = {'BlockPublicAcls': False,
+                   'IgnorePublicAcls': False,
+                   'BlockPublicPolicy': True,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+    policy_document = make_json_policy("s3:GetObject",
+                                       resource)
+
+    check_access_denied(client.put_bucket_policy, Bucket=bucket_name, Policy=policy_document)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='ignore public acls on a bucket')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_ignore_public_acls():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    alt_client = get_alt_client()
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+    # Public bucket should be accessible
+    alt_client.list_objects(Bucket=bucket_name)
+
+    client.put_object(Bucket=bucket_name,Key='key1',Body='abcde',ACL='public-read')
+    resp=alt_client.get_object(Bucket=bucket_name, Key='key1')
+    eq(_get_body(resp), 'abcde')
+
+    access_conf = {'BlockPublicAcls': False,
+                   'IgnorePublicAcls': True,
+                   'BlockPublicPolicy': False,
+                   'RestrictPublicBuckets': False}
+
+    client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=access_conf)
+    resource = _make_arn_resource("{}/{}".format(bucket_name, "*"))
+
+    client.put_bucket_acl(Bucket=bucket_name, ACL='public-read')
+    # IgnorePublicACLs is true, so regardless this should behave as a private bucket
+    check_access_denied(alt_client.list_objects, Bucket=bucket_name)
+    check_access_denied(alt_client.get_object, Bucket=bucket_name, Key='key1')
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='multipart upload on a bucket with a policy')
+@attr(assertion='succeeds')
+@attr('policy_status')
+def test_multipart_upload_on_a_bucket_with_policy():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    resource1 = "arn:aws:s3:::" + bucket_name
+    resource2 = "arn:aws:s3:::" + bucket_name + "/*"
+    policy_document = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": "*",
+        "Action": "*",
+        "Resource": [
+            resource1,
+            resource2
+          ],
+        }]
+     })
+    key = "foo"
+    objlen=50*1024*1024
+    client.put_bucket_policy(Bucket=bucket_name, Policy=policy_document)
+    (upload_id, data, parts) = _multipart_upload(bucket_name=bucket_name, key=key, size=objlen, client=client)
+    response = client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+def _put_bucket_encryption_s3(client, bucket_name):
+    """
+    enable a default encryption policy on the given bucket
+    """
+    server_side_encryption_conf = {
+        'Rules': [
+            {
+                'ApplyServerSideEncryptionByDefault': {
+                    'SSEAlgorithm': 'AES256'
+                }
+            },
+        ]
+    }
+    response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+def _put_bucket_encryption_kms(client, bucket_name):
+    """
+    enable a default encryption policy on the given bucket
+    """
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-again'
+    server_side_encryption_conf = {
+        'Rules': [
+            {
+                'ApplyServerSideEncryptionByDefault': {
+                    'SSEAlgorithm': 'aws:kms',
+                    'KMSMasterKeyID': kms_keyid
+                }
+            },
+        ]
+    }
+    response = client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_conf)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='put bucket encryption on bucket - s3')
+@attr(assertion='succeeds')
+@attr('sse-s3')
+def test_put_bucket_encryption_s3():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+@attr(resource='bucket')
+@attr(method='put')
+@attr(operation='put bucket encryption on bucket - kms')
+@attr(assertion='succeeds')
+@attr('encryption')
+def test_put_bucket_encryption_kms():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_kms(client, bucket_name)
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket encryption on bucket - s3')
+@attr(assertion='succeeds')
+@attr('sse-s3')
+def test_get_bucket_encryption_s3():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError')
+
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    response = client.get_bucket_encryption(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'], 'AES256')
+
+
+@attr(resource='bucket')
+@attr(method='get')
+@attr(operation='get bucket encryption on bucket - kms')
+@attr(assertion='succeeds')
+@attr('encryption')
+def test_get_bucket_encryption_kms():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        kms_keyid = 'fool-me-again'
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError')
+
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    response = client.get_bucket_encryption(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 200)
+    eq(response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm'], 'aws:kms')
+    eq(response['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['KMSMasterKeyID'], kms_keyid)
+
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='delete bucket encryption on bucket - s3')
+@attr(assertion='succeeds')
+@attr('sse-s3')
+def test_delete_bucket_encryption_s3():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError')
+
+
+@attr(resource='bucket')
+@attr(method='delete')
+@attr(operation='delete bucket encryption on bucket - kms')
+@attr(assertion='succeeds')
+@attr('encryption')
+def test_delete_bucket_encryption_kms():
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    response = client.delete_bucket_encryption(Bucket=bucket_name)
+    eq(response['ResponseMetadata']['HTTPStatusCode'], 204)
+
+    response_code = ""
+    try:
+        client.get_bucket_encryption(Bucket=bucket_name)
+    except ClientError as e:
+        response_code = e.response['Error']['Code']
+
+    eq(response_code, 'ServerSideEncryptionConfigurationNotFoundError')
+
+def _test_sse_s3_default_upload(file_size):
+    """
+    Enable default bucket encryption, upload a body of A's of the given size
+    with put_object, then read it back and confirm the content matches the input.
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    data = 'A'*file_size
+    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+    body = _get_body(response)
+    eq(body, data)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1 byte upload to SSE-S3 default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_default_upload_1b():
+    _test_sse_s3_default_upload(1)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1KB upload to SSE-S3 default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_default_upload_1kb():
+    _test_sse_s3_default_upload(1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1MB upload to SSE-S3 default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_default_upload_1mb():
+    _test_sse_s3_default_upload(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 8MB upload to SSE-S3 default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_default_upload_8mb():
+    _test_sse_s3_default_upload(8*1024*1024)
+
+def _test_sse_kms_default_upload(file_size):
+    """
+    Enable default bucket encryption (SSE-KMS), upload a body of A's of the
+    given size with put_object, then read it back and confirm the content matches.
+    """
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        raise SkipTest
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    data = 'A'*file_size
+    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data)
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
+
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
+    body = _get_body(response)
+    eq(body, data)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1 byte upload to SSE-KMS default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_default_upload_1b():
+    _test_sse_kms_default_upload(1)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1KB upload to SSE-KMS default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_default_upload_1kb():
+    _test_sse_kms_default_upload(1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1MB upload to SSE-KMS default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_default_upload_1mb():
+    _test_sse_kms_default_upload(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 8MB upload to SSE-KMS default-encrypted bucket')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_default_upload_8mb():
+    _test_sse_kms_default_upload(8*1024*1024)
+
+
+
+@attr(resource='object')
+@attr(method='head')
+@attr(operation='Test head operation on SSE-S3 default-encrypted object')
+@attr(assertion='success')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_default_method_head():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    data = 'A'*1000
+    key = 'testobj'
+    client.put_object(Bucket=bucket_name, Key=key, Body=data)
+
+    response = client.head_object(Bucket=bucket_name, Key=key)
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+
+    sse_s3_headers = {
+        'x-amz-server-side-encryption': 'AES256',
+    }
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(sse_s3_headers))
+    client.meta.events.register('before-call.s3.HeadObject', lf)
+    e = assert_raises(ClientError, client.head_object, Bucket=bucket_name, Key=key)
+    status, error_code = _get_status_and_error_code(e.response)
+    eq(status, 400)
+
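+# sketch of the boto3 event-hook pattern used throughout these tests (assumption:
+# illustrative only, not called by the suite). registering a handler on a client's
+# 'before-call' event lets the test mutate the request headers before the request
+# is sent, which is how the extra x-amz-* headers are injected above.
+def _register_extra_headers_example(client, extra_headers):
+    handler = (lambda **kwargs: kwargs['params']['headers'].update(extra_headers))
+    client.meta.events.register('before-call.s3.HeadObject', handler)
+    return handler
+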
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='complete SSE-S3 multi-part upload')
+@attr(assertion='successful')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_default_multipart_upload():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    key = "multipart_enc"
+    content_type = 'text/plain'
+    objlen = 30 * 1024 * 1024
+    metadata = {'foo': 'bar'}
+    enc_headers = {
+        'Content-Type': content_type
+    }
+    resend_parts = []
+
+    (upload_id, data, parts) = _multipart_upload_enc(client, bucket_name, key, objlen,
+            part_size=5*1024*1024, init_headers=enc_headers, part_headers=enc_headers, metadata=metadata, resend_parts=resend_parts)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))
+    client.meta.events.register('before-call.s3.CompleteMultipartUpload', lf)
+    client.complete_multipart_upload(Bucket=bucket_name, Key=key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+
+    response = client.head_bucket(Bucket=bucket_name)
+    rgw_object_count = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-object-count', 1))
+    eq(rgw_object_count, 1)
+    rgw_bytes_used = int(response['ResponseMetadata']['HTTPHeaders'].get('x-rgw-bytes-used', objlen))
+    eq(rgw_bytes_used, objlen)
+
+    lf = (lambda **kwargs: kwargs['params']['headers'].update(enc_headers))  # enc_headers doubles as the part headers here
+    client.meta.events.register('before-call.s3.UploadPart', lf)
+
+    response = client.get_object(Bucket=bucket_name, Key=key)
+
+    eq(response['Metadata'], metadata)
+    eq(response['ResponseMetadata']['HTTPHeaders']['content-type'], content_type)
+
+    body = _get_body(response)
+    eq(body, data)
+    size = response['ContentLength']
+    eq(len(body), size)
+
+    _check_content_using_range(key, bucket_name, data, 1000000)
+    _check_content_using_range(key, bucket_name, data, 10000000)
+
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated SSE-S3 browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_default_post_object_authenticated_request():
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_s3(client, bucket_name)
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {
+            "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
+            "conditions": [
+                {"bucket": bucket_name},
+                ["starts-with", "$key", "foo"],
+                {"acl": "private"},
+                ["starts-with", "$Content-Type", "text/plain"],
+                ["starts-with", "$x-amz-server-side-encryption", ""], 
+                ["content-length-range", 0, 1024]
+            ]
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('file', ('bar'))])
+
+    r = requests.post(url, files = payload)
+    eq(r.status_code, 204)
+
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+    body = _get_body(response)
+    eq(body, 'bar')
+
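+# hypothetical helper (not used above) isolating the browser-POST signing flow from
+# the two tests in this hunk: base64-encode the JSON policy document, then sign the
+# encoded policy with HMAC-SHA1 using the account's secret key.
+def _sign_post_policy_example(policy_document, aws_secret_access_key):
+    policy = base64.b64encode(bytes(json.dumps(policy_document), 'utf-8'))
+    signature = base64.b64encode(
+        hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+    return policy, signature
+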
+@attr(resource='object')
+@attr(method='post')
+@attr(operation='authenticated SSE-KMS browser based upload via POST request')
+@attr(assertion='succeeds and returns written data')
+@attr('encryption')
+@attr('bucket-encryption')
+@attr('fails_on_dbstore')
+def test_sse_kms_default_post_object_authenticated_request():
+    kms_keyid = get_main_kms_keyid()
+    if kms_keyid is None:
+        raise SkipTest
+    bucket_name = get_new_bucket()
+    client = get_client()
+    _put_bucket_encryption_kms(client, bucket_name)
+
+    url = _get_post_url(bucket_name)
+    utc = pytz.utc
+    expires = datetime.datetime.now(utc) + datetime.timedelta(seconds=+6000)
+
+    policy_document = {
+            "expiration": expires.strftime("%Y-%m-%dT%H:%M:%SZ"),
+            "conditions": [
+                {"bucket": bucket_name},
+                ["starts-with", "$key", "foo"],
+                {"acl": "private"},
+                ["starts-with", "$Content-Type", "text/plain"],
+                ["starts-with", "$x-amz-server-side-encryption", ""], 
+                ["content-length-range", 0, 1024]
+            ]
+    }
+
+    json_policy_document = json.JSONEncoder().encode(policy_document)
+    bytes_json_policy_document = bytes(json_policy_document, 'utf-8')
+    policy = base64.b64encode(bytes_json_policy_document)
+    aws_secret_access_key = get_main_aws_secret_key()
+    aws_access_key_id = get_main_aws_access_key()
+
+    signature = base64.b64encode(hmac.new(bytes(aws_secret_access_key, 'utf-8'), policy, hashlib.sha1).digest())
+
+    payload = OrderedDict([ ("key" , "foo.txt"),("AWSAccessKeyId" , aws_access_key_id),\
+    ("acl" , "private"),("signature" , signature),("policy" , policy),\
+    ("Content-Type" , "text/plain"),
+    ('file', ('bar'))])
+
+    r = requests.post(url, files = payload)
+    eq(r.status_code, 204)
+
+    response = client.get_object(Bucket=bucket_name, Key='foo.txt')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'aws:kms')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption-aws-kms-key-id'], kms_keyid)
+    body = _get_body(response)
+    eq(body, 'bar')
+
+
+def _test_sse_s3_encrypted_upload(file_size):
+    """
+    Test upload of the given size, specifically requesting sse-s3 encryption.
+    """
+    bucket_name = get_new_bucket()
+    client = get_client()
+
+    data = 'A'*file_size
+    response = client.put_object(Bucket=bucket_name, Key='testobj', Body=data, ServerSideEncryption='AES256')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+
+    response = client.get_object(Bucket=bucket_name, Key='testobj')
+    eq(response['ResponseMetadata']['HTTPHeaders']['x-amz-server-side-encryption'], 'AES256')
+    body = _get_body(response)
+    eq(body, data)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1 byte upload with SSE-S3 encryption')
+@attr(assertion='success')
+@attr('encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_encrypted_upload_1b():
+    _test_sse_s3_encrypted_upload(1)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1KB upload with SSE-S3 encryption')
+@attr(assertion='success')
+@attr('encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_encrypted_upload_1kb():
+    _test_sse_s3_encrypted_upload(1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 1MB upload with SSE-S3 encryption')
+@attr(assertion='success')
+@attr('encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_encrypted_upload_1mb():
+    _test_sse_s3_encrypted_upload(1024*1024)
+
+@attr(resource='object')
+@attr(method='put')
+@attr(operation='Test 8MB upload with SSE-S3 encryption')
+@attr(assertion='success')
+@attr('encryption')
+@attr('sse-s3')
+@attr('fails_on_dbstore')
+def test_sse_s3_encrypted_upload_8mb():
+    _test_sse_s3_encrypted_upload(8*1024*1024)
diff --git a/s3tests_boto3/functional/test_s3select.py b/s3tests_boto3/functional/test_s3select.py
new file mode 100644 (file)
index 0000000..2b31342
--- /dev/null
@@ -0,0 +1,1276 @@
+import nose
+import random
+import string
+import re
+from nose.plugins.attrib import attr
+from botocore.exceptions import ClientError
+
+import uuid
+from nose.tools import eq_ as eq
+
+from . import (
+    get_client
+    )
+
+import logging
+logging.basicConfig(level=logging.INFO)
+
+region_name = ''
+
+# recursive function for generating a random arithmetic expression
+def random_expr(depth):
+    # depth controls the complexity of the expression
+    if depth==1 :
+        return str(int(random.random() * 100) + 1)+".0"
+    return '(' + random_expr(depth-1) + random.choice(['+','-','*','/']) + random_expr(depth-1) + ')'
+
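+# illustrative sketch (not called by any test): a generated expression is valid in
+# both s3select SQL and python, so python's eval() can serve as the reference
+# engine for the fuzzing helpers below.
+def _random_expr_example():
+    expr = random_expr(3)       # e.g. "((7.0+3.0)*(5.0-2.0))"
+    try:
+        reference = eval(expr)  # python evaluates the same arithmetic
+    except ZeroDivisionError:   # the real helpers skip these cases too
+        reference = None
+    return expr, reference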
+
+def generate_s3select_where_clause(bucket_name,obj_name):
+
+    a=random_expr(4)
+    b=random_expr(4)
+    s=random.choice([ '<','>','=','<=','>=','!=' ])
+
+    try:
+        eval( a )
+        eval( b )
+    except ZeroDivisionError:
+        return
+
+    # generate an s3select statement from the generated random expression.
+    # count(0)>0 means the where-clause expression evaluated to true;
+    # python's eval() of the same conditional expression should return the same boolean result.
+    s3select_stmt =  "select count(0) from s3object where " + a + s + b + ";"
+
+    res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,s3select_stmt) ).replace(",","")
+
+    if  s == '=':
+        s = '=='
+
+    s3select_assert_result(int(res)>0 , eval( a + s + b ))
+
+def generate_s3select_expression_projection(bucket_name,obj_name):
+
+        # generate an s3select statement from the generated random expression.
+        # the statement returns the arithmetic result of the expression.
+        # the same expression is evaluated by python's eval(); the results should agree to within epsilon.
+        
+        e = random_expr( 4 )
+
+        try:
+            eval( e )
+        except ZeroDivisionError:
+            return
+
+        if eval( e ) == 0:
+            return
+
+        res = remove_xml_tags_from_result( run_s3select(bucket_name,obj_name,"select " + e + " from s3object;",) ).replace(",","")
+
+        # accuracy level 
+        epsilon = float(0.000001) 
+
+        # both results should be close (epsilon)
+        assert abs(1 - (float(res.split("\n")[1]) / eval( e )) ) < epsilon
+
+def get_random_string():
+
+    return uuid.uuid4().hex[:6].upper()
+
+@attr('s3select')
+def test_generate_where_clause():
+
+    # create small csv file for testing the random expressions
+    single_line_csv = create_random_csv_object(1,1)
+    bucket_name = "test"
+    obj_name = get_random_string() #"single_line_csv.csv"
+    upload_csv_object(bucket_name,obj_name,single_line_csv)
+       
+    for _ in range(100): 
+        generate_s3select_where_clause(bucket_name,obj_name)
+
+@attr('s3select')
+def test_generate_projection():
+
+    # create small csv file for testing the random expressions
+    single_line_csv = create_random_csv_object(1,1)
+    bucket_name = "test"
+    obj_name = get_random_string() #"single_line_csv.csv"
+    upload_csv_object(bucket_name,obj_name,single_line_csv)
+       
+    for _ in range(100): 
+        generate_s3select_expression_projection(bucket_name,obj_name)
+
+def s3select_assert_result(a,b):
+    if type(a) == str:
+        a_strip = a.strip()
+        b_strip = b.strip()
+        assert a_strip != ""
+        assert b_strip != ""
+    else:
+        assert a != ""
+        assert b != ""
+    
+    nose.tools.assert_equal(a,b)
+
+def create_csv_object_for_datetime(rows,columns):
+        result = ""
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                row = row + "{}{:02d}{:02d}T{:02d}{:02d}{:02d}Z,".format(random.randint(0,100)+1900,random.randint(1,12),random.randint(1,28),random.randint(0,23),random.randint(0,59),random.randint(0,59),)
+            result += row + "\n"
+
+        return result
+
+def create_random_csv_object(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                row = row + "{}{}".format(random.randint(0,1000),col_delim)
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_string(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,9) == 5:
+                    row = row + "{}{}".format(''.join(random.choice(string.ascii_letters) for m in range(10)) + "aeiou",col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("cbcd" + random.choice(string.ascii_letters) for m in range(10)) + "vwxyzzvwxyz" ,col_delim)
+                
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_trim(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,5) == 2:
+                    row = row + "{}{}".format(''.join("   aeiou    ") ,col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("abcd") ,col_delim)
+
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_escape(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,9) == 5:
+                    row = row + "{}{}".format(''.join("_ar") ,col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("aeio_")  ,col_delim)
+                
+            result += row + record_delim
+
+        return result
+
+def create_random_csv_object_null(rows,columns,col_delim=",",record_delim="\n",csv_schema=""):
+        result = ""
+        if len(csv_schema)>0 :
+            result = csv_schema + record_delim
+
+        for _ in range(rows):
+            row = ""
+            for _ in range(columns):
+                if random.randint(0,5) == 2:
+                    row = row + "{}{}".format(''.join("") ,col_delim)
+                else:
+                    row = row + "{}{}".format(''.join("abc") ,col_delim)
+                
+            result += row + record_delim
+
+        return result
+
+def upload_csv_object(bucket_name,new_key,obj):
+
+        client = get_client()
+        client.create_bucket(Bucket=bucket_name)
+        client.put_object(Bucket=bucket_name, Key=new_key, Body=obj)
+
+        # validate uploaded object
+        c2 = get_client()
+        response = c2.get_object(Bucket=bucket_name, Key=new_key)
+        eq(response['Body'].read().decode('utf-8'), obj, 's3select error: downloaded object not equal to uploaded object')
+
+def run_s3select(bucket,key,query,column_delim=",",row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE", progress = False):
+
+    s3 = get_client()
+    result = ""
+    try:
+        r = s3.select_object_content(
+        Bucket=bucket,
+        Key=key,
+        ExpressionType='SQL',
+        InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
+        OutputSerialization = {"CSV": {}},
+        Expression=query,
+        RequestProgress = {"Enabled": progress})
+
+    except ClientError as c:
+        result += str(c)
+        return result
+
+    if progress == False:
+        for event in r['Payload']:
+            if 'Records' in event:
+                records = event['Records']['Payload'].decode('utf-8')
+                result += records
+    else:
+        result = []
+        for event in r['Payload']:
+            if 'Records' in event:
+                records = event['Records']
+                result.append(records.copy())
+            if 'Progress' in event:
+                progress = event['Progress']
+                result.append(progress.copy())
+            if 'Stats' in event:
+                stats = event['Stats']
+                result.append(stats.copy())
+            if 'End' in event:
+                end = event['End']
+                result.append(end.copy())
+    return result
+
+def run_s3select_output(bucket,key,query, quot_field, op_column_delim = ",", op_row_delim = "\n",  column_delim=",", op_quot_char = '"', op_esc_char = '\\', row_delim="\n",quot_char='"',esc_char='\\',csv_header_info="NONE"):
+
+    s3 = get_client()
+
+    r = s3.select_object_content(
+        Bucket=bucket,
+        Key=key,
+        ExpressionType='SQL',
+        InputSerialization = {"CSV": {"RecordDelimiter" : row_delim, "FieldDelimiter" : column_delim,"QuoteEscapeCharacter": esc_char, "QuoteCharacter": quot_char, "FileHeaderInfo": csv_header_info}, "CompressionType": "NONE"},
+        OutputSerialization = {"CSV": {"RecordDelimiter" : op_row_delim, "FieldDelimiter" : op_column_delim, "QuoteCharacter" : op_quot_char, "QuoteEscapeCharacter" : op_esc_char, "QuoteFields" : quot_field}},
+        Expression=query,)
+    
+    result = ""
+    for event in r['Payload']:
+        if 'Records' in event:
+            records = event['Records']['Payload'].decode('utf-8')
+            result += records
+    
+    return result
+
+def remove_xml_tags_from_result(obj):
+    result = ""
+    for rec in obj.split("\n"):
+        if(rec.find("Payload")>0 or rec.find("Records")>0):
+            continue
+        result += rec + "\n" # add back the newline removed by split()
+
+    result_strip= result.strip()
+    x = bool(re.search("^failure.*$", result_strip))
+    if x:
+        logging.info(result)
+    nose.tools.assert_equal(x, False)
+
+    return result
+
+def create_list_of_int(column_pos,obj,field_split=",",row_split="\n"):
+    
+    list_of_int = [] 
+    for rec in obj.split(row_split):
+        col_num = 1
+        if ( len(rec) == 0):
+            continue
+        for col in rec.split(field_split):
+            if (col_num == column_pos):
+                list_of_int.append(int(col))
+            col_num+=1
+
+    return list_of_int
+
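+# illustrative sketch (not called by any test): create_list_of_int() extracts one
+# column from a raw CSV body, which is how the aggregation tests below compute
+# their python-side reference results.
+def _create_list_of_int_example():
+    assert create_list_of_int(2, "1,2\n3,4\n") == [2, 4]
+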
+@attr('s3select')
+def test_count_operation():
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    num_of_rows = 1234
+    obj_to_load = create_random_csv_object(num_of_rows,10)
+    upload_csv_object(bucket_name,csv_obj_name,obj_to_load)
+    res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object;") ).replace(",","")
+
+    s3select_assert_result( num_of_rows, int( res ))
+
+@attr('s3select')
+def test_column_sum_min_max():
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+    
+    csv_obj_name_2 = get_random_string()
+    bucket_name_2 = "testbuck2"
+    upload_csv_object(bucket_name_2,csv_obj_name_2,csv_obj)
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_1)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 1 , csv_obj )
+    res_target = min( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_4)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = min( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select avg(int(_6)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 6 , csv_obj )
+    res_target = float(sum(list_int ))/10000
+
+    s3select_assert_result( float(res_s3select), float(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select max(int(_4)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = max( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select max(int(_7)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 7 , csv_obj )
+    res_target = max( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select sum(int(_4)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 4 , csv_obj )
+    res_target = sum( list_int )
+
+    s3select_assert_result( int(res_s3select), int(res_target))
+    
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select sum(int(_7)) from s3object;")  ).replace(",","")
+    list_int = create_list_of_int( 7 , csv_obj )
+    res_target = sum( list_int )
+
+    s3select_assert_result(  int(res_s3select) , int(res_target) )
+
+    # the following queries validate, on *random* input, an exact relation between
+    # the where-clause condition, the sum operations and the count operation
+    # (see the sketch after this test).
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name_2,csv_obj_name_2,"select count(0),sum(int(_1)),sum(int(_2)) from s3object where (int(_1)-int(_2)) = 2;" ) )
+    count,sum1,sum2 = res_s3select.split(",")
+
+    s3select_assert_result( int(count)*2 , int(sum1)-int(sum2 ) )
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0),sum(int(_1)),sum(int(_2)) from s3object where (int(_1)-int(_2)) = 4;" ) ) 
+    count,sum1,sum2 = res_s3select.split(",")
+
+    s3select_assert_result( int(count)*4 , int(sum1)-int(sum2) )
+
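+# a minimal sketch of the algebra behind the two checks above (illustrative only):
+# if every selected row satisfies _1 - _2 == k, then summing over the n matched
+# rows gives sum(_1) - sum(_2) == n * k.
+def _sum_count_invariant_example():
+    rows = [(10, 8), (5, 3), (102, 100)]   # every pair differs by k = 2
+    sum1 = sum(a for a, _ in rows)
+    sum2 = sum(b for _, b in rows)
+    assert sum1 - sum2 == len(rows) * 2
+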
+@attr('s3select')
+def test_nullif_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where nullif(_1,_2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 = _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,_2) is null) from s3object ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 = _2) from s3object  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where not nullif(_1,_2) is null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,_2) is not null) from s3object ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 != _2) from s3object  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where  nullif(_1,_2) = _1 ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    csv_obj = create_random_csv_object_null(10000,10)
+
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where nullif(_1,null) is null;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 is null;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (nullif(_1,null) is null) from s3object;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select (_1 is null) from s3object;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+@attr('s3select')
+def test_nulliftrue_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where (nullif(_1,_2) is null) = true ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 = _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where not (nullif(_1,_2) is null) = true ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+    res_s3select_nullif = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where (nullif(_1,_2) = _1 = true) ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_nullif, res_s3select)
+
+@attr('s3select')
+def test_is_not_null_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where nullif(_1,_2) is not null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_null, res_s3select)
+
+    res_s3select_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where (nullif(_1,_1) and _1 = _2) is not null ;")  ).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select count(*) from s3object where _1 != _2  ;")  ).replace("\n","")
+
+    s3select_assert_result( res_s3select_null, res_s3select)
+
+@attr('s3select')
+def test_lowerupper_expressions():
+
+    csv_obj = create_random_csv_object(1,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select lower("AB12cd$$") from s3object ;')  ).replace("\n","")
+
+    s3select_assert_result( res_s3select, "ab12cd$$")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select upper("ab12CD$$") from s3object ;')  ).replace("\n","")
+
+    s3select_assert_result( res_s3select, "AB12CD$$")
+
+@attr('s3select')
+def test_in_expressions():
+
+    # purpose of test: the engine should process IN-expressions correctly, both in where clauses and in projections
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) in(1);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in(1)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) = 1) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) in(1,0);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in(1,0)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) = 1 or int(_1) = 0) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) in(1,0,2);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2) in(1,0,2)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2) = 1 or int(_2) = 0 or int(_2) = 2) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5);')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5)) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5) from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where character_length(_1) = 2 and substring(_1,2,1) in ("3");')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where _1 like "_3";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (character_length(_1) = 2 and substring(_1,2,1) in ("3")) from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_1 like "_3") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+@attr('s3select')
+def test_true_false_in_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(1)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(1,0)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = 1 or int(_1) = 0;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where (int(_2) in(1,0,2)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2) = 1 or int(_2) = 0 or int(_2) = 2;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where (int(_2)*2 in(int(_3)*2,int(_4)*3,int(_5)*5)) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_2) from s3object where int(_2)*2 = int(_3)*2 or int(_2)*2 = int(_4)*3 or int(_2)*2 = int(_5)*5;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where (character_length(_1) = 2) = true and (substring(_1,2,1) in ("3")) = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where _1 like "_3";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
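+    # an alias defined in the projection list can also be referenced in the where clause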
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (int(_1) in (1,2,0)) as a1 from s3object where a1 = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select \"true\" from s3object where (int(_1) in (1,0,2)) ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_in, res_s3select )
+
+@attr('s3select')
+def test_like_expressions():
+
+    csv_obj = create_random_csv_object_string(1000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%aeio%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,11,4) = "aeio" ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select  (_1 like "%aeio%") from s3object ;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_1,11,4) = "aeio") from s3object ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "cbcd%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,4) = "cbcd";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
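+    # the following query is intentionally malformed (trailing "like" keyword); the engine is expected to report a syntax error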
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from stdin where _1 like "%aeio%" like;')).replace("\n","")
+
+    find_like = res_s3select_like.find("s3select-Syntax-Error")
+
+    assert int(find_like) >= 0
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_1 like "cbcd%") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_1,1,4) = "cbcd") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _3 like "%y[y-z]";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_3,char_length(_3),1) between "y" and "z" and substring(_3,char_length(_3)-1,1) = "y";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_3 like "%y[y-z]") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_3,char_length(_3),1) between "y" and "z" and substring(_3,char_length(_3)-1,1) = "y") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _2 like "%yz";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_2,char_length(_2),1) = "z" and substring(_2,char_length(_2)-1,1) = "y";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_2 like "%yz") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_2,char_length(_2),1) = "z" and substring(_2,char_length(_2)-1,1) = "y") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _3 like "c%z";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_3,char_length(_3),1) = "z" and substring(_3,1,1) = "c";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_3 like "c%z") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_3,char_length(_3),1) = "z" and substring(_3,1,1) = "c") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _2 like "%xy_";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_2,char_length(_2)-1,1) = "y" and substring(_2,char_length(_2)-2,1) = "x";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select (_2 like "%xy_") from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select (substring(_2,char_length(_2)-1,1) = "y" and substring(_2,char_length(_2)-2,1) = "x") from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+@attr('s3select')
+def test_truefalselike_expressions():
+
+    csv_obj = create_random_csv_object_string(1000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_1 like "%aeio%") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,11,4) = "aeio" ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_1 like "cbcd%") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,4) = "cbcd";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_3 like "%y[y-z]") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_3,char_length(_3),1) between "y" and "z") = true and (substring(_3,char_length(_3)-1,1) = "y") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_2 like "%yz") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_2,char_length(_2),1) = "z") = true and (substring(_2,char_length(_2)-1,1) = "y") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_3 like "c%z") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_3,char_length(_3),1) = "z") = true and (substring(_3,1,1) = "c") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+    res_s3select_like = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where (_2 like "%xy_") = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where (substring(_2,char_length(_2)-1,1) = "y") = true and (substring(_2,char_length(_2)-2,1) = "x") = true;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_like, res_s3select )
+
+@attr('s3select')
+def test_complex_expressions():
+
+    # purpose of test: validate that the engine processes several projections containing aggregation functions correctly
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from s3object;")).replace("\n","")
+
+    min_1 = min ( create_list_of_int( 1 , csv_obj ) )
+    max_2 = max ( create_list_of_int( 2 , csv_obj ) )
+    min_3 = min ( create_list_of_int( 3 , csv_obj ) ) + 1
+
+    __res = "{},{},{}".format(min_1,max_2,min_3)
+    
+    # assertion is according to the random-csv generator function
+    s3select_assert_result( res_s3select, __res )
+
+    # purpose of test: all three where conditions select the same group of values, and thus yield the same result
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where substring(_2,1,1) = "1" and char_length(_2) = 3;')).replace("\n","")
+
+    res_s3select_between_numbers = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where int(_2)>=100 and int(_2)<200;')).replace("\n","")
+
+    res_s3select_eq_modulo = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select min(int(_2)),max(int(_2)) from s3object where int(_2)/100 = 1 and character_length(_2) = 3;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_substring, res_s3select_between_numbers)
+
+    s3select_assert_result( res_s3select_between_numbers, res_s3select_eq_modulo)
+    
+@attr('s3select')
+def test_alias():
+
+    # purpose: compare the results of two identical queries, one using aliases and one without.
+    # the test sets aliases on 3 projections; the third projection uses another projection's alias, and the where clause uses aliases as well.
+    # it validates that the where clause and the projections resolve aliases correctly; bear in mind that each alias has its own cache,
+    # and that cache must be invalidated for every new row.
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1, int(_2) as a2 , (a1+a2) as a3 from s3object where a3>100 and a3<300;")  ).replace(",","")
+
+    res_s3select_no_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1),int(_2),int(_1)+int(_2) from s3object where (int(_1)+int(_2))>100 and (int(_1)+int(_2))<300;")  ).replace(",","")
+
+    s3select_assert_result( res_s3select_alias, res_s3select_no_alias)
+
+
+@attr('s3select')
+def test_alias_cyclic_reference():
+
+    number_of_rows = 10000
+    
+    # purpose of test: validate that the s3select engine detects a cyclic reference between aliases.
+    csv_obj = create_random_csv_object(number_of_rows,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select int(_1) as a1,int(_2) as a2, a1+a4 as a3, a5+a1 as a4, int(_3)+a3 as a5 from s3object;")  )
+
+    find_res = res_s3select_alias.find("number of calls exceed maximum size, probably a cyclic reference to alias")
+    
+    assert int(find_res) >= 0 
+
+@attr('s3select')
+def test_datetime():
+
+    # purpose of test: validate that date-time functionality is correct
+    # by creating the same groups with different (nested) function calls, which should produce the same result
+
+    csv_obj = create_csv_object_for_datetime(10000,1)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where extract(year from to_timestamp(_1)) > 1950 and extract(year from to_timestamp(_1)) < 1960;')  )
+
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where int(substring(_1,1,4))>1950 and int(substring(_1,1,4))<1960;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_substring)
+
+    res_s3select_date_time_to_string = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select cast(to_string(to_timestamp(_1), \'x\') as int) from  s3object;')  )
+
+    res_s3select_date_time_extract = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select extract(timezone_hour from to_timestamp(_1)) from  s3object;')  )
+
+    s3select_assert_result( res_s3select_date_time_to_string, res_s3select_date_time_extract )
+
+    res_s3select_date_time_to_timestamp = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select extract(month from to_timestamp(_1)) from s3object where extract(month from to_timestamp(_1)) = 5;')  )
+
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select cast(substring(_1, 5, 2) as int) from s3object where _1 like \'____05%\';')  )
+
+    s3select_assert_result( res_s3select_date_time_to_timestamp, res_s3select_substring)
+
+@attr('s3select')
+def test_true_false_datetime():
+
+    # purpose of test: validate that date-time functionality is correct
+    # by creating the same groups with different (nested) function calls, which should produce the same result
+
+    csv_obj = create_csv_object_for_datetime(10000,1)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (extract(year from to_timestamp(_1)) > 1950) = true and (extract(year from to_timestamp(_1)) < 1960) = true;')  )
+
+    res_s3select_substring = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where int(substring(_1,1,4))>1950 and int(substring(_1,1,4))<1960;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_substring)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where  (date_diff(month,to_timestamp(_1),date_add(month,2,to_timestamp(_1)) ) = 2) = true;')  )
+
+    res_s3select_count = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_count)
+
+    res_s3select_date_time = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (date_diff(year,to_timestamp(_1),date_add(day, 366 ,to_timestamp(_1))) = 1) = true ;')  )
+
+    s3select_assert_result( res_s3select_date_time, res_s3select_count)
+
+    # validate that utcnow integrates correctly with other date-time functions
+    res_s3select_date_time_utcnow = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(0) from  s3object where (date_diff(hour,utcnow(),date_add(day,1,utcnow())) = 24) = true ;')  )
+
+    s3select_assert_result( res_s3select_date_time_utcnow, res_s3select_count)
+
+@attr('s3select')
+def test_csv_parser():
+
+    # purpose: test the default csv meta-characters (, \n " \); returned values may contain meta-characters
+    # NOTE: the default meta-characters for s3select are also meta-characters in python, so in one case below a double backslash is mandatory
+
+    csv_obj = ',first,,,second,third="c31,c32,c33",forth="1,2,3,4",fifth="my_string=\\"any_value\\" , my_other_string=\\"aaaa,bbb\\" ",' + "\n"
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    # returned value contains a comma {,}
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _6 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'third="c31,c32,c33"')
+
+    # returned value contains a comma {,}
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _7 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'forth="1,2,3,4"')
+
+    # return value contain comma{,}{"}, escape-rule{\} by-pass quote{"} , the escape{\} is removed.
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _8 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'fifth="my_string="any_value" , my_other_string="aaaa,bbb" "')
+
+    # return NULL as the first token
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _1 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+    # return NULL in the middle of the line
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _3 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+    # return NULL in the middle of the line (successive nulls)
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _4 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+    # return NULL at the end of the line
+    res_s3select_alias = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _9 from s3object;")  ).replace("\n","")
+    s3select_assert_result( res_s3select_alias, 'null')
+
+@attr('s3select')
+def test_csv_definition():
+
+    number_of_rows = 10000
+
+    # create an object with pipe as the field separator and tab as the row delimiter.
+    csv_obj = create_random_csv_object(number_of_rows,10,"|","\t")
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+   
+    # purpose of test: parse input with a non-default csv definition correctly
+    res = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select count(0) from s3object;","|","\t") ).replace(",","")
+
+    s3select_assert_result( number_of_rows, int(res))
+    
+    # assertion is according to the random-csv generator function
+    # purpose of test: validate that tokens are processed correctly
+    res_s3select = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select min(int(_1)),max(int(_2)),min(int(_3))+1 from s3object;","|","\t") ).replace("\n","")
+
+    min_1 = min ( create_list_of_int( 1 , csv_obj , "|","\t") )
+    max_2 = max ( create_list_of_int( 2 , csv_obj , "|","\t") )
+    min_3 = min ( create_list_of_int( 3 , csv_obj , "|","\t") ) + 1
+
+    __res = "{},{},{}".format(min_1,max_2,min_3)
+    s3select_assert_result( res_s3select, __res )
+
+
+@attr('s3select')
+def test_schema_definition():
+
+    number_of_rows = 10000
+
+    # purpose of test is to validate functionality using csv header info
+    csv_obj = create_random_csv_object(number_of_rows,10,csv_schema="c1,c2,c3,c4,c5,c6,c7,c8,c9,c10")
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    # ignore the schema on the first line and retrieve values using generic column numbers
+    res_ignore = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select _1,_3 from s3object;",csv_header_info="IGNORE") ).replace("\n","")
+
+    # use the schema on the first line; the query refers to the attached column names
+    res_use = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c3 from s3object;",csv_header_info="USE") ).replace("\n","")
+    
+    # result of both queries should be the same
+    s3select_assert_result( res_ignore, res_use)
+
+    # using a column name that does not exist in the schema
+    res_multiple_definition = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select c1,c10,int(c11) from s3object;",csv_header_info="USE") ).replace("\n","")
+
+    assert ((res_multiple_definition.find("alias {c11} or column not exist in schema")) >= 0)
+
+    assert ((res_multiple_definition.find("s3select-ProcessingTime-Error")) >= 0)
+
+    # the alias name is identical to a column name
+    res_multiple_definition = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select int(c1)+int(c2) as c4,c4 from s3object;",csv_header_info="USE") ).replace("\n","")
+
+    assert ((res_multiple_definition.find("multiple definition of column {c4} as schema-column and alias"))  >= 0)
+
+@attr('s3select')
+def test_when_then_else_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
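+    # count how many rows fell into each case branch, then verify each count against an equivalent where-clause query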
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select case when cast(_1 as int)>100 and cast(_1 as int)<200 then "(100-200)" when cast(_1 as int)>200 and cast(_1 as int)<300 then "(200-300)" else "NONE" end from s3object;')  ).replace("\n","")
+
+    count1 = res_s3select.count("(100-200)")  
+
+    count2 = res_s3select.count("(200-300)") 
+
+    count3 = res_s3select.count("NONE")
+
+    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)>100 and cast(_1 as int)<200  ;')  ).replace("\n","")
+
+    res1 = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)>200 and cast(_1 as int)<300  ;')  ).replace("\n","")
+    
+    res2 = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where  cast(_1 as int)<=100 or cast(_1 as int)>=300 or cast(_1 as int)=200  ;')  ).replace("\n","")
+
+    s3select_assert_result( str(count1) , res)
+
+    s3select_assert_result( str(count2) , res1)
+
+    s3select_assert_result( str(count3) , res2)
+
+@attr('s3select')
+def test_coalesce_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>2 and char_length(_4)>2 and cast(substring(_3,1,2) as int) = cast(substring(_4,1,2) as int);')  ).replace("\n","")  
+
+    res_null = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_4 as int)>99 and coalesce(nullif(cast(substring(_3,1,2) as int),cast(substring(_4,1,2) as int)),7) = 7;' ) ).replace("\n","") 
+
+    s3select_assert_result( res_s3select, res_null)
+
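+    # nullif(x,x) is always null, so the coalesce chain should fall through to _2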
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select coalesce(nullif(_5,_5),nullif(_1,_1),_2) from s3object;')  ).replace("\n","") 
+
+    res_coalesce = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select coalesce(_2) from s3object;')  ).replace("\n","")   
+
+    s3select_assert_result( res_s3select, res_coalesce)
+
+
+@attr('s3select')
+def test_cast_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>999;')  ).replace("\n","")  
+
+    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)>3;')  ).replace("\n","") 
+
+    s3select_assert_result( res_s3select, res)
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(_3 as int)>99 and cast(_3 as int)<1000;')  ).replace("\n","")  
+
+    res = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where char_length(_3)=3;')  ).replace("\n","") 
+
+    s3select_assert_result( res_s3select, res)
+
+@attr('s3select')
+def test_version():
+
+    return # test currently disabled
+    number_of_rows = 1
+
+    # purpose of test: validate the version() function
+    csv_obj = create_random_csv_object(number_of_rows,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_version = remove_xml_tags_from_result( run_s3select(bucket_name,csv_obj_name,"select version() from s3object;") ).replace("\n","")
+
+    s3select_assert_result( res_version, "41.a," )
+
+@attr('s3select')
+def test_trim_expressions():
+
+    csv_obj = create_random_csv_object_trim(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
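+    # assumption: the trim test data pads "aeiou" with whitespace (three leading spaces), so substring(_1,4,5) extracts the same trimmed value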
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(_1) = "aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1 from 4 for 5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(both from _1) = "aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trailing from _1) = "   aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(leading from _1) = "aeiou    ";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trim(leading from _1)) = "aeiou";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+@attr('s3select')
+def test_truefalse_trim_expressions():
+
+    csv_obj = create_random_csv_object_trim(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(_1) = "aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1 from 4 for 5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(both from _1) = "aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trailing from _1) = "   aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(leading from _1) = "aeiou    " = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+    res_s3select_trim = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where trim(trim(leading from _1)) = "aeiou" = true;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,4,5) = "aeiou";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_trim, res_s3select )
+
+@attr('s3select')
+def test_escape_expressions():
+
+    csv_obj = create_random_csv_object_escape(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_escape = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%_ar" escape "%";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,char_length(_1),1) = "r" and substring(_1,char_length(_1)-1,1) = "a" and substring(_1,char_length(_1)-2,1) = "_";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_escape, res_s3select )
+
+    res_s3select_escape = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where _1 like "%aeio$_" escape "$";')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where substring(_1,1,5) = "aeio_";')).replace("\n","")
+
+    s3select_assert_result( res_s3select_escape, res_s3select )
+
+@attr('s3select')
+def test_case_value_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_case = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select case cast(_1 as int) when cast(_2 as int) then "case_1_1" else "case_2_2" end from s3object;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select case when cast(_1 as int) = cast(_2 as int) then "case_1_1" else "case_2_2" end from s3object;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_case, res_s3select )
+
+@attr('s3select')
+def test_bool_cast_expressions():
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
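+    # casting an int to bool is expected to yield true for any non-zero value, hence the equivalent comparison with != 0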
+    res_s3select_cast = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select count(*) from s3object where cast(int(_1) as bool) = true ;')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select count(*) from s3object where cast(_1 as int) != 0 ;')).replace("\n","")
+
+    s3select_assert_result( res_s3select_cast, res_s3select )
+
+@attr('s3select')
+def test_progress_expressions():
+
+    csv_obj = create_random_csv_object(1000000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    obj_size = len(csv_obj.encode('utf-8'))
+
+    res_s3select_response = run_s3select(bucket_name,csv_obj_name,"select sum(int(_1)) from s3object;",progress = True)
+    records_payload_size = len(remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name, 'select sum(int(_1)) from s3object;')).replace("\n",""))
+
+    total_response = len(res_s3select_response)
+    
+    # To do: Validate bytes processed after supporting compressed data
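+    # the tail of a progress-enabled response is assumed to be: progress, stats, end; index back from the end to reach each payload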
+    s3select_assert_result(obj_size, res_s3select_response[total_response-3]['Details']['BytesScanned'])
+    s3select_assert_result(records_payload_size, res_s3select_response[total_response-3]['Details']['BytesReturned'])
+
+    # stats response payload validation
+    s3select_assert_result(obj_size, res_s3select_response[total_response-2]['Details']['BytesScanned'])
+    s3select_assert_result(records_payload_size, res_s3select_response[total_response-2]['Details']['BytesReturned'])
+
+    # end response
+    s3select_assert_result({}, res_s3select_response[total_response-1])
+
+@attr('s3select')
+def test_output_serial_expressions():
+    return # TODO fix test
+
+    csv_obj = create_random_csv_object(10000,10)
+
+    csv_obj_name = get_random_string()
+    bucket_name = "test"
+    upload_csv_object(bucket_name,csv_obj_name,csv_obj)
+
+    res_s3select_1 = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,"select _1, _2 from s3object where nullif(_1,_2) is null ;", "ALWAYS")  ).replace("\n",",")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,"select _1, _2 from s3object where _1 = _2 ;")  ).replace("\n",",")
+
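+    # with quoting set to ALWAYS, every output field is wrapped in double quotes; build the expected result by quoting each plain csv item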
+    res_s3select_list = res_s3select.split(',')
+    res_s3select_final = (','.join('"' + item + '"' for item in res_s3select_list)).replace('""','') # remove empty results (first, last)
+
+    s3select_assert_result( res_s3select_1, res_s3select_final)
+
+    res_s3select_in = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(int(_2)));', "ASNEEDED", '$', '#')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = int(_2);')).replace("\n","#")
+    res_s3select = res_s3select[1:] # remove the first redundant separator
+    res_s3select_final = res_s3select[:-1] # remove the last redundant separator
+
+    s3select_assert_result( res_s3select_in, res_s3select_final )
+
+    res_s3select_quot = remove_xml_tags_from_result(  run_s3select_output(bucket_name,csv_obj_name,'select int(_1) from s3object where (int(_1) in(int(_2)));', "ALWAYS", '$', '#')).replace("\n","")
+
+    res_s3select = remove_xml_tags_from_result(  run_s3select(bucket_name,csv_obj_name,'select int(_1) from s3object where int(_1) = int(_2);')).replace("\n","#")
+    res_s3select = res_s3select[1:] # remove the first redundant separator
+    res_s3select = res_s3select[:-1] # remove the last redundant separator
+
+    res_s3select_list = res_s3select.split('#')
+    res_s3select_final = ('#'.join('"' + item + '"' for item in res_s3select_list)).replace('""','')
+    
+    s3select_assert_result( res_s3select_quot, res_s3select_final )
+
diff --git a/s3tests_boto3/functional/test_sts.py b/s3tests_boto3/functional/test_sts.py
new file mode 100644 (file)
index 0000000..dce1712
--- /dev/null
@@ -0,0 +1,2209 @@
+import boto3
+import botocore.session
+from botocore.exceptions import ClientError
+from botocore.exceptions import ParamValidationError
+from nose.tools import eq_ as eq
+from nose.plugins.attrib import attr
+from nose.plugins.skip import SkipTest
+import isodate
+import email.utils
+import datetime
+import threading
+import re
+import pytz
+from collections import OrderedDict
+import requests
+import json
+import base64
+import hmac
+import hashlib
+import xml.etree.ElementTree as ET
+import time
+import operator
+import nose
+import os
+import string
+import random
+import socket
+import ssl
+import logging
+from collections import namedtuple
+
+from email.header import decode_header
+
+from . import (
+    get_iam_client,
+    get_sts_client,
+    get_client,
+    get_alt_user_id,
+    get_config_endpoint,
+    get_new_bucket_name,
+    get_parameter_name,
+    get_main_aws_access_key,
+    get_main_aws_secret_key,
+    get_thumbprint,
+    get_aud,
+    get_token,
+    get_realm_name,
+    check_webidentity,
+    get_iam_access_key,
+    get_iam_secret_key,
+    get_sub,
+    get_azp,
+    get_user_token
+    )
+
+log = logging.getLogger(__name__)
+
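+# the helpers below wrap iam calls and return (error_code, response, ...) tuples, so tests can assert on either outcome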
+def create_role(iam_client,path,rolename,policy_document,description,sessionduration,permissionboundary,tag_list=None):
+    role_err=None
+    role_response=None
+    if rolename is None:
+        rolename=get_parameter_name()
+    if tag_list is None:
+        tag_list = []
+    try:
+        role_response = iam_client.create_role(Path=path,RoleName=rolename,AssumeRolePolicyDocument=policy_document,Tags=tag_list)
+    except ClientError as e:
+        role_err = e.response['Error']['Code']
+    return (role_err,role_response,rolename)
+
+def put_role_policy(iam_client,rolename,policyname,role_policy):
+    role_err=None
+    role_response=None
+    if policyname is None:
+        policyname=get_parameter_name()
+    try:
+        role_response = iam_client.put_role_policy(RoleName=rolename,PolicyName=policyname,PolicyDocument=role_policy)
+    except ClientError as e:
+        role_err = e.response['Error']['Code']
+    return (role_err,role_response)
+
+def put_user_policy(iam_client,username,policyname,policy_document):
+    role_err=None
+    role_response=None
+    if policyname is None:
+        policyname=get_parameter_name()
+    try:
+        role_response = iam_client.put_user_policy(UserName=username,PolicyName=policyname,PolicyDocument=policy_document)
+    except ClientError as e:
+        role_err = e.response['Error']['Code']
+    return (role_err,role_response,policyname)
+
+def get_s3_client_using_iam_creds():
+    iam_access_key = get_iam_access_key()
+    iam_secret_key = get_iam_secret_key()
+    default_endpoint = get_config_endpoint()
+
+    s3_client_iam_creds = boto3.client('s3',
+                              aws_access_key_id = iam_access_key,
+                              aws_secret_access_key = iam_secret_key,
+                              endpoint_url=default_endpoint,
+                              region_name='',
+                          )
+
+    return s3_client_iam_creds
+
+def create_oidc_provider(iam_client, url, clientidlist, thumbprintlist):
+    oidc_arn = None
+    oidc_error = None
+    if clientidlist is None:
+        clientidlist = []
+    try:
+        oidc_response = iam_client.create_open_id_connect_provider(
+            Url=url,
+            ClientIDList=clientidlist,
+            ThumbprintList=thumbprintlist,
+        )
+        oidc_arn = oidc_response['OpenIDConnectProviderArn']
+        log.debug('created oidc provider %s', oidc_arn)
+    except ClientError as e:
+        oidc_error = e.response['Error']['Code']
+        log.debug('create_open_id_connect_provider failed: %s', oidc_error)
+        # creation failed (the provider may already exist); strip the scheme
+        # from the url and look up the existing provider by its derived arn
+        try:
+            oidc_error = None
+            if url.startswith('http://'):
+                url = url[len('http://'):]
+            elif url.startswith('https://'):
+                url = url[len('https://'):]
+            elif url.startswith('www.'):
+                url = url[len('www.'):]
+            oidc_arn = 'arn:aws:iam:::oidc-provider/{}'.format(url)
+            log.debug('looking up existing oidc provider %s', oidc_arn)
+            oidc_response = iam_client.get_open_id_connect_provider(OpenIDConnectProviderArn=oidc_arn)
+        except ClientError:
+            oidc_arn = None
+    return (oidc_arn, oidc_error)
+
+def get_s3_resource_using_iam_creds():
+    iam_access_key = get_iam_access_key()
+    iam_secret_key = get_iam_secret_key()
+    default_endpoint = get_config_endpoint()
+
+    s3_res_iam_creds = boto3.resource('s3',
+                              aws_access_key_id = iam_access_key,
+                              aws_secret_access_key = iam_secret_key,
+                              endpoint_url=default_endpoint,
+                              region_name='',
+                          )
+
+    return s3_res_iam_creds
+
+@attr(resource='get session token')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='s3 ops only accessible by temporary credentials')
+@attr('test_of_sts')
+@attr('fails_on_dbstore')
+def test_get_session_token():
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    
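+    # policy: deny all s3 actions unless the request is authenticated with temporary (sts) credentials, while still allowing sts:GetSessionToken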
+    user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}"
+    (resp_err,resp,policy_name)=put_user_policy(iam_client,sts_user_id,None,user_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    response=sts_client.get_session_token()
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    s3_client=boto3.client('s3',
+                aws_access_key_id = response['Credentials']['AccessKeyId'],
+                aws_secret_access_key = response['Credentials']['SecretAccessKey'],
+                aws_session_token = response['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+        eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+        finish=s3_client.delete_bucket(Bucket=bucket_name)
+    finally: # clean up user policy even if create_bucket/delete_bucket fails
+        iam_client.delete_user_policy(UserName=sts_user_id,PolicyName=policy_name)
+
+@attr(resource='get session token')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='s3 ops denied by permanent credentials')
+@attr('test_of_sts')
+@attr('fails_on_dbstore')
+def test_get_session_token_permanent_creds_denied():
+    s3bucket_error=None
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    s3_main_access_key=get_main_aws_access_key()
+    s3_main_secret_key=get_main_aws_secret_key()
+    
+    user_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"],\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}},{\"Effect\":\"Allow\",\"Action\":\"sts:GetSessionToken\",\"Resource\":\"*\",\"Condition\":{\"BoolIfExists\":{\"sts:authentication\":\"false\"}}}]}"
+    (resp_err,resp,policy_name)=put_user_policy(iam_client,sts_user_id,None,user_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    response=sts_client.get_session_token()
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    s3_client=boto3.client('s3',
+                aws_access_key_id = s3_main_access_key,
+                aws_secret_access_key = s3_main_secret_key,
+                aws_session_token = response['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    eq(s3bucket_error,'AccessDenied')
+    iam_client.delete_user_policy(UserName=sts_user_id,PolicyName=policy_name)
+
+@attr(resource='assume role')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='role policy allows all s3 ops')
+@attr('test_of_sts')
+@attr('fails_on_dbstore')
+def test_assume_role_allow():
+    iam_client=get_iam_client()    
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    
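+    # trust policy: allow the alt user to assume this role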
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+@attr(resource='assume role')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='role policy denies all s3 ops')
+@attr('test_of_sts')
+@attr('fails_on_dbstore')
+def test_assume_role_deny():
+    s3bucket_error=None
+    iam_client=get_iam_client()    
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    eq(s3bucket_error,'AccessDenied')
+
+@attr(resource='assume role')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='creds expire so all s3 ops fails')
+@attr('test_of_sts')
+@attr('fails_on_dbstore')
+def test_assume_role_creds_expiry():
+    s3bucket_error=None
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/"+sts_user_id+"\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,DurationSeconds=900)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
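+    # wait out the 900-second session so the temporary credentials expire before the s3 call below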
+    time.sleep(900)
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    eq(s3bucket_error,'AccessDenied')
+
+@attr(resource='assume role')
+@attr(method='head')
+@attr(operation='check')
+@attr(assertion='HEAD fails with 403 when role policy denies s3:ListBucket')
+@attr('test_of_sts')
+@attr('fails_on_dbstore')
+def test_assume_role_deny_head_nonexistent():
+    # create a bucket with the normal s3 client
+    bucket_name = get_new_bucket_name()
+    get_client().create_bucket(Bucket=bucket_name)
+
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+
+    policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}'
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    # allow GetObject but deny ListBucket
+    role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":"s3:GetObject","Principal":"*","Resource":"arn:aws:s3:::*"}}'
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='')
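+    # without s3:ListBucket permission, S3 masks a missing key as 403 AccessDenied instead of 404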
+    status=200
+    try:
+        s3_client.head_object(Bucket=bucket_name, Key='nonexistent')
+    except ClientError as e:
+        status = e.response['ResponseMetadata']['HTTPStatusCode']
+    eq(status,403)
+
+@attr(resource='assume role')
+@attr(method='head')
+@attr(operation='check')
+@attr(assertion='HEAD fails with 404 when role policy allows s3:ListBucket')
+@attr('test_of_sts')
+@attr('fails_on_dbstore')
+def test_assume_role_allow_head_nonexistent():
+    # create a bucket with the normal s3 client
+    bucket_name = get_new_bucket_name()
+    get_client().create_bucket(Bucket=bucket_name)
+
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    sts_user_id=get_alt_user_id()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+
+    policy_document = '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/'+sts_user_id+'"]},"Action":["sts:AssumeRole"]}]}'
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    # allow GetObject and ListBucket
+    role_policy = '{"Version":"2012-10-17","Statement":{"Effect":"Allow","Action":["s3:GetObject","s3:ListBucket"],"Principal":"*","Resource":"arn:aws:s3:::*"}}'
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='')
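+    # with s3:ListBucket allowed, a missing key reports its true 404 NotFound status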
+    status=200
+    try:
+        s3_client.head_object(Bucket=bucket_name, Key='nonexistent')
+    except ClientError as e:
+        status = e.response['ResponseMetadata']['HTTPStatusCode']
+    eq(status,404)
+
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role through web token')
+@attr('webidentity_test')
+@attr('token_claims_trust_policy_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity():
+    check_webidentity()
+    iam_client=get_iam_client()    
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+    
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+    
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+    
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+    
+    s3_client = boto3.client('s3',
+               aws_access_key_id = resp['Credentials']['AccessKeyId'],
+               aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+               aws_session_token = resp['Credentials']['SessionToken'],
+               endpoint_url=default_endpoint,
+               region_name='',
+               )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+    
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+'''
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assume_role_with_web_token creds expire')
+@attr('webidentity_test')
+def test_assume_role_with_web_identity_invalid_webtoken():
+    resp_error=None
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name+'')
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=""
+    try:
+        resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken='abcdef')
+    except InvalidIdentityTokenException as e:
+        log.debug('{}'.format(resp))
+        log.debug('{}'.format(e.response.get("Error", {}).get("Code")))
+        log.debug('{}'.format(e))
+        resp_error = e.response.get("Error", {}).get("Code")
+    eq(resp_error,'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+'''
+
+#######################
+# Session Policy Tests
+#######################
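+
+# These tests exercise how a session policy passed to
+# assume_role_with_web_identity combines with the role policy and, where
+# one is attached, a bucket policy: the session's effective permissions are
+# roughly the intersection of the role policy and the session policy, an
+# explicit Deny in either always wins, and a bucket policy can extend
+# access when it names the assumed-role session ARN.
+
+# A minimal sketch of that evaluation rule, assuming the simplified inputs
+# named here (illustrative helper only; nothing below calls it):
+def _session_policy_allows(role_allows, session_allows,
+                           bucket_grants_to_session_arn=False,
+                           explicit_deny=False):
+    if explicit_deny:
+        # an explicit Deny in any applicable policy overrides every Allow
+        return False
+    # allowed via the role/session intersection, or via a bucket policy
+    # grant made directly to the assumed-role session ARN
+    return (role_allows and session_allows) or bucket_grants_to_session_arn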
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='checking session policy working for two different buckets')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_check_on_different_buckets():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::test2\",\"arn:aws:s3:::test2/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
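+    # the role policy covers only bucket test2 while the session policy covers only test1,
+    # so neither create succeeds and the final put fails because test1 was never created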
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_name_1 = 'test1'
+    s3bucket_error = None
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name_1)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    eq(s3bucket_error, 'AccessDenied')
+
+    bucket_name_2 = 'test2'
+    s3bucket_error = None
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name_2)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    eq(s3bucket_error, 'AccessDenied')
+
+    bucket_body = 'please-write-something'
+    s3_put_obj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    eq(s3_put_obj_error,'NoSuchBucket')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking session policy working for same bucket')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_check_on_same_bucket():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
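+    # the role policy allows s3:* everywhere; the session policy narrows the session
+    # to get/put on test1, which still permits the PutObject below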
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='checking put_obj op denial')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_check_put_obj_denial():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
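+    # the session policy grants only s3:GetObject, so the PutObject below is denied
+    # even though the role policy allows s3:*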
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    eq(s3_put_obj_error, 'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='checking put_obj working by swapping policies')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_swapping_role_policy_and_session_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\",\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
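+    # policies are swapped relative to the previous test: the narrow get/put policy is now
+    # on the role and the session policy is s3:*; the intersection still permits the PutObject below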
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking put_obj working by setting different permissions to role and session policy')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_check_different_op_permissions():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
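+    # the role policy allows only PutObject and the session policy only GetObject;
+    # the intersection is empty, so the PutObject below is denied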
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    eq(s3_put_obj_error, 'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking op behaviour with deny effect')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_check_with_deny_effect():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
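+    # an explicit Deny in the role policy overrides the session policy's Allow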
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    eq(s3_put_obj_error, 'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking put_obj working with deny and allow on same op')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_check_with_deny_on_same_op():
+    check_webidentity()
+    iam_client=get_iam_client()
+    iam_access_key=get_iam_access_key()
+    iam_secret_key=get_iam_secret_key()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy_new = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy_new)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client_iam_creds = get_s3_client_using_iam_creds()
+
+    bucket_name_1 = 'test1'
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
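+    # an explicit Deny in the session policy overrides the role policy's Allow for the same action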
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Deny\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    eq(s3_put_obj_error, 'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking op when bucket policy has role arn')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_bucket_policy_role_arn():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolearn = "arn:aws:iam:::role/" + general_role_name
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "{}".format(rolearn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
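+    # the bucket policy grants to the role ARN; that does not widen the session policy,
+    # so only the PutObject permitted by the session policy will succeed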
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3object_error = None
+    try:
+        obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3object_error = e.response.get("Error", {}).get("Code")
+    eq(s3object_error, 'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='checking op when bucket policy has session arn')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_bucket_policy_session_arn():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "{}".format(rolesessionarn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+    })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
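+    # the bucket policy grants to the assumed-role session ARN; such grants apply
+    # independently of the session policy, so GetObject succeeds as well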
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-1.txt")
+    eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking copy object op with role, session and bucket policy')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_copy_object():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Allow",
+        "Principal": {"AWS": "{}".format(rolesessionarn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+     })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
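+    # server-side copy needs s3:GetObject on the source and s3:PutObject on the destination;
+    # the bucket policy grant to the session ARN supplies the GetObject half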
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    copy_source = {
+    'Bucket': bucket_name_1,
+    'Key': 'test-1.txt'
+    }
+
+    s3_client.copy(copy_source, bucket_name_1, "test-2.txt")
+
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name_1, Key="test-2.txt")
+    eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking op is denied when no role policy')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_no_bucket_role_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
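+    # the role has no permission policy attached, so the session policy alone grants nothing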
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\",\"s3:GetObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+    s3putobj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3putobj_error = e.response.get("Error", {}).get("Code")
+    eq(s3putobj_error, 'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='checking op is denied when resource policy denies')
+@attr('webidentity_test')
+@attr('session_policy')
+@attr('fails_on_dbstore')
+def test_session_policy_bucket_policy_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    aud=get_aud()
+    token=get_token()
+    realm=get_realm_name()
+
+    url = 'http://localhost:8080/auth/realms/{}'.format(realm)
+    thumbprintlist = [thumbprint]
+    (oidc_arn,oidc_error) = create_oidc_provider(iam_client, url, None, thumbprintlist)
+    if oidc_error is not None:
+        raise RuntimeError('Unable to create/get openid connect provider {}'.format(oidc_error))
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_arn+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":app_id\":\""+aud+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":[\"*\"]}}"
+
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3client_iamcreds = get_s3_client_using_iam_creds()
+    bucket_name_1 = 'test1'
+    s3bucket = s3client_iamcreds.create_bucket(Bucket=bucket_name_1)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resource1 = "arn:aws:s3:::" + bucket_name_1
+    resource2 = "arn:aws:s3:::" + bucket_name_1 + "/*"
+    rolesessionarn = "arn:aws:iam:::assumed-role/" + general_role_name + "/" + role_session_name
+    bucket_policy = json.dumps(
+    {
+        "Version": "2012-10-17",
+        "Statement": [{
+        "Effect": "Deny",
+        "Principal": {"AWS": "{}".format(rolesessionarn)},
+        "Action": ["s3:GetObject","s3:PutObject"],
+        "Resource": [
+            "{}".format(resource1),
+            "{}".format(resource2)
+          ]
+        }]
+    })
+    s3client_iamcreds.put_bucket_policy(Bucket=bucket_name_1, Policy=bucket_policy)
+    session_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":[\"s3:PutObject\"],\"Resource\":[\"arn:aws:s3:::test1\",\"arn:aws:s3:::test1/*\"]}}"
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token,Policy=session_policy)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+                aws_access_key_id = resp['Credentials']['AccessKeyId'],
+                aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+                aws_session_token = resp['Credentials']['SessionToken'],
+                endpoint_url=default_endpoint,
+                region_name='',
+                )
+    bucket_body = 'this is a test file'
+
+    s3putobj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name_1, Key="test-1.txt")
+    except ClientError as e:
+        s3putobj_error = e.response.get("Error", {}).get("Code")
+    eq(s3putobj_error, 'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_arn
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role using web token using sub in trust policy')
+@attr('webidentity_test')
+@attr('token_claims_trust_policy_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_with_sub():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    sub=get_sub()
+    token=get_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
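+    # the trust policy matches the token's sub claim rather than app_id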
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":sub\":\""+sub+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role using web token using azp in trust policy')
+@attr('webidentity_test')
+@attr('token_claims_trust_policy_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_with_azp():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    azp=get_azp()
+    token=get_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
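+    # the trust policy matches the token's azp (authorized party) claim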
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\"],\"Condition\":{\"StringEquals\":{\"localhost:8080/auth/realms/"+realm+":azp\":\""+azp+"\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role using web token using aws:RequestTag in trust policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_request_tag_trust_policy_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_with_request_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+    Url='http://localhost:8080/auth/realms/{}'.format(realm),
+    ThumbprintList=[
+        thumbprint,
+    ],
+    )
+
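+    # sts:TagSession in the trust policy lets the aws:RequestTag condition
+    # match tags carried in the web identity token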
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\"}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+    OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with aws:PrincipalTag in role policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_principal_tag_role_policy_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_with_principal_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
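+    #aws:PrincipalTag matches the tags attached to the principal making the request, i.e. the session tags transferred onto the assumed-role session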
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:PrincipalTag/Department\":\"Engineering\"}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with ForAllValues:StringEquals on aws:PrincipalTag in role policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_principal_tag_role_policy_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_for_all_values():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
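+    #ForAllValues: every Department value attached to the principal must appear in this list for the condition to match; Engineering and Marketing cover the session tags here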
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\",\"Marketing\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='denying access when ForAllValues:StringEquals on aws:PrincipalTag does not cover all principal tag values')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_principal_tag_role_policy_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_for_all_values_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    #ForAllValues: the condition is true only if every value of the key in the request matches at least one value in the policy; the web token carries Department values beyond Engineering, so the create_bucket below should be denied
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAllValues:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket_error = None
+    try:
+        s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    except ClientError as e:
+        s3bucket_error = e.response.get("Error", {}).get("Code")
+    eq(s3bucket_error,'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with aws:TagKeys in trust policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_tag_keys_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_tag_keys_trust_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
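+    #aws:TagKeys matches the keys (not the values) of the tags in the request, so assuming the role only requires a tag with the Department key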
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":\"Department\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"ForAnyValue:StringEquals\":{\"aws:PrincipalTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='get')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with aws:TagKeys in role permission policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_tag_keys_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_tag_keys_role_policy():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
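+    #here aws:TagKeys is evaluated in the role permission policy rather than the trust policy; s3 access is allowed as long as the session carries a tag with the Department key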
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"aws:TagKeys\":[\"Department\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+    bkt = s3_client.delete_bucket(Bucket=bucket_name)
+    eq(bkt['ResponseMetadata']['HTTPStatusCode'],204)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with s3:ResourceTag in role permission policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_resource_tags_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_resource_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
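+    #s3:ResourceTag matches the tags attached to the bucket or object being accessed, so s3 operations are only allowed on resources tagged Department=Engineering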
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with s3:ResourceTag with missing tags on bucket')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_resource_tags_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_resource_tag_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
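+    #note that no tags are attached to this bucket, so the s3:ResourceTag condition in the role policy below can never match and the put_object should be denied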
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    eq(s3_put_obj_error,'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with s3:ResourceTag with wrong resource tag in policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_resource_tags_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_wrong_resource_tag_deny():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
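+    #tag the bucket with a value that does not satisfy the s3:ResourceTag condition in the role policy, so the put_object below should be denied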
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'WrongResourcetag'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj_error = None
+    try:
+        s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    except ClientError as e:
+        s3_put_obj_error = e.response.get("Error", {}).get("Code")
+    eq(s3_put_obj_error,'AccessDenied')
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with s3:ResourceTag matching aws:PrincipalTag in role permission policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_resource_tags_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_resource_tag_princ_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
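+    #${aws:PrincipalTag/Department} is a policy variable: the resource tag must equal one of the Department tags carried by the assumed-role session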
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    tags = 'Department=Engineering&Department=Marketing'
+    key = "test-1.txt"
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key=key, Tagging=tags)
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key=key)
+    eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with s3:ResourceTag used to test copy object')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_resource_tags_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_resource_tag_copy_obj():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    #create two buckets and attach the same tags to both
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
+
+    copy_bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=copy_bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(copy_bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"aws:RequestTag/Department\":\"Engineering\"}}}]}"
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"${aws:PrincipalTag/Department}\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    tags = 'Department=Engineering'
+    key = "test-1.txt"
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key=key, Tagging=tags)
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    #copy to same bucket
+    copy_source = {
+        'Bucket': bucket_name,
+        'Key': 'test-1.txt'
+    }
+
+    s3_client.copy(copy_source, bucket_name, "test-2.txt")
+
+    s3_get_obj = s3_client.get_object(Bucket=bucket_name, Key="test-2.txt")
+    eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    #copy to another bucket
+    copy_source = {
+        'Bucket': bucket_name,
+        'Key': 'test-1.txt'
+    }
+
+    s3_client.copy(copy_source, copy_bucket_name, "test-1.txt")
+
+    s3_get_obj = s3_client.get_object(Bucket=copy_bucket_name, Key="test-1.txt")
+    eq(s3_get_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
+
+@attr(resource='assume role with web identity')
+@attr(method='put')
+@attr(operation='check')
+@attr(assertion='assuming role using web token with iam:ResourceTag in role trust policy')
+@attr('webidentity_test')
+@attr('abac_test')
+@attr('token_role_tags_test')
+@attr('fails_on_dbstore')
+def test_assume_role_with_web_identity_role_resource_tag():
+    check_webidentity()
+    iam_client=get_iam_client()
+    sts_client=get_sts_client()
+    default_endpoint=get_config_endpoint()
+    role_session_name=get_parameter_name()
+    thumbprint=get_thumbprint()
+    user_token=get_user_token()
+    realm=get_realm_name()
+
+    s3_res_iam_creds = get_s3_resource_using_iam_creds()
+
+    s3_client_iam_creds = s3_res_iam_creds.meta.client
+
+    bucket_name = get_new_bucket_name()
+    s3bucket = s3_client_iam_creds.create_bucket(Bucket=bucket_name)
+    eq(s3bucket['ResponseMetadata']['HTTPStatusCode'],200)
+
+    bucket_tagging = s3_res_iam_creds.BucketTagging(bucket_name)
+    Set_Tag = bucket_tagging.put(Tagging={'TagSet':[{'Key':'Department', 'Value': 'Engineering'},{'Key':'Department', 'Value': 'Marketing'}]})
+
+    oidc_response = iam_client.create_open_id_connect_provider(
+        Url='http://localhost:8080/auth/realms/{}'.format(realm),
+        ThumbprintList=[
+            thumbprint,
+        ],
+    )
+
+    #iam:ResourceTag refers to tags attached to the role itself, so the role can be assumed only when it carries a tag matching the trust policy condition.
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Federated\":[\""+oidc_response["OpenIDConnectProviderArn"]+"\"]},\"Action\":[\"sts:AssumeRoleWithWebIdentity\",\"sts:TagSession\"],\"Condition\":{\"StringEquals\":{\"iam:ResourceTag/Department\":\"Engineering\"}}}]}"
+    tags_list = [
+        {'Key':'Department','Value':'Engineering'},
+        {'Key':'Department','Value':'Marketing'}
+    ]
+
+    (role_error,role_response,general_role_name)=create_role(iam_client,'/',None,policy_document,None,None,None,tags_list)
+    eq(role_response['Role']['Arn'],'arn:aws:iam:::role/'+general_role_name)
+
+    role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::*\",\"Condition\":{\"StringEquals\":{\"s3:ResourceTag/Department\":[\"Engineering\"]}}}}"
+    (role_err,response)=put_role_policy(iam_client,general_role_name,None,role_policy)
+    eq(response['ResponseMetadata']['HTTPStatusCode'],200)
+
+    resp=sts_client.assume_role_with_web_identity(RoleArn=role_response['Role']['Arn'],RoleSessionName=role_session_name,WebIdentityToken=user_token)
+    eq(resp['ResponseMetadata']['HTTPStatusCode'],200)
+
+    s3_client = boto3.client('s3',
+        aws_access_key_id = resp['Credentials']['AccessKeyId'],
+        aws_secret_access_key = resp['Credentials']['SecretAccessKey'],
+        aws_session_token = resp['Credentials']['SessionToken'],
+        endpoint_url=default_endpoint,
+        region_name='',
+        )
+
+    bucket_body = 'this is a test file'
+    s3_put_obj = s3_client.put_object(Body=bucket_body, Bucket=bucket_name, Key="test-1.txt")
+    eq(s3_put_obj['ResponseMetadata']['HTTPStatusCode'],200)
+
+    oidc_remove=iam_client.delete_open_id_connect_provider(
+        OpenIDConnectProviderArn=oidc_response["OpenIDConnectProviderArn"]
+    )
diff --git a/s3tests_boto3/functional/test_utils.py b/s3tests_boto3/functional/test_utils.py
new file mode 100644 (file)
index 0000000..59c3c74
--- /dev/null
+++ b/s3tests_boto3/functional/test_utils.py
@@ -0,0 +1,11 @@
+from nose.tools import eq_ as eq
+
+from . import utils
+
+def test_generate():
+    FIVE_MB = 5 * 1024 * 1024
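+    # exercise sizes on and around the 5 MB default part size of generate_random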
+    eq(len(''.join(utils.generate_random(0))), 0)
+    eq(len(''.join(utils.generate_random(1))), 1)
+    eq(len(''.join(utils.generate_random(FIVE_MB - 1))), FIVE_MB - 1)
+    eq(len(''.join(utils.generate_random(FIVE_MB))), FIVE_MB)
+    eq(len(''.join(utils.generate_random(FIVE_MB + 1))), FIVE_MB + 1)
diff --git a/s3tests_boto3/functional/utils.py b/s3tests_boto3/functional/utils.py
new file mode 100644 (file)
index 0000000..4d9dc49
--- /dev/null
+++ b/s3tests_boto3/functional/utils.py
@@ -0,0 +1,49 @@
+import random
+import requests
+import string
+import time
+
+from nose.tools import eq_ as eq
+
+def assert_raises(excClass, callableObj, *args, **kwargs):
+    """
+    Like unittest.TestCase.assertRaises, but returns the exception.
+    """
+    try:
+        callableObj(*args, **kwargs)
+    except excClass as e:
+        return e
+    else:
+        if hasattr(excClass, '__name__'):
+            excName = excClass.__name__
+        else:
+            excName = str(excClass)
+        raise AssertionError("%s not raised" % excName)
+
+def generate_random(size, part_size=5*1024*1024):
+    """
+    Generate `size` bytes of semi-random data, yielded in parts of at
+    most `part_size` bytes. (each MB is actually a repetition of the
+    first KB, which keeps generation cheap)
+    """
+    chunk = 1024
+    allowed = string.ascii_letters
+    for x in range(0, size, part_size):
+        strpart = ''.join([allowed[random.randint(0, len(allowed) - 1)] for _ in range(chunk)])
+        this_part_size = min(size - x, part_size)
+        # repeat the 1 KB pattern to fill the part, padding out any remainder
+        s = strpart * (this_part_size // chunk) + strpart[:(this_part_size % chunk)]
+        yield s
+
+def _get_status(response):
+    status = response['ResponseMetadata']['HTTPStatusCode']
+    return status
+
+def _get_status_and_error_code(response):
+    status = response['ResponseMetadata']['HTTPStatusCode']
+    error_code = response['Error']['Code']
+    return status, error_code
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..a8ca207
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+from setuptools import setup, find_packages
+
+setup(
+    name='s3tests',
+    version='0.0.1',
+    packages=find_packages(),
+
+    author='Tommi Virtanen',
+    author_email='tommi.virtanen@dreamhost.com',
+    description='Unofficial Amazon AWS S3 compatibility tests',
+    license='MIT',
+    keywords='s3 web testing',
+
+    install_requires=[
+        'boto >=2.0b4',
+        'boto3 >=1.0.0',
+        'PyYAML',
+        'munch >=2.0.0',
+        'gevent >=1.0',
+        'isodate >=0.4.4',
+        ],
+    )