r = requests.post(url, files=payload, verify=get_config_ssl_verify())
assert r.status_code == 400
+
+def _set_log_bucket_policy_tenant(client, log_tenant, log_bucket_name, src_tenant, src_user, src_buckets, log_prefixes):
+ statements = []
+ for j in range(len(src_buckets)):
+ if len(log_prefixes) == 1:
+ prefix = log_prefixes[0]
+ else:
+ prefix = log_prefixes[j]
+ statements.append({
+ "Sid": "S3ServerAccessLogsPolicy",
+ "Effect": "Allow",
+ "Principal": {"Service": "logging.s3.amazonaws.com"},
+ "Action": ["s3:PutObject"],
+ "Resource": "arn:aws:s3::{}:{}/{}".format(log_tenant, log_bucket_name, prefix),
+ "Condition": {
+ "ArnLike": {"aws:SourceArn": "arn:aws:s3::{}:{}".format(src_tenant, src_buckets[j])},
+ "StringEquals": {
+ "aws:SourceAccount": "{}${}".format(src_tenant, src_user) if src_tenant else src_user
+ }
+ }
+ })
+
+ policy_document = json.dumps({
+ "Version": "2012-10-17",
+ "Statement": statements
+ })
+
+ result = client.put_bucket_policy(Bucket=log_bucket_name, Policy=policy_document)
+ assert(result['ResponseMetadata']['HTTPStatusCode'] == 204)
+
+
def _set_log_bucket_policy(client, log_bucket_name, src_buckets, log_prefixes):
    """Convenience wrapper for the common case: both the source buckets and
    the log bucket live on the default tenant and are owned by the main user."""
    _set_log_bucket_policy_tenant(client, "", log_bucket_name,
                                  "", get_main_user_id(),
                                  src_buckets, log_prefixes)
+
+
def _has_bucket_logging_extension():
    """Probe whether the RGW bucket-logging extension field 'LoggingType'
    is accepted.

    Returns False when boto3 rejects the extension field client side
    (ParamValidationError). Any server-side reply — including the error
    expected here because neither bucket exists — means the field passed
    client-side validation, so the extension is considered available.
    """
    src_bucket_name = get_new_bucket_name()
    log_bucket_name = get_new_bucket_name()
    client = get_client()
    log_prefix = 'log/'
    logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': log_prefix, 'LoggingType': 'Journal'}
    try:
        client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
            'LoggingEnabled': logging_enabled,
        })
    except ParamValidationError:
        # 'LoggingType' is not part of the vanilla API surface
        return False
    except ClientError:
        # request reached the server: should fail on non-existing bucket,
        # but that still proves the extension field was accepted
        return True

    return True
+
def _has_taget_object_key_format():
    """Probe whether 'TargetObjectKeyFormat' is accepted in a bucket-logging
    configuration.

    Returns False when boto3 rejects the field client side
    (ParamValidationError). A server-side ClientError (the buckets used here
    do not exist) still means the field passed client-side validation.

    Note: the previous implementation used ``finally: return True``, which
    overrode the ``return False`` from the ParamValidationError handler (a
    ``return`` in ``finally`` supersedes any pending return or exception),
    so the probe could never report the format as unsupported.
    """
    src_bucket_name = get_new_bucket_name()
    log_bucket_name = get_new_bucket_name()
    client = get_client()
    log_prefix = 'log/'
    logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': log_prefix, 'TargetObjectKeyFormat': {'SimplePrefix': {}}}
    try:
        client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
            'LoggingEnabled': logging_enabled,
        })
    except ParamValidationError:
        return False
    except ClientError:
        # should fail on non-existing bucket; the field itself was accepted
        return True
    return True
import shlex
@pytest.mark.bucket_logging
def test_put_bucket_logging():
+ has_extensions = _has_bucket_logging_extension()
+ has_key_format = _has_taget_object_key_format()
src_bucket_name = get_new_bucket_name()
src_bucket = get_new_bucket_resource(name=src_bucket_name)
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
- has_extensions = _has_bucket_logging_extension()
- has_key_format = _has_taget_object_key_format()
-
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
# minimal configuration
logging_enabled = {
- 'TargetBucket': log_bucket_name,
- 'TargetPrefix': 'log/'
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': prefix
}
if has_extensions:
# default value for key prefix is returned
logging_enabled['TargetObjectKeyFormat'] = {'SimplePrefix': {}}
assert response['LoggingEnabled'] == logging_enabled
-
+
if has_key_format:
# with simple target object prefix
logging_enabled = {
- 'TargetBucket': log_bucket_name,
+ 'TargetBucket': log_bucket_name,
'TargetPrefix': 'log/',
'TargetObjectKeyFormat': {
'SimplePrefix': {}
logging_enabled['LoggingType'] = 'Standard'
logging_enabled['RecordsBatchSize'] = 0
assert response['LoggingEnabled'] == logging_enabled
-
+
# with partitioned target object prefix
logging_enabled = {
- 'TargetBucket': log_bucket_name,
+ 'TargetBucket': log_bucket_name,
'TargetPrefix': 'log/',
'TargetObjectKeyFormat': {
'PartitionedPrefix': {
logging_enabled['LoggingType'] = 'Standard'
logging_enabled['RecordsBatchSize'] = 0
assert response['LoggingEnabled'] == logging_enabled
-
+
# with target grant (not implemented in RGW)
main_display_name = get_main_display_name()
main_user_id = get_main_user_id()
logging_enabled = {
- 'TargetBucket': log_bucket_name,
+ 'TargetBucket': log_bucket_name,
'TargetPrefix': 'log/',
'TargetGrants': [{'Grantee': {'DisplayName': main_display_name, 'ID': main_user_id,'Type': 'CanonicalUser'},'Permission': 'FULL_CONTROL'}]
}
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
-
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
+
logging_enabled = {
- 'TargetBucket': log_bucket_name,
+ 'TargetBucket': log_bucket_name,
'LoggingType': log_type,
- 'TargetPrefix': 'log/',
+ 'TargetPrefix': prefix,
'ObjectRollTime': expected_object_roll_time,
'TargetObjectKeyFormat': {'SimplePrefix': {}},
'RecordsBatchSize': 0,
- 'Filter':
+ 'Filter':
{
'Key': {
'FilterRules': [
print('TODO')
else:
assert False, 'unknown log type: %s' % log_type
-
+
names = []
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
if log_type == 'Standard':
# standard log records are not filtered
names.append(name)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
-
+
for j in range(num_keys):
name = 'test/'+'myobject'+str(j)+'.txt'
names.append(name)
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
-
+
num_buckets = 5
buckets = []
+ log_prefixes = []
longer_time = expected_object_roll_time*10
for j in range(num_buckets):
src_bucket_name = get_new_bucket_name()
src_bucket = get_new_bucket_resource(name=src_bucket_name)
- logging_enabled = {'TargetBucket': log_bucket_name,
- 'ObjectRollTime': longer_time, 'LoggingType': logging_type}
+ buckets.append(src_bucket_name)
if single_prefix:
- logging_enabled['TargetPrefix'] = 'log/'
+ log_prefixes.append('log/')
else:
- logging_enabled['TargetPrefix'] = src_bucket_name+'/'
+ log_prefixes.append(src_bucket_name+'/')
- response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ _set_log_bucket_policy(client, log_bucket_name, buckets, log_prefixes)
+
+ for j in range(num_buckets):
+ logging_enabled = {'TargetBucket': log_bucket_name,
+ 'ObjectRollTime': longer_time,
+ 'LoggingType': logging_type,
+ 'TargetPrefix': log_prefixes[j]
+ }
+
+ response = client.put_bucket_logging(Bucket=buckets[j], BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- buckets.append(src_bucket_name)
num_keys = 10
src_names = []
t = []
for src_bucket_name in buckets:
if concurrency:
- thr = threading.Thread(target = client.post_bucket_logging,
+ thr = threading.Thread(target = client.post_bucket_logging,
kwargs={'Bucket': src_bucket_name})
thr.start()
t.append(thr)
# because flushing itself will be logged
# and the next flush will commit the log
break
-
+
_do_wait_completion(t)
response = client.list_objects_v2(Bucket=log_bucket_name)
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
found = False
- for src_bucket_name in buckets:
- if single_prefix:
- prefix = 'log/'
- else:
- prefix = src_bucket_name+'/'
+ for j in range(num_buckets):
+ prefix = log_prefixes[j]
if key.startswith(prefix):
found = True
- assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_names, logging_type, num_keys)
- assert _verify_records(body, src_bucket_name, 'REST.DELETE.OBJECT', src_names, logging_type, num_keys)
+ assert _verify_records(body, buckets[j], 'REST.PUT.OBJECT', src_names, logging_type, num_keys)
+ assert _verify_records(body, buckets[j], 'REST.DELETE.OBJECT', src_names, logging_type, num_keys)
assert found
log_bucket_name1 = get_new_bucket_name()
log_bucket1 = get_new_bucket_resource(name=log_bucket_name1)
client = get_client()
-
+ prefix = 'log/'
+
# invalid source bucket
try:
response = client.put_bucket_logging(Bucket=src_bucket_name+'kaboom', BucketLoggingStatus={
- 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': 'log/'},
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': prefix},
})
assert False, 'expected failure'
except ClientError as e:
assert e.response['Error']['Code'] == 'NoSuchBucket'
-
+
# invalid log bucket
try:
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': {'TargetBucket': log_bucket_name1+'kaboom', 'TargetPrefix': 'log/'},
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1+'kaboom', 'TargetPrefix': prefix},
})
assert False, 'expected failure'
except ClientError as e:
assert e.response['Error']['Code'] == 'NoSuchKey'
-
+
# log bucket has bucket logging
log_bucket_name2 = get_new_bucket_name()
log_bucket2 = get_new_bucket_resource(name=log_bucket_name2)
+ _set_log_bucket_policy(client, log_bucket_name1, [log_bucket_name2], [prefix])
response = client.put_bucket_logging(Bucket=log_bucket_name2, BucketLoggingStatus={
- 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': 'log/'},
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': prefix},
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ _set_log_bucket_policy(client, log_bucket_name2, [src_bucket_name], [prefix])
try:
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': {'TargetBucket': log_bucket_name2, 'TargetPrefix': 'log/'},
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name2, 'TargetPrefix': prefix},
})
assert False, 'expected failure'
except ClientError as e:
assert e.response['Error']['Code'] == 'InvalidArgument'
-
+
# invalid partition prefix
if _has_taget_object_key_format():
logging_enabled = {
- 'TargetBucket': log_bucket_name1,
- 'TargetPrefix': 'log/',
+ 'TargetBucket': log_bucket_name1,
+ 'TargetPrefix': prefix,
'TargetObjectKeyFormat': {
'PartitionedPrefix': {
'PartitionDateSource': 'kaboom'
}
}
try:
+ _set_log_bucket_policy(client, log_bucket_name1, [src_bucket_name], [prefix])
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
assert e.response['Error']['Code'] == 'MalformedXML'
# log bucket is the same as source bucket
+ _set_log_bucket_policy(client, src_bucket_name, [src_bucket_name], [prefix])
try:
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': {'TargetBucket': src_bucket_name, 'TargetPrefix': 'log/'},
+ 'LoggingEnabled': {'TargetBucket': src_bucket_name, 'TargetPrefix': prefix},
})
assert False, 'expected failure'
except ClientError as e:
assert e.response['Error']['Code'] == 'InvalidArgument'
-
+
# log bucket is encrypted
_put_bucket_encryption_s3(client, log_bucket_name1)
try:
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': 'log/'},
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': prefix},
})
assert False, 'expected failure'
except ClientError as e:
if _has_bucket_logging_extension():
try:
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
- 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': 'log/', 'LoggingType': 'kaboom'},
+ 'LoggingEnabled': {'TargetBucket': log_bucket_name1, 'TargetPrefix': prefix, 'LoggingType': 'kaboom'},
})
assert False, 'expected failure'
except ClientError as e:
def _bucket_logging_tenant_objects(src_client, src_bucket_name, log_client, log_bucket_name, log_type, op_name):
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
src_client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
expected_count = num_keys
tenant_client = get_tenant_client()
log_bucket = get_new_bucket(client=tenant_client, name=log_bucket_name)
client = get_client()
- logging_enabled = {'TargetBucket': get_tenant_name()+':'+log_bucket_name, 'TargetPrefix': 'log/'}
+ prefix = 'log/'
+ log_tenant = get_tenant_name()
+ _set_log_bucket_policy_tenant(tenant_client, log_tenant, log_bucket_name, "", get_main_user_id(), [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': log_tenant+':'+log_bucket_name, 'TargetPrefix': prefix}
if log_type == 'Journal':
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
_bucket_logging_tenant_objects(client, src_bucket_name, tenant_client, log_bucket_name, log_type, 'REST.PUT.OBJECT')
-
# src is on default tenant and log is on a different tenant with the same name
src_bucket_name = get_new_bucket_name()
src_bucket = get_new_bucket_resource(name=src_bucket_name)
tenant_client = get_tenant_client()
log_bucket = get_new_bucket(client=tenant_client, name=log_bucket_name)
client = get_client()
- logging_enabled = {'TargetBucket': get_tenant_name()+':'+log_bucket_name, 'TargetPrefix': 'log/'}
+ tenant_name = get_tenant_name()
+ _set_log_bucket_policy_tenant(tenant_client, tenant_name, log_bucket_name, "", get_main_user_id(), [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': tenant_name+':'+log_bucket_name, 'TargetPrefix': prefix}
if log_type == 'Journal':
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
src_bucket_name = get_new_bucket_name()
src_bucket = get_new_bucket_resource(name=src_bucket_name)
log_bucket_name = get_new_bucket_name()
- log_bucket = get_new_bucket(client=get_tenant_client(), name=log_bucket_name)
+ log_client = get_tenant_client()
+ log_bucket = get_new_bucket(client=log_client, name=log_bucket_name)
client = get_client()
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ _set_log_bucket_policy_tenant(log_client, get_tenant_name(), log_bucket_name, "", get_main_user_id(), [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if log_type == 'Journal':
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
src_bucket = get_new_bucket(client=client, name=src_bucket_name)
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket(client=client, name=log_bucket_name)
- logging_enabled = {'TargetBucket': get_tenant_name()+':'+log_bucket_name, 'TargetPrefix': 'log/'}
+ tenant_name = get_tenant_name()
+ _set_log_bucket_policy_tenant(client, tenant_name, log_bucket_name, tenant_name, get_tenant_user_id(), [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': tenant_name+':'+log_bucket_name, 'TargetPrefix': prefix}
if log_type == 'Journal':
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
_bucket_logging_tenant_objects(client, src_bucket_name, client, log_bucket_name, log_type, 'REST.PUT.OBJECT')
-
+
# src and log are on the same tenant
# log bucket name is set without tenant
client = get_tenant_client()
src_bucket = get_new_bucket(client=client, name=src_bucket_name)
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket(client=client, name=log_bucket_name)
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ tenant_name = get_tenant_name()
+ _set_log_bucket_policy_tenant(client, tenant_name, log_bucket_name, tenant_name, get_tenant_user_id(), [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if log_type == 'Journal':
logging_enabled['LoggingType'] = 'Journal'
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
_bucket_logging_tenant_objects(client, src_bucket_name, client, log_bucket_name, log_type, 'REST.PUT.OBJECT')
-
+
# src is on tenant and log is on the default tenant
# log bucket name is set with explicit default tenant
client = get_tenant_client()
log_client = get_client()
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket(client=log_client, name=log_bucket_name)
+ _set_log_bucket_policy_tenant(log_client, "", log_bucket_name, get_tenant_name(), get_tenant_user_id(), [src_bucket_name], [prefix])
logging_enabled = {'TargetBucket': ':'+log_bucket_name, 'TargetPrefix': 'log/'}
if log_type == 'Journal':
logging_enabled['LoggingType'] = 'Journal'
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
_bucket_logging_tenant_objects(client, src_bucket_name, log_client, log_bucket_name, log_type, 'REST.PUT.OBJECT')
-
+
try:
# src is on tenant and log is on the default tenant
client = get_tenant_client()
src_bucket_name = get_new_bucket_name()
src_bucket = get_new_bucket(client=client, name=src_bucket_name)
log_bucket_name = get_new_bucket_name()
- log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ log_client = get_client()
+ log_bucket = get_new_bucket(client=log_client, name=log_bucket_name)
+ _set_log_bucket_policy_tenant(log_client, "", log_bucket_name, get_tenant_name(), get_tenant_user_id(), [src_bucket_name], [prefix])
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if log_type == 'Journal':
logging_enabled['LoggingType'] = 'Journal'
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
+
response = client.get_bucket_logging(Bucket=src_bucket_name)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
assert not 'LoggingEnabled' in response
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
- logging_enabled = {'TargetBucket': log_bucket_name,
- 'TargetPrefix': 'log/',
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': log_bucket_name,
+ 'TargetPrefix': prefix,
'LoggingType': 'Standard',
'ObjectRollTime': expected_object_roll_time,
'RecordsBatchSize': 0
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
+
response = client.get_bucket_logging(Bucket=src_bucket_name)
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
has_extensions = _has_bucket_logging_extension()
-
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
+
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
if versioned:
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
assert len(keys) == 1
record_type = 'Standard' if not has_extensions else 'Journal'
-
+
for key in keys:
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client(client_config=botocore.config.Config(max_pool_connections=50))
has_extensions = _has_bucket_logging_extension()
-
+
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
num_keys = 50
t = []
for i in range(num_keys):
- name = 'myobject'+str(i)
- thr = threading.Thread(target = client.put_object,
+ name = 'myobject'+str(i)
+ thr = threading.Thread(target = client.put_object,
kwargs={'Bucket': src_bucket_name, 'Key': name, 'Body': randcontent()})
thr.start()
t.append(thr)
assert len(keys) == 1
record_type = 'Standard' if not has_extensions else 'Journal'
-
+
for key in keys:
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
if versioned:
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
if versioned:
expected_count = 2*num_keys
else:
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
if versioned:
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Standard'
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
if versioned:
expected_count = 2*num_keys
else:
@pytest.mark.bucket_logging
def test_bucket_logging_get_objects():
_bucket_logging_get_objects(False)
-
+
@pytest.mark.bucket_logging
def test_bucket_logging_get_objects_versioned():
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
if versioned:
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name, dst_bucket_name], [prefix])
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
has_extensions = _has_bucket_logging_extension()
-
+
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Standard'
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
if versioned:
expected_count = 2*num_keys
else:
expected_count = num_keys
-
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
client = get_client()
has_extensions = _has_bucket_logging_extension()
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
-
+
src_key = "myobject"
objlen = 30 * 1024 * 1024
(upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
if versioned:
(upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
-
+
_flush_logs(client, src_bucket_name)
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
if versioned:
expected_count = 4 if not has_extensions else 2
else:
(upload_id, data, parts) = _multipart_upload(bucket_name=src_bucket_name, key=src_key, size=objlen)
client.complete_multipart_upload(Bucket=src_bucket_name, Key=src_key, UploadId=upload_id, MultipartUpload={'Parts': parts})
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
if versioned:
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
# minimal configuration
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
logging_enabled['LoggingType'] = 'Journal'
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
if versioned:
expected_count = 2*num_keys
else:
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
logging_enabled = {
- 'TargetBucket': log_bucket_name,
- 'TargetPrefix': 'log/',
+ 'TargetBucket': log_bucket_name,
+ 'TargetPrefix': prefix,
'ObjectRollTime': expected_object_roll_time,
'LoggingType': logging_type
}
})
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
client.head_object(Bucket=src_bucket_name, Key=name)
-
+
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
-
+
_flush_logs(client, src_bucket_name)
-
+
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
log_bucket_name = get_new_bucket_name()
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
-
+
+ prefix = 'log/'
+ _set_log_bucket_policy(client, log_bucket_name, [src_bucket_name], [prefix])
roll_time = 10
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/', 'ObjectRollTime': roll_time}
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': prefix, 'ObjectRollTime': roll_time}
response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
num_keys = 5
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
response = client.list_objects_v2(Bucket=src_bucket_name)
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
len(keys) == 1
-
+
key = keys[0]
assert key.startswith('log/')
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
assert _verify_records(body, src_bucket_name, 'REST.PUT.OBJECT', src_keys, 'Standard', num_keys)
client.delete_object(Bucket=log_bucket_name, Key=key)
-
+
num_keys = 25
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
time.sleep(1)
-
+
response = client.list_objects_v2(Bucket=src_bucket_name)
src_keys = _get_keys(response)
-
+
time.sleep(roll_time)
client.put_object(Bucket=src_bucket_name, Key='myobject', Body=randcontent())
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) > 1
-
+
body = ''
for key in keys:
assert key.startswith('log/')
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
has_extensions = _has_bucket_logging_extension()
-
+
num_buckets = 5
+ log_prefixes = []
buckets = []
bucket_name_prefix = get_new_bucket_name()
for j in range(num_buckets):
src_bucket_name = bucket_name_prefix+str(j)
src_bucket = get_new_bucket_resource(name=src_bucket_name)
- logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': src_bucket_name+'/'}
+ log_prefixes.append(src_bucket_name+'/')
+ buckets.append(src_bucket_name)
+
+ _set_log_bucket_policy(client, log_bucket_name, buckets, log_prefixes)
+
+ for j in range(num_buckets):
+ logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': log_prefixes[j]}
if has_extensions:
logging_enabled['ObjectRollTime'] = expected_object_roll_time
- response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ response = client.put_bucket_logging(Bucket=buckets[j], BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- buckets.append(src_bucket_name)
num_keys = 5
for src_bucket_name in buckets:
for j in range(num_keys):
- name = 'myobject'+str(j)
+ name = 'myobject'+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
for src_bucket_name in buckets:
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) >= num_buckets
-
+
for key in keys:
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
log_bucket = get_new_bucket_resource(name=log_bucket_name)
client = get_client()
has_extensions = _has_bucket_logging_extension()
-
+
num_buckets = 5
buckets = []
+ prefix = 'log/'
bucket_name_prefix = get_new_bucket_name()
for j in range(num_buckets):
src_bucket_name = bucket_name_prefix+str(j)
src_bucket = get_new_bucket_resource(name=src_bucket_name)
+ buckets.append(src_bucket_name)
+
+ _set_log_bucket_policy(client, log_bucket_name, buckets, [prefix])
+
+ for j in range(num_buckets):
# minimal configuration
logging_enabled = {'TargetBucket': log_bucket_name, 'TargetPrefix': 'log/'}
if has_extensions:
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- buckets.append(src_bucket_name)
num_keys = 5
bucket_ind = 0
for src_bucket_name in buckets:
bucket_ind += 1
for j in range(num_keys):
- name = 'myobject'+str(bucket_ind)+str(j)
+ name = 'myobject'+str(bucket_ind)+str(j)
client.put_object(Bucket=src_bucket_name, Key=name, Body=randcontent())
_flush_logs(client, src_bucket_name)
response = client.list_objects_v2(Bucket=log_bucket_name)
keys = _get_keys(response)
assert len(keys) == 1
-
+
key = keys[0]
response = client.get_object(Bucket=log_bucket_name, Key=key)
body = _get_body(response)
num_buckets = 5
buckets = []
+ log_prefixes = []
longer_time = expected_object_roll_time*10
for j in range(num_buckets):
src_bucket_name = get_new_bucket_name()
src_bucket = get_new_bucket_resource(name=src_bucket_name)
- logging_enabled = {'TargetBucket': log_bucket_name,
- 'ObjectRollTime': longer_time, 'LoggingType': logging_type}
+ buckets.append(src_bucket_name)
if single_prefix:
- logging_enabled['TargetPrefix'] = 'log/'
+ log_prefixes.append('log/')
else:
- logging_enabled['TargetPrefix'] = src_bucket_name+'/'
+ log_prefixes.append(src_bucket_name+'/')
- response = client.put_bucket_logging(Bucket=src_bucket_name, BucketLoggingStatus={
+ _set_log_bucket_policy(client, log_bucket_name, buckets, log_prefixes)
+
+ for j in range(num_buckets):
+ logging_enabled = {'TargetBucket': log_bucket_name,
+ 'ObjectRollTime': longer_time,
+ 'LoggingType': logging_type,
+ 'TargetPrefix': log_prefixes[j]}
+ response = client.put_bucket_logging(Bucket=buckets[j], BucketLoggingStatus={
'LoggingEnabled': logging_enabled,
})
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- buckets.append(src_bucket_name)
num_keys = 10
src_names = []
# no concurrecy testing
client.delete_bucket(Bucket=log_bucket_name)
log_bucket = get_new_bucket_resource(name=log_bucket_name)
+ _set_log_bucket_policy(client, log_bucket_name, buckets, log_prefixes)
old_names = src_names
src_names = []
for j in range(num_keys):