host=endpoint.hostname,
calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
+ connection3 = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key3,
+ aws_secret_access_key=secret_key3,
+ is_secure=False,
+ port=endpoint.port,
+ host=endpoint.hostname,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
acc = usage_acc()
rl = requestlog_queue(acc.generate_make_entry())
connection.set_request_hook(rl)
connection2.set_request_hook(rl)
+ connection3.set_request_hook(rl)
# legend (test cases can be easily grep-ed out)
# TESTCASE 'testname','object','method','operation','assertion'
assert entry['successful_ops'] > 0
# TESTCASE 'user-rename', 'user', 'rename', 'existing user', 'new user', 'succeeds'
- # create a new user
+ # create a new user user3
(err, out) = rgwadmin(ctx, client, [
'user', 'create',
'--uid', user3,
'--display-name', display_name3,
'--access-key', access_key3,
'--secret', secret_key3
],
check_status=True)
# create a bucket
- bucket = connection.create_bucket(bucket_name + '6')
+ bucket = connection3.create_bucket(bucket_name + '6')
rl.log_and_clear("create_bucket", bucket_name + '6', user3)
# create object
- object_name = 'thirteen'
- key = boto.s3.key.Key(bucket, object_name)
- key.set_contents_from_string(object_name)
+ object_name1 = 'thirteen'
+ key1 = boto.s3.key.Key(bucket, object_name1)
+ key1.set_contents_from_string(object_name1)
rl.log_and_clear("put_obj", bucket_name + '6', user3)
# rename user3 to user4
(err, out) = rgwadmin(ctx, client, ['user', 'rename', '--uid', user3, '--new-uid', user4], check_status=True)
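# give the rename a few seconds to take effect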
time.sleep(5)
# get bucket and object to test if user keys are preserved
- bucket = connection.get_bucket(bucket_name + '6')
- s = key.get_contents_as_string()
+ bucket = connection3.get_bucket(bucket_name + '6')
+ s = key1.get_contents_as_string()
rl.log_and_clear("get_obj", bucket_name + '6', user4)
- assert s == object_name
+ assert s == object_name1
+
+ # TESTCASE 'user-rename', 'user', 'rename', 'existing user', 'another existing user', 'fails'
+ # create a new user user2
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--access-key', access_key2,
+ '--secret', secret_key2,
+ '--max-buckets', '4'
+ ],
+ check_status=True)
+
+ # create a bucket
+ bucket = connection2.create_bucket(bucket_name + '7')
+
+ rl.log_and_clear("create_bucket", bucket_name + '7', user2)
+
+ # create object
+ object_name2 = 'fourteen'
+ key2 = boto.s3.key.Key(bucket, object_name2)
+ key2.set_contents_from_string(object_name2)
+ rl.log_and_clear("put_obj", bucket_name + '7', user2)
+
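+ # renaming user4 to the already existing uid user2 should fail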
+ (err, out) = rgwadmin(ctx, client, ['user', 'rename', '--uid', user4, '--new-uid', user2])
+ assert err
+
+ # test that user2 and user4 can still access their buckets and objects after the failed rename
+ bucket = connection3.get_bucket(bucket_name + '6')
+ s = key1.get_contents_as_string()
+ rl.log_and_clear("get_obj", bucket_name + '6', user4)
+ assert s == object_name1
+
+ bucket = connection2.get_bucket(bucket_name + '7')
+ s = key2.get_contents_as_string()
+ rl.log_and_clear("get_obj", bucket_name + '7', user2)
+ assert s == object_name2
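+
+ # remove user4 and user2, purging their data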
(err, out) = rgwadmin(ctx, client,
['user', 'rm', '--uid', user4, '--purge-data' ],
check_status=True)
+ (err, out) = rgwadmin(ctx, client,
+ ['user', 'rm', '--uid', user2, '--purge-data' ],
+ check_status=True)
+
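+ # give the user removals a few seconds to take effect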
+ time.sleep(5)
+
# should be all through with connection. (anything using connection
# should be BEFORE the usage stuff above.)
rl.log_and_clear("(before-close)", '-', '-', ignore_this_entry)