else:
assert False, f"Object {name}: found {out_of_order_versions} versions which are out of order"
+def bucket_check_olh(zone, bucket, args = None):
+    cmd = ['bucket', 'check', 'olh', '--bucket', bucket, '--dump-keys', '--hide-progress'] + (args or [])
+    keys, _ = zone.cluster.admin(cmd, read_only=True)
+    return json.loads(keys)
+
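+# run 'radosgw-admin bucket check unlinked' and return the list of unlinked index entries it reports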
+def bucket_check_unlinked(zone, bucket, args = None):
+    cmd = ['bucket', 'check', 'unlinked', '--bucket', bucket, '--dump-keys', '--hide-progress'] + (args or [])
+    keys, _ = zone.cluster.admin(cmd, read_only=True)
+    return json.loads(keys)
+
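+# run 'radosgw-admin lc process' to trigger lifecycle processing for a single bucket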
+def bucket_lc_process(zone, bucket, args = None):
+    cmd = ['lc', 'process', '--bucket', bucket] + (args or [])
+    zone.cluster.admin(cmd)
+
+def test_versioned_lifecycle_deletes():
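+    # expire all versions and delete markers via lifecycle on each zone, then
+    # verify that no orphaned olh or unlinked entries remain in the bucket index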
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    # use a lifecycle policy that removes all versions
+    lifecycle = {
+        "Rules": [{
+            "ID": "ExpireVersions",
+            "Status": "Enabled",
+            "Prefix": "",
+            "Expiration": {
+                "ExpiredObjectDeleteMarker": True
+            },
+            "NoncurrentVersionExpiration": {
+                "NoncurrentDays": 1
+            }
+        }]
+    }
+
+    # create a versioned bucket on each zone
+    buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
+
+    for zone, bucket in zone_bucket:
+        zone.s3_client.put_bucket_versioning(Bucket=bucket.name, VersioningConfiguration={'Status': 'Enabled'})
+        zone.s3_client.put_bucket_lifecycle_configuration(Bucket=bucket.name, LifecycleConfiguration=lifecycle)
+
+    realm_meta_checkpoint(realm)
+
+    # upload lots of objects/versions
+    NUM_OBJECTS = 20
+    NUM_VERSIONS = 20
+
+    for zone, bucket in zone_bucket:
+        for i in range(0, NUM_OBJECTS):
+            key = f"obj-{i}.txt"
+            for vid in range(0, NUM_VERSIONS):
+                zone.s3_client.put_object(Bucket=bucket.name, Key=key, Body=f"This is version {vid}")
+            # create a delete marker for each name
+            zone.s3_client.delete_object(Bucket=bucket.name, Key=key)
+        log.info(f"Finished uploads on zone={zone.name} bucket={bucket.name}")
+
+    # wait for all uploads to sync
+    for _, bucket in zone_bucket:
+        zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+
+    # run lifecycle on each zone to expire all versions. lifecycle changes aren't replicated,
+    # so we don't wait for bucket checkpoints before validating the results
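+    # rgw-lc-debug-interval=1 treats each configured lifecycle 'day' as one second,
+    # so the 1-day noncurrent expiration applies on the first lc run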
+    for zone, bucket in zone_bucket:
+        # apply NoncurrentVersionExpiration and verify that all Versions were removed
+        bucket_lc_process(zone.zone, bucket.name, ['--rgw-lc-debug-interval=1'])
+        response = zone.s3_client.list_object_versions(Bucket=bucket.name)
+        assert 'Versions' not in response
+        assert 'DeleteMarkers' in response
+
+        # apply ExpiredObjectDeleteMarker and verify that all DeleteMarkers were removed
+        bucket_lc_process(zone.zone, bucket.name, ['--rgw-lc-debug-interval=1'])
+        response = zone.s3_client.list_object_versions(Bucket=bucket.name)
+        assert 'Versions' not in response
+        assert 'DeleteMarkers' not in response
+
+        # check for orphaned index entries
+        olh = bucket_check_olh(zone.zone, bucket.name)
+        assert len(olh) == 0
+        unlinked = bucket_check_unlinked(zone.zone, bucket.name)
+        assert len(unlinked) == 0
def run_per_zonegroup(func):
    def wrapper(*args, **kwargs):