From: Casey Bodley
Date: Mon, 15 Jun 2020 15:45:11 +0000 (-0400)
Subject: test/rgw: test_datalog_autotrim filters out new entries
X-Git-Tag: v15.2.13~11^2~5^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=c04f66bd75524cdfbacc2b58bd4435b19052e02e;p=ceph.git

test/rgw: test_datalog_autotrim filters out new entries

if other sync activity is racing with test_datalog_autotrim, it can
create new datalog entries after the 'datalog autotrim' command runs

instead of asserting that the datalog is empty after trim, assert that
any entries have a marker larger than the max-marker reported by
'datalog status' before the trim

Fixes: https://tracker.ceph.com/issues/45626

Signed-off-by: Casey Bodley
(cherry picked from commit abd08f1843642e318d74dfadb0f9cf1f6b86d827)
---

diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py
index 018aa4bd4ec48..6cca6dc060693 100644
--- a/src/test/rgw/rgw_multi/tests.py
+++ b/src/test/rgw/rgw_multi/tests.py
@@ -5,6 +5,7 @@ import sys
 import time
 import logging
 import errno
+import dateutil.parser
 
 try:
     from itertools import izip_longest as zip_longest  # type: ignore
@@ -83,8 +84,13 @@ def mdlog_list(zone, period = None):
 def mdlog_autotrim(zone):
     zone.cluster.admin(['mdlog', 'autotrim'])
 
-def datalog_list(zone, period = None):
-    cmd = ['datalog', 'list']
+def datalog_list(zone, args = None):
+    cmd = ['datalog', 'list'] + (args or [])
+    (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
+    return json.loads(datalog_json)
+
+def datalog_status(zone):
+    cmd = ['datalog', 'status']
     (datalog_json, _) = zone.cluster.admin(cmd, read_only=True)
     return json.loads(datalog_json)
 
@@ -962,9 +968,21 @@ def test_datalog_autotrim():
 
     # trim each datalog
     for zone, _ in zone_bucket:
+        # read max markers for each shard
+        status = datalog_status(zone.zone)
+
         datalog_autotrim(zone.zone)
-        datalog = datalog_list(zone.zone)
-        assert len(datalog) == 0
+
+        for shard_id, shard_status in enumerate(status):
+            try:
+                before_trim = dateutil.parser.isoparse(shard_status['last_update'])
+            except: # empty timestamps look like "0.000000" and will fail here
+                continue
+            entries = datalog_list(zone.zone, ['--shard-id', str(shard_id), '--max-entries', '1'])
+            if not len(entries):
+                continue
+            after_trim = dateutil.parser.isoparse(entries[0]['timestamp'])
+            assert before_trim < after_trim, "any datalog entries must be newer than trim"
 
 def test_multi_zone_redirect():
     zonegroup = realm.master_zonegroup()
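
For reference, a minimal standalone sketch of the check the updated test performs,
with stubbed data standing in for the 'datalog status' and 'datalog list' output.
The dict and list shapes below are illustrative assumptions; only the 'last_update'
and 'timestamp' fields mirror what the patch actually reads.

import dateutil.parser

# stubbed 'datalog status' output: one dict per shard, as read before the trim
status = [
    {'last_update': '2020-06-15T15:45:00.000000Z'},
    {'last_update': '0.000000'},   # a shard that has never been written to
]

# stubbed 'datalog list --shard-id N --max-entries 1' output per shard, after the trim
entries_by_shard = {
    0: [{'timestamp': '2020-06-15T15:46:10.000000Z'}],
    1: [],
}

for shard_id, shard_status in enumerate(status):
    try:
        before_trim = dateutil.parser.isoparse(shard_status['last_update'])
    except ValueError:
        # empty shards report "0.000000", which is not a valid ISO timestamp
        continue
    entries = entries_by_shard.get(shard_id, [])
    if not entries:
        continue
    after_trim = dateutil.parser.isoparse(entries[0]['timestamp'])
    # anything still listed after the trim must have been logged later than
    # the newest entry seen before the trim
    assert before_trim < after_trim, 'datalog entries must be newer than trim'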