From 429ac06cbb44b8a8263beb0d0780a01cedb517ba Mon Sep 17 00:00:00 2001
From: Mykola Golub
Date: Wed, 13 Oct 2021 18:22:09 +0300
Subject: [PATCH] qa/tasks/backfill_toofull: make test work when compression on

The osd backfill reservation does not take compression into account, so
we need to operate with "uncompressed" bytes when calculating the
nearfull ratio.

Signed-off-by: Mykola Golub
---
 qa/tasks/backfill_toofull.py | 16 ++++++++++++++--
 qa/tasks/ceph_manager.py     |  9 +++++++++
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/qa/tasks/backfill_toofull.py b/qa/tasks/backfill_toofull.py
index f2156bea6017c..1a866595d2695 100644
--- a/qa/tasks/backfill_toofull.py
+++ b/qa/tasks/backfill_toofull.py
@@ -139,10 +139,22 @@ def task(ctx, config):
     # data, so if the osd backfill reservation incorrectly calculates "toofull"
     # the test will detect this (fail).
     #
+    # Note, we need to operate with "uncompressed" bytes because currently
+    # osd backfill reservation does not take compression into account.
+    #
     # We also need to update nearfull ratio to prevent "full ratio(s) out of order".
 
-    backfillfull = min(used_kb + primary_used_kb, total_kb * 0.9) / total_kb
-    nearfull_min = max(used_kb, primary_used_kb) / total_kb
+    pdf = manager.get_pool_df(pool)
+    log.debug("pool %s df: %s" % (pool, pdf))
+    assert pdf
+    compress_ratio = 1.0 * pdf['compress_under_bytes'] / pdf['compress_bytes_used'] \
+        if pdf['compress_bytes_used'] > 0 else 1.0
+    log.debug("compress_ratio: %s" % compress_ratio)
+
+    backfillfull = (used_kb + primary_used_kb) * compress_ratio / total_kb
+    assert backfillfull < 0.9
+    nearfull_min = max(used_kb, primary_used_kb) * compress_ratio / total_kb
+    assert nearfull_min < backfillfull
     delta = backfillfull - nearfull_min
     nearfull = nearfull_min + delta * 0.1
     backfillfull = nearfull_min + delta * 0.2
diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index 2524635ff0518..b5b1099e9101e 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -2341,6 +2341,15 @@ class CephManager:
         j = json.loads('\n'.join(out.split('\n')[1:]))
         return j['nodes'][0]
 
+    def get_pool_df(self, name):
+        """
+        Get the pool df stats
+        """
+        out = self.raw_cluster_cmd('df', 'detail', '--format=json')
+        j = json.loads('\n'.join(out.split('\n')[1:]))
+        return next((p['stats'] for p in j['pools'] if p['name'] == name),
+                    None)
+
     def get_pgids_to_force(self, backfill):
         """
         Return the randomized list of PGs that can have their recovery/backfill forced
-- 
2.39.5
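
For illustration, a minimal standalone sketch of the calculation this patch
introduces, assuming made-up pool stats and OSD usage numbers; in the test
itself, pdf comes from manager.get_pool_df(pool) and the *_kb values from the
osd df output:

    # Minimal sketch with hypothetical numbers (not the real test values).
    pdf = {'compress_under_bytes': 2048, 'compress_bytes_used': 1024}  # assumed pool stats
    used_kb, primary_used_kb, total_kb = 100, 120, 1000                # assumed osd usage

    # Ratio of logical (uncompressed) bytes to bytes actually stored on disk.
    compress_ratio = 1.0 * pdf['compress_under_bytes'] / pdf['compress_bytes_used'] \
        if pdf['compress_bytes_used'] > 0 else 1.0                     # 2.0 here

    # Scale the on-disk usage back up to "uncompressed" bytes, since the osd
    # backfill reservation check does not account for compression.
    backfillfull = (used_kb + primary_used_kb) * compress_ratio / total_kb    # 0.44
    nearfull_min = max(used_kb, primary_used_kb) * compress_ratio / total_kb  # 0.24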