]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/tasks/backfill_toofull: make test work when compression on 43572/head
authorMykola Golub <mgolub@suse.com>
Wed, 13 Oct 2021 15:22:09 +0000 (18:22 +0300)
committerMykola Golub <mgolub@suse.com>
Sat, 16 Oct 2021 12:28:53 +0000 (15:28 +0300)
The osd backfill reservation does not take compression into account so
we need to operate with "uncompressed" bytes when calculating nearfull
ratio.

Signed-off-by: Mykola Golub <mgolub@suse.com>
qa/tasks/backfill_toofull.py
qa/tasks/ceph_manager.py

index f2156bea6017cd391a7e11c28270fb34836c26be..1a866595d26959f455760ebf878f20b874dedb21 100644 (file)
@@ -139,10 +139,22 @@ def task(ctx, config):
     # data, so if the osd backfill reservation incorrectly calculates "toofull"
     # the test will detect this (fail).
     #
+    # Note, we need to operate with "uncompressed" bytes because currently
+    # osd backfill reservation does not take compression into account.
+    #
     # We also need to update nearfull ratio to prevent "full ratio(s) out of order".
 
-    backfillfull = min(used_kb + primary_used_kb, total_kb * 0.9) / total_kb
-    nearfull_min = max(used_kb, primary_used_kb) / total_kb
+    pdf = manager.get_pool_df(pool)
+    log.debug("pool %s df: %s" % (pool, pdf))
+    assert pdf
+    compress_ratio = 1.0 * pdf['compress_under_bytes'] / pdf['compress_bytes_used'] \
+        if pdf['compress_bytes_used'] > 0 else 1.0
+    log.debug("compress_ratio: %s" % compress_ratio)
+
+    backfillfull = (used_kb + primary_used_kb) * compress_ratio / total_kb
+    assert backfillfull < 0.9
+    nearfull_min = max(used_kb, primary_used_kb) * compress_ratio / total_kb
+    assert nearfull_min < backfillfull
     delta = backfillfull - nearfull_min
     nearfull = nearfull_min + delta * 0.1
     backfillfull = nearfull_min + delta * 0.2
index 2524635ff05182d6baf3ce311b84f087c3580fa2..b5b1099e9101e9d6a8adc30f464e0b732942b2a6 100644 (file)
@@ -2341,6 +2341,15 @@ class CephManager:
         j = json.loads('\n'.join(out.split('\n')[1:]))
         return j['nodes'][0]
 
+    def get_pool_df(self, name):
+        """
+        Get the pool df stats
+        """
+        out = self.raw_cluster_cmd('df', 'detail', '--format=json')
+        j = json.loads('\n'.join(out.split('\n')[1:]))
+        return next((p['stats'] for p in j['pools'] if p['name'] == name),
+                    None)
+
     def get_pgids_to_force(self, backfill):
         """
         Return the randomized list of PGs that can have their recovery/backfill forced