# data, so if the osd backfill reservation incorrectly calculates "toofull"
# the test will detect this (fail).
#
+ # Note, we need to operate with "uncompressed" bytes because currently
+ # osd backfill reservation does not take compression into account.
+ #
# We also need to update nearfull ratio to prevent "full ratio(s) out of order".
- backfillfull = min(used_kb + primary_used_kb, total_kb * 0.9) / total_kb
- nearfull_min = max(used_kb, primary_used_kb) / total_kb
+ pdf = manager.get_pool_df(pool)
+ log.debug("pool %s df: %s" % (pool, pdf))
+ assert pdf
+ compress_ratio = 1.0 * pdf['compress_under_bytes'] / pdf['compress_bytes_used'] \
+ if pdf['compress_bytes_used'] > 0 else 1.0
+ log.debug("compress_ratio: %s" % compress_ratio)
+
+ backfillfull = (used_kb + primary_used_kb) * compress_ratio / total_kb
+ assert backfillfull < 0.9
+ nearfull_min = max(used_kb, primary_used_kb) * compress_ratio / total_kb
+ assert nearfull_min < backfillfull
delta = backfillfull - nearfull_min
nearfull = nearfull_min + delta * 0.1
backfillfull = nearfull_min + delta * 0.2
j = json.loads('\n'.join(out.split('\n')[1:]))
return j['nodes'][0]
+ def get_pool_df(self, name):
+     """
+     Get the pool df stats.
+
+     Runs "ceph df detail --format=json" and returns the 'stats'
+     dict of the pool whose 'name' field equals *name*, or None
+     when no such pool is present in the output.
+     """
+     out = self.raw_cluster_cmd('df', 'detail', '--format=json')
+     # Drop the first output line before parsing — presumably a
+     # non-JSON preamble emitted ahead of the JSON body; the sibling
+     # method above (L24) parses its output the same way. TODO confirm
+     # against raw_cluster_cmd's actual output format.
+     j = json.loads('\n'.join(out.split('\n')[1:]))
+     return next((p['stats'] for p in j['pools'] if p['name'] == name),
+     None)
+
def get_pgids_to_force(self, backfill):
"""
Return the randomized list of PGs that can have their recovery/backfill forced