From d62c121ee3839a2fd4243a9b1c33aded1cc3db13 Mon Sep 17 00:00:00 2001
From: Josh Durgin
Date: Sat, 1 Feb 2020 14:41:32 -0500
Subject: [PATCH] mgr/pg_autoscaler: remove target ratio warning

Since the ratios are normalized, they cannot exceed 1.0 or overcommit
combined with target_bytes.

Signed-off-by: Josh Durgin
---
 doc/rados/operations/health-checks.rst    | 15 ---------
 doc/rados/operations/placement-groups.rst |  6 ++--
 qa/workunits/mon/pg_autoscaler.sh         |  8 -----
 src/pybind/mgr/pg_autoscaler/module.py    | 37 -----------------------
 4 files changed, 2 insertions(+), 64 deletions(-)

diff --git a/doc/rados/operations/health-checks.rst b/doc/rados/operations/health-checks.rst
index 1719c4a55ef0e..85eeea9afc0c5 100644
--- a/doc/rados/operations/health-checks.rst
+++ b/doc/rados/operations/health-checks.rst
@@ -833,21 +833,6 @@ recommended amount with::
 Please refer to :ref:`choosing-number-of-placement-groups` and
 :ref:`pg-autoscaler` for more information.
 
-POOL_TARGET_SIZE_RATIO_OVERCOMMITTED
-____________________________________
-
-One or more pools have a ``target_size_ratio`` property set to
-estimate the expected size of the pool as a fraction of total storage,
-but the value(s) exceed the total available storage (either by
-themselves or in combination with other pools' actual usage).
-
-This is usually an indication that the ``target_size_ratio`` value for
-the pool is too large and should be reduced or set to zero with::
-
-  ceph osd pool set <pool-name> target_size_ratio 0
-
-For more information, see :ref:`specifying_pool_target_size`.
-
 POOL_TARGET_SIZE_BYTES_OVERCOMMITTED
 ____________________________________
 
diff --git a/doc/rados/operations/placement-groups.rst b/doc/rados/operations/placement-groups.rst
index 060c1f53d1bbe..59fd6f979e976 100644
--- a/doc/rados/operations/placement-groups.rst
+++ b/doc/rados/operations/placement-groups.rst
@@ -138,10 +138,8 @@ total cluster capacity.
 You can also set the target size of a pool at creation time with the optional ``--target-size-bytes <bytes>`` or ``--target-size-ratio <ratio>`` arguments to the ``ceph osd pool create`` command.
 
 Note that if impossible target size values are specified (for example,
-a capacity larger than the total cluster, or ratio(s) that sum to more
-than 1.0) then a health warning
-(``POOL_TARET_SIZE_RATIO_OVERCOMMITTED`` or
-``POOL_TARGET_SIZE_BYTES_OVERCOMMITTED``) will be raised.
+a capacity larger than the total cluster) then a health warning
+(``POOL_TARGET_SIZE_BYTES_OVERCOMMITTED``) will be raised.
 
 Specifying bounds on a pool's PGs
 ---------------------------------
diff --git a/qa/workunits/mon/pg_autoscaler.sh b/qa/workunits/mon/pg_autoscaler.sh
index c4c4810bea86a..cf7bf0cc4c251 100755
--- a/qa/workunits/mon/pg_autoscaler.sh
+++ b/qa/workunits/mon/pg_autoscaler.sh
@@ -61,14 +61,6 @@ BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
 test $APGS -eq $APGS2
 test $BPGS -eq $BPGS2
 
-# too much ratio
-ceph osd pool set a target_size_ratio .9
-ceph osd pool set b target_size_ratio .9
-wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_RATIO_OVERCOMMITTED"
-wait_for 60 "ceph health detail | grep 1.8"
-ceph osd pool set a target_size_ratio 0
-ceph osd pool set b target_size_ratio 0
-
 # target_size
 ceph osd pool set a target_size_bytes 1000000000000000
 ceph osd pool set b target_size_bytes 1000000000000000
diff --git a/src/pybind/mgr/pg_autoscaler/module.py b/src/pybind/mgr/pg_autoscaler/module.py
index 5ebc672f05532..12b1e13ffa9da 100644
--- a/src/pybind/mgr/pg_autoscaler/module.py
+++ b/src/pybind/mgr/pg_autoscaler/module.py
@@ -429,21 +429,12 @@ class PgAutoscaler(MgrModule):
         too_many = []
         health_checks = {}
 
-        total_ratio = dict([(r, 0.0) for r in iter(root_map)])
-        total_target_ratio = dict([(r, 0.0) for r in iter(root_map)])
-        target_ratio_pools = dict([(r, []) for r in iter(root_map)])
-
         total_bytes = dict([(r, 0) for r in iter(root_map)])
         total_target_bytes = dict([(r, 0.0) for r in iter(root_map)])
         target_bytes_pools = dict([(r, []) for r in iter(root_map)])
 
         for p in ps:
             pool_id = str(p['pool_id'])
-            total_ratio[p['crush_root_id']] += max(p['actual_capacity_ratio'],
-                                                   p['target_ratio'])
-            if p['target_ratio'] > 0:
-                total_target_ratio[p['crush_root_id']] += p['target_ratio']
-                target_ratio_pools[p['crush_root_id']].append(p['pool_name'])
             total_bytes[p['crush_root_id']] += max(
                 p['actual_raw_used'],
                 p['target_bytes'] * p['raw_used_rate'])
@@ -525,34 +516,6 @@ class PgAutoscaler(MgrModule):
                 'detail': too_many
             }
 
-        too_much_target_ratio = []
-        for root_id, total in iteritems(total_ratio):
-            total_target = total_target_ratio[root_id]
-            if total_target > 0 and total > 1.0:
-                too_much_target_ratio.append(
-                    'Pools %s overcommit available storage by %.03fx due to '
-                    'target_size_ratio %.03f on pools %s' % (
-                        root_map[root_id].pool_names,
-                        total,
-                        total_target,
-                        target_ratio_pools[root_id]
-                    )
-                )
-            elif total_target > 1.0:
-                too_much_target_ratio.append(
-                    'Pools %s have collective target_size_ratio %.03f > 1.0' % (
-                        root_map[root_id].pool_names,
-                        total_target
-                    )
-                )
-        if too_much_target_ratio:
-            health_checks['POOL_TARGET_SIZE_RATIO_OVERCOMMITTED'] = {
-                'severity': 'warning',
-                'summary': "%d subtrees have overcommitted pool target_size_ratio" % len(too_much_target_ratio),
-                'count': len(too_much_target_ratio),
-                'detail': too_much_target_ratio,
-            }
-
         too_much_target_bytes = []
         for root_id, total in iteritems(total_bytes):
            total_target = total_target_bytes[root_id]
-- 
2.39.5
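
Note on the reasoning in the commit message: once ``target_size_ratio`` values
are treated as relative weights and normalized, each pool's effective capacity
fraction is its share of the summed ratios, so the aggregate can never exceed
1.0 even for raw settings such as 0.9 + 0.9 (the case the removed qa test
exercised with its grep for 1.8). A minimal standalone Python sketch of that
idea follows; it is not code from pg_autoscaler/module.py, and the
``effective_ratios`` helper and its ``capacity_left`` parameter (standing in
for whatever fraction is not already claimed via ``target_size_bytes``) are
assumptions for illustration only::

  # Illustrative sketch -- not the pg_autoscaler's actual implementation.
  # Raw target_size_ratio values act as weights; dividing each by their sum
  # keeps the effective fractions bounded by capacity_left, so pools cannot
  # overcommit the subtree no matter how large the raw values are.
  def effective_ratios(target_ratios, capacity_left=1.0):
      total = sum(target_ratios.values())
      if total <= 0:
          return {name: 0.0 for name in target_ratios}
      return {name: ratio / total * capacity_left
              for name, ratio in target_ratios.items()}

  # Raw ratios of 0.9 + 0.9 used to trip the removed warning; after
  # normalization each pool simply receives half of the available capacity.
  print(effective_ratios({'a': 0.9, 'b': 0.9}))  # {'a': 0.5, 'b': 0.5}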