git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/pg_autoscaler: remove target ratio warning
author Josh Durgin <jdurgin@redhat.com>
Sat, 1 Feb 2020 19:41:32 +0000 (14:41 -0500)
committer Kefu Chai <kchai@redhat.com>
Mon, 10 Feb 2020 02:08:36 +0000 (10:08 +0800)
Since the ratios are normalized, they cannot exceed 1.0 or overcommit
combined with target_bytes.

Signed-off-by: Josh Durgin <jdurgin@redhat.com>
doc/rados/operations/health-checks.rst
doc/rados/operations/placement-groups.rst
qa/workunits/mon/pg_autoscaler.sh
src/pybind/mgr/pg_autoscaler/module.py

index 1719c4a55ef0e9e1eb57a25f20da0f8654b9fce6..85eeea9afc0c5002ddf0f2b5cea8319127a7c675 100644 (file)
@@ -833,21 +833,6 @@ recommended amount with::
 Please refer to :ref:`choosing-number-of-placement-groups` and
 :ref:`pg-autoscaler` for more information.
 
-POOL_TARGET_SIZE_RATIO_OVERCOMMITTED
-____________________________________
-
-One or more pools have a ``target_size_ratio`` property set to
-estimate the expected size of the pool as a fraction of total storage,
-but the value(s) exceed the total available storage (either by
-themselves or in combination with other pools' actual usage).
-
-This is usually an indication that the ``target_size_ratio`` value for
-the pool is too large and should be reduced or set to zero with::
-
-  ceph osd pool set <pool-name> target_size_ratio 0
-
-For more information, see :ref:`specifying_pool_target_size`.
-
 POOL_TARGET_SIZE_BYTES_OVERCOMMITTED
 ____________________________________
 
index 060c1f53d1bbe5e42a9eaa5f1f7af593f2b555a1..59fd6f979e976fa624cec5933322726dca193306 100644 (file)
@@ -138,10 +138,8 @@ total cluster capacity.
 You can also set the target size of a pool at creation time with the optional ``--target-size-bytes <bytes>`` or ``--target-size-ratio <ratio>`` arguments to the ``ceph osd pool create`` command.
 
 Note that if impossible target size values are specified (for example,
-a capacity larger than the total cluster, or ratio(s) that sum to more
-than 1.0) then a health warning
-(``POOL_TARET_SIZE_RATIO_OVERCOMMITTED`` or
-``POOL_TARGET_SIZE_BYTES_OVERCOMMITTED``) will be raised.
+a capacity larger than the total cluster) then a health warning
+(``POOL_TARGET_SIZE_BYTES_OVERCOMMITTED``) will be raised.
 
 Specifying bounds on a pool's PGs
 ---------------------------------
index c4c4810bea86a2b94db0036363d7403585bc5521..cf7bf0cc4c2519f2b318fa53dd5dcc9ece2cc150 100755 (executable)
@@ -61,14 +61,6 @@ BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target')
 test $APGS -eq $APGS2
 test $BPGS -eq $BPGS2
 
-# too much ratio
-ceph osd pool set a target_size_ratio .9
-ceph osd pool set b target_size_ratio .9
-wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_RATIO_OVERCOMMITTED"
-wait_for 60 "ceph health detail | grep 1.8"
-ceph osd pool set a target_size_ratio 0
-ceph osd pool set b target_size_ratio 0
-
 # target_size
 ceph osd pool set a target_size_bytes 1000000000000000
 ceph osd pool set b target_size_bytes 1000000000000000
index 5ebc672f05532c19defc46abb30a56ad0a8884ec..12b1e13ffa9dad64d4b91d7037132ac5e422a256 100644 (file)
@@ -429,21 +429,12 @@ class PgAutoscaler(MgrModule):
         too_many = []
         health_checks = {}
 
-        total_ratio = dict([(r, 0.0) for r in iter(root_map)])
-        total_target_ratio = dict([(r, 0.0) for r in iter(root_map)])
-        target_ratio_pools = dict([(r, []) for r in iter(root_map)])
-
         total_bytes = dict([(r, 0) for r in iter(root_map)])
         total_target_bytes = dict([(r, 0.0) for r in iter(root_map)])
         target_bytes_pools = dict([(r, []) for r in iter(root_map)])
 
         for p in ps:
             pool_id = str(p['pool_id'])
-            total_ratio[p['crush_root_id']] += max(p['actual_capacity_ratio'],
-                                                   p['target_ratio'])
-            if p['target_ratio'] > 0:
-                total_target_ratio[p['crush_root_id']] += p['target_ratio']
-                target_ratio_pools[p['crush_root_id']].append(p['pool_name'])
             total_bytes[p['crush_root_id']] += max(
                 p['actual_raw_used'],
                 p['target_bytes'] * p['raw_used_rate'])
@@ -525,34 +516,6 @@ class PgAutoscaler(MgrModule):
                 'detail': too_many
             }
 
-        too_much_target_ratio = []
-        for root_id, total in iteritems(total_ratio):
-            total_target = total_target_ratio[root_id]
-            if total_target > 0 and total > 1.0:
-                too_much_target_ratio.append(
-                    'Pools %s overcommit available storage by %.03fx due to '
-                    'target_size_ratio %.03f on pools %s' % (
-                        root_map[root_id].pool_names,
-                        total,
-                        total_target,
-                        target_ratio_pools[root_id]
-                    )
-                )
-            elif total_target > 1.0:
-                too_much_target_ratio.append(
-                    'Pools %s have collective target_size_ratio %.03f > 1.0' % (
-                        root_map[root_id].pool_names,
-                        total_target
-                    )
-                )
-        if too_much_target_ratio:
-            health_checks['POOL_TARGET_SIZE_RATIO_OVERCOMMITTED'] = {
-                'severity': 'warning',
-                'summary': "%d subtrees have overcommitted pool target_size_ratio" % len(too_much_target_ratio),
-                'count': len(too_much_target_ratio),
-                'detail': too_much_target_ratio,
-            }
-
         too_much_target_bytes = []
         for root_id, total in iteritems(total_bytes):
             total_target = total_target_bytes[root_id]