else:
from typing_extensions import Literal
- ScaleModeT = Literal['scale-up', 'scale-down']
+ PassT = Literal['first', 'second', 'third']
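+ # (as implemented below: 'first' sizes non-bulk pools, 'second' sizes
+ # bulk pools above their even share, 'third' splits what is left
+ # among the remaining even pools)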
def nearest_power_of_two(n: int) -> int:
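+ # illustrative example (hypothetical inputs, assuming the usual
+ # round-to-closest behavior): nearest_power_of_two(200) == 256 and
+ # nearest_power_of_two(300) == 256, i.e. pg counts are quantized to
+ # whichever power of two is numerically closer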
name='sleep_interval',
type='secs',
default=60),
- Option(
- 'autoscale_profile',
- default='scale-up',
- type='str',
- desc='pg_autoscale profiler',
- long_desc=('Determines the behavior of the autoscaler algorithm, '
- '`scale-up` means that it starts out with minmum pgs '
- 'and scales up when there is pressure'
- '`scale-down means start out with full pgs and scales'
- 'down when there is pressure'),
- runtime=True),
+
Option(
name='threshold',
type='float',
# to just keep a copy of the pythonized version.
self._osd_map = None
if TYPE_CHECKING:
- self.autoscale_profile: 'ScaleModeT' = 'scale-up'
self.sleep_interval = 60
self.mon_target_pg_per_osd = 0
self.threshold = 3.0
self.get_module_option(opt['name']))
self.log.debug(' mgr option %s = %s',
opt['name'], getattr(self, opt['name']))
- # if the profiler option is not set, this means it is an old cluster
- autoscale_profile = self.get_module_option("autoscale_profile")
- if not autoscale_profile:
- self.set_module_option("autoscale_profile", "scale-up")
@CLIReadCommand('osd pool autoscale-status')
def _command_autoscale_status(self, format: str = 'plain') -> Tuple[int, str, str]:
"""
osdmap = self.get_osdmap()
pools = osdmap.get_pools_by_name()
- profile = self.autoscale_profile
- ps, root_map = self._get_pool_status(osdmap, pools, profile)
+ ps, root_map = self._get_pool_status(osdmap, pools)
if format in ('json', 'json-pretty'):
return 0, json.dumps(ps, indent=4, sort_keys=True), ''
'PG_NUM',
# 'IDEAL',
'NEW PG_NUM', 'AUTOSCALE',
- 'PROFILE'],
+ 'BULK'],
border=False)
table.left_padding_width = 0
table.right_padding_width = 2
# table.align['IDEAL'] = 'r'
table.align['NEW PG_NUM'] = 'r'
table.align['AUTOSCALE'] = 'l'
- table.align['PROFILE'] = 'l'
+ table.align['BULK'] = 'l'
for p in ps:
if p['would_adjust']:
final = str(p['pg_num_final'])
# p['pg_num_ideal'],
final,
p['pg_autoscale_mode'],
- profile
+ str(p['bulk'])
])
return 0, table.get_string(), ''
self.set_module_option("threshold", num)
return 0, "threshold updated", ""
- @CLIWriteCommand("osd pool set autoscale-profile scale-up")
- def set_profile_scale_up(self) -> Tuple[int, str, str]:
- """
- set the autoscaler behavior to start out with minimum pgs and scales up when there is pressure
- """
- if self.autoscale_profile == "scale-up":
- return 0, "", "autoscale-profile is already a scale-up!"
- else:
- self.set_module_option("autoscale_profile", "scale-up")
- return 0, "", "autoscale-profile is now scale-up"
-
- @CLIWriteCommand("osd pool set autoscale-profile scale-down")
- def set_profile_scale_down(self) -> Tuple[int, str, str]:
- """
- set the autoscaler behavior to start out with full pgs and
- scales down when there is pressure
- """
- if self.autoscale_profile == "scale-down":
- return 0, "", "autoscale-profile is already a scale-down!"
- else:
- self.set_module_option("autoscale_profile", "scale-down")
- return 0, "", "autoscale-profile is now scale-down"
-
def serve(self) -> None:
self.config_notify()
while not self._shutdown.is_set():
root_map: Dict[int, CrushSubtreeResourceStatus],
root_id: int,
capacity_ratio: float,
- even_pools: Dict[str, Dict[str, Any]],
bias: float,
- is_used: bool,
- profile: 'ScaleModeT',
+ even_pools: Dict[str, Dict[str, Any]],
+ bulk_pools: Dict[str, Dict[str, Any]],
+ func_pass: 'PassT',
+ bulk: bool,
) -> Union[Tuple[float, int, int], Tuple[None, None, None]]:
"""
- `profile` determines behaviour of the autoscaler.
- `is_used` flag used to determine if this is the first
- pass where the caller tries to calculate/adjust pools that has
- used_ratio > even_ratio else this is the second pass,
- we calculate final_ratio by giving it 1 / pool_count
- of the root we are currently looking at.
+ `func_pass` determines which pass of the autoscaler this is.
+ In the `first` pass we size non-bulk pools by their capacity
+ ratio and set bulk pools aside. In the `second` pass we size
+ bulk pools whose used_ratio exceeds the even_ratio and set the
+ rest aside as even pools. In the `third` pass each remaining
+ even pool gets 1 / (pool_count - pool_used) of the pgs left
+ in the root we are currently looking at.
"""
- if profile == "scale-up":
- final_ratio = capacity_ratio
- # So what proportion of pg allowance should we be using?
- pg_target = root_map[root_id].pg_target
- assert pg_target is not None
- pool_pg_target = (final_ratio * pg_target) / p['size'] * bias
- final_pg_target = max(p.get('options', {}).get('pg_num_min', PG_NUM_MIN),
- nearest_power_of_two(pool_pg_target))
-
- else:
- if is_used:
- pool_count = root_map[root_id].pool_count
- assert pool_count is not None
- even_ratio = 1 / pool_count
- used_ratio = capacity_ratio
-
- if used_ratio > even_ratio:
- root_map[root_id].pool_used += 1
- else:
- # keep track of even_pools to be used in second pass
- # of the caller function
- even_pools[pool_name] = p
- return None, None, None
-
- final_ratio = max(used_ratio, even_ratio)
- pg_target = root_map[root_id].pg_target
- assert pg_target is not None
- used_pg = final_ratio * pg_target
+ if func_pass == 'first':
+ # the first pass deals with small pools (those without the bulk flag),
+ # calculating final_pg_target based on the capacity ratio;
+ # we also keep track of bulk_pools to be used in the second pass
+ if not bulk:
+ final_ratio = capacity_ratio
+ pg_left = root_map[root_id].pg_left
+ assert pg_left is not None
+ used_pg = final_ratio * pg_left
root_map[root_id].pg_left -= int(used_pg)
+ root_map[root_id].pool_used += 1
pool_pg_target = used_pg / p['size'] * bias
-
else:
- pool_count = root_map[root_id].pool_count
- assert pool_count is not None
- final_ratio = 1 / (pool_count - root_map[root_id].pool_used)
- pool_pg_target = (final_ratio * root_map[root_id].pg_left) / p['size'] * bias
-
- final_pg_target = max(p.get('options', {}).get('pg_num_min', PG_NUM_MIN),
- nearest_power_of_two(pool_pg_target))
-
- self.log.info("Pool '{0}' root_id {1} using {2} of space, bias {3}, "
- "pg target {4} quantized to {5} (current {6})".format(
- p['pool_name'],
- root_id,
- capacity_ratio,
- bias,
- pool_pg_target,
- final_pg_target,
- p['pg_num_target']
- ))
+ bulk_pools[pool_name] = p
+ return None, None, None
+
+ elif func_pass == 'second':
+ # in the second pass we calculate the final_pg_target for bulk
+ # pools whose used_ratio > even_ratio, and keep track of the
+ # even pools to be used in the third pass
+ pool_count = root_map[root_id].pool_count
+ assert pool_count is not None
+ even_ratio = 1 / (pool_count - root_map[root_id].pool_used)
+ used_ratio = capacity_ratio
+
+ if used_ratio > even_ratio:
+ root_map[root_id].pool_used += 1
+ else:
+ even_pools[pool_name] = p
+ return None, None, None
+
+ final_ratio = max(used_ratio, even_ratio)
+ pg_left = root_map[root_id].pg_left
+ assert pg_left is not None
+ used_pg = final_ratio * pg_left
+ root_map[root_id].pg_left -= int(used_pg)
+ pool_pg_target = used_pg / p['size'] * bias
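+ # e.g. (hypothetical) pool_count = 4 and pool_used = 0 gives
+ # even_ratio = 1/4; a bulk pool using 40% of capacity keeps
+ # final_ratio = 0.4 of pg_left, while one using 10% is deferred
+ # to even_pools for the third pass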
+ else:
+ # in the third pass we split the remaining pg_left evenly among the even_pools
+ pool_count = root_map[root_id].pool_count
+ assert pool_count is not None
+ final_ratio = 1 / (pool_count - root_map[root_id].pool_used)
+ pool_pg_target = (final_ratio * root_map[root_id].pg_left) / p['size'] * bias
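+ # e.g. (hypothetical) 2 even pools remaining out of pool_count = 4
+ # with pool_used = 2 and pg_left = 1024: each gets final_ratio = 1/2,
+ # i.e. 512 pgs before dividing by size and applying bias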
+
+ final_pg_target = max(p.get('options', {}).get('pg_num_min', PG_NUM_MIN),
+ nearest_power_of_two(pool_pg_target))
+ self.log.info("Pool '{0}' root_id {1} using {2} of space, bias {3}, "
+ "pg target {4} quantized to {5} (current {6})".format(
+ p['pool_name'],
+ root_id,
+ capacity_ratio,
+ bias,
+ pool_pg_target,
+ final_pg_target,
+ p['pg_num_target']
+ ))
return final_ratio, pool_pg_target, final_pg_target
- def _calc_pool_targets(
+ def _get_pool_pg_targets(
self,
osdmap: OSDMap,
pools: Dict[str, Dict[str, Any]],
pool_stats: Dict[int, Dict[str, int]],
ret: List[Dict[str, Any]],
threshold: float,
- is_used: bool,
- profile: 'ScaleModeT',
+ func_pass: 'PassT',
overlapped_roots: Set[int],
- ) -> Tuple[List[Dict[str, Any]], Dict[str, Dict[str, Any]]]:
+ ) -> Tuple[List[Dict[str, Any]], Dict[str, Dict[str, Any]], Dict[str, Dict[str, Any]]]:
"""
- Calculates final_pg_target of each pools and determine if it needs
- scaling, this depends on the profile of the autoscaler. For scale-down,
- the minimal amount of pgs and only scale when there is increase in usage.
+ Calculates the final_pg_target for each pool and determines whether it
+ needs scaling. Pools are sized in one of three passes depending on
+ whether they carry the bulk flag; see _calc_final_pg_target.
"""
even_pools: Dict[str, Dict[str, Any]] = {}
+ bulk_pools: Dict[str, Dict[str, Any]] = {}
for pool_name, p in pools.items():
pool_id = p['pool']
if pool_id not in pool_stats:
cr_name = crush_rule['rule_name']
root_id = crush_map.get_rule_root(cr_name)
assert root_id is not None
- if root_id in overlapped_roots and profile == "scale-down":
- # for scale-down profile skip pools
+ if root_id in overlapped_roots:
+ # skip pools
# with overlapping roots
self.log.warn("pool %d contains an overlapping root %d"
"... skipping scaling", pool_id, root_id)
root_map[root_id].total_target_bytes,
capacity)
+ # determine whether the pool has the bulk flag set
+ bulk = False
+ flags = p['flags_names'].split(",")
+ if "bulk" in flags:
+ bulk = True
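+ # (the bulk flag is expected to be set by the operator, e.g. with
+ # `ceph osd pool set <pool-name> bulk true`; exact command form
+ # assumed from the Ceph CLI)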
+
capacity_ratio = max(capacity_ratio, target_ratio)
final_ratio, pool_pg_target, final_pg_target = self._calc_final_pg_target(
- p, pool_name, root_map, root_id, capacity_ratio, even_pools, bias, is_used, profile)
+ p, pool_name, root_map, root_id,
+ capacity_ratio, bias, even_pools,
+ bulk_pools, func_pass, bulk)
if final_ratio is None:
continue
'pg_num_final': final_pg_target,
'would_adjust': adjust,
'bias': p.get('options', {}).get('pg_autoscale_bias', 1.0),
+ 'bulk': bulk,
})
- return ret, even_pools
+ return ret, bulk_pools, even_pools
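+ # (callers thread these returns through successive passes: bulk_pools
+ # from the 'first' pass feeds the 'second', and even_pools from the
+ # 'second' feeds the 'third'; see _get_pool_status below)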
def _get_pool_status(
self,
osdmap: OSDMap,
pools: Dict[str, Dict[str, Any]],
- profile: 'ScaleModeT',
) -> Tuple[List[Dict[str, Any]],
Dict[int, CrushSubtreeResourceStatus]]:
threshold = self.threshold
ret: List[Dict[str, Any]] = []
# Iterate over all pools to determine how they should be sized.
- # First call of _calc_pool_targets() is to find/adjust pools that uses more capacaity than
- # the even_ratio of other pools and we adjust those first.
- # Second call make use of the even_pools we keep track of in the first call.
- # All we need to do is iterate over those and give them 1/pool_count of the
- # total pgs.
+ # The first call of _get_pool_pg_targets() sizes the non-bulk pools by
+ # capacity ratio and collects the bulk pools. The second call sizes the
+ # bulk pools that use more capacity than the even_ratio of the other
+ # pools and collects the remaining even pools. The third call gives
+ # each even pool an equal share of the pgs left in its root.
- ret, even_pools = self._calc_pool_targets(osdmap, pools, crush_map, root_map,
- pool_stats, ret, threshold, True, profile, overlapped_roots)
+ ret, bulk_pools, _ = self._get_pool_pg_targets(osdmap, pools, crush_map, root_map,
+ pool_stats, ret, threshold, 'first', overlapped_roots)
+
+ ret, _, even_pools = self._get_pool_pg_targets(osdmap, bulk_pools, crush_map, root_map,
+ pool_stats, ret, threshold, 'second', overlapped_roots)
- if profile == "scale-down":
- # We only have adjust even_pools when we use scale-down profile
- ret, _ = self._calc_pool_targets(osdmap, even_pools, crush_map, root_map,
- pool_stats, ret, threshold, False, profile, overlapped_roots)
+ ret, _, _ = self._get_pool_pg_targets(osdmap, even_pools, crush_map, root_map,
+ pool_stats, ret, threshold, 'third', overlapped_roots)
return (ret, root_map)
if osdmap.get_require_osd_release() < 'nautilus':
return
pools = osdmap.get_pools_by_name()
- profile = self.autoscale_profile
- ps, root_map = self._get_pool_status(osdmap, pools, profile)
+ ps, root_map = self._get_pool_status(osdmap, pools)
# Anyone in 'warn', set the health message for them and then
# drop them from consideration.