result.append(cls.Endpoint(cls, func))
return result
@staticmethod
def get_client_version():
    """Parse the API version requested by the client from the ``Accept`` header.

    :return: the ``APIVersion`` encoded in the request's Accept MIME type.
    :raises cherrypy.HTTPError: 415 when the header is missing or does not
        carry a parseable version.
    """
    try:
        return APIVersion.from_mime_type(cherrypy.request.headers['Accept'])
    except Exception:
        raise cherrypy.HTTPError(
            415, "Unable to find version in request header")
# Diff hunk: wraps an endpoint handler to negotiate the API version from the
# request and set the response Content-Type (XML or JSON). Fragmentary view:
# `kwargs`, `params`, `args` and `inner` come from elided context lines.
@staticmethod
def _request_wrapper(func, method, json_response, xml, # pylint: disable=unused-argument
version: Optional[APIVersion]):
kwargs.update(params)
if version is not None:
- try:
- client_version = APIVersion.from_mime_type(
- cherrypy.request.headers['Accept'])
- except Exception:
- raise cherrypy.HTTPError(
- 415, "Unable to find version in request header")
+ client_version = BaseController.get_client_version()
# NOTE(review): both branches below invoke func() identically, so the
# version.supports(client_version) check has no effect. Presumably the
# else-branch should reject an unsupported client version (e.g. HTTP 415)
# -- confirm the intended behaviour before merging.
if version.supports(client_version):
ret = func(*args, **kwargs)
else:
ret = func(*args, **kwargs)
+
if isinstance(ret, bytes):
ret = ret.decode('utf-8')
+
if xml:
- if version:
- cherrypy.response.headers['Content-Type'] = \
- 'application/vnd.ceph.api.v{}+xml'.format(version)
- else:
- cherrypy.response.headers['Content-Type'] = 'application/xml'
+ cherrypy.response.headers['Content-Type'] = (version.to_mime_type(subtype='xml')
+ if version else 'application/xml')
return ret.encode('utf8')
if json_response:
- if version:
- cherrypy.response.headers['Content-Type'] = \
- 'application/vnd.ceph.api.v{}+json'.format(version)
- else:
- cherrypy.response.headers['Content-Type'] = 'application/json'
+ cherrypy.response.headers['Content-Type'] = (version.to_mime_type(subtype='json')
+ if version else 'application/json')
ret = json.dumps(ret).encode('utf8')
return ret
return inner
return arg_map
+ def _get_metadata(self, arg_map):
+ metadata = {}
+ for k, v in self.metadata.items():
+ if isinstance(v, str) and v and v[0] == '{' and v[-1] == '}':
+ param = v[1:-1]
+ try:
+ pos = int(param)
+ metadata[k] = arg_map[pos]
+ except ValueError:
+ if param.find('.') == -1:
+ metadata[k] = arg_map[param]
+ else:
+ path = param.split('.')
+ metadata[k] = arg_map[path[0]]
+ for i in range(1, len(path)):
+ metadata[k] = metadata[k][path[i]]
+ else:
+ metadata[k] = v
+ return metadata
+
def __call__(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
arg_map = self._gen_arg_map(func, args, kwargs)
- metadata = {}
- for k, v in self.metadata.items():
- if isinstance(v, str) and v and v[0] == '{' and v[-1] == '}':
- param = v[1:-1]
- try:
- pos = int(param)
- metadata[k] = arg_map[pos]
- except ValueError:
- if param.find('.') == -1:
- metadata[k] = arg_map[param]
- else:
- path = param.split('.')
- metadata[k] = arg_map[path[0]]
- for i in range(1, len(path)):
- metadata[k] = metadata[k][path[i]]
- else:
- metadata[k] = v
+ metadata = self._get_metadata(arg_map)
+
task = TaskManager.run(self.name, metadata, func, args, kwargs,
exception_handler=self.exception_handler)
try:
return
mds_versions[metadata.get('ceph_version', 'unknown')].append(metadata_key)
def _find_standby_replays(self, mdsmap_info, rank_table):
    """Append one rank-table row for every standby-replay MDS daemon."""
    # pylint: disable=unused-variable
    for gid_str, daemon_info in mdsmap_info.items():
        if daemon_info['state'] != "up:standby-replay":
            continue

        name = daemon_info['name']
        # Memory gauges for this daemon (inodes, dentries, dirs, caps).
        inos = mgr.get_latest("mds", name, "mds_mem.ino")
        dns = mgr.get_latest("mds", name, "mds_mem.dn")
        dirs = mgr.get_latest("mds", name, "mds_mem.dir")
        caps = mgr.get_latest("mds", name, "mds_mem.cap")

        # Replay rate is the "activity" figure for a standby-replay rank.
        activity = CephService.get_rate("mds", name, "mds_log.replay")

        rank_table.append({
            "rank": "{0}-s".format(daemon_info['rank']),
            "state": "standby-replay",
            "mds": name,
            "activity": activity,
            "dns": dns,
            "inos": inos,
            "dirs": dirs,
            "caps": caps,
        })
+
def get_standby_table(self, standbys, mds_versions):
    """Build the standby-MDS table, recording each daemon's metadata.

    :param standbys: iterable of standby daemon dicts (must carry 'name').
    :param mds_versions: version->daemon map updated via _append_mds_metadata.
    :return: list of ``{'name': <daemon name>}`` rows.
    """
    table = []
    for daemon in standbys:
        self._append_mds_metadata(mds_versions, daemon['name'])
        table.append({'name': daemon['name']})
    return table
+
# pylint: disable=too-many-statements,too-many-branches
def fs_status(self, fs_id):
mds_versions: dict = defaultdict(list)
dirs = mgr.get_latest("mds", info['name'], "mds_mem.dir")
caps = mgr.get_latest("mds", info['name'], "mds_mem.cap")
- if rank == 0:
- client_count = mgr.get_latest("mds", info['name'],
- "mds_sessions.session_count")
- elif client_count == 0:
- # In case rank 0 was down, look at another rank's
- # sessionmap to get an indication of clients.
+ # In case rank 0 was down, look at another rank's
+ # sessionmap to get an indication of clients.
+ if rank == 0 or client_count == 0:
client_count = mgr.get_latest("mds", info['name'],
"mds_sessions.session_count")
}
)
- # Find the standby replays
- # pylint: disable=unused-variable
- for gid_str, daemon_info in mdsmap['info'].items():
- if daemon_info['state'] != "up:standby-replay":
- continue
-
- inos = mgr.get_latest("mds", daemon_info['name'], "mds_mem.ino")
- dns = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dn")
- dirs = mgr.get_latest("mds", daemon_info['name'], "mds_mem.dir")
- caps = mgr.get_latest("mds", daemon_info['name'], "mds_mem.cap")
-
- activity = CephService.get_rate(
- "mds", daemon_info['name'], "mds_log.replay")
-
- rank_table.append(
- {
- "rank": "{0}-s".format(daemon_info['rank']),
- "state": "standby-replay",
- "mds": daemon_info['name'],
- "activity": activity,
- "dns": dns,
- "inos": inos,
- "dirs": dirs,
- "caps": caps
- }
- )
+ self._find_standby_replays(mdsmap['info'], rank_table)
df = mgr.get("df")
pool_stats = {p['id']: p['stats'] for p in df['pools']}
"avail": stats['max_avail']
})
- standby_table = []
- for standby in fsmap['standbys']:
- self._append_mds_metadata(mds_versions, standby['name'])
- standby_table.append({
- 'name': standby['name']
- })
+ standby_table = self.get_standby_table(fsmap['standbys'], mds_versions)
return {
"cephfs": {
return parameters
+ @staticmethod
+ def _process_func_attr(func):
+ summary = ''
+ version = None
+ response = {}
+ p_info = []
+
+ if hasattr(func, '__method_map_method__'):
+ version = func.__method_map_method__['version']
+ elif hasattr(func, '__resource_method__'):
+ version = func.__resource_method__['version']
+ elif hasattr(func, '__collection_method__'):
+ version = func.__collection_method__['version']
+
+ if hasattr(func, 'doc_info'):
+ if func.doc_info['summary']:
+ summary = func.doc_info['summary']
+ response = func.doc_info['response']
+ p_info = func.doc_info['parameters']
+
+ return summary, version, response, p_info
+
@classmethod
def _get_params(cls, endpoint, para_info):
    """Collect OpenAPI parameter specs for the endpoint's path and query
    parameters, annotated with the documented parameter info."""
    params = []
    for endpoint_params, location in ((endpoint.path_params, 'path'),
                                      (endpoint.query_params, 'query')):
        if endpoint_params:
            annotated = cls._add_param_info(endpoint_params, para_info)
            params.extend(cls._gen_params(annotated, location))
    return params
+
@classmethod
def set_request_body_param(cls, endpoint_param, method, methods, p_info):
    """Attach a JSON request-body schema for *endpoint_param* to the
    method's OpenAPI spec; no-op when there are no parameters."""
    if not endpoint_param:
        return
    annotated = cls._add_param_info(endpoint_param, p_info)
    schema = cls._gen_schema_for_content(annotated)
    methods[method.lower()]['requestBody'] = {
        'content': {
            'application/json': {
                'schema': schema}}}
+
@classmethod
def gen_paths(cls, all_endpoints):
# pylint: disable=R0912
method = endpoint.method
func = endpoint.func
- summary = ''
- version = None
- resp = {}
- p_info = []
-
- if hasattr(func, '__method_map_method__'):
- version = func.__method_map_method__['version']
-
- elif hasattr(func, '__resource_method__'):
- version = func.__resource_method__['version']
-
- elif hasattr(func, '__collection_method__'):
- version = func.__collection_method__['version']
-
- if hasattr(func, 'doc_info'):
- if func.doc_info['summary']:
- summary = func.doc_info['summary']
- resp = func.doc_info['response']
- p_info = func.doc_info['parameters']
- params = []
- if endpoint.path_params:
- params.extend(
- cls._gen_params(
- cls._add_param_info(endpoint.path_params, p_info), 'path'))
- if endpoint.query_params:
- params.extend(
- cls._gen_params(
- cls._add_param_info(endpoint.query_params, p_info), 'query'))
+ summary, version, resp, p_info = cls._process_func_attr(func)
+ params = cls._get_params(endpoint, p_info)
methods[method.lower()] = {
'tags': [cls._get_tag(endpoint)],
methods[method.lower()]['summary'] = summary
if method.lower() in ['post', 'put']:
- if endpoint.body_params:
- body_params = cls._add_param_info(endpoint.body_params, p_info)
- methods[method.lower()]['requestBody'] = {
- 'content': {
- 'application/json': {
- 'schema': cls._gen_schema_for_content(body_params)}}}
-
- if endpoint.query_params:
- query_params = cls._add_param_info(endpoint.query_params, p_info)
- methods[method.lower()]['requestBody'] = {
- 'content': {
- 'application/json': {
- 'schema': cls._gen_schema_for_content(query_params)}}}
+ cls.set_request_body_param(endpoint.body_params, method, methods, p_info)
+ cls.set_request_body_param(endpoint.query_params, method, methods, p_info)
if endpoint.is_secure:
methods[method.lower()]['security'] = [{'jwt': []}]
osd_spec = str(osd_id)
if 'osd' not in osd:
continue # pragma: no cover - simple early continue
- for stat in ['osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes']:
- prop = stat.split('.')[1]
- rates = CephService.get_rates('osd', osd_spec, stat)
- osd['stats'][prop] = get_most_recent_rate(rates)
- osd['stats_history'][prop] = rates
- # Gauge stats
- for stat in ['osd.numpg', 'osd.stat_bytes', 'osd.stat_bytes_used']:
- osd['stats'][stat.split('.')[1]] = mgr.get_latest('osd', osd_spec, stat)
+ self.gauge_stats(osd, osd_spec)
osd['operational_status'] = self._get_operational_status(osd_id, removing_osd_ids)
return list(osds.values())
@staticmethod
def gauge_stats(osd, osd_spec):
    """Populate ``osd['stats']``/``osd['stats_history']`` with op-rate
    counters and the latest gauge values for the given OSD."""
    rate_counters = ('osd.op_w', 'osd.op_in_bytes', 'osd.op_r', 'osd.op_out_bytes')
    for stat in rate_counters:
        _, prop = stat.split('.')
        rates = CephService.get_rates('osd', osd_spec, stat)
        osd['stats'][prop] = get_most_recent_rate(rates)
        osd['stats_history'][prop] = rates
    # Gauge stats
    for stat in ('osd.numpg', 'osd.stat_bytes', 'osd.stat_bytes_used'):
        osd['stats'][stat.split('.')[1]] = mgr.get_latest('osd', osd_spec, stat)
+
@RESTController.Collection('GET', version=APIVersion.EXPERIMENTAL)
@ReadPermission
def settings(self):
self._wait_for_pgs(pool)
def _set_pool_values(self, pool, application_metadata, flags, update_existing, kwargs):
- update_name = False
current_pool = self._get(pool)
if update_existing and kwargs.get('compression_mode') == 'unset':
self._prepare_compression_removal(current_pool.get('options'), kwargs)
CephService.send_command('mon', 'osd pool set', pool=pool, var='allow_ec_overwrites',
val='true')
if application_metadata is not None:
- def set_app(what, app):
- CephService.send_command('mon', 'osd pool application ' + what, pool=pool, app=app,
- yes_i_really_mean_it=True)
+ def set_app(app_metadata, set_app_what):
+ for app in app_metadata:
+ CephService.send_command('mon', 'osd pool application ' + set_app_what,
+ pool=pool, app=app, yes_i_really_mean_it=True)
+
if update_existing:
original_app_metadata = set(
cast(Iterable[Any], current_pool.get('application_metadata')))
else:
original_app_metadata = set()
- for app in original_app_metadata - set(application_metadata):
- set_app('disable', app)
- for app in set(application_metadata) - original_app_metadata:
- set_app('enable', app)
-
- def set_key(key, value):
- CephService.send_command('mon', 'osd pool set', pool=pool, var=key, val=str(value))
+ set_app(original_app_metadata - set(application_metadata), 'disable')
+ set_app(set(application_metadata) - original_app_metadata, 'enable')
quotas = {}
quotas['max_objects'] = kwargs.pop('quota_max_objects', None)
quotas['max_bytes'] = kwargs.pop('quota_max_bytes', None)
self._set_quotas(pool, quotas)
+ self._set_pool_keys(pool, kwargs)
- for key, value in kwargs.items():
+ def _set_pool_keys(self, pool, pool_items):
+ def set_key(key, value):
+ CephService.send_command('mon', 'osd pool set', pool=pool, var=key, val=str(value))
+
+ update_name = False
+ for key, value in pool_items.items():
if key == 'pool':
update_name = True
destpool = value
# check disabled features
_sort_features(curr_features, enable=False)
for feature in curr_features:
- if feature not in features and feature in self.ALLOW_DISABLE_FEATURES:
- if feature not in format_bitmask(image.features()):
- continue
+ if (feature not in features
+ and feature in self.ALLOW_DISABLE_FEATURES
+ and feature in format_bitmask(image.features())):
f_bitmask = format_features([feature])
image.update_features(f_bitmask, False)
# check enabled features
_sort_features(features)
for feature in features:
- if feature not in curr_features and feature in self.ALLOW_ENABLE_FEATURES:
- if feature in format_bitmask(image.features()):
- continue
+ if (feature not in curr_features
+ and feature in self.ALLOW_ENABLE_FEATURES
+ and feature not in format_bitmask(image.features())):
f_bitmask = format_features([feature])
image.update_features(f_bitmask, True)
continue
else:
dev_smart_data = {}
- for dev_id, dev_data in dev_smart_data.items():
- if 'error' in dev_data:
- logger.warning(
- '[SMART] Error retrieving smartctl data for device ID "%s": %s',
- dev_id, dev_data)
+
+ CephService.log_dev_data_error(dev_smart_data)
+
break
return dev_smart_data
device['devid'])
return {}
@staticmethod
def log_dev_data_error(dev_smart_data):
    """Emit a warning for every device whose smartctl query returned an
    error payload; devices without an 'error' key are ignored."""
    for dev_id, dev_data in dev_smart_data.items():
        if 'error' not in dev_data:
            continue
        logger.warning(
            '[SMART] Error retrieving smartctl data for device ID "%s": %s',
            dev_id, dev_data)
+
@staticmethod
def get_devices_by_host(hostname):
# type: (str) -> dict
data_pool_name = None
stat['data_pool'] = data_pool_name
- try:
- stat['parent'] = img.get_parent_image_spec()
- except rbd.ImageNotFound:
- # no parent image
- stat['parent'] = None
+ stat['parent'] = cls._rbd_image_stat_parent(img)
# snapshots
stat['snapshots'] = []
return stat
+ @classmethod
+ def _rbd_image_stat_parent(cls, img):
+ stat_parent = None
+ try:
+ stat_parent = img.get_parent_image_spec()
+ except rbd.ImageNotFound:
+ # no parent image
+ stat_parent = None
+ return stat_parent
+
@classmethod
def _rbd_image_refs(cls, ioctx):
rbd_inst = rbd.RBD()
"""
# pylint: disable=unused-argument
- # Do some validations.
- try:
- retention_period_days = int(retention_period_days) if retention_period_days else 0
- retention_period_years = int(retention_period_years) if retention_period_years else 0
- if retention_period_days < 0 or retention_period_years < 0:
- raise ValueError
- except (TypeError, ValueError):
- msg = "Retention period must be a positive integer."
- raise DashboardException(msg=msg, component='rgw')
- if retention_period_days and retention_period_years:
- # https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTBucketPUTObjectLockConfiguration.html
- msg = "Retention period requires either Days or Years. "\
- "You can't specify both at the same time."
- raise DashboardException(msg=msg, component='rgw')
- if not retention_period_days and not retention_period_years:
- msg = "Retention period requires either Days or Years. "\
- "You must specify at least one."
- raise DashboardException(msg=msg, component='rgw')
- if not isinstance(mode, str) or mode.upper() not in ['COMPLIANCE', 'GOVERNANCE']:
- msg = "Retention mode must be either COMPLIANCE or GOVERNANCE."
- raise DashboardException(msg=msg, component='rgw')
+ retention_period_days, retention_period_years = self.perform_validations(
+ retention_period_days, retention_period_years, mode)
# Generate the XML data like this:
# <ObjectLockConfiguration>
_ = request(data=data) # type: ignore
except RequestException as e:
raise DashboardException(msg=str(e), component='rgw')
+
def perform_validations(self, retention_period_days, retention_period_years, mode):
    """Validate S3 object-lock retention settings.

    :return: ``(days, years)`` normalized to non-negative ints.
    :raises DashboardException: when a period is malformed, when both or
        neither of days/years is given, or when *mode* is unknown.
    """
    def to_period(value):
        # Empty/falsy values normalize to 0; negatives are rejected.
        period = int(value) if value else 0
        if period < 0:
            raise ValueError
        return period

    try:
        retention_period_days = to_period(retention_period_days)
        retention_period_years = to_period(retention_period_years)
    except (TypeError, ValueError):
        raise DashboardException(
            msg="Retention period must be a positive integer.", component='rgw')

    if retention_period_days and retention_period_years:
        # https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTBucketPUTObjectLockConfiguration.html
        raise DashboardException(
            msg="Retention period requires either Days or Years. "
                "You can't specify both at the same time.",
            component='rgw')
    if not (retention_period_days or retention_period_years):
        raise DashboardException(
            msg="Retention period requires either Days or Years. "
                "You must specify at least one.",
            component='rgw')
    if not isinstance(mode, str) or mode.upper() not in ('COMPLIANCE', 'GOVERNANCE'):
        raise DashboardException(
            msg="Retention mode must be either COMPLIANCE or GOVERNANCE.",
            component='rgw')
    return retention_period_days, retention_period_years
# clear up races w/ tcmu-runner clients that haven't detected
# loss of optimized path
- for image in images.values():
- optimized_daemon = image.get('optimized_daemon', None)
- if optimized_daemon:
- for daemon_name in image['optimized_paths']:
- if daemon_name != optimized_daemon:
- daemon = daemons[daemon_name]
- daemon['optimized_paths'] -= 1
- daemon['non_optimized_paths'] += 1
- image['non_optimized_paths'].append(daemon_name)
- image['optimized_paths'] = [optimized_daemon]
+ TcmuService.remove_undetected_clients(images, daemons, daemon)
return {
'daemons': sorted(daemons.values(),
if image['pool_name'] == pool_name and image['name'] == image_name:
return image
return None
+
@staticmethod
def remove_undetected_clients(images, daemons, daemon):
    """Reconcile stale optimized-path claims per image.

    Every daemon listed in an image's ``optimized_paths`` other than its
    ``optimized_daemon`` is re-counted as a non-optimized path, and the
    image's optimized list is collapsed to the single optimized daemon.
    Images without an optimized daemon are left untouched.
    """
    for image in images.values():
        optimized_daemon = image.get('optimized_daemon', None)
        if not optimized_daemon:
            continue
        for daemon_name in image['optimized_paths']:
            if daemon_name == optimized_daemon:
                continue
            daemon = daemons[daemon_name]
            daemon['optimized_paths'] -= 1
            daemon['non_optimized_paths'] += 1
            image['non_optimized_paths'].append(daemon_name)
        image['optimized_paths'] = [optimized_daemon]