from cephadm.serve import CephadmServe
from cephadm.services.cephadmservice import CephadmDaemonSpec
-from mgr_module import MgrModule, HandleCommandResult
+from mgr_module import MgrModule, HandleCommandResult, Option
from mgr_util import create_self_signed_cert, verify_tls, ServerConfigException
import secrets
import orchestrator
instance = None
NATIVE_OPTIONS = [] # type: List[Any]
- MODULE_OPTIONS: List[dict] = [
- {
- 'name': 'ssh_config_file',
- 'type': 'str',
- 'default': None,
- 'desc': 'customized SSH config file to connect to managed hosts',
- },
- {
- 'name': 'device_cache_timeout',
- 'type': 'secs',
- 'default': 30 * 60,
- 'desc': 'seconds to cache device inventory',
- },
- {
- 'name': 'daemon_cache_timeout',
- 'type': 'secs',
- 'default': 10 * 60,
- 'desc': 'seconds to cache service (daemon) inventory',
- },
- {
- 'name': 'facts_cache_timeout',
- 'type': 'secs',
- 'default': 1 * 60,
- 'desc': 'seconds to cache host facts data',
- },
- {
- 'name': 'host_check_interval',
- 'type': 'secs',
- 'default': 10 * 60,
- 'desc': 'how frequently to perform a host check',
- },
- {
- 'name': 'mode',
- 'type': 'str',
- 'enum_allowed': ['root', 'cephadm-package'],
- 'default': 'root',
- 'desc': 'mode for remote execution of cephadm',
- },
- {
- 'name': 'container_image_base',
- 'default': 'docker.io/ceph/ceph',
- 'desc': 'Container image name, without the tag',
- 'runtime': True,
- },
- {
- 'name': 'container_image_prometheus',
- 'default': 'docker.io/prom/prometheus:v2.18.1',
- 'desc': 'Prometheus container image',
- },
- {
- 'name': 'container_image_grafana',
- 'default': 'docker.io/ceph/ceph-grafana:6.6.2',
- 'desc': 'Prometheus container image',
- },
- {
- 'name': 'container_image_alertmanager',
- 'default': 'docker.io/prom/alertmanager:v0.20.0',
- 'desc': 'Prometheus container image',
- },
- {
- 'name': 'container_image_node_exporter',
- 'default': 'docker.io/prom/node-exporter:v0.18.1',
- 'desc': 'Prometheus container image',
- },
- {
- 'name': 'warn_on_stray_hosts',
- 'type': 'bool',
- 'default': True,
- 'desc': 'raise a health warning if daemons are detected on a host '
- 'that is not managed by cephadm',
- },
- {
- 'name': 'warn_on_stray_daemons',
- 'type': 'bool',
- 'default': True,
- 'desc': 'raise a health warning if daemons are detected '
- 'that are not managed by cephadm',
- },
- {
- 'name': 'warn_on_failed_host_check',
- 'type': 'bool',
- 'default': True,
- 'desc': 'raise a health warning if the host check fails',
- },
- {
- 'name': 'log_to_cluster',
- 'type': 'bool',
- 'default': True,
- 'desc': 'log to the "cephadm" cluster log channel"',
- },
- {
- 'name': 'allow_ptrace',
- 'type': 'bool',
- 'default': False,
- 'desc': 'allow SYS_PTRACE capability on ceph containers',
- 'long_desc': 'The SYS_PTRACE capability is needed to attach to a '
- 'process with gdb or strace. Enabling this options '
- 'can allow debugging daemons that encounter problems '
- 'at runtime.',
- },
- {
- 'name': 'container_init',
- 'type': 'bool',
- 'default': False,
- 'desc': 'Run podman/docker with `--init`',
- },
- {
- 'name': 'prometheus_alerts_path',
- 'type': 'str',
- 'default': '/etc/prometheus/ceph/ceph_default_alerts.yml',
- 'desc': 'location of alerts to include in prometheus deployments',
- },
- {
- 'name': 'migration_current',
- 'type': 'int',
- 'default': None,
- 'desc': 'internal - do not modify',
+    MODULE_OPTIONS: List[Option] = [
+ Option(
+ 'ssh_config_file',
+ type='str',
+ default=None,
+ desc='customized SSH config file to connect to managed hosts',
+ ),
+ Option(
+ 'device_cache_timeout',
+ type='secs',
+ default=30 * 60,
+ desc='seconds to cache device inventory',
+ ),
+ Option(
+ 'daemon_cache_timeout',
+ type='secs',
+ default=10 * 60,
+ desc='seconds to cache service (daemon) inventory',
+ ),
+ Option(
+ 'facts_cache_timeout',
+ type='secs',
+ default=1 * 60,
+ desc='seconds to cache host facts data',
+ ),
+ Option(
+ 'host_check_interval',
+ type='secs',
+ default=10 * 60,
+ desc='how frequently to perform a host check',
+ ),
+ Option(
+ 'mode',
+ type='str',
+ enum_allowed=['root', 'cephadm-package'],
+ default='root',
+ desc='mode for remote execution of cephadm',
+ ),
+ Option(
+ 'container_image_base',
+ default='docker.io/ceph/ceph',
+ desc='Container image name, without the tag',
+ runtime=True,
+ ),
+ Option(
+ 'container_image_prometheus',
+ default='docker.io/prom/prometheus:v2.18.1',
+ desc='Prometheus container image',
+ ),
+ Option(
+ 'container_image_grafana',
+ default='docker.io/ceph/ceph-grafana:6.6.2',
+            desc='Grafana container image',
+ ),
+ Option(
+ 'container_image_alertmanager',
+ default='docker.io/prom/alertmanager:v0.20.0',
+            desc='Prometheus alertmanager container image',
+ ),
+ Option(
+ 'container_image_node_exporter',
+ default='docker.io/prom/node-exporter:v0.18.1',
+            desc='Prometheus node-exporter container image',
+ ),
+ Option(
+ 'warn_on_stray_hosts',
+ type='bool',
+ default=True,
+ desc='raise a health warning if daemons are detected on a host '
+ 'that is not managed by cephadm',
+ ),
+ Option(
+ 'warn_on_stray_daemons',
+ type='bool',
+ default=True,
+ desc='raise a health warning if daemons are detected '
+ 'that are not managed by cephadm',
+ ),
+ Option(
+ 'warn_on_failed_host_check',
+ type='bool',
+ default=True,
+ desc='raise a health warning if the host check fails',
+ ),
+ Option(
+ 'log_to_cluster',
+ type='bool',
+ default=True,
+            desc='log to the "cephadm" cluster log channel',
+ ),
+ Option(
+ 'allow_ptrace',
+ type='bool',
+ default=False,
+ desc='allow SYS_PTRACE capability on ceph containers',
+ long_desc='The SYS_PTRACE capability is needed to attach to a '
+                      'process with gdb or strace. Enabling this option '
+                      'allows debugging daemons that encounter problems '
+ 'at runtime.',
+ ),
+ Option(
+ 'container_init',
+ type='bool',
+ default=False,
+            desc='Run podman/docker with `--init`',
+ ),
+ Option(
+ 'prometheus_alerts_path',
+ type='str',
+ default='/etc/prometheus/ceph/ceph_default_alerts.yml',
+ desc='location of alerts to include in prometheus deployments',
+ ),
+ Option(
+ 'migration_current',
+ type='int',
+ default=None,
+ desc='internal - do not modify',
            # used to track spec and other data migrations.
- },
- {
- 'name': 'config_dashboard',
- 'type': 'bool',
- 'default': True,
- 'desc': 'manage configs like API endpoints in Dashboard.'
- },
- {
- 'name': 'manage_etc_ceph_ceph_conf',
- 'type': 'bool',
- 'default': False,
- 'desc': 'Manage and own /etc/ceph/ceph.conf on the hosts.',
- },
- {
- 'name': 'registry_url',
- 'type': 'str',
- 'default': None,
- 'desc': 'Custom repository url'
- },
- {
- 'name': 'registry_username',
- 'type': 'str',
- 'default': None,
- 'desc': 'Custom repository username'
- },
- {
- 'name': 'registry_password',
- 'type': 'str',
- 'default': None,
- 'desc': 'Custom repository password'
- },
- {
- 'name': 'use_repo_digest',
- 'type': 'bool',
- 'default': False,
- 'desc': 'Automatically convert image tags to image digest. Make sure all daemons use the same image',
- }
+ ),
+ Option(
+ 'config_dashboard',
+ type='bool',
+ default=True,
+ desc='manage configs like API endpoints in Dashboard.'
+ ),
+ Option(
+ 'manage_etc_ceph_ceph_conf',
+ type='bool',
+ default=False,
+ desc='Manage and own /etc/ceph/ceph.conf on the hosts.',
+ ),
+ Option(
+ 'registry_url',
+ type='str',
+ default=None,
+ desc='Custom repository url'
+ ),
+ Option(
+ 'registry_username',
+ type='str',
+ default=None,
+ desc='Custom repository username'
+ ),
+ Option(
+ 'registry_password',
+ type='str',
+ default=None,
+ desc='Custom repository password'
+ ),
+ Option(
+ 'use_repo_digest',
+ type='bool',
+ default=False,
+ desc='Automatically convert image tags to image digest. Make sure all daemons use the same image',
+ ),
]
def __init__(self, *args: Any, **kwargs: Any):
import ceph_module # noqa
-try:
- from typing import Set, Tuple, Iterator, Any, Dict, Optional, Callable, List
-except ImportError:
- # just for type checking
- pass
+from typing import Set, Tuple, Iterator, Any, Dict, Optional, Callable, List, \
+ Union, TYPE_CHECKING, NamedTuple
+if TYPE_CHECKING:
+ import sys
+ if sys.version_info >= (3, 8):
+ from typing import Literal
+ else:
+ from typing_extensions import Literal
+
+
import logging
import errno
import json
def _get_localized_key(prefix, key):
return '{}/{}'.format(prefix, key)
+"""
+MODULE_OPTIONS types and Option Class
+"""
+if TYPE_CHECKING:
+ OptionTypeLabel = Literal[
+ 'uint', 'int', 'str', 'float', 'bool', 'addr', 'addrvec', 'uuid', 'size', 'secs']
-class Option(dict):
- """
- Helper class to declare options for MODULE_OPTIONS list.
- Caveat: it uses argument names matching Python keywords (type, min, max),
- so any further processing should happen in a separate method.
+# common/options.h: value_t
+OptionValue = Optional[Union[bool, int, float, str]]
- TODO: type validation.
+
+class Option(Dict):
+ """
+ Helper class to declare options for MODULE_OPTIONS list.
+    TODO: Replace with typing.TypedDict once the minimum Python version is 3.8.
"""
def __init__(
- self, name,
- default=None,
- type='str',
- desc=None, longdesc=None,
- min=None, max=None,
- enum_allowed=None,
- see_also=None,
- tags=None,
- runtime=False,
+ self,
+ name: str,
+        default: OptionValue = None,
+        type: 'OptionTypeLabel' = 'str',
+        desc: Optional[str] = None,
+        long_desc: Optional[str] = None,
+        min: OptionValue = None,
+        max: OptionValue = None,
+        enum_allowed: Optional[List[str]] = None,
+        tags: Optional[List[str]] = None,
+        see_also: Optional[List[str]] = None,
+        runtime: bool = False,
):
super(Option, self).__init__(
(k, v) for k, v in vars().items()
from their active peer), and to configuration settings (read only).
"""
- MODULE_OPTIONS = [] # type: List[Dict[str, Any]]
+ MODULE_OPTIONS: List[Option] = []
MODULE_OPTION_DEFAULTS = {} # type: Dict[str, Any]
def __init__(self, module_name, capsule):
def get_mgr_id(self):
return self._ceph_get_mgr_id()
- def get_module_option(self, key, default=None):
+    def get_module_option(self, key: str, default: OptionValue = None) -> OptionValue:
"""
Retrieve the value of a persistent configuration setting
- :param str key:
:param default: the default value of the config if it is not found
- :return: str
"""
r = self._ceph_get_module_option(key)
if r is None:
def get_active_uri(self):
return self._ceph_get_active_uri()
- def get_localized_module_option(self, key, default=None):
+    def get_localized_module_option(self, key: str, default: OptionValue = None) -> OptionValue:
r = self._ceph_get_module_option(key, self.get_mgr_id())
if r is None:
return self.MODULE_OPTION_DEFAULTS.get(key, default)
class MgrModule(ceph_module.BaseMgrModule, MgrModuleLoggingMixin):
COMMANDS = [] # type: List[Any]
- MODULE_OPTIONS = [] # type: List[dict]
+ MODULE_OPTIONS: List[Option] = []
MODULE_OPTION_DEFAULTS = {} # type: Dict[str, Any]
# Priority definitions for perf counters
else:
return r
- def get_module_option(self, key, default=None):
+    def get_module_option(self, key: str, default: OptionValue = None) -> OptionValue:
"""
Retrieve the value of a persistent configuration setting
-
- :param str key:
- :param str default:
- :return: str
"""
self._validate_module_option(key)
return self._get_module_option(key, default)
- def get_module_option_ex(self, module, key, default=None):
+    def get_module_option_ex(self, module: str, key: str, default: OptionValue = None) -> OptionValue:
"""
Retrieve the value of a persistent configuration setting
for the specified module.
- :param str module: The name of the module, e.g. 'dashboard'
+ :param module: The name of the module, e.g. 'dashboard'
or 'telemetry'.
- :param str key: The configuration key, e.g. 'server_addr'.
- :param str,None default: The default value to use when the
+ :param key: The configuration key, e.g. 'server_addr'.
+ :param default: The default value to use when the
returned value is ``None``. Defaults to ``None``.
- :return: str,int,bool,float,None
"""
if module == self.module_name:
self._validate_module_option(key)
def _set_localized(self, key, val, setter):
return setter(_get_localized_key(self.get_mgr_id(), key), val)
- def get_localized_module_option(self, key, default=None):
+    def get_localized_module_option(self, key: str, default: OptionValue = None) -> OptionValue:
"""
Retrieve localized configuration for this ceph-mgr instance
- :param str key:
- :param str default:
- :return: str
"""
self._validate_module_option(key)
return self._get_module_option(key, default, self.get_mgr_id())
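
# Illustrative sketch (not part of this patch): how a hypothetical module would
# declare a typed option with the Option helper above and read it back through
# get_module_option(). "ExampleModule" and "example_interval" are placeholder
# names used only for illustration; they are not real Ceph modules or options.
from typing import List, cast

from mgr_module import MgrModule, Option


class ExampleModule(MgrModule):
    MODULE_OPTIONS: List[Option] = [
        Option(
            'example_interval',
            type='secs',
            default=60,
            desc='hypothetical polling interval, in seconds',
        ),
    ]

    def serve(self) -> None:
        # get_module_option() returns an OptionValue (bool/int/float/str or
        # None); narrow it with cast() where a concrete type is needed, as the
        # prometheus changes below do for scrape_interval.
        interval = cast(int, self.get_module_option('example_interval'))
        self.log.debug('would poll every %d seconds', interval)
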
from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
from mgr_util import format_bytes, to_pretty_timedelta, format_dimless
-from mgr_module import MgrModule, HandleCommandResult
+from mgr_module import MgrModule, HandleCommandResult, Option
from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
class OrchestratorCli(OrchestratorClientMixin, MgrModule,
metaclass=CLICommandMeta):
MODULE_OPTIONS = [
- {
- 'name': 'orchestrator',
- 'type': 'str',
- 'default': None,
- 'desc': 'Orchestrator backend',
- 'enum_allowed': ['cephadm', 'rook',
- 'test_orchestrator'],
- 'runtime': True,
- },
+ Option(
+ 'orchestrator',
+ type='str',
+ default=None,
+ desc='Orchestrator backend',
+ enum_allowed=['cephadm', 'rook', 'test_orchestrator'],
+ runtime=True,
+ )
]
NATIVE_OPTIONS = [] # type: List[dict]
usage:
ceph orch apply osd -i <json_file/yaml_file> [--dry-run]
ceph orch apply osd --all-available-devices [--dry-run] [--unmanaged]
-
+
Restrictions:
-
+
Mutexes:
* -i, --all-available-devices
* -i, --unmanaged (this would overwrite the osdspec loaded from a file)
-
+
Parameters:
-
+
* --unmanaged
Only works with --all-available-devices.
-
+
Description:
-
+
* -i
An inbuf object like a file or a json/yaml blob containing a valid OSDSpec
-
+
* --all-available-devices
    The simplest OSDSpec there is. Takes all devices marked as 'available'
    and creates standalone OSDs on them.
-
+
* --unmanaged
    Set the unmanaged flag for --all-available-devices (default is False)
-
+
Examples:
# ceph orch apply osd -i <file.yml|json>
-
+
Applies one or more OSDSpecs found in <file>
-
+
# ceph orch osd apply --all-available-devices --unmanaged=true
-
+
Creates and applies simple OSDSpec with the unmanaged flag set to <true>
"""
except ImportError:
TYPE_CHECKING = False
-from mgr_module import MgrModule, OSDMap
+from mgr_module import MgrModule, OSDMap, Option
from mgr_util import to_pretty_timedelta
from datetime import timedelta
import os
def refs(self):
# type: () -> List[str]
return self._refs
-
+
@property
def add_to_ceph_s(self):
# type: () -> bool
]
MODULE_OPTIONS = [
- {
- 'name': 'max_completed_events',
- 'default': 50,
- 'type': 'int',
- 'desc': 'number of past completed events to remember',
- 'runtime': True,
- },
- {
- 'name': 'persist_interval',
- 'default': 5,
- 'type': 'secs',
- 'desc': 'how frequently to persist completed events',
- 'runtime': True,
- },
- {
- 'name': 'enabled',
- 'default': True,
- 'type': 'bool',
-
- }
- ] # type: List[Dict[str, Any]]
+ Option(
+ 'max_completed_events',
+ default=50,
+ type='int',
+ desc='number of past completed events to remember',
+ runtime=True
+ ),
+ Option(
+ 'persist_interval',
+ default=5,
+ type='secs',
+ desc='how frequently to persist completed events',
+ runtime=True
+ ),
+ Option(
+ 'enabled',
+ default=True,
+ type='bool',
+ )
+ ]
def __init__(self, *args, **kwargs):
super(Module, self).__init__(*args, **kwargs)
))
self._osdmap_changed(old_osdmap, self._latest_osdmap)
elif notify_type == "pg_summary":
- # if there are no events we will skip this here to avoid
+ # if there are no events we will skip this here to avoid
# expensive get calls
if len(self._events) == 0:
return
-
+
global_event = False
data = self.get("pg_stats")
ready = self.get("pg_ready")
for ev_id in list(self._events):
ev = self._events[ev_id]
- # Check for types of events
+ # Check for types of events
# we have to update
if isinstance(ev, PgRecoveryEvent):
ev.pg_update(data, ready, self.log)
self.maybe_complete(ev)
if not global_event:
- # If there is no global event
+ # If there is no global event
# we create one
self._pg_state_changed(data)
self.clear_all_progress_events()
def _handle_clear(self):
- self.clear()
+ self.clear()
return 0, "", ""
def handle_command(self, _, cmd):
import socket
import threading
import time
-from mgr_module import MgrModule, MgrStandbyModule, PG_STATES
+from mgr_module import MgrModule, MgrStandbyModule, PG_STATES, Option
from mgr_util import get_default_addr, profile_method
from rbd import RBD
from collections import namedtuple
try:
- from typing import DefaultDict, Optional, Dict, Any, Set
+ from typing import DefaultDict, Optional, Dict, Any, Set, cast
except ImportError:
pass
]
MODULE_OPTIONS = [
- {'name': 'server_addr'},
- {'name': 'server_port'},
- {'name': 'scrape_interval'},
- {'name': 'stale_cache_strategy'},
- {'name': 'rbd_stats_pools'},
- {'name': 'rbd_stats_pools_refresh_interval', 'type': 'int', 'default': 300},
+ Option(
+ 'server_addr'
+ ),
+ Option(
+ 'server_port',
+ type='int'
+ ),
+ Option(
+ 'scrape_interval',
+ type='float',
+ default=15.0
+ ),
+ Option(
+ 'stale_cache_strategy',
+ default='log'
+ ),
+ Option(
+ 'rbd_stats_pools',
+ default=''
+ ),
+ Option(
+ name='rbd_stats_pools_refresh_interval',
+ type='int',
+ default=300
+ )
]
STALE_CACHE_FAIL = 'fail'
self.shutdown_event = threading.Event()
self.collect_lock = threading.Lock()
self.collect_time = 0.0
- self.scrape_interval = 15.0
- self.stale_cache_strategy = self.STALE_CACHE_FAIL
+ self.scrape_interval: float = 15.0
+ self.stale_cache_strategy: str = self.STALE_CACHE_FAIL
self.collect_cache = None
self.rbd_stats = {
'pools': {},
        # list of pool[/namespace] entries. If no namespace is specified the
# stats are collected for every namespace in the pool. The wildcard
# '*' can be used to indicate all pools or namespaces
- pools_string = self.get_localized_module_option('rbd_stats_pools', '')
+ pools_string = cast(str, self.get_localized_module_option('rbd_stats_pools'))
pool_keys = []
for x in re.split('[\s,]+', pools_string):
if not x:
raise cherrypy.HTTPError(503, msg)
# Make the cache timeout for collecting configurable
- self.scrape_interval = float(self.get_localized_module_option('scrape_interval', 15.0))
+ self.scrape_interval = cast(float, self.get_localized_module_option('scrape_interval'))
- self.stale_cache_strategy = self.get_localized_module_option('stale_cache_strategy', 'log')
+ self.stale_cache_strategy = cast(str, self.get_localized_module_option('stale_cache_strategy'))
if self.stale_cache_strategy not in [self.STALE_CACHE_FAIL,
self.STALE_CACHE_RETURN]:
self.stale_cache_strategy = self.STALE_CACHE_FAIL
cherrypy.config.update({
'server.socket_host': server_addr,
- 'server.socket_port': int(server_port),
+ 'server.socket_port': server_port,
'engine.autoreload.on': False
})
cherrypy.tree.mount(Root(), "/")
(server_addr, server_port))
cherrypy.config.update({
'server.socket_host': server_addr,
- 'server.socket_port': int(server_port),
+ 'server.socket_port': server_port,
'engine.autoreload.on': False
})
client = None
config = None
-from mgr_module import MgrModule
+from mgr_module import MgrModule, Option
import orchestrator
from .rook_cluster import RookCluster
Right now, we are calling the k8s API synchronously.
"""
- MODULE_OPTIONS = [
+ MODULE_OPTIONS: List[Option] = [
# TODO: configure k8s API addr instead of assuming local
- ] # type: List[Dict[str, Any]]
+ ]
def process(self, completions):
# type: (List[RookCompletion]) -> None
import json
import sqlite3
from .fs.schedule_client import SnapSchedClient
-from mgr_module import MgrModule, CLIReadCommand, CLIWriteCommand
+from mgr_module import MgrModule, CLIReadCommand, CLIWriteCommand, Option
from mgr_util import CephfsConnectionException
from threading import Event
class Module(MgrModule):
MODULE_OPTIONS = [
- {
- 'name': 'allow_m_granularity',
- 'type': 'bool',
- 'default': False,
- 'desc': 'allow minute scheduled snapshots',
- 'runtime': True,
- },
+ Option(
+ 'allow_m_granularity',
+ type='bool',
+ default=False,
+ desc='allow minute scheduled snapshots',
+ runtime=True,
+ ),
]
def __init__(self, *args, **kwargs):
import json
from typing import List, Dict
-from mgr_module import MgrModule
+from mgr_module import MgrModule, Option
from .fs.perf_stats import FSPerfStats
"perm": "r"
},
]
- MODULE_OPTIONS = [] # type: List[Dict]
+ MODULE_OPTIONS: List[Option] = []
def __init__(self, *args, **kwargs):
super(Module, self).__init__(*args, **kwargs)
import traceback
import threading
-from mgr_module import MgrModule
+from mgr_module import MgrModule, Option
import orchestrator
from .fs.volume import VolumeClient
]
MODULE_OPTIONS = [
- {
- 'name': 'max_concurrent_clones',
- 'type': 'int',
- 'default': 4,
- 'desc': 'Number of asynchronous cloner threads',
- }
+ Option(
+ 'max_concurrent_clones',
+ type='int',
+ default=4,
+ desc='Number of asynchronous cloner threads',
+ )
]
def __init__(self, *args, **kwargs):
pytest-mypy; python_version >= '3'
pytest >= 2.1.3; python_version >= '3'
pyyaml
+typing-extensions; python_version < '3.8'