from ceph.deployment import inventory, translate
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.drive_selection import selector
+from ceph.deployment.service_spec import HostPlacementSpec, ServiceSpec
from mgr_module import MgrModule
import orchestrator
-from orchestrator import OrchestratorError, HostPlacementSpec, OrchestratorValidationError, HostSpec, \
- CLICommandMeta, ServiceSpec
+from orchestrator import OrchestratorError, OrchestratorValidationError, HostSpec, \
+ CLICommandMeta
from . import remotes
from .osd import RemoveUtil, OSDRemoval
def __init__(self, mgr):
# type: (CephadmOrchestrator) -> None
self.mgr = mgr
- self.specs = {} # type: Dict[str, orchestrator.ServiceSpec]
+ self.specs = {} # type: Dict[str, ServiceSpec]
self.spec_created = {} # type: Dict[str, datetime.datetime]
def load(self):
pass
def save(self, spec):
- # type: (orchestrator.ServiceSpec) -> None
+ # type: (ServiceSpec) -> None
self.specs[spec.service_name()] = spec
self.spec_created[spec.service_name()] = datetime.datetime.utcnow()
self.mgr.set_store(
self.mgr.set_store(SPEC_STORE_PREFIX + service_name, None)
def find(self, service_name):
- # type: (str) -> List[orchestrator.ServiceSpec]
+ # type: (str) -> List[ServiceSpec]
specs = []
for sn, spec in self.specs.items():
if sn == service_name or sn.startswith(service_name + '.'):
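A hedged sketch of the prefix matching above; the ServiceSpec constructor
kwargs and the mgr handle are assumptions for illustration, not part of this
patch:

    store = SpecStore(mgr)
    store.save(ServiceSpec(service_type='mds', service_id='cephfs'))  # assumed ctor kwargs
    store.find('mds')         # matches 'mds.cephfs' via the service_name + '.' prefix rule
    store.find('mds.cephfs')  # exact match on the full service name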
def _apply_all_services(self):
r = False
- specs = [] # type: List[orchestrator.ServiceSpec]
+ specs = [] # type: List[ServiceSpec]
for sn, spec in self.spec_store.specs.items():
specs.append(spec)
for spec in specs:
extra_config=extra_config)
def add_mon(self, spec):
- # type: (orchestrator.ServiceSpec) -> orchestrator.Completion
+ # type: (ServiceSpec) -> orchestrator.Completion
return self._add_daemon('mon', spec, self._create_mon)
def _create_mgr(self, mgr_id, host):
return self._create_daemon('mgr', mgr_id, host, keyring=keyring)
def add_mgr(self, spec):
- # type: (orchestrator.ServiceSpec) -> orchestrator.Completion
+ # type: (ServiceSpec) -> orchestrator.Completion
return self._add_daemon('mgr', spec, self._create_mgr)
def _apply(self, spec):
return self._apply(spec)
def add_mds(self, spec):
- # type: (orchestrator.ServiceSpec) -> AsyncCompletion
+ # type: (ServiceSpec) -> AsyncCompletion
return self._add_daemon('mds', spec, self._create_mds, self._config_mds)
- def apply_mds(self, spec: orchestrator.ServiceSpec) -> orchestrator.Completion:
+ def apply_mds(self, spec: ServiceSpec) -> orchestrator.Completion:
return self._apply(spec)
def _config_mds(self, spec):
return self._apply(spec)
def add_node_exporter(self, spec):
- # type: (orchestrator.ServiceSpec) -> AsyncCompletion
+ # type: (ServiceSpec) -> AsyncCompletion
return self._add_daemon('node-exporter', spec,
self._create_node_exporter)
return self._create_daemon('node-exporter', daemon_id, host)
def add_crash(self, spec):
- # type: (orchestrator.ServiceSpec) -> AsyncCompletion
+ # type: (ServiceSpec) -> AsyncCompletion
return self._add_daemon('crash', spec,
self._create_crash)
return self._create_daemon('crash', daemon_id, host, keyring=keyring)
def add_grafana(self, spec):
- # type: (orchestrator.ServiceSpec) -> AsyncCompletion
+ # type: (ServiceSpec) -> AsyncCompletion
return self._add_daemon('grafana', spec, self._create_grafana)
def apply_grafana(self, spec):
- # type: (orchestrator.ServiceSpec) -> AsyncCompletion
+ # type: (ServiceSpec) -> AsyncCompletion
return self._apply(spec)
def _create_grafana(self, daemon_id, host):
return self._create_daemon('grafana', daemon_id, host)
def add_alertmanager(self, spec):
- # type: (orchestrator.ServiceSpec) -> AsyncCompletion
+ # type: (ServiceSpec) -> AsyncCompletion
return self._add_daemon('alertmanager', spec, self._create_alertmanager)
def apply_alertmanager(self, spec):
- # type: (orchestrator.ServiceSpec) -> AsyncCompletion
+ # type: (ServiceSpec) -> AsyncCompletion
return self._apply(spec)
def _create_alertmanager(self, daemon_id, host):
"""
def __init__(self,
- spec, # type: orchestrator.ServiceSpec
+ spec, # type: ServiceSpec
get_hosts_func, # type: Callable[[Optional[str]],List[str]]
get_daemons_func, # type: Callable[[str],List[orchestrator.DaemonDescription]]
scheduler=None, # type: Optional[BaseScheduler]
):
assert spec and get_hosts_func and get_daemons_func
- self.spec = spec # type: orchestrator.ServiceSpec
+ self.spec = spec # type: ServiceSpec
self.scheduler = scheduler if scheduler else SimpleScheduler(self.spec.placement)
self.get_hosts_func = get_hosts_func
self.get_daemons_func = get_daemons_func
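A minimal sketch of driving HostAssignment with the relocated ServiceSpec; the
host and daemon callables are placeholders matching the type comments above,
and the place() entry point is assumed from context rather than shown in this
hunk:

    from ceph.deployment.service_spec import PlacementSpec, ServiceSpec

    spec = ServiceSpec(service_type='mgr',
                       placement=PlacementSpec(count=2))  # assumed ctor kwargs
    ha = HostAssignment(spec=spec,
                        get_hosts_func=lambda label=None: ['host1', 'host2', 'host3'],
                        get_daemons_func=lambda service_name: [])
    ha.place()  # hypothetical entry point: picks 2 of the 3 candidate hosts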
except ImportError:
pass
+from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
- ServiceSpec, PlacementSpec, RGWSpec, HostSpec, OrchestratorError
+ HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait, _run_cephadm, mon_command, match_glob
from cephadm.module import CephadmOrchestrator
from typing import NamedTuple, List
import pytest
+from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, ServiceSpecValidationError
+
from cephadm.module import HostAssignment
-from orchestrator import ServiceSpec, PlacementSpec, DaemonDescription, OrchestratorValidationError
+from orchestrator import DaemonDescription, OrchestratorValidationError
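The net effect for downstream code: the spec classes now live in python-common,
so tests and external tooling can import them without pulling in the
orchestrator interface at all:

    from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, ServiceSpecValidationError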
class NodeAssignmentTest(NamedTuple):
try:
s = PlacementSpec.from_string(placement.split(' '))
assert False
- except OrchestratorValidationError as e:
+ except ServiceSpecValidationError as e:
pass
CLICommand, _cli_write_command, _cli_read_command, CLICommandMeta, \
Orchestrator, OrchestratorClientMixin, \
OrchestratorValidationError, OrchestratorError, NoOrchestrator, \
- ServiceSpec, NFSServiceSpec, RGWSpec, HostPlacementSpec, \
- servicespec_validate_add, \
- ServiceDescription, InventoryFilter, PlacementSpec, HostSpec, \
+ ServiceDescription, InventoryFilter, HostSpec, \
DaemonDescription, \
InventoryHost, DeviceLightLoc, \
OutdatableData, OutdatablePersistentDict, \
from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection, \
DriveGroupSpecs
+from ceph.deployment.service_spec import PlacementSpec, ServiceSpec
from mgr_module import MgrModule, HandleCommandResult
from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
- NoOrchestrator, ServiceSpec, PlacementSpec, OrchestratorValidationError, NFSServiceSpec, \
- RGWSpec, InventoryFilter, InventoryHost, HostPlacementSpec, HostSpec, CLICommandMeta
+ NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
+ RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta
def nice_delta(now, t, suffix=''):
if t:
import fnmatch
from ceph.deployment.inventory import Device
+from ceph.deployment.service_spec import ServiceSpecValidationError
+
try:
from typing import Optional, List, Dict, Any
except ImportError:
return repr(self) == repr(other)
-class DriveGroupValidationError(Exception):
+class DriveGroupValidationError(ServiceSpecValidationError):
"""
Defining an exception here is a bit problematic, because you cannot
properly catch it if it was raised in a different mgr module.
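With the new base class, callers can handle drive-group failures and generic
spec failures in a single except clause; a sketch (the message text is
illustrative only):

    from ceph.deployment.drive_group import DriveGroupValidationError
    from ceph.deployment.service_spec import ServiceSpecValidationError

    try:
        raise DriveGroupValidationError('no data devices selected')  # illustrative message
    except ServiceSpecValidationError:
        pass  # a DriveGroupValidationError is now also a ServiceSpecValidationError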
import six
+class ServiceSpecValidationError(Exception):
+ """
+ Defining an exception here is a bit problematic, because you cannot
+ properly catch it if it was raised in a different mgr module.
+ """
+
+ def __init__(self, msg):
+ super(ServiceSpecValidationError, self).__init__(msg)
+
+
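Since the exception now lives in python-common rather than inside a mgr module,
both sides of an orchestrator call can reference the same type; a sketch using
the validate() checks introduced below:

    from ceph.deployment.service_spec import PlacementSpec, ServiceSpecValidationError

    try:
        PlacementSpec(count=0).validate()
    except ServiceSpecValidationError as e:
        print(e)  # num/count must be >= 1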
class HostPlacementSpec(namedtuple('HostPlacementSpec', ['hostname', 'network', 'name'])):
def __str__(self):
res = ''
def __init__(self, label=None, hosts=None, count=None, all_hosts=False):
# type: (Optional[str], Optional[List], Optional[int], bool) -> None
if all_hosts and (count or hosts or label):
- raise ValueError('cannot combine all:true and count|hosts|label')
+ raise ServiceSpecValidationError('cannot combine all:true and count|hosts|label')
self.label = label
self.hosts = [] # type: List[HostPlacementSpec]
if hosts:
def validate(self):
if self.hosts and self.label:
- # TODO: a less generic Exception
- raise ValueError('Host and label are mutually exclusive')
+ raise ServiceSpecValidationError('Host and label are mutually exclusive')
if self.count is not None and self.count <= 0:
- raise ValueError("num/count must be > 1")
+ raise ServiceSpecValidationError("num/count must be > 1")
@classmethod
def from_string(cls, arg):
else:
strings = [arg]
else:
- raise ValueError('invalid placement %s' % arg)
+ raise ServiceSpecValidationError('invalid placement %s' % arg)
count = None
if strings:
hosts = [x for x in strings if x != '*' and 'label:' not in x]
labels = [x[6:] for x in strings if 'label:' in x]
if len(labels) > 1:
- raise ValueError('more than one label provided: {}'.format(labels))
+ raise ServiceSpecValidationError('more than one label provided: {}'.format(labels))
ps = PlacementSpec(count=count,
hosts=hosts,
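A few hedged examples of what from_string() accepts, based on the parsing
above (the space-splitting branch and the all_hosts handling are elided from
this hunk, so those behaviors are assumptions):

    PlacementSpec.from_string('label:mon')    # -> PlacementSpec(label='mon')
    PlacementSpec.from_string('host1 host2')  # -> hosts=['host1', 'host2'], assuming split on spaces
    PlacementSpec.from_string(42)             # raises ServiceSpecValidationError: invalid placement 42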
# This must not be a method of ServiceSpec, otherwise you'll hunt
# sub-interpreter affinity bugs.
if not self.service_type:
- raise ValueError('Cannot add Service: type required')
+ raise ServiceSpecValidationError('Cannot add Service: type required')
if self.service_type in ['mds', 'rgw', 'nfs'] and not self.service_id:
- raise ValueError('Cannot add Service: id required')
+ raise ServiceSpecValidationError('Cannot add Service: id required')
class NFSServiceSpec(ServiceSpec):
servicespec_validate_add(self)
if not self.pool:
- raise ValueError('Cannot add NFS: No Pool specified')
+ raise ServiceSpecValidationError('Cannot add NFS: No Pool specified')
class RGWSpec(ServiceSpec):
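And the NFS pool check above in action; a sketch where the constructor kwargs
and the validate_add() entry point are assumed from context (only the pool
check itself appears in this hunk):

    from ceph.deployment.service_spec import NFSServiceSpec, ServiceSpecValidationError

    spec = NFSServiceSpec(service_id='mynfs', pool=None)  # assumed kwargs
    try:
        spec.validate_add()  # assumed method wrapping servicespec_validate_add()
    except ServiceSpecValidationError as e:
        print(e)  # Cannot add NFS: No Pool specified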