| [--skip-pull] [--skip-firewalld] [--allow-overwrite]
| [--allow-fqdn-hostname] [--skip-prepare-host]
| [--orphan-initial-daemons] [--skip-monitoring-stack]
+| [--apply-spec APPLY_SPEC]
+
| **cephadm** **deploy** [-h] --name NAME --fsid FSID [--config CONFIG]
* [--skip-prepare-host] Do not prepare host
* [--orphan-initial-daemons] Do not create initial mon, mgr, and crash service specs
* [--skip-monitoring-stack] Do not automatically provision monitoring stack (prometheus, grafana, alertmanager, node-exporter)
-
+* [--apply-spec APPLY_SPEC] Apply cluster spec after bootstrap (copy ssh key, add hosts and apply services)
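+
+For example, to bootstrap a cluster and apply a spec in one step (the
+monitor address and spec path below are illustrative)::
+
+    cephadm bootstrap --mon-ip 192.168.0.1 --apply-spec /root/cluster-spec.yml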
ceph-volume
-----------
ceph orch host add <hostname> [<addr>] [<labels>...]
ceph orch host rm <hostname>
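+
+For example (host name, address and label below are illustrative)::
+
+    ceph orch host add node-01 192.168.0.11 example1
+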
+Host Specification
+------------------
+
+Many hosts can be added at once using
+``ceph orch apply -i`` by submitting a multi-document YAML file::
+
+ ---
+ service_type: host
+ addr: node-00
+ hostname: node-00
+ labels:
+ - example1
+ - example2
+ ---
+ service_type: host
+ addr: node-01
+ hostname: node-01
+ labels:
+ - grafana
+ ---
+ service_type: host
+ addr: node-02
+ hostname: node-02
+
+This can be combined with service specifications (below) to create a
+cluster spec file that deploys a whole cluster in one command.  See
+``cephadm bootstrap --apply-spec`` to do this during bootstrap.  Note
+that the cluster SSH key must be copied to the hosts before the spec is
+applied.
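+
+For example, a small cluster spec might combine host documents with a
+service spec (the host names and placement below are illustrative)::
+
+    ---
+    service_type: host
+    addr: node-00
+    hostname: node-00
+    ---
+    service_type: host
+    addr: node-01
+    hostname: node-01
+    ---
+    service_type: mon
+    placement:
+      hosts:
+        - node-00
+        - node-01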
+
OSD Management
==============
hosts:
- host1
- host2
- - host3
-
+ - host3
Configuring the Orchestrator CLI
================================
get_fqdn(), port,
args.initial_dashboard_user,
password))
+
+    if args.apply_spec:
+        logger.info('Applying %s to cluster' % args.apply_spec)
+
+        # The standalone cephadm script sticks to the standard library,
+        # so the spec is not parsed with PyYAML here; scanning for
+        # 'hostname:' lines is enough to find the hosts that need the
+        # cluster SSH key.
+        with open(args.apply_spec) as f:
+            for line in f:
+                if 'hostname:' in line:
+                    line = line.replace('\n', '')
+                    split = line.split(': ')
+                    # The bootstrap host already has the key; only copy
+                    # it to the other hosts named in the spec.
+                    if split[1] != host:
+                        logger.info('Adding ssh key to %s' % split[1])
+
+                        # Use the default cluster public key unless one
+                        # was supplied via --ssh-public-key.
+                        ssh_key = '/etc/ceph/ceph.pub'
+                        if args.ssh_public_key:
+                            ssh_key = args.ssh_public_key.name
+                        out, err, code = call_throws(['ssh-copy-id', '-f', '-i', ssh_key, 'root@%s' % split[1]])
+
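+    # Hand the spec to the orchestrator: mount it into the container in
+    # which 'ceph orch apply' runs; the ':z' option relabels the file
+    # for SELinux so the containerized process can read it.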
+ mounts = {}
+ mounts[pathify(args.apply_spec)] = '/tmp/spec.yml:z'
+
+ out = cli(['orch', 'apply', '-i', '/tmp/spec.yml'], extra_mounts=mounts)
+ logger.info(out)
logger.info('You can access the Ceph CLI with:\n\n'
'\tsudo %s shell --fsid %s -c %s -k %s\n' % (
'--skip-monitoring-stack',
action='store_true',
help='Do not automatically provision monitoring stack (prometheus, grafana, alertmanager, node-exporter)')
+ parser_bootstrap.add_argument(
+ '--apply-spec',
+ help='Apply cluster spec after bootstrap (copy ssh key, add hosts and apply services)')
+
parser_deploy = subparsers.add_parser(
'deploy', help='deploy a daemon')
import orchestrator
from orchestrator import OrchestratorError, OrchestratorValidationError, HostSpec, \
CLICommandMeta
+from orchestrator._interface import GenericSpec
from . import remotes
from . import utils
# type: (Optional[str]) -> List[str]
return list(self.inventory.filter_by_label(label))
- @trivial_completion
- def add_host(self, spec):
+ def _add_host(self, spec):
# type: (HostSpec) -> str
"""
Add a host to be managed by the orchestrator.
self.log.info('Added host %s' % spec.hostname)
return "Added host '{}'".format(spec.hostname)
+ @trivial_completion
+ def add_host(self, spec: HostSpec) -> str:
+ return self._add_host(spec)
+
@trivial_completion
def remove_host(self, host):
# type: (str) -> str
# type: (ServiceSpec) -> List[str]
return self._add_daemon('mgr', spec, self.mgr_service.create)
- def _apply(self, spec: ServiceSpec) -> str:
+ def _apply(self, spec: GenericSpec) -> str:
+ if spec.service_type == 'host':
+ return self._add_host(cast(HostSpec, spec))
+
+ return self._apply_service_spec(cast(ServiceSpec, spec))
+
+ def _apply_service_spec(self, spec: ServiceSpec) -> str:
if spec.placement.is_empty():
# fill in default placement
defaults = {
return "Scheduled %s update..." % spec.service_name()
@trivial_completion
- def apply(self, specs: List[ServiceSpec]):
- return [self._apply(spec) for spec in specs]
+ def apply(self, specs: List[GenericSpec]):
+ results = []
+ for spec in specs:
+ results.append(self._apply(spec))
+ return results
@trivial_completion
def apply_mgr(self, spec):
"""
raise NotImplementedError()
- def apply(self, specs: List[ServiceSpec]) -> Completion:
+ def apply(self, specs: List["GenericSpec"]) -> Completion:
"""
Applies any spec
"""
- fns: Dict[str, Callable[[ServiceSpec], Completion]] = {
+        fns: Dict[str, Callable] = {
'alertmanager': self.apply_alertmanager,
'crash': self.apply_crash,
'grafana': self.apply_grafana,
- 'iscsi': cast(Callable[[ServiceSpec], Completion], self.apply_iscsi),
+ 'iscsi': self.apply_iscsi,
'mds': self.apply_mds,
'mgr': self.apply_mgr,
'mon': self.apply_mon,
- 'nfs': cast(Callable[[ServiceSpec], Completion], self.apply_nfs),
+ 'nfs': self.apply_nfs,
'node-exporter': self.apply_node_exporter,
- 'osd': cast(Callable[[ServiceSpec], Completion], lambda dg: self.apply_drivegroups([dg])),
+ 'osd': lambda dg: self.apply_drivegroups([dg]),
'prometheus': self.apply_prometheus,
'rbd-mirror': self.apply_rbd_mirror,
- 'rgw': cast(Callable[[ServiceSpec], Completion], self.apply_rgw),
+ 'rgw': self.apply_rgw,
+ 'host': self.add_host,
}
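+        # Chain the specs into one Completion: apply the first spec,
+        # then fold each remaining spec's result into the accumulated
+        # list of results.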
def merge(ls, r):
spec, *specs = specs
- completion = fns[spec.service_type](spec)
+ fn = cast(Callable[["GenericSpec"], Completion], fns[spec.service_type])
+ completion = fn(spec)
for s in specs:
def next(ls):
-            return fns[s.service_type](s).then(lambda r: merge(ls, r))
+                fn = cast(Callable[["GenericSpec"], Completion], fns[s.service_type])
+                return fn(s).then(lambda r: merge(ls, r))
completion = completion.then(next)
return completion
labels=None, # type: Optional[List[str]]
status=None, # type: Optional[str]
):
+ self.service_type = 'host'
+
#: the bare hostname on the host. Not the FQDN.
self.hostname = hostname # type: str
'status': self.status,
}
+    @classmethod
+    def from_json(cls, host_spec):
+        # type: (dict) -> HostSpec
+        # 'addr' and 'labels' are optional in the spec document and
+        # default to None.
+        return cls(host_spec['hostname'],
+                   host_spec.get('addr'),
+                   host_spec.get('labels'))
+
def __repr__(self):
args = [self.hostname] # type: List[Any]
if self.addr is not None:
self.addr == other.addr and \
self.labels == other.labels
+GenericSpec = Union[ServiceSpec, HostSpec]
+
+def json_to_generic_spec(spec):
+    # type: (dict) -> GenericSpec
+    if spec.get('service_type') == 'host':
+        return HostSpec.from_json(spec)
+    else:
+        return ServiceSpec.from_json(spec)
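+
+# For example, {'service_type': 'host', 'hostname': 'node-00'} yields a
+# HostSpec, while {'service_type': 'mgr', 'placement': {'count': 2}}
+# falls through to ServiceSpec.from_json.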
class UpgradeStatusSpec(object):
# Orchestrator's report on what's going on with any ongoing upgrade
import datetime
import errno
import json
-from typing import List, Set, Optional, Iterator
+from typing import List, Set, Optional, Iterator, cast
import re
import ast
raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
- ServiceDescription, DaemonDescription, IscsiServiceSpec
+ ServiceDescription, DaemonDescription, IscsiServiceSpec, json_to_generic_spec, GenericSpec
def nice_delta(now, t, suffix=''):
spec = service.spec
spec.unmanaged = unmanaged_flag
specs.append(spec)
- completion = self.apply(specs)
+ completion = self.apply(cast(List[GenericSpec], specs))
self._orchestrator_wait([completion])
raise_if_exception(completion)
if specs:
if service_type or placement or unmanaged:
raise OrchestratorValidationError(usage)
content: Iterator = yaml.load_all(inbuf)
- specs = [ServiceSpec.from_json(s) for s in content]
+ specs: List[GenericSpec] = [json_to_generic_spec(s) for s in content]
+
else:
-            spec = PlacementSpec.from_string(placement)
+            placement_spec = PlacementSpec.from_string(placement)
             assert service_type
-            specs = [ServiceSpec(service_type, placement=spec, unmanaged=unmanaged)]
-
+            specs = [ServiceSpec(service_type, placement=placement_spec, unmanaged=unmanaged)]
+
completion = self.apply(specs)
self._orchestrator_wait([completion])
raise_if_exception(completion)