mgr/cephadm: cephadm bootstrap add --apply-spec <cluster.yaml>
author    Daniel-Pivonka <dpivonka@redhat.com>
          Fri, 24 Apr 2020 13:46:46 +0000 (09:46 -0400)
committer Daniel-Pivonka <dpivonka@redhat.com>
          Thu, 28 May 2020 16:26:48 +0000 (12:26 -0400)
Have a single command to set up a cluster for Day 1

example spec

---
service_type: host
addr: jmo-node-00
hostname: jmo-node-00
labels:
- test
- test2
---
service_type: host
addr: jmo-node-01
hostname: jmo-node-01
labels:
- grafana
---
service_type: host
addr: jmo-node-02
hostname: jmo-node-02
---
service_type: grafana
placement:
  label: "grafana"
---
service_type: osd
placement:
  host_pattern: '*'
data_devices:
  all: true
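
With a spec like the one above saved to a file (say cluster.yaml; the name is illustrative), a whole cluster can be brought up in one step by running "cephadm bootstrap --mon-ip <ip> --apply-spec cluster.yaml", where --mon-ip is the usual required bootstrap argument.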

Signed-off-by: Daniel-Pivonka <dpivonka@redhat.com>
doc/man/8/cephadm.rst
doc/mgr/orchestrator.rst
src/cephadm/cephadm
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/orchestrator/_interface.py
src/pybind/mgr/orchestrator/module.py

index c0533367df9218fb408a22370a501625b29a615f..bd5fa86989e267a0d668acc683de1b45f38b54e5 100644 (file)
@@ -70,6 +70,8 @@ Synopsis
 |                           [--skip-pull] [--skip-firewalld] [--allow-overwrite]
 |                           [--allow-fqdn-hostname] [--skip-prepare-host]
 |                           [--orphan-initial-daemons] [--skip-monitoring-stack]
+|                           [--apply-spec APPLY_SPEC]
+
 
 
 | **cephadm** **deploy** [-h] --name NAME --fsid FSID [--config CONFIG]
@@ -216,7 +218,7 @@ Arguments:
 * [--skip-prepare-host]           Do not prepare host
 * [--orphan-initial-daemons]      Do not create initial mon, mgr, and crash service specs
 * [--skip-monitoring-stack]       Do not automatically provision monitoring stack (prometheus, grafana, alertmanager, node-exporter)
-
+* [--apply-spec APPLY_SPEC]       Apply cluster spec after bootstrap (copy ssh key, add hosts and apply services)
 
 ceph-volume
 -----------
index 67f47347789a6b0584304e94e3344a89661cab65..48abedb80f5754e73382df348a5a04e46b430175 100644 (file)
@@ -73,6 +73,32 @@ Add and remove hosts::
     ceph orch host add <hostname> [<addr>] [<labels>...]
     ceph orch host rm <hostname>
 
+Host Specification
+------------------
+
+Many hosts can be added at once using
+``ceph orch apply -i`` by submitting a multi-document YAML file::
+
+    ---
+    service_type: host
+    addr: node-00
+    hostname: node-00
+    labels:
+    - example1
+    - example2
+    ---
+    service_type: host
+    addr: node-01
+    hostname: node-01
+    labels:
+    - grafana
+    ---
+    service_type: host
+    addr: node-02
+    hostname: node-02
+
+This can be combined with service specifications (below) to create a cluster spec file that deploys a whole cluster in one command.  See ``cephadm bootstrap --apply-spec`` to do this during bootstrap. The cluster SSH key must be copied to each host before the specs are applied.
+
 OSD Management
 ==============
 
@@ -548,8 +574,7 @@ Or with hosts:
       hosts: 
         - host1
         - host2
-        - host3
-
+        - host3 
 
 Configuring the Orchestrator CLI
 ================================
index e5253d5225c0e84b311d7e9c4db22d700df6d3cb..8e3e77b99f4baf5dbd883d11df2367a149da88a7 100755 (executable)
@@ -2655,6 +2655,28 @@ def command_bootstrap():
                         get_fqdn(), port,
                         args.initial_dashboard_user,
                         password))
+
+    if args.apply_spec:
+        logger.info('Applying %s to cluster' % args.apply_spec)
+
+        with open(args.apply_spec) as f:
+            for line in f:
+                if 'hostname:' in line:
+                    line = line.replace('\n', '')
+                    split = line.split(': ')
+                    if split[1] != host:
+                        logger.info('Adding ssh key to %s' % split[1])
+
+                        ssh_key = '/etc/ceph/ceph.pub'
+                        if args.ssh_public_key:
+                            ssh_key = args.ssh_public_key.name
+                        out, err, code = call_throws(['ssh-copy-id', '-f', '-i', ssh_key, 'root@%s' % split[1]])
+
+        mounts = {}
+        mounts[pathify(args.apply_spec)] = '/tmp/spec.yml:z'
+
+        out = cli(['orch', 'apply', '-i', '/tmp/spec.yml'], extra_mounts=mounts)
+        logger.info(out)
 
     logger.info('You can access the Ceph CLI with:\n\n'
                 '\tsudo %s shell --fsid %s -c %s -k %s\n' % (
@@ -4541,6 +4563,10 @@ def _get_parser():
         '--skip-monitoring-stack',
         action='store_true',
         help='Do not automatically provision monitoring stack (prometheus, grafana, alertmanager, node-exporter)')
+    parser_bootstrap.add_argument(
+        '--apply-spec',
+        help='Apply cluster spec after bootstrap (copy ssh key, add hosts and apply services)')
+
 
     parser_deploy = subparsers.add_parser(
         'deploy', help='deploy a daemon')
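
For reference, a minimal standalone sketch of what the new bootstrap step does. The function name and the use of yaml.safe_load_all are illustrative; the patch above scans the file for 'hostname:' lines rather than parsing it as YAML:

    import subprocess
    import yaml

    def copy_cluster_key(spec_path, local_host, ssh_key='/etc/ceph/ceph.pub'):
        # Push the cluster public key to every other host named in the
        # spec, so the mgr/cephadm module can reach them over SSH.
        with open(spec_path) as f:
            for doc in yaml.safe_load_all(f):
                if doc and doc.get('service_type') == 'host':
                    hostname = doc['hostname']
                    if hostname != local_host:
                        # same effect as the patch's ssh-copy-id call
                        subprocess.check_call(
                            ['ssh-copy-id', '-f', '-i', ssh_key,
                             'root@%s' % hostname])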
index 6fc2f14025782847837e970b5161d08e49d3c9ec..2a8c0007f75992258f4dfe4f651f0ab9c0868fe9 100644 (file)
@@ -27,6 +27,7 @@ from mgr_module import MgrModule, HandleCommandResult
 import orchestrator
 from orchestrator import OrchestratorError, OrchestratorValidationError, HostSpec, \
     CLICommandMeta
+from orchestrator._interface import GenericSpec
 
 from . import remotes
 from . import utils
@@ -1055,8 +1056,7 @@ you may want to run:
         # type: (Optional[str]) -> List[str]
         return list(self.inventory.filter_by_label(label))
 
-    @trivial_completion
-    def add_host(self, spec):
+    def _add_host(self, spec):
         # type: (HostSpec) -> str
         """
         Add a host to be managed by the orchestrator.
@@ -1079,6 +1079,10 @@ you may want to run:
         self.log.info('Added host %s' % spec.hostname)
         return "Added host '{}'".format(spec.hostname)
 
+    @trivial_completion
+    def add_host(self, spec: HostSpec) -> str:
+        return self._add_host(spec)
+
     @trivial_completion
     def remove_host(self, host):
         # type: (str) -> str
@@ -1992,7 +1996,13 @@ you may want to run:
         # type: (ServiceSpec) -> List[str]
         return self._add_daemon('mgr', spec, self.mgr_service.create)
 
-    def _apply(self, spec: ServiceSpec) -> str:
+    def _apply(self, spec: GenericSpec) -> str:
+        if spec.service_type == 'host':
+            return self._add_host(cast(HostSpec, spec))
+
+        return self._apply_service_spec(cast(ServiceSpec, spec))
+
+    def _apply_service_spec(self, spec: ServiceSpec) -> str:
         if spec.placement.is_empty():
             # fill in default placement
             defaults = {
@@ -2029,8 +2039,11 @@ you may want to run:
         return "Scheduled %s update..." % spec.service_name()
 
     @trivial_completion
-    def apply(self, specs: List[ServiceSpec]):
-        return [self._apply(spec) for spec in specs]
+    def apply(self, specs: List[GenericSpec]):
+        results = []
+        for spec in specs:
+            results.append(self._apply(spec))
+        return results
 
     @trivial_completion
     def apply_mgr(self, spec):
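
Net effect of the module.py changes: apply() now accepts a mixed list of host and service specs, and _apply() dispatches on service_type, routing 'host' documents to _add_host() while every other spec follows the pre-existing path, now named _apply_service_spec().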
index fd854b04c236f85edfbac76f66428aa06f767f89..db520c0f25da20a082987226726985163d69d2e9 100644 (file)
@@ -855,24 +855,25 @@ class Orchestrator(object):
         """
         raise NotImplementedError()
 
-    def apply(self, specs: List[ServiceSpec]) -> Completion:
+    def apply(self, specs: List["GenericSpec"]) -> Completion:
         """
         Applies any spec
         """
-        fns: Dict[str, Callable[[ServiceSpec], Completion]] = {
+            fns: Dict[str, Callable] = {
             'alertmanager': self.apply_alertmanager,
             'crash': self.apply_crash,
             'grafana': self.apply_grafana,
-            'iscsi': cast(Callable[[ServiceSpec], Completion], self.apply_iscsi),
+            'iscsi': self.apply_iscsi,
             'mds': self.apply_mds,
             'mgr': self.apply_mgr,
             'mon': self.apply_mon,
-            'nfs': cast(Callable[[ServiceSpec], Completion], self.apply_nfs),
+            'nfs': self.apply_nfs,
             'node-exporter': self.apply_node_exporter,
-            'osd': cast(Callable[[ServiceSpec], Completion], lambda dg: self.apply_drivegroups([dg])),
+            'osd': lambda dg: self.apply_drivegroups([dg]),
             'prometheus': self.apply_prometheus,
             'rbd-mirror': self.apply_rbd_mirror,
-            'rgw': cast(Callable[[ServiceSpec], Completion], self.apply_rgw),
+            'rgw': self.apply_rgw,
+            'host': self.add_host,
         }
 
         def merge(ls, r):
@@ -882,10 +883,12 @@ class Orchestrator(object):
 
         spec, *specs = specs
 
-        completion = fns[spec.service_type](spec)
+        fn = cast(Callable[["GenericSpec"], Completion], fns[spec.service_type])
+        completion = fn(spec)
         for s in specs:
             def next(ls):
-                return fns[s.service_type](s).then(lambda r: merge(ls, r))
+                fn = cast(Callable[["GenericSpec"], Completion], fns[s.service_type])
+                return fn(s).then(lambda r: merge(ls, r))
             completion = completion.then(next)
         return completion
 
@@ -1169,6 +1172,8 @@ class HostSpec(object):
                  labels=None,  # type: Optional[List[str]]
                  status=None,  # type: Optional[str]
                  ):
+        self.service_type = 'host'
+
         #: the bare hostname on the host. Not the FQDN.
         self.hostname = hostname  # type: str
 
@@ -1189,6 +1194,13 @@ class HostSpec(object):
             'status': self.status,
         }
 
+    @classmethod
+    def from_json(cls, host_spec):
+        _cls = cls(host_spec['hostname'],
+                   host_spec['addr'] if 'addr' in host_spec else None,
+                   host_spec['labels'] if 'labels' in host_spec else None)
+        return _cls
+
     def __repr__(self):
         args = [self.hostname]  # type: List[Any]
         if self.addr is not None:
@@ -1206,6 +1218,14 @@ class HostSpec(object):
                self.addr == other.addr and \
                self.labels == other.labels
 
+GenericSpec = Union[ServiceSpec, HostSpec]
+
+def json_to_generic_spec(spec):
+    # type: (dict) -> GenericSpec
+    if 'service_type' in spec and spec['service_type'] == 'host':
+        return HostSpec.from_json(spec)
+    else:
+        return ServiceSpec.from_json(spec)
 
 class UpgradeStatusSpec(object):
     # Orchestrator's report on what's going on with any ongoing upgrade
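
A short usage sketch for the new helper (assumes a Python environment where the mgr's orchestrator package is importable; cluster.yaml is the illustrative spec from the commit message):

    import yaml
    from orchestrator._interface import json_to_generic_spec

    with open('cluster.yaml') as f:
        # host documents become HostSpec, everything else ServiceSpec
        specs = [json_to_generic_spec(doc) for doc in yaml.safe_load_all(f)]

    for spec in specs:
        print(spec.service_type, repr(spec))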
index faf1cdf04e1d60fd64c02f6730088c1cac5cd1de..0518941c6e0f964ed8d1e9236cc075619aafee3a 100644 (file)
@@ -1,7 +1,7 @@
 import datetime
 import errno
 import json
-from typing import List, Set, Optional, Iterator
+from typing import List, Set, Optional, Iterator, cast
 import re
 import ast
 
@@ -20,7 +20,7 @@ from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_comma
     raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
     NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
     RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
-    ServiceDescription, DaemonDescription, IscsiServiceSpec
+    ServiceDescription, DaemonDescription, IscsiServiceSpec, json_to_generic_spec, GenericSpec
 
 
 def nice_delta(now, t, suffix=''):
@@ -468,7 +468,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
             spec = service.spec
             spec.unmanaged = unmanaged_flag
             specs.append(spec)
-        completion = self.apply(specs)
+        completion = self.apply(cast(List[GenericSpec], specs))
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
         if specs:
@@ -996,12 +996,13 @@ Usage:
             if service_type or placement or unmanaged:
                 raise OrchestratorValidationError(usage)
             content: Iterator = yaml.load_all(inbuf)
-            specs = [ServiceSpec.from_json(s) for s in content]
+            specs: List[GenericSpec] = [json_to_generic_spec(s) for s in content]
+
         else:
-            spec = PlacementSpec.from_string(placement)
+            placementspec = PlacementSpec.from_string(placement)
             assert service_type
-            specs = [ServiceSpec(service_type, placement=spec, unmanaged=unmanaged)]
-
+            specs = [ServiceSpec(service_type, placement=placementspec, unmanaged=unmanaged)]
         completion = self.apply(specs)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
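
With these changes the same multi-document spec works in both contexts: at bootstrap time via "cephadm bootstrap --apply-spec cluster.yaml", and against a running cluster via "ceph orch apply -i cluster.yaml" (the filename is illustrative; both commands appear in the doc changes above).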