]> git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/orch: move to service_apply; remove update_foo()
author Sage Weil <sage@redhat.com>
Tue, 11 Feb 2020 22:42:08 +0000 (16:42 -0600)
committer Sage Weil <sage@redhat.com>
Thu, 13 Feb 2020 15:00:58 +0000 (09:00 -0600)
Signed-off-by: Sage Weil <sage@redhat.com>
doc/bootstrap.rst
doc/mgr/orchestrator_cli.rst
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/orchestrator.py
src/pybind/mgr/orchestrator_cli/module.py
src/pybind/mgr/rook/module.py

index c83ddab8862697c311d8b95d5b6b35533665b36f..0689f1d7061c8704654522711b27c162e44cde1e 100644 (file)
@@ -118,12 +118,12 @@ either as a simple IP address or as a CIDR network name.
 
 To deploy additional monitors,::
 
-  [monitor 1] # ceph orch mon update *<new-num-monitors>* *<host1:network1> [<host1:network2>...]*
+  [monitor 1] # ceph orch apply mon *<new-num-monitors>* *<host1:network1> [<host1:network2>...]*
 
 For example, to deploy a second monitor on ``newhost`` using an IP
 address in network ``10.1.2.0/24``,::
 
-  [monitor 1] # ceph orch mon update 2 newhost:10.1.2.0/24
+  [monitor 1] # ceph orch apply mon 2 newhost:10.1.2.0/24
 
 Deploying OSDs
 ==============
@@ -143,7 +143,7 @@ Deploying manager daemons
 It is a good idea to have at least one backup manager daemon.  To
 deploy one or more new manager daemons,::
 
-  [monitor 1] # ceph orch mgr update *<new-num-mgrs>* [*<host1>* ...]
+  [monitor 1] # ceph orch apply mgr *<new-num-mgrs>* [*<host1>* ...]
 
 Deploying MDSs
 ==============
index 2f3d1880ba7d9dbee8c9a78b6ed82b3ba5b7f1d2..1bad0a83902ef4d6bc461789a8cc600f80e0171a 100644 (file)
@@ -213,13 +213,13 @@ error if it doesn't know how to do this transition.
 
 Update the number of monitor nodes::
 
-    ceph orch mon update <num> [host, host:network...]
+    ceph orch apply mon <num> [host, host:network...]
 
 Each host can optionally specify a network for the monitor to listen on.
 
 Update the number of manager nodes::
 
-    ceph orch mgr update <num> [host...]
+    ceph orch apply mgr <num> [host...]
 
 ..
     .. note::
@@ -295,27 +295,27 @@ This is an overview of the current implementation status of the orchestrators.
 =================================== ====== =========
  Command                             Rook   Cephadm
 =================================== ====== =========
+ apply iscsi                         ⚪      ⚪
+ apply mds                           ✔      ✔
+ apply mgr                           ⚪      ✔
+ apply mon                           ✔      ✔
+ apply nfs                           ✔      ⚪
+ apply osd                           ✔      ✔
+ apply rbd-mirror                    ✔      ✔
+ apply rgw                           ⚪      ✔
  host add                            ⚪      ✔
  host ls                             ✔      ✔
  host rm                             ⚪      ✔
- mgr update                          ⚪      ✔
- mon update                          ✔      ✔
- osd create                          ✔      ✔
  daemon status                       ⚪      ✔
  daemon {stop,start,...}             ⚪      ✔
 device {ident,fault}-{on,off}       ⚪      ✔
  device ls                           ✔      ✔
  iscsi add                           ⚪      ⚪
- iscsi update                        ⚪      ⚪
  mds add                             ✔      ✔
- mds update                          ✔      ✔
  nfs add                             ✔      ⚪
- nfs update                          ✔      ⚪
  ps                                  ⚪      ✔
  rbd-mirror add                      ⚪      ✔
- rbd-mirror update                   ⚪      ✔
  rgw add                             ✔      ✔
- rgw update                          ⚪      ✔
  service ls                          ✔      ⚪
 =================================== ====== =========
 
index b48f092072cb4895fec5a015677d6e7588133496..b88c787813c7812b94c63ad1de0b7be144678b4a 100644 (file)
@@ -1488,6 +1488,19 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             return self._remove_daemon(args)
         return self._get_daemons(daemon_type=service_type).then(_filter)
 
+    def service_apply(self, spec):
+        if spec.service_type == 'mgr':
+            return self.update_mgrs(spec)
+        if spec.service_type == 'mon':
+            return self.update_mons(spec)
+        if spec.service_type == 'mds':
+            return self.update_mds(spec)
+        if spec.service_type == 'rgw':
+            return self.update_rgw(spec)
+        if spec.service_type == 'rbd-mirror':
+            return self.update_rbd_mirror(spec)
+        raise NotImplementedError()
+
     def get_inventory(self, node_filter=None, refresh=False):
         """
         Return the storage inventory of nodes matching the given filter.
index 623a8891067ed28a88a50055555ff5f6a674bc40..2c21ed518b9b2979c4dea03672e6fa4384cfff19 100644 (file)
@@ -922,6 +922,15 @@ class Orchestrator(object):
         #assert action in ["start", "stop", "reload, "restart", "redeploy"]
         raise NotImplementedError()
 
+    def service_apply(self, spec):
+        # type: (ServiceSpec) -> Completion
+        """
+        Create or update a service.
+
+        :param spec: a ServiceSpec (or derivative type)
+        """
+        raise NotImplementedError()
+
     def create_osds(self, drive_groups):
         # type: (List[DriveGroupSpec]) -> Completion
         """
@@ -951,78 +960,26 @@ class Orchestrator(object):
         """
         raise NotImplementedError()
 
-    def update_mgrs(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update the number of cluster managers.
-
-        :param num: requested number of managers.
-        :param hosts: list of hosts (optional)
-        """
-        raise NotImplementedError()
-
-    def update_mons(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update the number of cluster monitors.
-
-        :param num: requested number of monitors.
-        :param hosts: list of hosts + network + name (optional)
-        """
-        raise NotImplementedError()
-
     def add_mds(self, spec):
         # type: (ServiceSpec) -> Completion
         """Create a new MDS cluster"""
         raise NotImplementedError()
 
-    def update_mds(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update / redeploy existing MDS cluster
-        Like for example changing the number of service instances.
-        """
-        raise NotImplementedError()
-
     def add_rbd_mirror(self, spec):
         # type: (ServiceSpec) -> Completion
         """Create rbd-mirror cluster"""
         raise NotImplementedError()
 
-    def update_rbd_mirror(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update / redeploy rbd-mirror cluster
-        Like for example changing the number of service instances.
-        """
-        raise NotImplementedError()
-
     def add_nfs(self, spec):
         # type: (NFSServiceSpec) -> Completion
         """Create a new MDS cluster"""
         raise NotImplementedError()
 
-    def update_nfs(self, spec):
-        # type: (NFSServiceSpec) -> Completion
-        """
-        Update / redeploy existing NFS cluster
-        Like for example changing the number of service instances.
-        """
-        raise NotImplementedError()
-
     def add_rgw(self, spec):
         # type: (RGWSpec) -> Completion
         """Create a new MDS zone"""
         raise NotImplementedError()
 
-    def update_rgw(self, spec):
-        # type: (RGWSpec) -> Completion
-        """
-        Update / redeploy existing RGW zone
-        Like for example changing the number of service instances.
-        """
-        raise NotImplementedError()
-
     def upgrade_check(self, image, version):
         # type: (Optional[str], Optional[str]) -> Completion
         raise NotImplementedError()
index 19a1ad1f60fb5b3b0bb9a31107bc1acef4669765..46717993a37c2782e61bf8ba076f27cc680aba76 100644 (file)
@@ -401,21 +401,6 @@ Usage:
         orchestrator.raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
-    @orchestrator._cli_write_command(
-        'orch rbd-mirror update',
-        "name=num,type=CephInt,req=false "
-        "name=hosts,type=CephString,n=N,req=false "
-        "name=label,type=CephString,req=false",
-        'Update the number of rbd-mirror instances')
-    def _rbd_mirror_update(self, num, label=None, hosts=[]):
-        spec = orchestrator.ServiceSpec(
-            None,
-            placement=orchestrator.PlacementSpec(hosts=hosts, count=num, label=label))
-        completion = self.update_rbd_mirror(spec)
-        self._orchestrator_wait([completion])
-        orchestrator.raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
-
     @orchestrator._cli_write_command(
         'orch mds add',
         "name=fs_name,type=CephString "
@@ -431,26 +416,6 @@ Usage:
         orchestrator.raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
-    @orchestrator._cli_write_command(
-        'orch mds update',
-        "name=fs_name,type=CephString "
-        "name=num,type=CephInt,req=false "
-        "name=hosts,type=CephString,n=N,req=false "
-        "name=label,type=CephString,req=false",
-        'Update the number of MDS instances for the given fs_name')
-    def _mds_update(self, fs_name, num=None, label=None, hosts=[]):
-        placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts)
-        placement.validate()
-
-        spec = orchestrator.ServiceSpec(
-            fs_name,
-            placement=placement)
-
-        completion = self.update_mds(spec)
-        self._orchestrator_wait([completion])
-        orchestrator.raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
-
     @orchestrator._cli_write_command(
         'orch rgw add',
         'name=realm_name,type=CephString '
@@ -481,24 +446,6 @@ Usage:
         orchestrator.raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
-    @orchestrator._cli_write_command(
-        'orch rgw update',
-        'name=realm_name,type=CephString '
-        'name=zone_name,type=CephString '
-        'name=num,type=CephInt,req=false '
-        'name=hosts,type=CephString,n=N,req=false '
-        'name=label,type=CephString,req=false',
-        'Update the number of RGW instances for the given zone')
-    def _rgw_update(self, zone_name, realm_name, num=None, label=None, hosts=[]):
-        spec = orchestrator.RGWSpec(
-            rgw_realm=realm_name,
-            rgw_zone=zone_name,
-            placement=orchestrator.PlacementSpec(hosts=hosts, label=label, count=num))
-        completion = self.update_rgw(spec)
-        self._orchestrator_wait([completion])
-        orchestrator.raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
-
     @orchestrator._cli_write_command(
         'orch nfs add',
         "name=svc_arg,type=CephString "
@@ -521,23 +468,6 @@ Usage:
         orchestrator.raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
-    @orchestrator._cli_write_command(
-        'orch nfs update',
-        "name=svc_id,type=CephString "
-        'name=num,type=CephInt,req=false '
-        'name=hosts,type=CephString,n=N,req=false '
-        'name=label,type=CephString,req=false',
-        'Scale an NFS service')
-    def _nfs_update(self, svc_id, num=None, label=None, hosts=[]):
-        # type: (str, Optional[int], Optional[str], List[str]) -> HandleCommandResult
-        spec = orchestrator.NFSServiceSpec(
-            svc_id,
-            placement=orchestrator.PlacementSpec(label=label, hosts=hosts, count=num),
-        )
-        completion = self.update_nfs(spec)
-        self._orchestrator_wait([completion])
-        return HandleCommandResult(stdout=completion.result_str())
-
     @orchestrator._cli_write_command(
         'orch service',
         "name=action,type=CephChoices,strings=start|stop|restart|redeploy|reconfig "
@@ -593,30 +523,30 @@ Usage:
         return HandleCommandResult(stdout=completion.result_str())
 
     @orchestrator._cli_write_command(
-        'orch mgr update',
+        'orch apply mgr',
         "name=num,type=CephInt,req=false "
         "name=hosts,type=CephString,n=N,req=false "
         "name=label,type=CephString,req=false",
-        'Update the number of manager instances')
-    def _update_mgrs(self, num=None, hosts=[], label=None):
-
-        placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts)
+        'Update the size or placement of managers')
+    def _apply_mgr(self, num=None, hosts=[], label=None):
+        placement = orchestrator.PlacementSpec(
+            label=label, count=num, hosts=hosts)
         placement.validate()
 
         spec = orchestrator.ServiceSpec(placement=placement)
 
-        completion = self.update_mgrs(spec)
+        completion = self.service_apply(spec)
         self._orchestrator_wait([completion])
         orchestrator.raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
     @orchestrator._cli_write_command(
-        'orch mon update',
+        'orch apply mon',
         "name=num,type=CephInt,req=false "
         "name=hosts,type=CephString,n=N,req=false "
         "name=label,type=CephString,req=false",
         'Update the number of monitor instances')
-    def _update_mons(self, num=None, hosts=[], label=None):
+    def _apply_mon(self, num=None, hosts=[], label=None):
         if not num and not hosts and not label:
             # Improve Error message. Point to parse_host_spec examples
             raise orchestrator.OrchestratorValidationError("Mons need a placement spec. (num, host, network, name(opt))")
@@ -625,11 +555,80 @@ Usage:
 
         spec = orchestrator.ServiceSpec(placement=placement)
 
-        completion = self.update_mons(spec)
+        completion = self.service_apply(spec)
+        self._orchestrator_wait([completion])
+        orchestrator.raise_if_exception(completion)
+        return HandleCommandResult(stdout=completion.result_str())
+
+    @orchestrator._cli_write_command(
+        'orch apply mds',
+        "name=fs_name,type=CephString "
+        "name=num,type=CephInt,req=false "
+        "name=hosts,type=CephString,n=N,req=false "
+        "name=label,type=CephString,req=false",
+        'Update the number of MDS instances for the given fs_name')
+    def _apply_mds(self, fs_name, num=None, label=None, hosts=[]):
+        placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts)
+        placement.validate()
+
+        spec = orchestrator.ServiceSpec(
+            fs_name,
+            placement=placement)
+
+        completion = self.service_apply(spec)
+        self._orchestrator_wait([completion])
+        orchestrator.raise_if_exception(completion)
+        return HandleCommandResult(stdout=completion.result_str())
+
+    @orchestrator._cli_write_command(
+        'orch apply rbd-mirror',
+        "name=num,type=CephInt,req=false "
+        "name=hosts,type=CephString,n=N,req=false "
+        "name=label,type=CephString,req=false",
+        'Update the number of rbd-mirror instances')
+    def _apply_rbd_mirror(self, num, label=None, hosts=[]):
+        spec = orchestrator.ServiceSpec(
+            placement=orchestrator.PlacementSpec(hosts=hosts, count=num, label=label))
+        completion = self.service_apply(spec)
+        self._orchestrator_wait([completion])
+        orchestrator.raise_if_exception(completion)
+        return HandleCommandResult(stdout=completion.result_str())
+
+    @orchestrator._cli_write_command(
+        'orch apply rgw',
+        'name=realm_name,type=CephString '
+        'name=zone_name,type=CephString '
+        'name=num,type=CephInt,req=false '
+        'name=hosts,type=CephString,n=N,req=false '
+        'name=label,type=CephString,req=false',
+        'Update the number of RGW instances for the given zone')
+    def _apply_rgw(self, zone_name, realm_name, num=None, label=None, hosts=[]):
+        spec = orchestrator.RGWSpec(
+            rgw_realm=realm_name,
+            rgw_zone=zone_name,
+            placement=orchestrator.PlacementSpec(hosts=hosts, label=label, count=num))
+        completion = self.service_apply(spec)
         self._orchestrator_wait([completion])
         orchestrator.raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
+    @orchestrator._cli_write_command(
+        'orch apply nfs',
+        "name=svc_id,type=CephString "
+        'name=num,type=CephInt,req=false '
+        'name=hosts,type=CephString,n=N,req=false '
+        'name=label,type=CephString,req=false',
+        'Scale an NFS service')
+    def _apply_nfs(self, svc_id, num=None, label=None, hosts=[]):
+        # type: (str, Optional[int], Optional[str], List[str]) -> HandleCommandResult
+        spec = orchestrator.NFSServiceSpec(
+            svc_id,
+            placement=orchestrator.PlacementSpec(label=label, hosts=hosts, count=num),
+        )
+        completion = self.service_apply(spec)
+        self._orchestrator_wait([completion])
+        return HandleCommandResult(stdout=completion.result_str())
+
     @orchestrator._cli_write_command(
         'orch set backend',
         "name=module_name,type=CephString,req=true",
index c9f8c70dae59ccabc2786d01c4f9274e8b6f0371..37ff39b18740ab22a2624b9078c63a073d111b72 100644 (file)
@@ -340,6 +340,16 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
                 'NFS', service_name, lambda: self.rook_cluster.rm_service('cephnfses', service_name)
             )
 
+    def service_apply(self, spec):
+        if spec.service_type == 'mon':
+            return self.update_mons(spec)
+        if spec.service_type == 'mgr':
+            raise NotImplementedError()
+        if spec.service_type == 'mds':
+            return self.update_mds(spec)
+        if spec.service_type == 'nfs':
+            return self.update_nfs(spec)
+
     def update_mons(self, spec):
         # type: (orchestrator.ServiceSpec) -> RookCompletion
         if spec.placement.hosts or spec.placement.label: