From: Sage Weil
Date: Tue, 11 Feb 2020 22:42:08 +0000 (-0600)
Subject: mgr/orch: move to service_apply; remove update_foo()
X-Git-Tag: v15.1.1~425^2~14
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=67d72bca895ca99a361fcb47026d3a4431e106f9;p=ceph.git

mgr/orch: move to service_apply; remove update_foo()

Signed-off-by: Sage Weil
---

diff --git a/doc/bootstrap.rst b/doc/bootstrap.rst
index c83ddab88626..0689f1d7061c 100644
--- a/doc/bootstrap.rst
+++ b/doc/bootstrap.rst
@@ -118,12 +118,12 @@ either as a simple IP address or as a CIDR network name.
 
 To deploy additional monitors,::
 
-  [monitor 1] # ceph orch mon update *<number>* *<host1> [<host2> ...]*
+  [monitor 1] # ceph orch apply mon *<number>* *<host1> [<host2> ...]*
 
 For example, to deploy a second monitor on ``newhost`` using an IP
 address in network ``10.1.2.0/24``,::
 
-  [monitor 1] # ceph orch mon update 2 newhost:10.1.2.0/24
+  [monitor 1] # ceph orch apply mon 2 newhost:10.1.2.0/24
 
 Deploying OSDs
 ==============
@@ -143,7 +143,7 @@ Deploying manager daemons
 It is a good idea to have at least one backup manager daemon.  To
 deploy one or more new manager daemons,::
 
-  [monitor 1] # ceph orch mgr update *<host1>* [*<host2>* ...]
+  [monitor 1] # ceph orch apply mgr *<host1>* [*<host2>* ...]
 
 Deploying MDSs
 ==============
diff --git a/doc/mgr/orchestrator_cli.rst b/doc/mgr/orchestrator_cli.rst
index 2f3d1880ba7d..1bad0a83902e 100644
--- a/doc/mgr/orchestrator_cli.rst
+++ b/doc/mgr/orchestrator_cli.rst
@@ -213,13 +213,13 @@ error if it doesn't know how to do this transition.
 
 Update the number of monitor nodes::
 
-    ceph orch mon update <num> [host, host:network...]
+    ceph orch apply mon <num> [host, host:network...]
 
 Each host can optionally specify a network for the monitor to listen on.
 
 Update the number of manager nodes::
 
-    ceph orch mgr update <num> [host...]
+    ceph orch apply mgr <num> [host...]
 
 ..
 .. note::
@@ -295,27 +295,27 @@ This is an overview of the current implementation status of the orchestrators.
 
 =================================== ====== =========
  Command                            Rook   Cephadm
 =================================== ====== =========
+ apply iscsi                        ⚪      ⚪
+ apply mds                          ✔      ✔
+ apply mgr                          ⚪      ✔
+ apply mon                          ✔      ✔
+ apply nfs                          ✔      ⚪
+ apply osd                          ✔      ✔
+ apply rbd-mirror                   ✔      ✔
+ apply rgw                          ⚪      ✔
  host add                           ⚪      ✔
  host ls                            ✔      ✔
  host rm                            ⚪      ✔
- mgr update                         ⚪      ✔
- mon update                         ✔      ✔
- osd create                         ✔      ✔
  daemon status                      ⚪      ✔
  daemon {stop,start,...}            ⚪      ✔
  device {ident,fault}-(on,off}      ⚪      ✔
  device ls                          ✔      ✔
  iscsi add                          ⚪      ⚪
- iscsi update                       ⚪      ⚪
  mds add                            ✔      ✔
- mds update                         ✔      ✔
  nfs add                            ✔      ⚪
- nfs update                         ✔      ⚪
  ps                                 ⚪      ✔
  rbd-mirror add                     ⚪      ✔
- rbd-mirror update                  ⚪      ✔
  rgw add                            ✔      ✔
- rgw update                         ⚪      ✔
  service ls                         ✔      ⚪
 =================================== ====== =========
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index b48f092072cb..b88c787813c7 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -1488,6 +1488,19 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             return self._remove_daemon(args)
         return self._get_daemons(daemon_type=service_type).then(_filter)
 
+    def service_apply(self, spec):
+        if spec.service_type == 'mgr':
+            return self.update_mgrs(spec)
+        if spec.service_type == 'mon':
+            return self.update_mons(spec)
+        if spec.service_type == 'mds':
+            return self.update_mds(spec)
+        if spec.service_type == 'rgw':
+            return self.update_rgw(spec)
+        if spec.service_type == 'rbd-mirror':
+            return self.update_rbd_mirror(spec)
+        raise NotImplementedError()
+
     def get_inventory(self, node_filter=None, refresh=False):
         """
         Return the storage inventory of nodes matching the given filter.
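(Aside, not part of the patch.) The hunk above funnels every ``ceph orch apply <type>`` command through a single entry point. The standalone sketch below only mirrors the constructor shapes and routing used in this commit; ``PlacementSpec``, ``ServiceSpec``, and ``FakeOrchestrator`` are minimal stand-ins, not the real Ceph classes::

    # Stand-ins for the real classes; they only mirror the shapes used
    # in this commit (hosts/count/label placement, spec with an optional
    # name, a placement, and a service_type the backend routes on).
    class PlacementSpec(object):
        def __init__(self, hosts=None, count=None, label=None):
            self.hosts = hosts or []
            self.count = count
            self.label = label

        def validate(self):
            # The real validate() is stricter; this only rejects an
            # entirely empty placement.
            if not self.hosts and self.count is None and not self.label:
                raise ValueError('empty placement spec')

    class ServiceSpec(object):
        def __init__(self, name=None, placement=None, service_type=None):
            self.name = name
            self.placement = placement or PlacementSpec()
            self.service_type = service_type

    class FakeOrchestrator(object):
        def update_mons(self, spec):
            return 'scale mon to %s on %s' % (spec.placement.count,
                                              spec.placement.hosts)

        def service_apply(self, spec):
            # Same shape as CephadmOrchestrator.service_apply() above:
            # route on service_type, refuse anything unhandled.
            if spec.service_type == 'mon':
                return self.update_mons(spec)
            raise NotImplementedError(spec.service_type)

    # Roughly what a "ceph orch apply mon" invocation boils down to:
    spec = ServiceSpec(placement=PlacementSpec(count=3, hosts=['a', 'b', 'c']),
                       service_type='mon')
    spec.placement.validate()
    print(FakeOrchestrator().service_apply(spec))

The real CLI handlers below follow the same steps: build a PlacementSpec, validate it, wrap it in a (possibly service-specific) spec, and let the backend decide whether it can satisfy it.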
diff --git a/src/pybind/mgr/orchestrator.py b/src/pybind/mgr/orchestrator.py
index 623a8891067e..2c21ed518b9b 100644
--- a/src/pybind/mgr/orchestrator.py
+++ b/src/pybind/mgr/orchestrator.py
@@ -922,6 +922,15 @@ class Orchestrator(object):
         #assert action in ["start", "stop", "reload, "restart", "redeploy"]
         raise NotImplementedError()
 
+    def service_apply(self, spec):
+        # type: (ServiceSpec) -> Completion
+        """
+        Create or update a service.
+
+        :param spec: a ServiceSpec (or derivative type)
+        """
+        raise NotImplementedError()
+
     def create_osds(self, drive_groups):
         # type: (List[DriveGroupSpec]) -> Completion
         """
@@ -951,78 +960,26 @@ class Orchestrator(object):
         """
         raise NotImplementedError()
 
-    def update_mgrs(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update the number of cluster managers.
-
-        :param num: requested number of managers.
-        :param hosts: list of hosts (optional)
-        """
-        raise NotImplementedError()
-
-    def update_mons(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update the number of cluster monitors.
-
-        :param num: requested number of monitors.
-        :param hosts: list of hosts + network + name (optional)
-        """
-        raise NotImplementedError()
-
     def add_mds(self, spec):
         # type: (ServiceSpec) -> Completion
         """Create a new MDS cluster"""
         raise NotImplementedError()
 
-    def update_mds(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update / redeploy existing MDS cluster
-        Like for example changing the number of service instances.
-        """
-        raise NotImplementedError()
-
     def add_rbd_mirror(self, spec):
         # type: (ServiceSpec) -> Completion
         """Create rbd-mirror cluster"""
         raise NotImplementedError()
 
-    def update_rbd_mirror(self, spec):
-        # type: (ServiceSpec) -> Completion
-        """
-        Update / redeploy rbd-mirror cluster
-        Like for example changing the number of service instances.
-        """
-        raise NotImplementedError()
-
     def add_nfs(self, spec):
         # type: (NFSServiceSpec) -> Completion
         """Create a new MDS cluster"""
         raise NotImplementedError()
 
-    def update_nfs(self, spec):
-        # type: (NFSServiceSpec) -> Completion
-        """
-        Update / redeploy existing NFS cluster
-        Like for example changing the number of service instances.
-        """
-        raise NotImplementedError()
-
     def add_rgw(self, spec):
         # type: (RGWSpec) -> Completion
         """Create a new MDS zone"""
         raise NotImplementedError()
 
-    def update_rgw(self, spec):
-        # type: (RGWSpec) -> Completion
-        """
-        Update / redeploy existing RGW zone
-        Like for example changing the number of service instances.
- """ - raise NotImplementedError() - def upgrade_check(self, image, version): # type: (Optional[str], Optional[str]) -> Completion raise NotImplementedError() diff --git a/src/pybind/mgr/orchestrator_cli/module.py b/src/pybind/mgr/orchestrator_cli/module.py index 19a1ad1f60fb..46717993a37c 100644 --- a/src/pybind/mgr/orchestrator_cli/module.py +++ b/src/pybind/mgr/orchestrator_cli/module.py @@ -401,21 +401,6 @@ Usage: orchestrator.raise_if_exception(completion) return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( - 'orch rbd-mirror update', - "name=num,type=CephInt,req=false " - "name=hosts,type=CephString,n=N,req=false " - "name=label,type=CephString,req=false", - 'Update the number of rbd-mirror instances') - def _rbd_mirror_update(self, num, label=None, hosts=[]): - spec = orchestrator.ServiceSpec( - None, - placement=orchestrator.PlacementSpec(hosts=hosts, count=num, label=label)) - completion = self.update_rbd_mirror(spec) - self._orchestrator_wait([completion]) - orchestrator.raise_if_exception(completion) - return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( 'orch mds add', "name=fs_name,type=CephString " @@ -431,26 +416,6 @@ Usage: orchestrator.raise_if_exception(completion) return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( - 'orch mds update', - "name=fs_name,type=CephString " - "name=num,type=CephInt,req=false " - "name=hosts,type=CephString,n=N,req=false " - "name=label,type=CephString,req=false", - 'Update the number of MDS instances for the given fs_name') - def _mds_update(self, fs_name, num=None, label=None, hosts=[]): - placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts) - placement.validate() - - spec = orchestrator.ServiceSpec( - fs_name, - placement=placement) - - completion = self.update_mds(spec) - self._orchestrator_wait([completion]) - orchestrator.raise_if_exception(completion) - return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( 'orch rgw add', 'name=realm_name,type=CephString ' @@ -481,24 +446,6 @@ Usage: orchestrator.raise_if_exception(completion) return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( - 'orch rgw update', - 'name=realm_name,type=CephString ' - 'name=zone_name,type=CephString ' - 'name=num,type=CephInt,req=false ' - 'name=hosts,type=CephString,n=N,req=false ' - 'name=label,type=CephString,req=false', - 'Update the number of RGW instances for the given zone') - def _rgw_update(self, zone_name, realm_name, num=None, label=None, hosts=[]): - spec = orchestrator.RGWSpec( - rgw_realm=realm_name, - rgw_zone=zone_name, - placement=orchestrator.PlacementSpec(hosts=hosts, label=label, count=num)) - completion = self.update_rgw(spec) - self._orchestrator_wait([completion]) - orchestrator.raise_if_exception(completion) - return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( 'orch nfs add', "name=svc_arg,type=CephString " @@ -521,23 +468,6 @@ Usage: orchestrator.raise_if_exception(completion) return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( - 'orch nfs update', - "name=svc_id,type=CephString " - 'name=num,type=CephInt,req=false ' - 'name=hosts,type=CephString,n=N,req=false ' - 'name=label,type=CephString,req=false', - 'Scale an NFS service') - def _nfs_update(self, svc_id, num=None, label=None, hosts=[]): - # type: (str, Optional[int], 
Optional[str], List[str]) -> HandleCommandResult - spec = orchestrator.NFSServiceSpec( - svc_id, - placement=orchestrator.PlacementSpec(label=label, hosts=hosts, count=num), - ) - completion = self.update_nfs(spec) - self._orchestrator_wait([completion]) - return HandleCommandResult(stdout=completion.result_str()) - @orchestrator._cli_write_command( 'orch service', "name=action,type=CephChoices,strings=start|stop|restart|redeploy|reconfig " @@ -593,30 +523,30 @@ Usage: return HandleCommandResult(stdout=completion.result_str()) @orchestrator._cli_write_command( - 'orch mgr update', + 'orch apply mgr', "name=num,type=CephInt,req=false " "name=hosts,type=CephString,n=N,req=false " "name=label,type=CephString,req=false", - 'Update the number of manager instances') - def _update_mgrs(self, num=None, hosts=[], label=None): - - placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts) + 'Update the size or placement of managers') + def _apply_mgr(self, num=None, hosts=[], label=None): + placement = orchestrator.PlacementSpec( + label=label, count=num, hosts=hosts) placement.validate() spec = orchestrator.ServiceSpec(placement=placement) - completion = self.update_mgrs(spec) + completion = self.service_apply(spec) self._orchestrator_wait([completion]) orchestrator.raise_if_exception(completion) return HandleCommandResult(stdout=completion.result_str()) @orchestrator._cli_write_command( - 'orch mon update', + 'orch apply mon', "name=num,type=CephInt,req=false " "name=hosts,type=CephString,n=N,req=false " "name=label,type=CephString,req=false", 'Update the number of monitor instances') - def _update_mons(self, num=None, hosts=[], label=None): + def _apply_mon(self, num=None, hosts=[], label=None): if not num and not hosts and not label: # Improve Error message. Point to parse_host_spec examples raise orchestrator.OrchestratorValidationError("Mons need a placement spec. 
(num, host, network, name(opt))") @@ -625,11 +555,80 @@ Usage: spec = orchestrator.ServiceSpec(placement=placement) - completion = self.update_mons(spec) + completion = self.service_apply(spec) + self._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + return HandleCommandResult(stdout=completion.result_str()) + + @orchestrator._cli_write_command( + 'orch apply mds', + "name=fs_name,type=CephString " + "name=num,type=CephInt,req=false " + "name=hosts,type=CephString,n=N,req=false " + "name=label,type=CephString,req=false", + 'Update the number of MDS instances for the given fs_name') + def _apply_mds(self, fs_name, num=None, label=None, hosts=[]): + placement = orchestrator.PlacementSpec(label=label, count=num, hosts=hosts) + placement.validate() + + spec = orchestrator.ServiceSpec( + fs_name, + placement=placement) + + completion = self.service_apply(spec) + self._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + return HandleCommandResult(stdout=completion.result_str()) + + @orchestrator._cli_write_command( + 'orch apply rbd-mirror', + "name=num,type=CephInt,req=false " + "name=hosts,type=CephString,n=N,req=false " + "name=label,type=CephString,req=false", + 'Update the number of rbd-mirror instances') + def _apply_rbd_mirror(self, num, label=None, hosts=[]): + spec = orchestrator.ServiceSpec( + placement=orchestrator.PlacementSpec(hosts=hosts, count=num, label=label)) + completion = self.service_apply(spec) + self._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + return HandleCommandResult(stdout=completion.result_str()) + + @orchestrator._cli_write_command( + 'orch apply rgw', + 'name=realm_name,type=CephString ' + 'name=zone_name,type=CephString ' + 'name=num,type=CephInt,req=false ' + 'name=hosts,type=CephString,n=N,req=false ' + 'name=label,type=CephString,req=false', + 'Update the number of RGW instances for the given zone') + def _apply_rgw(self, zone_name, realm_name, num=None, label=None, hosts=[]): + spec = orchestrator.RGWSpec( + rgw_realm=realm_name, + rgw_zone=zone_name, + placement=orchestrator.PlacementSpec(hosts=hosts, label=label, count=num)) + completion = self.service_apply(spec) self._orchestrator_wait([completion]) orchestrator.raise_if_exception(completion) return HandleCommandResult(stdout=completion.result_str()) + @orchestrator._cli_write_command( + 'orch apply nfs', + "name=svc_id,type=CephString " + 'name=num,type=CephInt,req=false ' + 'name=hosts,type=CephString,n=N,req=false ' + 'name=label,type=CephString,req=false', + 'Scale an NFS service') + def _apply_nfs(self, svc_id, num=None, label=None, hosts=[]): + # type: (str, Optional[int], Optional[str], List[str]) -> HandleCommandResult + spec = orchestrator.NFSServiceSpec( + svc_id, + placement=orchestrator.PlacementSpec(label=label, hosts=hosts, count=num), + ) + completion = self.service_apply(spec) + self._orchestrator_wait([completion]) + return HandleCommandResult(stdout=completion.result_str()) + @orchestrator._cli_write_command( 'orch set backend', "name=module_name,type=CephString,req=true", diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py index c9f8c70dae59..37ff39b18740 100644 --- a/src/pybind/mgr/rook/module.py +++ b/src/pybind/mgr/rook/module.py @@ -340,6 +340,16 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator): 'NFS', service_name, lambda: self.rook_cluster.rm_service('cephnfses', service_name) ) + def service_apply(self, spec): + if spec.service_type == 'mon': + 
return self.update_mons(spec) + if spec.service_type == 'mgr': + raise NotImplementedError() + if spec.service_type == 'mds': + return self.update_mds(spec) + if spec.service_type == 'nfs': + return self.update_nfs(spec) + def update_mons(self, spec): # type: (orchestrator.ServiceSpec) -> RookCompletion if spec.placement.hosts or spec.placement.label:
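
(A side note on the Rook hunk above.) Unlike the cephadm implementation, its if-chain has no terminal ``raise NotImplementedError()``, so an unhandled ``service_type`` (``'rgw'``, for example) falls through and returns ``None``. A table-driven dispatch would keep the supported set explicit and fail loudly; a hypothetical refactor sketch, not part of this patch::

    # Hypothetical alternative to the if-chain in
    # RookOrchestrator.service_apply() above; the handler names mirror
    # the methods touched in this diff.
    def service_apply(self, spec):
        handlers = {
            'mon': self.update_mons,
            'mds': self.update_mds,
            'nfs': self.update_nfs,
        }
        try:
            handler = handlers[spec.service_type]
        except KeyError:
            # Covers 'mgr', 'rgw', 'rbd-mirror', ... which the Rook
            # backend does not support yet.
            raise NotImplementedError(spec.service_type)
        return handler(spec)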