From: Joseph Sawaya
Date: Fri, 3 Sep 2021 17:30:43 +0000 (-0400)
Subject: mgr/rook: apply mds using placement spec and osd_pool_default_size
X-Git-Tag: v17.1.0~703^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=1a67d3e559c50bad1ccc42bf1706326ffa18da80;p=ceph.git

mgr/rook: apply mds using placement spec and osd_pool_default_size

This commit changes the apply_mds command in the rook orchestrator to
support placement specs (daemon count and node placement) and also sets
the replica size of the data and metadata pools according to the
osd_pool_default_size ceph option.

This commit also adds `orch apply mds` to the QA suite to test that the
command runs.

Signed-off-by: Joseph Sawaya
---

diff --git a/qa/suites/orch/rook/smoke/3-final.yaml b/qa/suites/orch/rook/smoke/3-final.yaml
index 90bbafad6e0f..91a43f4acc8b 100644
--- a/qa/suites/orch/rook/smoke/3-final.yaml
+++ b/qa/suites/orch/rook/smoke/3-final.yaml
@@ -7,3 +7,4 @@ tasks:
       - ceph orch host ls
       - ceph orch device ls
       - ceph orch apply rgw foo
+      - ceph orch apply mds foo
diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py
index 27b80538b673..d1412a708374 100644
--- a/src/pybind/mgr/rook/module.py
+++ b/src/pybind/mgr/rook/module.py
@@ -416,7 +416,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
 
         return result
 
-    def _get_pool_params(self) -> Tuple[str, str]:
+    def _get_pool_params(self) -> Tuple[int, str]:
         num_replicas = self.get_ceph_option('osd_pool_default_size')
         assert type(num_replicas) is int
 
@@ -453,7 +453,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
     @handle_orch_error
     def apply_mds(self, spec):
         # type: (ServiceSpec) -> str
-        return self.rook_cluster.apply_filesystem(spec)
+        num_replicas, leaf_type = self._get_pool_params()
+        return self.rook_cluster.apply_filesystem(spec, num_replicas, leaf_type)
 
     @handle_orch_error
     def apply_rgw(self, spec):
diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py
index cf302c84550d..1a2809350938 100644
--- a/src/pybind/mgr/rook/rook_cluster.py
+++ b/src/pybind/mgr/rook/rook_cluster.py
@@ -842,30 +842,65 @@ class RookCluster(object):
             else:
                 raise
 
-    def apply_filesystem(self, spec: ServiceSpec) -> str:
+    def apply_filesystem(self, spec: ServiceSpec, num_replicas: int,
+                         leaf_type: str) -> str:
         # TODO use spec.placement
         # TODO warn if spec.extended has entries we don't kow how
         #      to action.
+        all_hosts = self.get_hosts()
         def _update_fs(new: cfs.CephFilesystem) -> cfs.CephFilesystem:
             new.spec.metadataServer.activeCount = spec.placement.count or 1
+            new.spec.metadataServer.placement = cfs.Placement(
+                nodeAffinity=cfs.NodeAffinity(
+                    requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
+                        nodeSelectorTerms=cfs.NodeSelectorTermsList(
+                            [placement_spec_to_node_selector(spec.placement, all_hosts)]
+                        )
+                    )
+                )
+            )
             return new
 
-        def _create_fs() -> cfs.CephFilesystem:
-            return cfs.CephFilesystem(
+        fs = cfs.CephFilesystem(
                 apiVersion=self.rook_env.api_name,
                 metadata=dict(
                     name=spec.service_id,
                     namespace=self.rook_env.namespace,
                 ),
                 spec=cfs.Spec(
-                    None,
-                    None,
+                    dataPools=cfs.DataPoolsList(
+                        {
+                            cfs.DataPoolsItem(
+                                failureDomain=leaf_type,
+                                replicated=cfs.Replicated(
+                                    size=num_replicas
+                                )
+                            )
+                        }
+                    ),
+                    metadataPool=cfs.MetadataPool(
+                        failureDomain=leaf_type,
+                        replicated=cfs.Replicated(
+                            size=num_replicas
+                        )
+                    ),
                     metadataServer=cfs.MetadataServer(
                         activeCount=spec.placement.count or 1,
-                        activeStandby=True
+                        activeStandby=True,
+                        placement=
+                        cfs.Placement(
+                            nodeAffinity=cfs.NodeAffinity(
+                                requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
+                                    nodeSelectorTerms=cfs.NodeSelectorTermsList(
+                                        [placement_spec_to_node_selector(spec.placement, all_hosts)]
+                                    )
+                                )
+                            )
+                        )
                     )
                 )
             )
+        return fs
         assert spec.service_id is not None
         return self._create_or_patch(
             cfs.CephFilesystem, 'cephfilesystems', spec.service_id,
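
For context, a minimal sketch (not part of the patch) of how the changed
path is driven from the orchestrator side. The spec below is hypothetical
and only assumes the standard ceph.deployment.service_spec classes that
mgr modules already use; values are illustrative.

# Illustrative sketch only -- not part of this patch.
from ceph.deployment.service_spec import PlacementSpec, ServiceSpec

# A hypothetical MDS spec: two active daemons for a filesystem named "foo".
spec = ServiceSpec('mds', service_id='foo',
                   placement=PlacementSpec(count=2))

# With this change, RookOrchestrator.apply_mds(spec) roughly does:
#
#   num_replicas, leaf_type = self._get_pool_params()
#   return self.rook_cluster.apply_filesystem(spec, num_replicas, leaf_type)
#
# where num_replicas comes from the osd_pool_default_size option and
# leaf_type is the CRUSH failure domain (e.g. "host"). apply_filesystem
# then builds a CephFilesystem resource whose metadata/data pools use that
# replica size and failure domain, and whose MDS placement nodeAffinity is
# derived from spec.placement via placement_spec_to_node_selector().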