From 1a67d3e559c50bad1ccc42bf1706326ffa18da80 Mon Sep 17 00:00:00 2001
From: Joseph Sawaya
Date: Fri, 3 Sep 2021 13:30:43 -0400
Subject: [PATCH] mgr/rook: apply mds using placement spec and osd_pool_default_size

This commit changes the apply_mds command in the rook orchestrator
to support some placement specs and also sets the replica size
according to the osd_pool_default_size ceph option.

This commit also adds `orch apply mds` to the QA to test if the
command runs.

Signed-off-by: Joseph Sawaya
---
 qa/suites/orch/rook/smoke/3-final.yaml |  1 +
 src/pybind/mgr/rook/module.py          |  5 +--
 src/pybind/mgr/rook/rook_cluster.py    | 47 ++++++++++++++++++++++----
 3 files changed, 45 insertions(+), 8 deletions(-)

diff --git a/qa/suites/orch/rook/smoke/3-final.yaml b/qa/suites/orch/rook/smoke/3-final.yaml
index 90bbafad6e0..91a43f4acc8 100644
--- a/qa/suites/orch/rook/smoke/3-final.yaml
+++ b/qa/suites/orch/rook/smoke/3-final.yaml
@@ -7,3 +7,4 @@ tasks:
       - ceph orch host ls
       - ceph orch device ls
       - ceph orch apply rgw foo
+      - ceph orch apply mds foo
diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py
index 27b80538b67..d1412a70837 100644
--- a/src/pybind/mgr/rook/module.py
+++ b/src/pybind/mgr/rook/module.py
@@ -416,7 +416,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
 
         return result
 
-    def _get_pool_params(self) -> Tuple[str, str]:
+    def _get_pool_params(self) -> Tuple[int, str]:
         num_replicas = self.get_ceph_option('osd_pool_default_size')
         assert type(num_replicas) is int
 
@@ -453,7 +453,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
     @handle_orch_error
     def apply_mds(self, spec):
         # type: (ServiceSpec) -> str
-        return self.rook_cluster.apply_filesystem(spec)
+        num_replicas, leaf_type = self._get_pool_params()
+        return self.rook_cluster.apply_filesystem(spec, num_replicas, leaf_type)
 
     @handle_orch_error
     def apply_rgw(self, spec):
diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py
index cf302c84550..1a280935093 100644
--- a/src/pybind/mgr/rook/rook_cluster.py
+++ b/src/pybind/mgr/rook/rook_cluster.py
@@ -842,30 +842,65 @@ class RookCluster(object):
         else:
             raise
 
-    def apply_filesystem(self, spec: ServiceSpec) -> str:
+    def apply_filesystem(self, spec: ServiceSpec, num_replicas: int,
+                         leaf_type: str) -> str:
         # TODO use spec.placement
         # TODO warn if spec.extended has entries we don't kow how
         # to action.
+        all_hosts = self.get_hosts()
         def _update_fs(new: cfs.CephFilesystem) -> cfs.CephFilesystem:
             new.spec.metadataServer.activeCount = spec.placement.count or 1
+            new.spec.metadataServer.placement = cfs.Placement(
+                nodeAffinity=cfs.NodeAffinity(
+                    requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
+                        nodeSelectorTerms=cfs.NodeSelectorTermsList(
+                            [placement_spec_to_node_selector(spec.placement, all_hosts)]
+                        )
+                    )
+                )
+            )
             return new
-
         def _create_fs() -> cfs.CephFilesystem:
-            return cfs.CephFilesystem(
+            fs = cfs.CephFilesystem(
                 apiVersion=self.rook_env.api_name,
                 metadata=dict(
                     name=spec.service_id,
                     namespace=self.rook_env.namespace,
                 ),
                 spec=cfs.Spec(
-                    None,
-                    None,
+                    dataPools=cfs.DataPoolsList(
+                        {
+                            cfs.DataPoolsItem(
+                                failureDomain=leaf_type,
+                                replicated=cfs.Replicated(
+                                    size=num_replicas
+                                )
+                            )
+                        }
+                    ),
+                    metadataPool=cfs.MetadataPool(
+                        failureDomain=leaf_type,
+                        replicated=cfs.Replicated(
+                            size=num_replicas
+                        )
+                    ),
                     metadataServer=cfs.MetadataServer(
                         activeCount=spec.placement.count or 1,
-                        activeStandby=True
+                        activeStandby=True,
+                        placement=
+                        cfs.Placement(
+                            nodeAffinity=cfs.NodeAffinity(
+                                requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
+                                    nodeSelectorTerms=cfs.NodeSelectorTermsList(
+                                        [placement_spec_to_node_selector(spec.placement, all_hosts)]
+                                    )
+                                )
+                            )
+                        )
                     )
                 )
             )
+            return fs
         assert spec.service_id is not None
         return self._create_or_patch(
             cfs.CephFilesystem, 'cephfilesystems', spec.service_id,
-- 
2.39.5
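
For illustration, a host-pinned placement roughly maps onto a Kubernetes node-affinity term like the one built by the sketch below. This is a minimal sketch only, assuming a placement restricted to explicit hosts; the helper name hosts_to_node_selector_term, the plain-dict output, and the example hostnames are illustrative assumptions, not the actual placement_spec_to_node_selector() used in rook_cluster.py.

# Minimal sketch, assuming host-based placement: shows the shape of the
# Kubernetes nodeSelectorTerm that pinning MDS pods to explicit hosts is
# expected to translate into. Not the real placement_spec_to_node_selector();
# helper name, dict output and hostnames are illustrative only.
from typing import Any, Dict, List


def hosts_to_node_selector_term(hosts: List[str]) -> Dict[str, Any]:
    # Kubernetes labels every node with kubernetes.io/hostname, so pinning
    # pods to specific hosts can be expressed as one matchExpressions entry
    # using the "In" operator.
    return {
        "matchExpressions": [
            {
                "key": "kubernetes.io/hostname",
                "operator": "In",
                "values": hosts,
            }
        ]
    }


if __name__ == "__main__":
    # e.g. after `ceph orch apply mds foo --placement="2 host1 host2"`,
    # assuming the Rook backend accepts a host-based placement here
    print(hosts_to_node_selector_term(["host1", "host2"]))

In the patch itself, _update_fs and _create_fs wrap an equivalent term in the cfs.Placement / cfs.NodeAffinity / cfs.NodeSelectorTermsList objects generated from the Rook CRDs rather than in plain dicts.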