mgr/rook: apply mds using placement spec and osd_pool_default_size (43049/head)
Author:     Joseph Sawaya <jsawaya@redhat.com>
AuthorDate: Fri, 3 Sep 2021 17:30:43 +0000 (13:30 -0400)
Commit:     Sage Weil <sage@newdream.net>
CommitDate: Mon, 11 Oct 2021 17:06:37 +0000 (12:06 -0500)
This commit changes the apply_mds command in the Rook orchestrator
to support a subset of placement specs and to set the replica size
of the filesystem's data and metadata pools from the
osd_pool_default_size Ceph option.

This commit also adds `orch apply mds` to the Rook QA smoke suite to
verify that the command runs.

Signed-off-by: Joseph Sawaya <jsawaya@redhat.com>
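
For context, the placement that reaches apply_mds is carried by an ordinary
ServiceSpec. A minimal sketch of the kind of spec this change now honors
(the hostnames are hypothetical):

    from ceph.deployment.service_spec import PlacementSpec, ServiceSpec

    # Roughly what `ceph orch apply mds foo --placement="2 host1 host2"`
    # hands to RookOrchestrator.apply_mds(); the hostnames are made up.
    spec = ServiceSpec(
        service_type='mds',
        service_id='foo',
        placement=PlacementSpec(count=2, hosts=['host1', 'host2']),
    )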
qa/suites/orch/rook/smoke/3-final.yaml
src/pybind/mgr/rook/module.py
src/pybind/mgr/rook/rook_cluster.py

diff --git a/qa/suites/orch/rook/smoke/3-final.yaml b/qa/suites/orch/rook/smoke/3-final.yaml
index 90bbafad6e0f292e87790de4fa16b0f4fa0dae4d..91a43f4acc8b57277e4698039bdec755bdbf03ad 100644
@@ -7,3 +7,4 @@ tasks:
       - ceph orch host ls
       - ceph orch device ls
       - ceph orch apply rgw foo
+      - ceph orch apply mds foo
diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py
index 27b80538b673487957b4d590b7257b935f056df6..d1412a708374daf7fe6b09f0b2586dec355e499f 100644
@@ -416,7 +416,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
 
         return result
 
-    def _get_pool_params(self) -> Tuple[str, str]:
+    def _get_pool_params(self) -> Tuple[int, str]:
         num_replicas = self.get_ceph_option('osd_pool_default_size')
         assert type(num_replicas) is int
 
@@ -453,7 +453,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
     @handle_orch_error
     def apply_mds(self, spec):
         # type: (ServiceSpec) -> str
-        return self.rook_cluster.apply_filesystem(spec)
+        num_replicas, leaf_type = self._get_pool_params()
+        return self.rook_cluster.apply_filesystem(spec, num_replicas, leaf_type)
 
     @handle_orch_error
     def apply_rgw(self, spec):
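
A condensed, hypothetical trace of the new apply_mds path, assuming
osd_pool_default_size is 3 and the helper picks 'host' as the failure domain:

    num_replicas, leaf_type = self._get_pool_params()   # (3, 'host')
    # Threads the pool parameters into the CR translation below, which
    # returns a status string from _create_or_patch().
    self.rook_cluster.apply_filesystem(spec, num_replicas, leaf_type)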
diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py
index cf302c84550d4b9274c9a91b1081eaa23d8fd6ac..1a28093509386cdb93fc762b5033db1137022a5f 100644
@@ -842,30 +842,65 @@ class RookCluster(object):
             else:
                 raise
 
-    def apply_filesystem(self, spec: ServiceSpec) -> str:
+    def apply_filesystem(self, spec: ServiceSpec, num_replicas: int,
+                         leaf_type: str) -> str:
         # TODO use spec.placement
        # TODO warn if spec.extended has entries we don't know how
         #      to action.
+        all_hosts = self.get_hosts()
         def _update_fs(new: cfs.CephFilesystem) -> cfs.CephFilesystem:
             new.spec.metadataServer.activeCount = spec.placement.count or 1
+            new.spec.metadataServer.placement = cfs.Placement(
+                nodeAffinity=cfs.NodeAffinity(
+                    requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
+                        nodeSelectorTerms=cfs.NodeSelectorTermsList(
+                            [placement_spec_to_node_selector(spec.placement, all_hosts)]
+                        )
+                    )
+                )
+            )
             return new
-
         def _create_fs() -> cfs.CephFilesystem:
-            return cfs.CephFilesystem(
+            fs = cfs.CephFilesystem(
                 apiVersion=self.rook_env.api_name,
                 metadata=dict(
                     name=spec.service_id,
                     namespace=self.rook_env.namespace,
                 ),
                 spec=cfs.Spec(
-                    None,
-                    None,
+                    dataPools=cfs.DataPoolsList(
+                        [
+                            cfs.DataPoolsItem(
+                                failureDomain=leaf_type,
+                                replicated=cfs.Replicated(
+                                    size=num_replicas
+                                )
+                            )
+                        ]
+                    ),
+                    metadataPool=cfs.MetadataPool(
+                        failureDomain=leaf_type,
+                        replicated=cfs.Replicated(
+                            size=num_replicas
+                        )
+                    ),
                     metadataServer=cfs.MetadataServer(
                         activeCount=spec.placement.count or 1,
-                        activeStandby=True
+                        activeStandby=True,
+                        placement=cfs.Placement(
+                            nodeAffinity=cfs.NodeAffinity(
+                                requiredDuringSchedulingIgnoredDuringExecution=cfs.RequiredDuringSchedulingIgnoredDuringExecution(
+                                    nodeSelectorTerms=cfs.NodeSelectorTermsList(
+                                        [placement_spec_to_node_selector(spec.placement, all_hosts)]
+                                    )
+                                )
+                            )
+                        )
                     )
                 )
             )
+            return fs
         assert spec.service_id is not None
         return self._create_or_patch(
             cfs.CephFilesystem, 'cephfilesystems', spec.service_id,
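
Note that placement_spec_to_node_selector is referenced above but added
elsewhere in this commit. A conceptual sketch of what such a helper plausibly
does, written with plain dicts rather than the generated cfs wrapper types
(the key and shape follow standard Kubernetes node affinity, not verbatim
Rook code):

    def placement_spec_to_node_selector(placement, all_hosts):
        # Pin to explicitly named hosts when the placement lists any;
        # otherwise match every host known to the orchestrator.
        hostnames = ([h.hostname for h in placement.hosts]
                     if placement.hosts
                     else [h.hostname for h in all_hosts])
        return {
            'matchExpressions': [{
                'key': 'kubernetes.io/hostname',
                'operator': 'In',
                'values': hostnames,
            }]
        }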