]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/rook: refactor apply/add for rgw
author: Sage Weil <sage@redhat.com>
Wed, 18 Mar 2020 21:20:12 +0000 (17:20 -0400)
committer: Sage Weil <sage@redhat.com>
Fri, 20 Mar 2020 20:40:12 +0000 (16:40 -0400)
A few caveats here:

- enforce that realm == zone, since that is all rook does at the moment.
- we force a (bad!) pool configuration, since rook requires that these
be present (instead of allowing radosgw or the caller to create the pools)

Signed-off-by: Sage Weil <sage@redhat.com>
src/pybind/mgr/rook/module.py
src/pybind/mgr/rook/rook_cluster.py

index a8b18de0fcbb2fac766e4f48b2f0e2b72c17c77c..d1fad01e02b34f8f81f6c56f69f81d298d87010b 100644 (file)
@@ -336,7 +336,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
     def _list_daemons(self, daemon_type=None, daemon_id=None, host=None,
                       refresh=False):
         pods = self.rook_cluster.describe_pods(daemon_type, daemon_id, host)
-
+        self.log.debug('pods %s' % pods)
         result = []
         for p in pods:
             sd = orchestrator.DaemonDescription()
@@ -379,11 +379,6 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
             mgr=self
         )
 
-    def add_rgw(self, spec):
-        # type: (RGWSpec) -> RookCompletion
-        return self._service_add_decorate('RGW', spec,
-                                       self.rook_cluster.add_objectstore)
-
     def add_nfs(self, spec):
         # type: (NFSServiceSpec) -> RookCompletion
         return self._service_add_decorate("NFS", spec,
@@ -427,6 +422,12 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         return self._service_add_decorate('MDS', spec,
                                           self.rook_cluster.apply_filesystem)
 
+    def apply_rgw(self, spec):
+        # type: (RGWSpec) -> RookCompletion
+        return self._service_add_decorate('RGW', spec,
+                                          self.rook_cluster.apply_objectstore)
+
+
     def apply_nfs(self, spec):
         # type: (NFSServiceSpec) -> RookCompletion
         num = spec.placement.count
index 59300a16e37a47d5d302837b5cce595b989b5eb4..d603ebceee136d2bde1d4b0b2e8769658a433b26 100644 (file)
@@ -409,6 +409,59 @@ class RookCluster(object):
             cfs.CephFilesystem, 'cephfilesystems', spec.service_id,
             _update_fs, _create_fs)
 
+    def apply_objectstore(self, spec):
+
+        # FIXME: service_id is $realm.$zone, but rook uses realm
+        # $crname and zone $crname.  The '.'  will confuse kubernetes.
+        # For now, assert that realm==zone.
+        (realm, zone) = spec.service_id.split('.', 1)
+        assert realm == zone
+        name = realm
+
+        def _create_zone():
+            # type: () -> cos.CephObjectStore
+            port = None
+            secure_port = None
+            if spec.ssl:
+                secure_port = spec.get_port()
+            else:
+                port = spec.get_port()
+            return cos.CephObjectStore(
+                apiVersion=self.rook_env.api_name,
+                metadata=dict(
+                    name=name,
+                    namespace=self.rook_env.namespace
+                ),
+                spec=cos.Spec(
+                    metadataPool=cos.MetadataPool(
+                        failureDomain='host',
+                        replicated=cos.Replicated(
+                            size=1
+                        )
+                    ),
+                    dataPool=cos.DataPool(
+                        failureDomain='host',
+                        replicated=cos.Replicated(
+                            size=1
+                        )
+                    ),
+                    gateway=cos.Gateway(
+                        type='s3',
+                        port=port,
+                        securePort=secure_port,
+                        instances=spec.placement.count or 1,
+                    )
+                )
+            )
+
+        def _update_zone(current, new):
+            new.spec.gateway.instances = spec.placement.count or 1
+            return new
+
+        return self._create_or_patch(
+            cos.CephObjectStore, 'cephobjectstores', name,
+            _update_zone, _create_zone)
+
     def add_nfsgw(self, spec):
         # TODO use spec.placement
         # TODO warn if spec.extended has entries we don't know how
@@ -436,38 +489,6 @@ class RookCluster(object):
         with self.ignore_409("NFS cluster '{0}' already exists".format(spec.service_id)):
             self.rook_api_post("cephnfses/", body=rook_nfsgw.to_json())
 
-    def add_objectstore(self, spec):
-
-        rook_os = cos.CephObjectStore(
-            apiVersion=self.rook_env.api_name,
-            metadata=dict(
-                name=spec.service_id,
-                namespace=self.rook_env.namespace
-            ),
-            spec=cos.Spec(
-                metadataPool=cos.MetadataPool(
-                    failureDomain='host',
-                    replicated=cos.Replicated(
-                        size=1
-                    )
-                ),
-                dataPool=cos.DataPool(
-                    failureDomain='osd',
-                    replicated=cos.Replicated(
-                        size=1
-                    )
-                ),
-                gateway=cos.Gateway(
-                    type='s3',
-                    port=spec.rgw_frontend_port if spec.rgw_frontend_port is not None else 80,
-                    instances=spec.placement.count
-                )
-            )
-        )
-        
-        with self.ignore_409("CephObjectStore '{0}' already exists".format(spec.service_id)):
-            self.rook_api_post("cephobjectstores/", body=rook_os.to_json())
-
     def rm_service(self, rooktype, service_id):
 
         objpath = "{0}/{1}".format(rooktype, service_id)