git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/rook: comment out osd creation functions 41871/head 41903/head
authorJoseph Sawaya <jsawaya@redhat.com>
Wed, 16 Jun 2021 16:49:53 +0000 (12:49 -0400)
committerSage Weil <sage@newdream.net>
Thu, 17 Jun 2021 13:16:29 +0000 (09:16 -0400)
This commit comments out the OSD creation functions in rook_cluster.py
and module.py, since the submodule update has broken them.

Signed-off-by: Joseph Sawaya <jsawaya@redhat.com>
src/pybind/mgr/rook/module.py
src/pybind/mgr/rook/rook_cluster.py

index 2bd1ddbbc2c25497de686ad6b323aee9c52b39cc..6b2fc08b0cda327c0bbb8d1db0a800635299959d 100644 (file)
@@ -439,16 +439,16 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
     @handle_orch_error
     def remove_daemons(self, names: List[str]) -> List[str]:
         return self.rook_cluster.remove_pods(names)
-
+    """
     @handle_orch_error
     def create_osds(self, drive_group):
         # type: (DriveGroupSpec) -> str
-        """ Creates OSDs from a drive group specification.
+        # Creates OSDs from a drive group specification.
 
-        $: ceph orch osd create -i <dg.file>
+        $: ceph orch osd create -i <dg.file>
 
-        The drivegroup file must only contain one spec at a time.
-        """
+        The drivegroup file must only contain one spec at a time.
+        # 
 
         targets = []  # type: List[str]
         if drive_group.data_devices and drive_group.data_devices.paths:
@@ -475,7 +475,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         return self.rook_cluster.add_osds(drive_group, matching_hosts)
 
         # TODO: this was the code to update the progress reference:
-        """
+        
         @handle_orch_error
         def has_osds(matching_hosts: List[str]) -> bool:
 
@@ -506,8 +506,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
                         osd_id, metadata['devices'] if metadata else 'DNE'
                     ))
 
-            return found is not None
-        """
+            return found is not None        
+    """
 
     @handle_orch_error
     def blink_device_light(self, ident_fault: str, on: bool, locs: List[orchestrator.DeviceLightLoc]) -> List[str]:
index dddc05893cad574b609a1d7c29866872c296e59d..5d0a9884d3909d2c08a80c9b004c04289cfb3dd1 100644 (file)
@@ -551,16 +551,18 @@ class RookCluster(object):
             # type: (ccl.CephCluster, ccl.CephCluster) -> ccl.CephCluster
             if newcount is None:
                 raise orchestrator.OrchestratorError('unable to set mon count to None')
+            if not new.spec.mon:
+                raise orchestrator.OrchestratorError("mon attribute not specified in new spec")
             new.spec.mon.count = newcount
             return new
         return self._patch(ccl.CephCluster, 'cephclusters', self.rook_env.cluster_name, _update_mon_count)
-
+    """
     def add_osds(self, drive_group, matching_hosts):
         # type: (DriveGroupSpec, List[str]) -> str
-        """
-        Rook currently (0.8) can only do single-drive OSDs, so we
-        treat all drive groups as just a list of individual OSDs.
-        """
+        
+        Rook currently (0.8) can only do single-drive OSDs, so we
+        treat all drive groups as just a list of individual OSDs.
+        
         block_devices = drive_group.data_devices.paths if drive_group.data_devices else []
         directories = drive_group.data_directories
 
@@ -580,14 +582,14 @@ class RookCluster(object):
 
             current_nodes = getattr(current_cluster.spec.storage, 'nodes', ccl.NodesList())
             matching_host = matching_hosts[0]
-
+            
             if matching_host not in [n.name for n in current_nodes]:
+                # FIXME: ccl.Config stopped existing since rook changed
+                # their CRDs, check if config is actually necessary for this
+                
                 pd = ccl.NodesItem(
                     name=matching_host,
-                    # config=ccl.Config(
-                    #     storeType=drive_group.objectstore
-                    # )
-                    config=object(  
+                    config=ccl.Config(
                         storeType=drive_group.objectstore
                     )
                 )
@@ -624,7 +626,7 @@ class RookCluster(object):
             return new_cluster
 
         return self._patch(ccl.CephCluster, 'cephclusters', self.rook_env.cluster_name, _add_osds)
-
+    """
     def _patch(self, crd: Type, crd_name: str, cr_name: str, func: Callable[[CrdClassT, CrdClassT], CrdClassT]) -> str:
         current_json = self.rook_api_get(
             "{}/{}".format(crd_name, cr_name)