    @handle_orch_error
    def remove_daemons(self, names: List[str]) -> List[str]:
        return self.rook_cluster.remove_pods(names)
-
+    """
    @handle_orch_error
    def create_osds(self, drive_group):
        # type: (DriveGroupSpec) -> str
- """ Creates OSDs from a drive group specification.
+ # Creates OSDs from a drive group specification.
- $: ceph orch osd create -i <dg.file>
+ # $: ceph orch osd create -i <dg.file>
- The drivegroup file must only contain one spec at a time.
- """
+ # The drivegroup file must only contain one spec at a time.
+ #
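+        # For illustration (hypothetical file; format assumed from the
+        # DriveGroupSpec fields used below, not part of the original change):
+        # the file passed with -i holds a single YAML service spec such as:
+        #
+        #   service_type: osd
+        #   service_id: example_drive_group
+        #   placement:
+        #     host_pattern: 'data-*'
+        #   data_devices:
+        #     paths:
+        #       - /dev/sdb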
        targets = []  # type: List[str]
        if drive_group.data_devices and drive_group.data_devices.paths:
            targets += [d.path for d in drive_group.data_devices.paths]

        return self.rook_cluster.add_osds(drive_group, matching_hosts)
        # TODO: this was the code to update the progress reference:
-        """
+
        @handle_orch_error
        def has_osds(matching_hosts: List[str]) -> bool:
                osd_id, metadata['devices'] if metadata else 'DNE'
            ))
-            return found is not None
-        """
+        return found is not None
+    """
    @handle_orch_error
    def blink_device_light(self, ident_fault: str, on: bool, locs: List[orchestrator.DeviceLightLoc]) -> List[str]:

    def update_mon_count(self, newcount: Optional[int]) -> str:
        def _update_mon_count(current, new):
            # type: (ccl.CephCluster, ccl.CephCluster) -> ccl.CephCluster
            if newcount is None:
                raise orchestrator.OrchestratorError('unable to set mon count to None')
+            if not new.spec.mon:
+                raise orchestrator.OrchestratorError("mon attribute not specified in new spec")
            new.spec.mon.count = newcount
            return new
        return self._patch(ccl.CephCluster, 'cephclusters', self.rook_env.cluster_name, _update_mon_count)
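
    # Sketch of the _patch pattern used above (an assumption drawn from the
    # shape of the current/new callbacks, not part of this change): read the
    # live CR, build two copies, let the callback mutate one, then submit the
    # jsonpatch diff between them, e.g.:
    #
    #   import jsonpatch
    #   current = crd.from_json(current_json)
    #   new = func(current, crd.from_json(current_json))
    #   patch = list(jsonpatch.make_patch(current.to_json(), new.to_json()))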
-
+    """
    def add_osds(self, drive_group, matching_hosts):
        # type: (DriveGroupSpec, List[str]) -> str
- """
- Rook currently (0.8) can only do single-drive OSDs, so we
- treat all drive groups as just a list of individual OSDs.
- """
+
+ # Rook currently (0.8) can only do single-drive OSDs, so we
+ # treat all drive groups as just a list of individual OSDs.
+
        block_devices = drive_group.data_devices.paths if drive_group.data_devices else []
        directories = drive_group.data_directories
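
        # For example (hypothetical values): data_devices.paths ==
        # ['/dev/sdb', '/dev/sdc'] yields two independent single-drive OSDs,
        # never one OSD spanning both devices.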
        def _add_osds(current_cluster, new_cluster):
            # type: (ccl.CephCluster, ccl.CephCluster) -> ccl.CephCluster
            current_nodes = getattr(current_cluster.spec.storage, 'nodes', ccl.NodesList())
            matching_host = matching_hosts[0]
-
+
            if matching_host not in [n.name for n in current_nodes]:
+                # FIXME: ccl.Config stopped existing since rook changed
+                # their CRDs, check if config is actually necessary for this
+
                pd = ccl.NodesItem(
                    name=matching_host,
-                    # config=ccl.Config(
-                    #     storeType=drive_group.objectstore
-                    # )
-                    config=object(
+                    config=ccl.Config(
                        storeType=drive_group.objectstore
                    )
                )
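
                # Rough shape of the resulting CephCluster CR (assumed from
                # the pre-1.0 Rook storage layout this code targets):
                #
                #   spec:
                #     storage:
                #       nodes:
                #         - name: <matching_host>
                #           config:
                #             storeType: bluestore    # from drive_group.objectstore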
            return new_cluster
        return self._patch(ccl.CephCluster, 'cephclusters', self.rook_env.cluster_name, _add_osds)
-
+    """
    def _patch(self, crd: Type, crd_name: str, cr_name: str, func: Callable[[CrdClassT, CrdClassT], CrdClassT]) -> str:
        current_json = self.rook_api_get(
            "{}/{}".format(crd_name, cr_name)