from copy import copy
import json
import logging
-from typing import TYPE_CHECKING, Dict, List, Iterator, Optional, Any, Tuple
+from typing import TYPE_CHECKING, Dict, List, Iterator, Optional, Any, Tuple, Set
import six
self.osdspec_previews_refresh_queue = [] # type: List[str]
self.daemon_config_deps = {} # type: Dict[str, Dict[str, Dict[str,Any]]]
self.last_host_check = {} # type: Dict[str, datetime.datetime]
- self.loading_osdspec_preview = {} # type: Dict[str, bool]
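+        # hostnames whose OSDSpec previews are currently being (re)generated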
+ self.loading_osdspec_preview = set() # type: Set[str]
def load(self):
# type: () -> None
self.daemon_refresh_queue.append(host)
self.daemons[host] = {}
self.osdspec_previews[host] = []
- self.loading_osdspec_preview[host] = False
self.devices[host] = []
self.networks[host] = {}
self.daemon_config_deps[host] = {}
for d in j.get('devices', []):
self.devices[host].append(inventory.Device.from_json(d))
self.networks[host] = j.get('networks', {})
- self.osdspec_previews[host] = j.get('osdspec_previews', [])
+ self.osdspec_previews[host] = j.get('osdspec_previews', {})
for name, d in j.get('daemon_config_deps', {}).items():
self.daemon_config_deps[host][name] = {
host, e))
pass
- def update_osdspec_previews(self, search_host: str = ''):
- # Set global 'pending' flag for host
- self.loading_osdspec_preview[search_host] = True
- previews = []
- # query OSDSpecs for host <search host> and generate/get the preview
- for preview in self.mgr.osd_service.get_previews(search_host):
- # There can be multiple previews for one host due to multiple OSDSpecs.
- previews.append(preview)
- self.mgr.log.debug(f"Loading OSDSpec previews to HostCache")
- self.osdspec_previews[search_host] = previews
- # Unset global 'pending' flag for host
- self.loading_osdspec_preview[search_host] = False
-
def update_host_daemons(self, host, dm):
# type: (str, Dict[str, orchestrator.DaemonDescription]) -> None
self.daemons[host] = dm
if host in self.osdspec_previews:
del self.osdspec_previews[host]
if host in self.loading_osdspec_preview:
- del self.loading_osdspec_preview[host]
+ self.loading_osdspec_preview.remove(host)
if host in self.networks:
del self.networks[host]
if host in self.last_daemon_update:
self.log.info('Removed label %s to host %s' % (label, host))
return 'Removed label %s from host %s' % (label, host)
+ def update_osdspec_previews(self, search_host: str = ''):
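+        # Query previews for every OSDSpec that matches <search_host> and store them in the HostCache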
+        # Mark the host as 'pending' while previews are being generated
+ self.cache.loading_osdspec_preview.add(search_host)
+ previews = []
+ # query OSDSpecs for host <search host> and generate/get the preview
+ for preview in self.osd_service.get_previews(search_host):
+ # There can be multiple previews for one host due to multiple OSDSpecs.
+ previews.append(preview)
+        self.log.debug("Loading OSDSpec previews into the HostCache")
+ self.cache.osdspec_previews[search_host] = previews
+        # Unmark the host once preview generation has finished
+ self.cache.loading_osdspec_preview.remove(search_host)
+
def _refresh_host_osdspec_previews(self, host) -> bool:
- self.cache.update_osdspec_previews(host)
+ self.update_osdspec_previews(host)
self.cache.save_host(host)
self.log.debug(f'Refreshed OSDSpec previews for host <{host}>')
return True
host, len(devices), len(networks)))
devices = inventory.Devices.from_json(devices)
self.cache.update_host_devices_networks(host, devices.devices, networks)
- self.cache.update_osdspec_previews(host)
+ self.update_osdspec_previews(host)
self.cache.save_host(host)
return None
if service_name:
self.log.debug(f"Looking for OSDSpec with service_name: {service_name}")
osdspecs = self.spec_store.find(service_name=service_name)
- osdspecs = [cast(DriveGroupSpec, spec) for spec in osdspecs]
self.log.debug(f"Found OSDSpecs: {osdspecs}")
if specs:
osdspecs = [cast(DriveGroupSpec, spec) for spec in specs]
if not osdspecs:
self.log.debug("No OSDSpecs found")
return []
+        # TODO: adapt this when we change pattern_matches_hosts with https://github.com/ceph/ceph/pull/34860
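+        # sum(..., []) flattens the per-spec lists of matching hosts into a single list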
return sum([spec.placement.pattern_matches_hosts(self.cache.get_hosts()) for spec in osdspecs], [])
def resolve_osdspecs_for_host(self, host):
def create_osds(self, drive_group: DriveGroupSpec):
return self.osd_service.create_from_spec(drive_group)
+ @trivial_completion
def preview_osdspecs(self,
osdspec_name: Optional[str] = None,
osdspecs: Optional[List[DriveGroupSpec]] = None
- ) -> Dict[str, List[Dict[str, Any]]]:
+ ):
matching_hosts = self.resolve_hosts_for_osdspecs(specs=osdspecs, service_name=osdspec_name)
if not matching_hosts:
return {'n/a': [{'error': True,
'message': 'No OSDSpec or matching hosts found.'}]}
# Is any host still loading previews
- pending_flags = {f for (h, f) in self.cache.loading_osdspec_preview.items() if h in matching_hosts}
- if any(pending_flags):
+ pending_hosts = {h for h in self.cache.loading_osdspec_preview if h in matching_hosts}
+ if pending_hosts:
-            # Report 'pending' when any of the matching hosts is still loading previews (flag is True)
+            # Report 'pending' when any of the matching hosts is still loading previews
return {'n/a': [{'error': True,
'message': 'Preview data is being generated.. '
import json
from typing import List, Set, Optional, Iterator
import re
+import ast
import yaml
import six
return HandleCommandResult(stdout=table.get_string())
- def set_unmanaged_flag(self, service_name: str, unmanaged_flag: bool) -> HandleCommandResult:
+ def set_unmanaged_flag(self,
+ unmanaged_flag: bool,
+ service_type: str = 'osd',
+                           service_name: Optional[str] = None
+ ) -> HandleCommandResult:
# setting unmanaged for $service_name
- completion = self.describe_service(service_name=service_name)
+ completion = self.describe_service(service_name=service_name, service_type=service_type)
self._orchestrator_wait([completion])
raise_if_exception(completion)
services: List[ServiceDescription] = completion.result
return HandleCommandResult(stdout=f"No specs found with the <service_name> -> {service_name}")
@_cli_write_command(
- 'orch apply osd',
- 'name=all_available_devices,type=CephBool,req=false '
- 'name=preview,type=CephBool,req=false '
+ 'orch osd spec',
'name=service_name,type=CephString,req=false '
+ 'name=preview,type=CephBool,req=false '
'name=unmanaged,type=CephBool,req=false '
"name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false",
- 'Create OSD daemon(s) using a drive group spec')
- def _apply_osd(self,
- all_available_devices: bool = False,
- preview: bool = False,
- service_name: Optional[str] = None,
- unmanaged: bool = False,
- format: Optional[str] = 'plain',
- inbuf: Optional[str] = None) -> HandleCommandResult:
- """Apply DriveGroupSpecs to create OSDs"""
+ 'Common operations on an OSDSpec. Allows previewing and changing the unmanaged flag.')
+ def _misc_osd(self,
+ preview: bool = False,
+ service_name: Optional[str] = None,
+                  unmanaged: Optional[bool] = None,
+ format: Optional[str] = 'plain',
+ ) -> HandleCommandResult:
usage = """
-Usage:
- ceph orch apply osd -i <json_file/yaml_file>
- ceph orch apply osd --all-available-devices
- ceph orch apply osd --service-name <service_name> --preview
- ceph orch apply osd --service-name <service_name> --unmanaged=True|False
+usage:
+ ceph orch osd spec --preview
+ ceph orch osd spec --unmanaged=true|false
+ ceph orch osd spec --service-name <service_name> --preview
+ ceph orch osd spec --service-name <service_name> --unmanaged=true|false (defaults to false)
+
+Restrictions:
+
+ Mutexes:
+  * --preview, --unmanaged
+
+  Although it's possible to set both at the same time, there will be no proper response for each
+  action, possibly shadowing any failures.
+
+Description:
+
+ * --service-name
+    If the flag is omitted, all existing OSDSpecs are targeted.
+    Requires either --unmanaged or --preview.
+
+ * --unmanaged
+    Applies the <unmanaged> flag to the targeted --service-name.
+    If --service-name is omitted, all OSDSpecs are targeted.
+
+Examples:
+
+ # ceph orch osd spec --preview
+
+ Queries all available OSDSpecs for previews
+
+ # ceph orch osd spec --service-name my-osdspec-name --preview
+
+ Queries only the specified <my-osdspec-name> for previews
+
+ # ceph orch osd spec --unmanaged=true
+
+  Changes the unmanaged flag of all available OSDSpecs to true
+
+ # ceph orch osd spec --service-name my-osdspec-name --unmanaged=true
+
+ Changes the unmanaged flag of <my-osdspec-name> to true
"""
- def print_preview(prev, format):
+ def print_preview(previews, format_to):
-            if format != 'plain':
+            if format_to != 'plain':
- return to_format(prev, format)
+ return to_format(previews, format_to)
else:
table = PrettyTable(
['NAME', 'HOST', 'DATA', 'DB', 'WAL'],
table.align = 'l'
table.left_padding_width = 0
table.right_padding_width = 1
- for data in prev:
- dg_name = data.get('drivegroup')
- hostname = data.get('host')
- for osd in data.get('data', {}).get('osds', []):
- db_path = '-'
- wal_path = '-'
- block_db = osd.get('block.db', {}).get('path')
- block_wal = osd.get('block.wal', {}).get('path')
- block_data = osd.get('data', {}).get('path', '')
- if not block_data:
- continue
- if block_db:
- db_path = data.get('data', {}).get('vg', {}).get('devices', [])
- if block_wal:
- wal_path = data.get('data', {}).get('wal_vg', {}).get('devices', [])
- table.add_row((dg_name, hostname, block_data, db_path, wal_path))
- out = table.get_string()
- if not out:
- out = "No pending deployments."
- return out
-
- if (inbuf or all_available_devices) and service_name:
- # mutually exclusive
+ for host, data in previews.items():
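+                    # each entry in <data> is a per-OSDSpec preview dict; entries with 'error' set only carry a 'message'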
+ for spec in data:
+ if spec.get('error'):
+ return spec.get('message')
+ dg_name = spec.get('osdspec')
+ for osd in spec.get('data', {}).get('osds', []):
+ db_path = '-'
+ wal_path = '-'
+ block_db = osd.get('block.db', {}).get('path')
+ block_wal = osd.get('block.wal', {}).get('path')
+ block_data = osd.get('data', {}).get('path', '')
+ if not block_data:
+ continue
+ if block_db:
+ db_path = spec.get('data', {}).get('vg', {}).get('devices', [])
+ if block_wal:
+ wal_path = spec.get('data', {}).get('wal_vg', {}).get('devices', [])
+ table.add_row((dg_name, host, block_data, db_path, wal_path))
+ ret = table.get_string()
+ if not ret:
+                    ret = "Couldn't draw any conclusions. This is likely a bug and should be reported."
+ return ret
+
+ if preview and (unmanaged is not None):
return HandleCommandResult(-errno.EINVAL, stderr=usage)
- if preview and not (service_name or all_available_devices or inbuf):
- # get all stored drivegroups and print
- prev = self.preview_drivegroups()
- return HandleCommandResult(stdout=print_preview(prev, format))
+ if service_name:
+ if preview:
+ completion = self.preview_osdspecs(osdspec_name=service_name)
+ self._orchestrator_wait([completion])
+ raise_if_exception(completion)
+ out = completion.result_str()
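+                # result_str() is expected to hold the repr of the preview dict, hence the ast.literal_eval round-trip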
+ return HandleCommandResult(stdout=print_preview(ast.literal_eval(out), format))
+ if unmanaged is not None:
+ return self.set_unmanaged_flag(service_name=service_name, unmanaged_flag=unmanaged)
- if service_name and preview:
- # get specified drivegroup and print
- prev = self.preview_drivegroups(service_name)
- return HandleCommandResult(stdout=print_preview(prev, format))
+ return HandleCommandResult(-errno.EINVAL, stderr=usage)
- if service_name and unmanaged is not None:
- return self.set_unmanaged_flag(service_name, unmanaged)
+ if preview:
+ completion = self.preview_osdspecs()
+ self._orchestrator_wait([completion])
+ raise_if_exception(completion)
+ out = completion.result_str()
+ return HandleCommandResult(stdout=print_preview(ast.literal_eval(out), format))
+
+ if unmanaged is not None:
+ return self.set_unmanaged_flag(unmanaged_flag=unmanaged)
+
+ return HandleCommandResult(-errno.EINVAL, stderr=usage)
+
+ @_cli_write_command(
+ 'orch apply osd',
+ 'name=all_available_devices,type=CephBool,req=false '
+ 'name=unmanaged,type=CephBool,req=false '
+ "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false",
+ 'Create OSD daemon(s) using a drive group spec')
+ def _apply_osd(self,
+ all_available_devices: bool = False,
+ format: Optional[str] = 'plain',
+                   unmanaged: Optional[bool] = None,
+ inbuf: Optional[str] = None) -> HandleCommandResult:
+ """Apply DriveGroupSpecs to create OSDs"""
+ usage = """
+usage:
+ ceph orch apply osd -i <json_file/yaml_file>
+ ceph orch apply osd --all-available-devices
+ ceph orch apply osd --all-available-devices --unmanaged=true|false
+
+Restrictions:
+
+ Mutexes:
+ * -i, --all-available-devices
+ * -i, --unmanaged (this would overwrite the osdspec loaded from a file)
+
+ Parameters:
+
+ * --unmanaged
+ Only works with --all-available-devices.
+
+Description:
+
+ * -i
+ An inbuf object like a file or a json/yaml blob containing a valid OSDSpec
+
+ * --all-available-devices
+    The simplest OSDSpec there is. Takes all devices marked as 'available'
+ and creates standalone OSDs on them.
+
+ * --unmanaged
+    Sets the unmanaged flag for --all-available-devices (default is False)
+
+Examples:
+
+ # ceph orch apply osd -i <file.yml|json>
+
+ Applies one or more OSDSpecs found in <file>
+
+  # ceph orch apply osd --all-available-devices --unmanaged=true
+
+  Creates and applies a simple OSDSpec with the unmanaged flag set to <true>
+"""
+
+ if inbuf and all_available_devices:
+ # mutually exclusive
+ return HandleCommandResult(-errno.EINVAL, stderr=usage)
if not inbuf and not all_available_devices:
+ # one parameter must be present
return HandleCommandResult(-errno.EINVAL, stderr=usage)
+
if inbuf:
- if all_available_devices:
- raise OrchestratorError('--all-available-devices cannot be combined with an osd spec')
+ if unmanaged is not None:
+ return HandleCommandResult(-errno.EINVAL, stderr=usage)
try:
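+                # yaml.load_all handles multi-document YAML, so a single file or blob may carry several OSDSpecs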
drivegroups = yaml.load_all(inbuf)
dg_specs = [DriveGroupSpec.from_json(dg) for dg in drivegroups]
+ # This acts weird when abstracted to a function
+ completion = self.apply_drivegroups(dg_specs)
+ self._orchestrator_wait([completion])
+ raise_if_exception(completion)
+ return HandleCommandResult(stdout=completion.result_str())
except ValueError as e:
msg = 'Failed to read JSON/YAML input: {}'.format(str(e)) + usage
return HandleCommandResult(-errno.EINVAL, stderr=msg)
- else:
+ if all_available_devices:
+ if unmanaged is None:
+ unmanaged = False
dg_specs = [
DriveGroupSpec(
service_id='all-available-devices',
placement=PlacementSpec(host_pattern='*'),
data_devices=DeviceSelection(all=True),
+ unmanaged=unmanaged
)
]
-
- if not preview:
+ # This acts weird when abstracted to a function
completion = self.apply_drivegroups(dg_specs)
self._orchestrator_wait([completion])
raise_if_exception(completion)
- ret = self.preview_drivegroups(dg_specs=dg_specs)
- return HandleCommandResult(stdout=print_preview(ret, format))
+ return HandleCommandResult(stdout=completion.result_str())
+
+ return HandleCommandResult(-errno.EINVAL, stderr=usage)
@_cli_write_command(
'orch daemon add osd',