mgr/cephadm: rework --dry-run/previews
author    Joshua Schmid <jschmid@suse.de>
          Wed, 10 Jun 2020 08:33:36 +0000 (10:33 +0200)
committer Joshua Schmid <jschmid@suse.de>
          Wed, 22 Jul 2020 14:01:55 +0000 (16:01 +0200)
Signed-off-by: Joshua Schmid <jschmid@suse.de>
src/pybind/mgr/cephadm/inventory.py
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/schedule.py
src/pybind/mgr/cephadm/services/osd.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/pybind/mgr/orchestrator/_interface.py
src/pybind/mgr/orchestrator/module.py
src/python-common/ceph/deployment/drive_group.py
src/python-common/ceph/deployment/service_spec.py
src/python-common/ceph/tests/test_service_spec.py

diff --git a/src/pybind/mgr/cephadm/inventory.py b/src/pybind/mgr/cephadm/inventory.py
index 8966813fd650d03a32b227759f06e3b47939a727..7bfde531d623440541e2a616f6261688f9b3ad08 100644
@@ -107,6 +107,7 @@ class SpecStore():
         self.mgr = mgr
         self.specs = {} # type: Dict[str, ServiceSpec]
         self.spec_created = {} # type: Dict[str, datetime.datetime]
+        self.spec_preview = {} # type: Dict[str, ServiceSpec]
 
     def load(self):
         # type: () -> None
@@ -127,6 +128,9 @@ class SpecStore():
 
     def save(self, spec):
         # type: (ServiceSpec) -> None
+        if spec.preview_only:
+            self.spec_preview[spec.service_name()] = spec
+            return None
         self.specs[spec.service_name()] = spec
         self.spec_created[spec.service_name()] = datetime.datetime.utcnow()
         self.mgr.set_store(
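
The effect of this change, as a minimal standalone sketch (ServiceSpec and the mon store are stubbed here; this is not part of the patch): specs flagged preview_only are parked in the in-memory spec_preview dict and never reach the persisted specs.

import datetime
from typing import Dict

class StubSpec:
    def __init__(self, service_type: str, preview_only: bool = False):
        self.service_type = service_type
        self.preview_only = preview_only

    def service_name(self) -> str:
        return self.service_type

class StubSpecStore:
    def __init__(self) -> None:
        self.specs: Dict[str, StubSpec] = {}
        self.spec_created: Dict[str, datetime.datetime] = {}
        self.spec_preview: Dict[str, StubSpec] = {}  # in-memory only

    def save(self, spec: StubSpec) -> None:
        if spec.preview_only:
            # dry-run specs skip persistence entirely
            self.spec_preview[spec.service_name()] = spec
            return
        self.specs[spec.service_name()] = spec
        self.spec_created[spec.service_name()] = datetime.datetime.utcnow()

store = StubSpecStore()
store.save(StubSpec('osd', preview_only=True))
assert 'osd' in store.spec_preview and 'osd' not in store.specs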
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index f395096a5ad8fc4001ab75d4c969cd3346ee3de4..6a4d135ec2534abc1841e705c74e6c91d2ba1100 100644
@@ -1643,58 +1643,41 @@ you may want to run:
                 r[str(osd_id)] = o.get('uuid', '')
         return r
 
-    def resolve_hosts_for_osdspecs(self,
-                                   specs: Optional[List[DriveGroupSpec]] = None,
-                                   service_name: Optional[str] = None
-                                   ) -> List[str]:
-        osdspecs = []
-        if service_name:
-            self.log.debug(f"Looking for OSDSpec with service_name: {service_name}")
-            osdspecs = self.spec_store.find(service_name=service_name)
-            self.log.debug(f"Found OSDSpecs: {osdspecs}")
-        if specs:
-            osdspecs = [cast(DriveGroupSpec, spec) for spec in specs]
-        if not service_name and not specs:
-            # if neither parameters are fulfilled, search for all available osdspecs
-            osdspecs = self.spec_store.find(service_name='osd')
-            self.log.debug(f"Found OSDSpecs: {osdspecs}")
-        if not osdspecs:
-            self.log.debug("No OSDSpecs found")
-            return []
-        return sum([spec.placement.filter_matching_hosts(self._get_hosts) for spec in osdspecs], [])
-
-    def resolve_osdspecs_for_host(self, host):
-        matching_specs = []
-        self.log.debug(f"Finding OSDSpecs for host: <{host}>")
-        for spec in self.spec_store.find('osd'):
-            if host in spec.placement.filter_matching_hosts(self._get_hosts):
-                self.log.debug(f"Found OSDSpecs for host: <{host}> -> <{spec}>")
-                matching_specs.append(spec)
-        return matching_specs
-
     def _trigger_preview_refresh(self,
                                  specs: Optional[List[DriveGroupSpec]] = None,
-                                 service_name: Optional[str] = None):
-        refresh_hosts = self.resolve_hosts_for_osdspecs(specs=specs, service_name=service_name)
+                                 service_name: Optional[str] = None,
+                                 ) -> None:
+        # Only trigger a refresh when a spec has changed
+        trigger_specs = []
+        if specs:
+            for spec in specs:
+                preview_spec = self.spec_store.spec_preview.get(spec.service_name())
+                # If the incoming spec differs from the stored preview spec we
+                # need to trigger a refresh; if the spec has been removed
+                # (== None) we need to refresh as well.
+                if not preview_spec or spec != preview_spec:
+                    trigger_specs.append(spec)
+        if service_name:
+            trigger_specs = [cast(DriveGroupSpec, self.spec_store.spec_preview.get(service_name))]
+        if not any(trigger_specs):
+            return None
+
+        refresh_hosts = self.osd_service.resolve_hosts_for_osdspecs(specs=trigger_specs)
         for host in refresh_hosts:
             self.log.info(f"Marking host: {host} for OSDSpec preview refresh.")
             self.cache.osdspec_previews_refresh_queue.append(host)
 
-    @trivial_completion
-    def apply_drivegroups(self, specs: List[DriveGroupSpec]):
-        self._trigger_preview_refresh(specs=specs)
-        return [self._apply(spec) for spec in specs]
-
     @trivial_completion
     def create_osds(self, drive_group: DriveGroupSpec):
         return self.osd_service.create_from_spec(drive_group)
 
-    @trivial_completion
-    def preview_osdspecs(self,
-                         osdspec_name: Optional[str] = None,
-                         osdspecs: Optional[List[DriveGroupSpec]] = None
-                         ):
-        matching_hosts = self.resolve_hosts_for_osdspecs(specs=osdspecs, service_name=osdspec_name)
+    def _preview_osdspecs(self,
+                          osdspecs: Optional[List[DriveGroupSpec]] = None
+                          ):
+        if not osdspecs:
+            return {'n/a': [{'error': True,
+                             'message': 'No OSDSpec or matching hosts found.'}]}
+        matching_hosts = self.osd_service.resolve_hosts_for_osdspecs(specs=osdspecs)
         if not matching_hosts:
             return {'n/a': [{'error': True,
                              'message': 'No OSDSpec or matching hosts found.'}]}
@@ -1704,9 +1687,18 @@ you may want to run:
             # Report 'pending' when any of the matching hosts is still loading previews (flag is True)
             return {'n/a': [{'error': True,
                              'message': 'Preview data is being generated.. '
-                                        'Please try again in a bit.'}]}
-        # drop all keys that are not in search_hosts and return preview struct
-        return {k: v for (k, v) in self.cache.osdspec_previews.items() if k in matching_hosts}
+                                        'Please re-run this command in a bit.'}]}
+        # drop all hosts that are not in matching_hosts and only keep reports that match the requested osdspecs
+        previews_for_specs = {}
+        for host, raw_reports in self.cache.osdspec_previews.items():
+            if host not in matching_hosts:
+                continue
+            osd_reports = []
+            for osd_report in raw_reports:
+                if osd_report.get('osdspec') in [x.service_id for x in osdspecs]:
+                    osd_reports.append(osd_report)
+            previews_for_specs.update({host: osd_reports})
+        return previews_for_specs
 
     def _calc_daemon_deps(self, daemon_type, daemon_id):
         need = {
@@ -1857,7 +1849,10 @@ you may want to run:
         daemon_type = spec.service_type
         service_name = spec.service_name()
         if spec.unmanaged:
-            self.log.debug('Skipping unmanaged service %s spec' % service_name)
+            self.log.debug('Skipping unmanaged service %s' % service_name)
+            return False
+        if spec.preview_only:
+            self.log.debug('Skipping preview_only service %s' % service_name)
             return False
         self.log.debug('Applying service %s spec' % service_name)
 
@@ -2103,8 +2098,49 @@ you may want to run:
         if spec.service_type == 'host':
             return self._add_host(cast(HostSpec, spec))
 
+        if spec.service_type == 'osd':
+            # _trigger_preview_refresh needs to be smart and should only
+            # refresh if a change has been detected
+            self._trigger_preview_refresh(specs=[cast(DriveGroupSpec, spec)])
+
         return self._apply_service_spec(cast(ServiceSpec, spec))
 
+    def _plan(self, spec: ServiceSpec):
+        if spec.service_type == 'osd':
+            return {'service_name': spec.service_name(),
+                    'service_type': spec.service_type,
+                    'data': self._preview_osdspecs(osdspecs=[cast(DriveGroupSpec, spec)])}
+
+        ha = HostAssignment(
+            spec=spec,
+            get_hosts_func=self._get_hosts,
+            get_daemons_func=self.cache.get_daemons_by_service,
+        )
+        ha.validate()
+        hosts = ha.place()
+
+        add_daemon_hosts = ha.add_daemon_hosts(hosts)
+        remove_daemon_hosts = ha.remove_daemon_hosts(hosts)
+
+        return {
+            'service_name': spec.service_name(),
+            'service_type': spec.service_type,
+            'add': [hs.hostname for hs in add_daemon_hosts],
+            'remove': [d.hostname for d in remove_daemon_hosts]
+        }
+
+    @trivial_completion
+    def plan(self, specs: List[GenericSpec]):
+        results = [{'warning': 'WARNING! Dry-Runs are snapshots of a certain point in time and are bound \n'
+                               'to the current inventory setup. If any of these conditions change, the \n'
+                               'preview will be invalid. Please make sure to have a minimal \n'
+                               'timeframe between planning and applying the specs.'}]
+        if any([spec.service_type == 'host' for spec in specs]):
+            return [{'error': 'Found <HostSpec>. Previews that include Host Specifications are not supported yet.'}]
+        for spec in specs:
+            results.append(self._plan(cast(ServiceSpec, spec)))
+        return results
+
     def _apply_service_spec(self, spec: ServiceSpec) -> str:
         if spec.placement.is_empty():
             # fill in default placement
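
For orientation, the rough shape of the data plan() returns, inferred from _plan() above (hostnames and values here are invented), and how a consumer might walk it:

from typing import Any, Dict, List

plan_result: List[Dict[str, Any]] = [
    {'warning': 'WARNING! Dry-Runs are snapshots of a certain point in time ...'},
    {'service_name': 'mds.fs1', 'service_type': 'mds',
     'add': ['host1'], 'remove': []},
    {'service_name': 'osd.foo', 'service_type': 'osd',
     'data': {'host1': [{'osdspec': 'foo', 'data': {'osds': []}}]}},
]

for item in plan_result:
    if 'warning' in item:
        continue
    if item['service_type'] == 'osd':
        print(item['service_name'], '-> per-host ceph-volume previews')
    else:
        print(item['service_name'], '-> add:', item['add'], 'remove:', item['remove'])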
diff --git a/src/pybind/mgr/cephadm/schedule.py b/src/pybind/mgr/cephadm/schedule.py
index 5eb25358dfcf484dc74fd9dbff0f90e7f8199785..81f6a5e029fdaf30ed5c6fac14ac42a326436ad5 100644
@@ -42,8 +42,10 @@ class SimpleScheduler(BaseScheduler):
         if not host_pool:
             return []
         host_pool = [x for x in host_pool]
+        # generate a seed from self.spec to make shuffling deterministic
+        seed = hash(self.spec.service_name())
         # shuffle for pseudo random selection
-        random.shuffle(host_pool)
+        random.Random(seed).shuffle(host_pool)
         return host_pool[:count]
 
 
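Why the seeded shuffle matters for previews: with the seed derived from the service name, repeated scheduling passes within one mgr process select the same hosts, so a dry-run and the apply that follows it agree (Python's str hash is randomized per process, so this is stable within a process, not across restarts). A standalone illustration:

import random

HOSTS = ['host1', 'host2', 'host3', 'host4']

def place(service_name: str, count: int) -> list:
    pool = list(HOSTS)
    # same seed -> same permutation -> same selection
    random.Random(hash(service_name)).shuffle(pool)
    return pool[:count]

assert place('mon', 3) == place('mon', 3)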
diff --git a/src/pybind/mgr/cephadm/services/osd.py b/src/pybind/mgr/cephadm/services/osd.py
index 1baeaa794971215b34532be7c9019233f55ce53f..b3e1d7c5ecabd3e4e6bd9fa426d96429b93ce53c 100644
@@ -136,7 +136,7 @@ class OSDService(CephadmService):
 
     def get_previews(self, host) -> List[Dict[str, Any]]:
         # Find OSDSpecs that match host.
-        osdspecs = self.mgr.resolve_osdspecs_for_host(host)
+        osdspecs = self.resolve_osdspecs_for_host(host)
         return self.generate_previews(osdspecs, host)
 
     def generate_previews(self, osdspecs: List[DriveGroupSpec], for_host: str) -> List[Dict[str, Any]]:
@@ -190,6 +190,29 @@ class OSDService(CephadmService):
                                     'host': host})
         return ret_all
 
+    def resolve_hosts_for_osdspecs(self,
+                                   specs: Optional[List[DriveGroupSpec]] = None
+                                   ) -> List[str]:
+        osdspecs = []
+        if specs:
+            osdspecs = [cast(DriveGroupSpec, spec) for spec in specs]
+        if not osdspecs:
+            self.mgr.log.debug("No OSDSpecs found")
+            return []
+        return sum([spec.placement.filter_matching_hosts(self.mgr._get_hosts) for spec in osdspecs], [])
+
+    def resolve_osdspecs_for_host(self, host: str, specs: Optional[List[DriveGroupSpec]] = None):
+        matching_specs = []
+        self.mgr.log.debug(f"Finding OSDSpecs for host: <{host}>")
+        if not specs:
+            specs = [cast(DriveGroupSpec, spec) for (sn, spec) in self.mgr.spec_store.spec_preview.items()
+                     if spec.service_type == 'osd']
+        for spec in specs:
+            if host in spec.placement.filter_matching_hosts(self.mgr._get_hosts):
+                self.mgr.log.debug(f"Found OSDSpecs for host: <{host}> -> <{spec}>")
+                matching_specs.append(spec)
+        return matching_specs
+
     def _run_ceph_volume_command(self, host: str,
                                  cmd: str, env_vars: Optional[List[str]] = None
                                  ) -> Tuple[List[str], List[str], int]:
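
The two resolvers that moved into OSDService, reduced to their core as a sketch (placement matching is stubbed; this is not part of the patch):

from typing import List

class StubSpec:
    def __init__(self, service_id: str, hosts: List[str]):
        self.service_id = service_id
        self._hosts = hosts  # stand-in for placement.filter_matching_hosts

    def matching_hosts(self) -> List[str]:
        return self._hosts

def resolve_hosts_for_osdspecs(specs: List[StubSpec]) -> List[str]:
    # concatenation of all hosts each spec matches (duplicates kept)
    return sum([s.matching_hosts() for s in specs], [])

def resolve_osdspecs_for_host(host: str, specs: List[StubSpec]) -> List[StubSpec]:
    return [s for s in specs if host in s.matching_hosts()]

specs = [StubSpec('foo', ['host1', 'host2']), StubSpec('bar', ['host2'])]
assert resolve_hosts_for_osdspecs(specs) == ['host1', 'host2', 'host2']
assert [s.service_id for s in resolve_osdspecs_for_host('host2', specs)] == ['foo', 'bar']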
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index b6ca68c1c772adf4683433213b111fa466b44b14..15ef21293748851b9bb39e3295592c4f8e7b974b 100644
@@ -272,7 +272,7 @@ class TestCephadm(object):
                 )
             )
 
-            c = cephadm_module.apply_drivegroups([spec])
+            c = cephadm_module.apply([spec])
             assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
 
             inventory = Devices([
@@ -302,7 +302,7 @@ class TestCephadm(object):
             json_spec = {'service_type': 'osd', 'placement': {'host_pattern': 'test'}, 'service_id': 'foo', 'data_devices': {'all': True}}
             spec = ServiceSpec.from_json(json_spec)
             assert isinstance(spec, DriveGroupSpec)
-            c = cephadm_module.apply_drivegroups([spec])
+            c = cephadm_module.apply([spec])
             assert wait(cephadm_module, c) == ['Scheduled osd.foo update...']
             _save_spec.assert_called_with(spec)
 
diff --git a/src/pybind/mgr/orchestrator/_interface.py b/src/pybind/mgr/orchestrator/_interface.py
index ab9b5539b0ee4a58f4cf9e5be663854c57082e76..29ac9c8431d85a0a7118610a63b8f9aa8874f308 100644
@@ -910,6 +910,12 @@ class Orchestrator(object):
             completion = completion.then(next)
         return completion
 
+    def plan(self, spec: List["GenericSpec"]):
+        """
+        Plan (Dry-run, Preview) a List of Specs.
+        """
+        raise NotImplementedError()
+
     def remove_daemons(self, names):
         # type: (List[str]) -> Completion
         """
diff --git a/src/pybind/mgr/orchestrator/module.py b/src/pybind/mgr/orchestrator/module.py
index dbeb1b426d588a46d978b94f083746740d839ec7..d4abda8e5dd2f983f391614c432c5ce234968798 100644
@@ -1,7 +1,7 @@
 import datetime
 import errno
 import json
-from typing import List, Set, Optional, Iterator, cast
+from typing import List, Set, Optional, Iterator, cast, Dict, Any, Union
 import re
 import ast
 
@@ -68,6 +68,74 @@ def to_format(what, format: str, many: bool, cls):
         return yaml.dump(to_yaml(copy), default_flow_style=False)
 
 
+def generate_preview_tables(data):
+    error = [x.get('error') for x in data if x.get('error')]
+    if error:
+        return json.dumps(error)
+    warning = [x.get('warning') for x in data if x.get('warning')]
+    osd_table = preview_table_osd(data)
+    service_table = preview_table_services(data)
+    tables = f"""
+{''.join(warning)}
+
+####################
+SERVICESPEC PREVIEWS
+####################
+{service_table}
+
+################
+OSDSPEC PREVIEWS
+################
+{osd_table}
+"""
+    return tables
+
+
+def preview_table_osd(data):
+    table = PrettyTable(header_style='upper', title='OSDSPEC PREVIEWS', border=True)
+    table.field_names = "service name host data db wal".split()
+    table.align = 'l'
+    table.left_padding_width = 0
+    table.right_padding_width = 2
+    for osd_data in data:
+        if osd_data.get('service_type') != 'osd':
+            continue
+        for host, specs in osd_data.get('data').items():
+            for spec in specs:
+                if spec.get('error'):
+                    return spec.get('message')
+                dg_name = spec.get('osdspec')
+                for osd in spec.get('data', {}).get('osds', []):
+                    db_path = '-'
+                    wal_path = '-'
+                    block_db = osd.get('block.db', {}).get('path')
+                    block_wal = osd.get('block.wal', {}).get('path')
+                    block_data = osd.get('data', {}).get('path', '')
+                    if not block_data:
+                        continue
+                    if block_db:
+                        db_path = spec.get('data', {}).get('vg', {}).get('devices', [])
+                    if block_wal:
+                        wal_path = spec.get('data', {}).get('wal_vg', {}).get('devices', [])
+                    table.add_row(('osd', dg_name, host, block_data, db_path, wal_path))
+    return table.get_string()
+
+
+def preview_table_services(data):
+    table = PrettyTable(header_style='upper', title="SERVICESPEC PREVIEW", border=True)
+    table.field_names = 'SERVICE NAME ADD_TO REMOVE_FROM'.split()
+    table.align = 'l'
+    table.left_padding_width = 0
+    table.right_padding_width = 2
+    for item in data:
+        if item.get('warning'):
+            continue
+        if item.get('service_type') != 'osd':
+            table.add_row((item.get('service_type'), item.get('service_name'),
+                           " ".join(item.get('add')), " ".join(item.get('remove'))))
+    return table.get_string()
+
+
 class OrchestratorCli(OrchestratorClientMixin, MgrModule,
                       metaclass=CLICommandMeta):
     MODULE_OPTIONS = [
@@ -387,7 +455,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule,
             table = PrettyTable(
                 ['NAME', 'RUNNING', 'REFRESHED', 'AGE',
                  'PLACEMENT',
-                 'IMAGE NAME', 'IMAGE ID',
+                 'IMAGE NAME', 'IMAGE ID'
                 ],
                 border=False)
             table.align['NAME'] = 'l'
@@ -626,20 +694,21 @@ Examples:
     @_cli_write_command(
         'orch apply osd',
         'name=all_available_devices,type=CephBool,req=false '
+        'name=dry_run,type=CephBool,req=false '
         'name=unmanaged,type=CephBool,req=false '
         "name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false",
         'Create OSD daemon(s) using a drive group spec')
     def _apply_osd(self,
                    all_available_devices: bool = False,
-                   format: Optional[str] = 'plain',
+                   format: str = 'plain',
                    unmanaged=None,
+                   dry_run=None,
                    inbuf: Optional[str] = None) -> HandleCommandResult:
         """Apply DriveGroupSpecs to create OSDs"""
         usage = """
 usage:
-  ceph orch apply osd -i <json_file/yaml_file>
-  ceph orch apply osd --all-available-devices
-  ceph orch apply osd --all-available-devices --unmanaged=true|false 
+  ceph orch apply osd -i <json_file/yaml_file> [--dry-run]
+  ceph orch apply osd --all-available-devices [--dry-run] [--unmanaged]
   
 Restrictions:
   
@@ -688,12 +757,29 @@ Examples:
                 return HandleCommandResult(-errno.EINVAL, stderr=usage)
             try:
                 drivegroups = yaml.safe_load_all(inbuf)
-                dg_specs = [DriveGroupSpec.from_json(dg) for dg in drivegroups]
-                # This acts weird when abstracted to a function
-                completion = self.apply_drivegroups(dg_specs)
+
+                dg_specs = []
+                for dg in drivegroups:
+                    spec = DriveGroupSpec.from_json(dg)
+                    if dry_run:
+                        spec.preview_only = True
+                    dg_specs.append(spec)
+
+                completion = self.apply(dg_specs)
                 self._orchestrator_wait([completion])
                 raise_if_exception(completion)
-                return HandleCommandResult(stdout=completion.result_str())
+                out = completion.result_str()
+                if dry_run:
+                    completion = self.plan(dg_specs)
+                    self._orchestrator_wait([completion])
+                    raise_if_exception(completion)
+                    data = completion.result
+                    if format == 'plain':
+                        out = preview_table_osd(data)
+                    else:
+                        out = to_format(data, format, many=True, cls=None)
+                return HandleCommandResult(stdout=out)
+
             except ValueError as e:
                 msg = 'Failed to read JSON/YAML input: {}'.format(str(e)) + usage
                 return HandleCommandResult(-errno.EINVAL, stderr=msg)
@@ -705,14 +791,24 @@ Examples:
                     service_id='all-available-devices',
                     placement=PlacementSpec(host_pattern='*'),
                     data_devices=DeviceSelection(all=True),
-                    unmanaged=unmanaged
+                    unmanaged=unmanaged,
+                    preview_only=dry_run
                 )
             ]
             # This acts weird when abstracted to a function
-            completion = self.apply_drivegroups(dg_specs)
+            completion = self.apply(dg_specs)
             self._orchestrator_wait([completion])
             raise_if_exception(completion)
-            return HandleCommandResult(stdout=completion.result_str())
+            out = completion.result_str()
+            if dry_run:
+                completion = self.plan(dg_specs)
+                self._orchestrator_wait([completion])
+                data = completion.result
+                if format == 'plain':
+                    out = preview_table_osd(data)
+                else:
+                    out = to_format(data, format, many=True, cls=None)
+            return HandleCommandResult(stdout=out)
 
         return HandleCommandResult(-errno.EINVAL, stderr=usage)
 
@@ -1009,45 +1105,65 @@ Usage:
     @_cli_write_command(
         'orch apply',
         'name=service_type,type=CephChoices,strings=mon|mgr|rbd-mirror|crash|alertmanager|grafana|node-exporter|prometheus,req=false '
+        'name=dry_run,type=CephBool,req=false '
         'name=placement,type=CephString,req=false '
+        'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false '
         'name=unmanaged,type=CephBool,req=false',
         'Update the size or placement for a service or apply a large yaml spec')
     def _apply_misc(self,
                     service_type: Optional[str] = None,
                     placement: Optional[str] = None,
                     unmanaged: bool = False,
+                    dry_run: bool = False,
+                    format: str = 'plain',
                     inbuf: Optional[str] = None) -> HandleCommandResult:
         usage = """Usage:
-  ceph orch apply -i <yaml spec>
+  ceph orch apply -i <yaml spec> [--dry-run]
   ceph orch apply <service_type> <placement> [--unmanaged]
         """
         if inbuf:
             if service_type or placement or unmanaged:
                 raise OrchestratorValidationError(usage)
             content: Iterator = yaml.safe_load_all(inbuf)
-            specs: List[GenericSpec] = [json_to_generic_spec(s) for s in content]
-
+            specs: List[Union[ServiceSpec, HostSpec]] = []
+            for s in content:
+                spec = json_to_generic_spec(s)
+                if dry_run and not isinstance(spec, HostSpec):
+                    spec.preview_only = dry_run
+                specs.append(spec)
         else:
-            placmentspec = PlacementSpec.from_string(placement)
-            if not service_type:
-                raise OrchestratorValidationError(f'Error: Empty service_type\n{usage}')
+            placementspec = PlacementSpec.from_string(placement)
+            assert service_type
+            specs = [ServiceSpec(service_type, placement=placementspec, unmanaged=unmanaged, preview_only=dry_run)]
 
-            specs = [ServiceSpec(service_type, placement=placmentspec, unmanaged=unmanaged)]
         completion = self.apply(specs)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
+        out = completion.result_str()
+        if dry_run:
+            completion = self.plan(specs)
+            self._orchestrator_wait([completion])
+            raise_if_exception(completion)
+            data = completion.result
+            if format == 'plain':
+                out = generate_preview_tables(data)
+            else:
+                out = to_format(data, format, many=True, cls=None)
+        return HandleCommandResult(stdout=out)
 
     @_cli_write_command(
         'orch apply mds',
         'name=fs_name,type=CephString '
+        'name=dry_run,type=CephBool,req=false '
         'name=placement,type=CephString,req=false '
-        'name=unmanaged,type=CephBool,req=false',
+        'name=unmanaged,type=CephBool,req=false '
+        'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false',
         'Update the number of MDS instances for the given fs_name')
     def _apply_mds(self,
                    fs_name: str,
                    placement: Optional[str] = None,
+                   dry_run: bool = False,
+                   format: str = 'plain',
                    unmanaged: bool = False,
                    inbuf: Optional[str] = None) -> HandleCommandResult:
         if inbuf:
@@ -1057,12 +1173,23 @@ Usage:
             service_type='mds',
             service_id=fs_name,
             placement=PlacementSpec.from_string(placement),
-            unmanaged=unmanaged)
+            unmanaged=unmanaged,
+            preview_only=dry_run)
 
         completion = self.apply_mds(spec)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
+        out = completion.result_str()
+        if dry_run:
+            completion = self.plan([spec])
+            self._orchestrator_wait([completion])
+            raise_if_exception(completion)
+            data = completion.result
+            if format == 'plain':
+                out = preview_table_services(data)
+            else:
+                out = to_format(data, format, many=True, cls=None)
+        return HandleCommandResult(stdout=out)
 
     @_cli_write_command(
         'orch apply rgw',
@@ -1072,6 +1199,8 @@ Usage:
         'name=port,type=CephInt,req=false '
         'name=ssl,type=CephBool,req=false '
         'name=placement,type=CephString,req=false '
+        'name=dry_run,type=CephBool,req=false '
+        'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false ' 
         'name=unmanaged,type=CephBool,req=false',
         'Update the number of RGW instances for the given zone')
     def _apply_rgw(self,
@@ -1081,6 +1210,8 @@ Usage:
                    port: Optional[int] = None,
                    ssl: bool = False,
                    placement: Optional[str] = None,
+                   dry_run: bool = False,
+                   format: str = 'plain',
                    unmanaged: bool = False,
                    inbuf: Optional[str] = None) -> HandleCommandResult:
         if inbuf:
@@ -1094,12 +1225,23 @@ Usage:
             ssl=ssl,
             placement=PlacementSpec.from_string(placement),
             unmanaged=unmanaged,
+            preview_only=dry_run
         )
 
         completion = self.apply_rgw(spec)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
+        out = completion.result_str()
+        if dry_run:
+            completion = self.plan([spec])
+            self._orchestrator_wait([completion])
+            raise_if_exception(completion)
+            data = completion.result
+            if format == 'plain':
+                out = preview_table_services(data)
+            else:
+                out = to_format(data, format, many=True, cls=None)
+        return HandleCommandResult(stdout=out)
 
     @_cli_write_command(
         'orch apply nfs',
@@ -1107,6 +1249,8 @@ Usage:
         'name=pool,type=CephString '
         'name=namespace,type=CephString,req=false '
         'name=placement,type=CephString,req=false '
+        'name=dry_run,type=CephBool,req=false '
+        'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false ' 
         'name=unmanaged,type=CephBool,req=false',
         'Scale an NFS service')
     def _apply_nfs(self,
@@ -1114,6 +1258,8 @@ Usage:
                    pool: str,
                    namespace: Optional[str] = None,
                    placement: Optional[str] = None,
+                   format: str = 'plain',
+                   dry_run: bool = False,
                    unmanaged: bool = False,
                    inbuf: Optional[str] = None) -> HandleCommandResult:
         if inbuf:
@@ -1125,12 +1271,23 @@ Usage:
             namespace=namespace,
             placement=PlacementSpec.from_string(placement),
             unmanaged=unmanaged,
+            preview_only=dry_run
         )
 
         completion = self.apply_nfs(spec)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
+        out = completion.result_str()
+        if dry_run:
+            completion = self.plan([spec])
+            self._orchestrator_wait([completion])
+            raise_if_exception(completion)
+            data = completion.result
+            if format == 'plain':
+                out = preview_table_services(data)
+            else:
+                out = to_format(data, format, many=True, cls=None)
+        return HandleCommandResult(stdout=out)
 
     @_cli_write_command(
         'orch apply iscsi',
@@ -1139,6 +1296,8 @@ Usage:
         'name=api_password,type=CephString '
         'name=trusted_ip_list,type=CephString,req=false '
         'name=placement,type=CephString,req=false '
+        'name=dry_run,type=CephBool,req=false '
+        'name=format,type=CephChoices,strings=plain|json|json-pretty|yaml,req=false ' 
         'name=unmanaged,type=CephBool,req=false',
         'Scale an iSCSI service')
     def _apply_iscsi(self,
@@ -1148,6 +1307,8 @@ Usage:
                      trusted_ip_list: Optional[str] = None,
                      placement: Optional[str] = None,
                      unmanaged: bool = False,
+                     dry_run: bool = False,
+                     format: str = 'plain',
                      inbuf: Optional[str] = None) -> HandleCommandResult:
         if inbuf:
             raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
@@ -1160,12 +1321,23 @@ Usage:
             trusted_ip_list=trusted_ip_list,
             placement=PlacementSpec.from_string(placement),
             unmanaged=unmanaged,
+            preview_only=dry_run
         )
 
         completion = self.apply_iscsi(spec)
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
-        return HandleCommandResult(stdout=completion.result_str())
+        out = completion.result_str()
+        if dry_run:
+            completion = self.plan([spec])
+            self._orchestrator_wait([completion])
+            raise_if_exception(completion)
+            data = completion.result
+            if format == 'plain':
+                out = preview_table_services(data)
+            else:
+                out = to_format(data, format, many=True, cls=None)
+        return HandleCommandResult(stdout=out)
 
     @_cli_write_command(
         'orch set backend',
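
Each apply command above repeats the same dry-run plumbing. Condensed into one hypothetical helper for readability only (the commit deliberately keeps it inlined; an in-tree comment notes the completion handling "acts weird when abstracted to a function"):

def _apply_with_dry_run(self, specs, dry_run: bool, format: str) -> str:
    for spec in specs:
        spec.preview_only = dry_run  # apply() then only records a preview
    completion = self.apply(specs)
    self._orchestrator_wait([completion])
    raise_if_exception(completion)
    out = completion.result_str()
    if dry_run:
        completion = self.plan(specs)
        self._orchestrator_wait([completion])
        raise_if_exception(completion)
        data = completion.result
        if format == 'plain':
            out = generate_preview_tables(data)
        else:
            out = to_format(data, format, many=True, cls=None)
    return out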
diff --git a/src/python-common/ceph/deployment/drive_group.py b/src/python-common/ceph/deployment/drive_group.py
index 76e3f28e52bc85c62f40c1e37d82ad7dc828d4c6..a4307f6e73ac1532f95588716e283f034e9b6338 100644
@@ -142,7 +142,7 @@ class DriveGroupSpec(ServiceSpec):
         "db_slots", "wal_slots", "block_db_size", "placement", "service_id", "service_type",
         "data_devices", "db_devices", "wal_devices", "journal_devices",
         "data_directories", "osds_per_device", "objectstore", "osd_id_claims",
-        "journal_size", "unmanaged", "filter_logic"
+        "journal_size", "unmanaged", "filter_logic", "preview_only"
     ]
 
     def __init__(self,
@@ -164,12 +164,14 @@ class DriveGroupSpec(ServiceSpec):
                  journal_size=None,  # type: Optional[int]
                  service_type=None,  # type: Optional[str]
                  unmanaged=False,  # type: bool
-                 filter_logic='AND'  # type: str
+                 filter_logic='AND',  # type: str
+                 preview_only=False,  # type: bool
                  ):
         assert service_type is None or service_type == 'osd'
         super(DriveGroupSpec, self).__init__('osd', service_id=service_id,
                                              placement=placement,
-                                             unmanaged=unmanaged)
+                                             unmanaged=unmanaged,
+                                             preview_only=preview_only)
 
         #: A :class:`ceph.deployment.drive_group.DeviceSelection`
         self.data_devices = data_devices
@@ -219,6 +221,9 @@ class DriveGroupSpec(ServiceSpec):
         #: defaults to 'AND'
         self.filter_logic = filter_logic.upper()
 
+        #: If this should be treated as a 'preview' spec
+        self.preview_only = preview_only
+
     @classmethod
     def _from_json_impl(cls, json_drive_group):
         # type: (dict) -> DriveGroupSpec
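
Constructing a preview-only drive group, as the reworked CLI now does for "ceph orch apply osd --all-available-devices --dry-run" (illustrative; it mirrors the spec built in module.py above):

from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from ceph.deployment.service_spec import PlacementSpec

dg = DriveGroupSpec(
    service_id='all-available-devices',
    placement=PlacementSpec(host_pattern='*'),
    data_devices=DeviceSelection(all=True),
    preview_only=True,  # routed into SpecStore.spec_preview, never persisted
)
print(dg.service_name())  # -> 'osd.all-available-devices'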
diff --git a/src/python-common/ceph/deployment/service_spec.py b/src/python-common/ceph/deployment/service_spec.py
index e1f0e440edb1b80958cad6f497391d617bcfc48b..2e887c67a68498cdcfd13f63418d9dd0b743f2fa 100644
@@ -408,6 +408,7 @@ class ServiceSpec(object):
                  placement: Optional[PlacementSpec] = None,
                  count: Optional[int] = None,
                  unmanaged: bool = False,
+                 preview_only: bool = False,
                  ):
         self.placement = PlacementSpec() if placement is None else placement  # type: PlacementSpec
 
@@ -415,6 +416,7 @@ class ServiceSpec(object):
         self.service_type = service_type
         self.service_id = service_id
         self.unmanaged = unmanaged
+        self.preview_only = preview_only
 
     @classmethod
     @handle_type_error
@@ -534,6 +536,11 @@ class ServiceSpec(object):
     def __repr__(self):
         return "{}({!r})".format(self.__class__.__name__, self.__dict__)
 
+    def __eq__(self, other):
+        return (self.__class__ == other.__class__
+                and
+                self.__dict__ == other.__dict__)
+
     def one_line_str(self):
         return '<{} for service_name={}>'.format(self.__class__.__name__, self.service_name())
 
@@ -553,11 +560,12 @@ class NFSServiceSpec(ServiceSpec):
                  namespace: Optional[str] = None,
                  placement: Optional[PlacementSpec] = None,
                  unmanaged: bool = False,
+                 preview_only: bool = False
                  ):
         assert service_type == 'nfs'
         super(NFSServiceSpec, self).__init__(
             'nfs', service_id=service_id,
-            placement=placement, unmanaged=unmanaged)
+            placement=placement, unmanaged=unmanaged, preview_only=preview_only)
 
         #: RADOS pool where NFS client recovery data is stored.
         self.pool = pool
@@ -565,6 +573,8 @@ class NFSServiceSpec(ServiceSpec):
         #: RADOS namespace where NFS client recovery data is stored in the pool.
         self.namespace = namespace
 
+        self.preview_only = preview_only
+
     def validate(self):
         super(NFSServiceSpec, self).validate()
 
@@ -606,6 +616,7 @@ class RGWSpec(ServiceSpec):
                  rgw_frontend_ssl_key: Optional[List[str]] = None,
                  unmanaged: bool = False,
                  ssl: bool = False,
+                 preview_only: bool = False,
                  ):
         assert service_type == 'rgw', service_type
         if service_id:
@@ -622,7 +633,8 @@ class RGWSpec(ServiceSpec):
                 service_id = '%s.%s' % (rgw_realm, rgw_zone)
         super(RGWSpec, self).__init__(
             'rgw', service_id=service_id,
-            placement=placement, unmanaged=unmanaged)
+            placement=placement, unmanaged=unmanaged,
+            preview_only=preview_only)
 
         self.rgw_realm = rgw_realm
         self.rgw_zone = rgw_zone
@@ -631,6 +643,7 @@ class RGWSpec(ServiceSpec):
         self.rgw_frontend_ssl_certificate = rgw_frontend_ssl_certificate
         self.rgw_frontend_ssl_key = rgw_frontend_ssl_key
         self.ssl = ssl
+        self.preview_only = preview_only
 
     def get_port(self):
         if self.rgw_frontend_port:
@@ -677,11 +690,13 @@ class IscsiServiceSpec(ServiceSpec):
                  ssl_cert: Optional[str] = None,
                  ssl_key: Optional[str] = None,
                  placement: Optional[PlacementSpec] = None,
-                 unmanaged: bool = False
+                 unmanaged: bool = False,
+                 preview_only: bool = False
                  ):
         assert service_type == 'iscsi'
         super(IscsiServiceSpec, self).__init__('iscsi', service_id=service_id,
-                                               placement=placement, unmanaged=unmanaged)
+                                               placement=placement, unmanaged=unmanaged,
+                                               preview_only=preview_only)
 
         #: RADOS pool where ceph-iscsi config data is stored.
         self.pool = pool
@@ -692,6 +707,7 @@ class IscsiServiceSpec(ServiceSpec):
         self.api_secure = api_secure
         self.ssl_cert = ssl_cert
         self.ssl_key = ssl_key
+        self.preview_only = preview_only
 
         if not self.api_secure and self.ssl_cert and self.ssl_key:
             self.api_secure = True
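
The dict-based __eq__ is what _trigger_preview_refresh relies on to decide whether a preview must be regenerated. Reduced to a standalone sketch with simplified objects:

class Spec:
    def __init__(self, **kw):
        self.__dict__.update(kw)

    def __eq__(self, other):
        return (self.__class__ == other.__class__
                and self.__dict__ == other.__dict__)

stored = Spec(service_type='osd', service_id='foo', count=3)
incoming = Spec(service_type='osd', service_id='foo', count=3)
changed = Spec(service_type='osd', service_id='foo', count=5)
assert stored == incoming  # unchanged: no refresh needed
assert stored != changed   # changed: triggers a preview refresh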
diff --git a/src/python-common/ceph/tests/test_service_spec.py b/src/python-common/ceph/tests/test_service_spec.py
index 5f15ae45a90e57d2849a24ae3463402f7bdbf9c5..f91e55af618a2461f8ea67dc9f8f31deee218fa3 100644
@@ -163,3 +163,65 @@ spec:
 
         assert yaml.dump(object) == y
         assert yaml.dump(ServiceSpec.from_json(object.to_json())) == y
+
+@pytest.mark.parametrize("spec1, spec2, eq",
+                         [
+                             (
+                                     ServiceSpec(
+                                         service_type='mon'
+                                     ),
+                                     ServiceSpec(
+                                         service_type='mon'
+                                     ),
+                                     True
+                             ),
+                             (
+                                     ServiceSpec(
+                                         service_type='mon'
+                                     ),
+                                     ServiceSpec(
+                                         service_type='mon',
+                                         service_id='foo'
+                                     ),
+                                     False
+                             ),
+                             # Add service_type='mgr'
+                             (
+                                     ServiceSpec(
+                                         service_type='osd'
+                                     ),
+                                     ServiceSpec(
+                                         service_type='osd',
+                                     ),
+                                     True
+                             ),
+                             (
+                                     ServiceSpec(
+                                         service_type='osd'
+                                     ),
+                                     DriveGroupSpec(),
+                                     True
+                             ),
+                             (
+                                     ServiceSpec(
+                                         service_type='osd'
+                                     ),
+                                     ServiceSpec(
+                                         service_type='osd',
+                                         service_id='foo',
+                                     ),
+                                     False
+                             ),
+                             (
+                                     ServiceSpec(
+                                         service_type='rgw'
+                                     ),
+                                     RGWSpec(),
+                                     True
+                             ),
+                         ])
+def test_spec_hash_eq(spec1: ServiceSpec,
+                      spec2: ServiceSpec,
+                      eq: bool):
+
+    assert (spec1 == spec2) is eq