git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
pybind/mgr: Move ServiceSpec to python-common: Fix imports
author Sebastian Wagner <sebastian.wagner@suse.com>
Mon, 9 Mar 2020 12:07:12 +0000 (13:07 +0100)
committer Sebastian Wagner <sebastian.wagner@suse.com>
Tue, 10 Mar 2020 12:28:22 +0000 (13:28 +0100)
Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/rook/module.py
src/pybind/mgr/rook/rook_cluster.py
src/pybind/mgr/test_orchestrator/module.py
src/pybind/mgr/volumes/fs/fs_util.py

index 20f046cad8f66fda2a4e3bcdcfd39c8de1b71652..339dcb4efe1696ad3c2d6b726424dccbbf7bb1e9 100644 (file)
@@ -30,7 +30,7 @@ import uuid
 from ceph.deployment import inventory, translate
 from ceph.deployment.drive_group import DriveGroupSpec
 from ceph.deployment.drive_selection import selector
-from ceph.deployment.service_spec import HostPlacementSpec, ServiceSpec
+from ceph.deployment.service_spec import HostPlacementSpec, ServiceSpec, PlacementSpec
 
 from mgr_module import MgrModule
 import orchestrator
@@ -2424,16 +2424,16 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
         if spec.placement.is_empty():
             # fill in default placement
             defaults = {
-                'mon': orchestrator.PlacementSpec(count=5),
-                'mgr': orchestrator.PlacementSpec(count=2),
-                'mds': orchestrator.PlacementSpec(count=2),
-                'rgw': orchestrator.PlacementSpec(count=2),
-                'rbd-mirror': orchestrator.PlacementSpec(count=2),
-                'grafana': orchestrator.PlacementSpec(count=1),
-                'alertmanager': orchestrator.PlacementSpec(count=1),
-                'prometheus': orchestrator.PlacementSpec(count=1),
-                'node-exporter': orchestrator.PlacementSpec(all_hosts=True),
-                'crash': orchestrator.PlacementSpec(all_hosts=True),
+                'mon': PlacementSpec(count=5),
+                'mgr': PlacementSpec(count=2),
+                'mds': PlacementSpec(count=2),
+                'rgw': PlacementSpec(count=2),
+                'rbd-mirror': PlacementSpec(count=2),
+                'grafana': PlacementSpec(count=1),
+                'alertmanager': PlacementSpec(count=1),
+                'prometheus': PlacementSpec(count=1),
+                'node-exporter': PlacementSpec(all_hosts=True),
+                'crash': PlacementSpec(all_hosts=True),
             }
             spec.placement = defaults[spec.service_type]
         self.log.info('Saving service %s spec with placement %s' % (
@@ -3008,7 +3008,7 @@ class BaseScheduler(object):
     """
 
     def __init__(self, placement_spec):
-        # type: (orchestrator.PlacementSpec) -> None
+        # type: (PlacementSpec) -> None
         self.placement_spec = placement_spec
 
     def place(self, host_pool, count=None):
@@ -3061,7 +3061,7 @@ class HostAssignment(object):
         self.service_name = spec.service_name()
 
     def place(self):
-        # type: () -> List[orchestrator.HostPlacementSpec]
+        # type: () -> List[HostPlacementSpec]
         """
         Load hosts into the spec.placement.hosts container.
         """
index f678aea8782351758186edb69fd835e2c3ceed62..b21cbee7d6835b968b889d6f5442524fb248a82a 100644 (file)
@@ -3,6 +3,7 @@ import functools
 import os
 
 from ceph.deployment import inventory
+from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec
 
 try:
     from typing import List, Dict, Optional, Callable, Any
@@ -297,17 +298,17 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         )
 
     def add_mds(self, spec):
-        # type: (orchestrator.ServiceSpec) -> RookCompletion
+        # type: (ServiceSpec) -> RookCompletion
         return self._service_add_decorate('MDS', spec,
                                        self.rook_cluster.add_filesystem)
 
     def add_rgw(self, spec):
-        # type: (orchestrator.RGWSpec) -> RookCompletion
+        # type: (RGWSpec) -> RookCompletion
         return self._service_add_decorate('RGW', spec,
                                        self.rook_cluster.add_objectstore)
 
     def add_nfs(self, spec):
-        # type: (orchestrator.NFSServiceSpec) -> RookCompletion
+        # type: (NFSServiceSpec) -> RookCompletion
         return self._service_add_decorate("NFS", spec,
                                           self.rook_cluster.add_nfsgw)
 
@@ -334,7 +335,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
             )
 
     def apply_mon(self, spec):
-        # type: (orchestrator.ServiceSpec) -> RookCompletion
+        # type: (ServiceSpec) -> RookCompletion
         if spec.placement.hosts or spec.placement.label:
             raise RuntimeError("Host list or label is not supported by rook.")
 
@@ -345,7 +346,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         )
 
     def apply_mds(self, spec):
-        # type: (orchestrator.ServiceSpec) -> RookCompletion
+        # type: (ServiceSpec) -> RookCompletion
         num = spec.placement.count
         return write_completion(
             lambda: self.rook_cluster.update_mds_count(spec.service_id, num),
@@ -354,7 +355,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         )
 
     def apply_nfs(self, spec):
-        # type: (orchestrator.NFSServiceSpec) -> RookCompletion
+        # type: (NFSServiceSpec) -> RookCompletion
         num = spec.placement.count
         return write_completion(
             lambda: self.rook_cluster.update_nfs_count(spec.service_id, num),
index dd107046bcad6f22b9ccf0b6d4dd7502f3990e4b..68b2a0d1ab91c07e372177b5c27b7003a4385ed1 100644 (file)
@@ -19,6 +19,7 @@ from six.moves.urllib.parse import urljoin  # pylint: disable=import-error
 from urllib3.exceptions import ProtocolError
 
 from ceph.deployment.drive_group import DriveGroupSpec
+from ceph.deployment.service_spec import ServiceSpec
 from mgr_util import merge_dicts
 
 try:
@@ -343,7 +344,7 @@ class RookCluster(object):
                 raise
 
     def add_filesystem(self, spec):
-        # type: (orchestrator.ServiceSpec) -> None
+        # type: (ServiceSpec) -> None
         # TODO use spec.placement
         # TODO warn if spec.extended has entries we don't kow how
         #      to action.
index ee375824098bcfd9e26009870fa41d58972bff75..55f43d56a320c01566def5cab4a063625f8c5acf 100644 (file)
@@ -6,6 +6,9 @@ import threading
 import functools
 import itertools
 from subprocess import check_output, CalledProcessError
+
+from ceph.deployment.service_spec import NFSServiceSpec, ServiceSpec
+
 try:
     from typing import Callable, List, Tuple
 except ImportError:
@@ -286,7 +289,7 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
 
     @deferred_write("Adding NFS service")
     def add_nfs(self, spec):
-        # type: (orchestrator.NFSServiceSpec) -> None
+        # type: (NFSServiceSpec) -> None
         assert isinstance(spec.pool, str)
 
     @deferred_write("apply_nfs")
@@ -329,14 +332,14 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
 
     @deferred_write("apply_mgr")
     def apply_mgr(self, spec):
-        # type: (orchestrator.ServiceSpec) -> None
+        # type: (ServiceSpec) -> None
 
         assert not spec.placement.hosts or len(spec.placement.hosts) == spec.placement.count
         assert all([isinstance(h, str) for h in spec.placement.hosts])
 
     @deferred_write("apply_mon")
     def apply_mon(self, spec):
-        # type: (orchestrator.ServiceSpec) -> None
+        # type: (ServiceSpec) -> None
 
         assert not spec.placement.hosts or len(spec.placement.hosts) == spec.placement.count
         assert all([isinstance(h[0], str) for h in spec.placement.hosts])
index 7d2d3c88694934b30217b5bd55f151dfa32bdff1..5f9995d4390b32a2d1890dd5be10cc7e0c897586 100644 (file)
@@ -2,6 +2,8 @@ import os
 import errno
 import logging
 
+from ceph.deployment.service_spec import ServiceSpec, PlacementSpec
+
 import cephfs
 import orchestrator
 
@@ -34,9 +36,9 @@ def remove_filesystem(mgr, fs_name):
     return mgr.mon_command(command)
 
 def create_mds(mgr, fs_name, placement):
-    spec = orchestrator.ServiceSpec(service_type='mds',
+    spec = ServiceSpec(service_type='mds',
                                     service_id=fs_name,
-                                    placement=orchestrator.PlacementSpec.from_string(placement))
+                                    placement=PlacementSpec.from_string(placement))
     try:
         completion = mgr.apply_mds(spec)
         mgr._orchestrator_wait([completion])