git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
orchestrator: merge the ServiceLocation class into ServiceDescription 24863/head
author: Jeff Layton <jlayton@redhat.com>
Wed, 14 Nov 2018 17:38:22 +0000 (12:38 -0500)
committer: Jeff Layton <jlayton@redhat.com>
Thu, 15 Nov 2018 14:02:54 +0000 (09:02 -0500)
The ServiceDescription object just holds a list of ServiceLocation
objects.  Eliminate ServiceLocation and merge it into the
ServiceDescription class. Have describe_service return a list of
ServiceDescriptions.

Suggested-by: Tim Serong <tserong@suse.com>
Signed-off-by: Jeff Layton <jlayton@redhat.com>
doc/mgr/orchestrator_modules.rst
src/pybind/mgr/orchestrator.py
src/pybind/mgr/orchestrator_cli/module.py
src/pybind/mgr/rook/module.py

index 265e312dde6a71576bb1936f9aeb66c1974ba1ea..57a0b5ac2d53e67bd40da3f28a79c01d8b0048b5 100644 (file)
@@ -137,7 +137,6 @@ Inventory and status
 
 .. automethod:: Orchestrator.describe_service
 .. autoclass:: ServiceDescription
-.. autoclass:: ServiceLocation
 
 OSD management
 --------------
index 4cb7f12351f0ff6f8d6967b0276a09c9d1e4c0ee..4a0190fad7db72b0165b0b0be5dff6ffe2175c4d 100644 (file)
@@ -163,6 +163,8 @@ class Orchestrator(object):
 
         When viewing a CephFS filesystem in the dashboard, we would use this
         to display the pods being currently run for MDS daemons.
+
+        Returns a list of ServiceDescription objects.
         """
         raise NotImplementedError()
 
@@ -302,9 +304,17 @@ class PlacementSpec(object):
         self.label = None
 
 
-class ServiceLocation(object):
+class ServiceDescription(object):
     """
-    See ServiceDescription
+    For responding to queries about the status of a particular service,
+    stateful or stateless.
+
+    This is not about health or performance monitoring of services: it's
+    about letting the orchestrator tell Ceph whether and where a
+    service is scheduled in the cluster.  When an orchestrator tells
+    Ceph "it's running on node123", that's not a promise that the process
+    is literally up this second, it's a description of where the orchestrator
+    has decided the service should run.
     """
     def __init__(self):
         # Node is at the same granularity as InventoryNode
@@ -324,23 +334,6 @@ class ServiceLocation(object):
         self.service_type = None
 
 
-class ServiceDescription(object):
-    """
-    For responding to queries about the status of a particular service,
-    stateful or stateless.
-
-    This is not about health or performance monitoring of services: it's
-    about letting the orchestrator tell Ceph whether and where a 
-    service is scheduled in the cluster.  When an orchestrator tells
-    Ceph "it's running on node123", that's not a promise that the process
-    is literally up this second, it's a description of where the orchestrator
-    has decided the service should run.
-    """
-
-    def __init__(self):
-        self.locations = []
-
-
 class DriveGroupSpec(object):
     """
     Describe a drive group in the same form that ceph-volume
index d442cb72c13c20e3e775e59f95532377edd65204..d4bfcb3d499ef5e018b1b6629c8691423136aee8 100644 (file)
@@ -159,14 +159,13 @@ class OrchestratorCli(MgrModule):
 
         self._wait([completion])
 
-        service_description = completion.result
-        #assert isinstance(service_description, orchestrator.ServiceDescription)
+        service_list = completion.result
 
-        if len(service_description.locations) == 0:
+        if len(service_list) == 0:
             return 0, "", "No locations reported"
         else:
             lines = []
-            for l in service_description.locations:
+            for l in service_list:
                 lines.append("{0}.{1} {2} {3}".format(
                     svc_type,
                     l.daemon_name,
index afdaa505012345895864ba1c9becc89a04453866..90390a50cc6fbf771d1b9f337d4739100f3fd512 100644 (file)
@@ -338,26 +338,26 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
 
         pods = self.rook_cluster.describe_pods(service_type, service_id, nodename)
 
-        result = orchestrator.ServiceDescription()
+        result = []
         for p in pods:
-            sl = orchestrator.ServiceLocation()
-            sl.nodename = p['nodename']
-            sl.container_id = p['name']
-            sl.service_type = p['labels']['app'].replace('rook-ceph-', '')
-
-            if sl.service_type == "osd":
-                sl.daemon_name = "%s" % p['labels']["ceph-osd-id"]
-            elif sl.service_type == "mds":
-                sl.daemon_name = p['labels']["rook_file_system"]
-            elif sl.service_type == "mon":
-                sl.daemon_name = p['labels']["mon"]
-            elif sl.service_type == "mgr":
-                sl.daemon_name = p['labels']["mgr"]
+            sd = orchestrator.ServiceDescription()
+            sd.nodename = p['nodename']
+            sd.container_id = p['name']
+            sd.service_type = p['labels']['app'].replace('rook-ceph-', '')
+
+            if sd.service_type == "osd":
+                sd.daemon_name = "%s" % p['labels']["ceph-osd-id"]
+            elif sd.service_type == "mds":
+                sd.daemon_name = p['labels']["rook_file_system"]
+            elif sd.service_type == "mon":
+                sd.daemon_name = p['labels']["mon"]
+            elif sd.service_type == "mgr":
+                sd.daemon_name = p['labels']["mgr"]
             else:
                 # Unknown type -- skip it
                 continue
 
-            result.locations.append(sl)
+            result.append(sd)
 
         return result