From a5169055915590ff92bd6a9f5838079f0338aad8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 14 Nov 2018 12:38:22 -0500 Subject: [PATCH] orchestrator: merge the ServiceLocation class into ServiceDescription The ServiceDescription object just holds a list of ServiceLocation objects. Eliminate ServiceLocation and merge it into the ServiceDescription class. Have describe_service return a list of ServiceDescriptions. Suggested-by: Tim Serong Signed-off-by: Jeff Layton --- doc/mgr/orchestrator_modules.rst | 1 - src/pybind/mgr/orchestrator.py | 31 +++++++++-------------- src/pybind/mgr/orchestrator_cli/module.py | 7 +++-- src/pybind/mgr/rook/module.py | 30 +++++++++++----------- 4 files changed, 30 insertions(+), 39 deletions(-) diff --git a/doc/mgr/orchestrator_modules.rst b/doc/mgr/orchestrator_modules.rst index 265e312dde6a7..57a0b5ac2d53e 100644 --- a/doc/mgr/orchestrator_modules.rst +++ b/doc/mgr/orchestrator_modules.rst @@ -137,7 +137,6 @@ Inventory and status .. automethod:: Orchestrator.describe_service .. autoclass:: ServiceDescription -.. autoclass:: ServiceLocation OSD management -------------- diff --git a/src/pybind/mgr/orchestrator.py b/src/pybind/mgr/orchestrator.py index 4cb7f12351f0f..4a0190fad7db7 100644 --- a/src/pybind/mgr/orchestrator.py +++ b/src/pybind/mgr/orchestrator.py @@ -163,6 +163,8 @@ class Orchestrator(object): When viewing a CephFS filesystem in the dashboard, we would use this to display the pods being currently run for MDS daemons. + + Returns a list of ServiceDescription objects. """ raise NotImplementedError() @@ -302,9 +304,17 @@ class PlacementSpec(object): self.label = None -class ServiceLocation(object): +class ServiceDescription(object): """ - See ServiceDescription + For responding to queries about the status of a particular service, + stateful or stateless. + + This is not about health or performance monitoring of services: it's + about letting the orchestrator tell Ceph whether and where a + service is scheduled in the cluster. When an orchestrator tells + Ceph "it's running on node123", that's not a promise that the process + is literally up this second, it's a description of where the orchestrator + has decided the service should run. """ def __init__(self): # Node is at the same granularity as InventoryNode @@ -324,23 +334,6 @@ class ServiceLocation(object): self.service_type = None -class ServiceDescription(object): - """ - For responding to queries about the status of a particular service, - stateful or stateless. - - This is not about health or performance monitoring of services: it's - about letting the orchestrator tell Ceph whether and where a - service is scheduled in the cluster. When an orchestrator tells - Ceph "it's running on node123", that's not a promise that the process - is literally up this second, it's a description of where the orchestrator - has decided the service should run. 
- """ - - def __init__(self): - self.locations = [] - - class DriveGroupSpec(object): """ Describe a drive group in the same form that ceph-volume diff --git a/src/pybind/mgr/orchestrator_cli/module.py b/src/pybind/mgr/orchestrator_cli/module.py index d442cb72c13c2..d4bfcb3d499ef 100644 --- a/src/pybind/mgr/orchestrator_cli/module.py +++ b/src/pybind/mgr/orchestrator_cli/module.py @@ -159,14 +159,13 @@ class OrchestratorCli(MgrModule): self._wait([completion]) - service_description = completion.result - #assert isinstance(service_description, orchestrator.ServiceDescription) + service_list = completion.result - if len(service_description.locations) == 0: + if len(service_list) == 0: return 0, "", "No locations reported" else: lines = [] - for l in service_description.locations: + for l in service_list: lines.append("{0}.{1} {2} {3}".format( svc_type, l.daemon_name, diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py index afdaa50501234..90390a50cc6fb 100644 --- a/src/pybind/mgr/rook/module.py +++ b/src/pybind/mgr/rook/module.py @@ -338,26 +338,26 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator): pods = self.rook_cluster.describe_pods(service_type, service_id, nodename) - result = orchestrator.ServiceDescription() + result = [] for p in pods: - sl = orchestrator.ServiceLocation() - sl.nodename = p['nodename'] - sl.container_id = p['name'] - sl.service_type = p['labels']['app'].replace('rook-ceph-', '') - - if sl.service_type == "osd": - sl.daemon_name = "%s" % p['labels']["ceph-osd-id"] - elif sl.service_type == "mds": - sl.daemon_name = p['labels']["rook_file_system"] - elif sl.service_type == "mon": - sl.daemon_name = p['labels']["mon"] - elif sl.service_type == "mgr": - sl.daemon_name = p['labels']["mgr"] + sd = orchestrator.ServiceDescription() + sd.nodename = p['nodename'] + sd.container_id = p['name'] + sd.service_type = p['labels']['app'].replace('rook-ceph-', '') + + if sd.service_type == "osd": + sd.daemon_name = "%s" % p['labels']["ceph-osd-id"] + elif sd.service_type == "mds": + sd.daemon_name = p['labels']["rook_file_system"] + elif sd.service_type == "mon": + sd.daemon_name = p['labels']["mon"] + elif sd.service_type == "mgr": + sd.daemon_name = p['labels']["mgr"] else: # Unknown type -- skip it continue - result.locations.append(sl) + result.append(sd) return result -- 2.39.5
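As a rough illustration of the API after this change (not part of the patch itself): a caller of describe_service() now gets back a flat list of ServiceDescription objects, each carrying the per-daemon fields that previously lived on ServiceLocation (daemon_name, nodename, container_id, service_type), instead of a single wrapper object whose .locations list had to be unwrapped first. The format_service_list() helper and the SimpleNamespace stand-in below are made up for the example; in-tree code would use the real orchestrator.ServiceDescription and the completion machinery, as the orchestrator_cli hunk above does.

    from types import SimpleNamespace

    def format_service_list(svc_type, service_list):
        """Render one line per service entry, mirroring the CLI module hunk above."""
        if len(service_list) == 0:
            return "No locations reported"

        lines = []
        for sd in service_list:
            # Each list entry now carries the fields that previously lived on
            # ServiceLocation: daemon_name, nodename, container_id.
            lines.append("{0}.{1} {2} {3}".format(
                svc_type, sd.daemon_name, sd.nodename, sd.container_id))
        return "\n".join(lines)

    # Stand-in for orchestrator.ServiceDescription(), which is only importable
    # inside ceph-mgr; the attribute names match the merged class in the patch.
    mds_sd = SimpleNamespace(
        service_type="mds",
        daemon_name="myfs",                     # hypothetical filesystem name
        nodename="node123",                     # node name borrowed from the docstring
        container_id="rook-ceph-mds-myfs-abc",  # hypothetical Rook pod name
    )

    print(format_service_list("mds", [mds_sd]))
    # -> mds.myfs node123 rook-ceph-mds-myfs-abc

The practical upshot of the refactor is visible here: consumers iterate the returned list directly rather than reaching through a ServiceDescription just to get at its .locations, and the Rook backend builds one ServiceDescription per pod instead of one ServiceLocation per pod inside a wrapper.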