From 8990280b22b13fc888eb3a93566136b1aa515009 Mon Sep 17 00:00:00 2001
From: Joseph Sawaya
Date: Thu, 23 Sep 2021 11:07:23 -0400
Subject: [PATCH] mgr/rook, qa/tasks/rook: change rgw daemon service name

This commit changes the rgw daemon service name format from
rgw.<realm_name>.<zone_name> to rgw.<realm_name> and changes the daemon
removal in the QA accordingly.

This also gets rid of the Rook API when describing services.

Signed-off-by: Joseph Sawaya
---
 qa/tasks/rook.py                    | 11 ++---------
 src/pybind/mgr/rook/module.py       | 22 ++++++++--------------
 src/pybind/mgr/rook/rook_cluster.py |  4 ++++
 3 files changed, 14 insertions(+), 23 deletions(-)

diff --git a/qa/tasks/rook.py b/qa/tasks/rook.py
index f197b0f472da7..2498828db009f 100644
--- a/qa/tasks/rook.py
+++ b/qa/tasks/rook.py
@@ -651,15 +651,8 @@ def task(ctx, config):
         if ret.exitstatus == 0:
             r = json.loads(ret.stdout.getvalue().decode('utf-8'))
             for service in r:
-                removal_name = None
-                if service['service_type'] == 'rgw':
-                    removal_name = 'rgw.' + service['spec']['rgw_realm']
-                elif service['service_type'] == 'mds':
-                    removal_name = service['service_name']
-                elif service['service_type'] == 'nfs':
-                    removal_name = service['service_name']
-                if removal_name != None:
-                    _shell(ctx, config, ['ceph', 'orch', 'rm', removal_name])
+                if service['service_type'] in ['rgw', 'mds', 'nfs']:
+                    _shell(ctx, config, ['ceph', 'orch', 'rm', service['service_name']])
                     to_remove.append(service['service_name'])
         with safe_while(sleep=10, tries=90, action="waiting for service removal") as proceed:
             while proceed():
diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py
index 26042e8317ab6..5a7f177d3a7bd 100644
--- a/src/pybind/mgr/rook/module.py
+++ b/src/pybind/mgr/rook/module.py
@@ -274,10 +274,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):

         if service_type == 'mds' or service_type is None:
             # CephFilesystems
-            all_fs = self.rook_cluster.rook_api_get(
-                "cephfilesystems/")
-            self.log.debug('CephFilesystems %s' % all_fs)
-            for fs in all_fs.get('items', []):
+            all_fs = self.rook_cluster.get_resource("cephfilesystems")
+            for fs in all_fs:
                 svc = 'mds.' + fs['metadata']['name']
                 if svc in spec:
                     continue
@@ -299,13 +297,11 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):

         if service_type == 'rgw' or service_type is None:
             # CephObjectstores
-            all_zones = self.rook_cluster.rook_api_get(
-                "cephobjectstores/")
-            self.log.debug('CephObjectstores %s' % all_zones)
-            for zone in all_zones.get('items', []):
+            all_zones = self.rook_cluster.get_resource("cephobjectstores")
+            for zone in all_zones:
                 rgw_realm = zone['metadata']['name']
                 rgw_zone = rgw_realm
-                svc = 'rgw.' + rgw_realm + '.' + rgw_zone
+                svc = 'rgw.' + rgw_realm
                 if svc in spec:
                     continue
                 active = zone['spec']['gateway']['instances'];
@@ -317,7 +313,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
                 port = zone['spec']['gateway']['port'] or 80
                 spec[svc] = orchestrator.ServiceDescription(
                     spec=RGWSpec(
-                        service_id=rgw_realm + '.' + rgw_zone,
+                        service_id=zone['metadata']['name'],
                         rgw_realm=rgw_realm,
                         rgw_zone=rgw_zone,
                         ssl=ssl,
@@ -331,10 +327,8 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):

         if service_type == 'nfs' or service_type is None:
             # CephNFSes
-            all_nfs = self.rook_cluster.rook_api_get(
-                "cephnfses/")
-            self.log.warning('CephNFS %s' % all_nfs)
-            for nfs in all_nfs.get('items', []):
+            all_nfs = self.rook_cluster.get_resource("cephnfses")
+            for nfs in all_nfs:
                 nfs_name = nfs['metadata']['name']
                 svc = 'nfs.' + nfs_name
                 if svc in spec:
diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py
index ac66d7dc91070..d6aa275181e5f 100644
--- a/src/pybind/mgr/rook/rook_cluster.py
+++ b/src/pybind/mgr/rook/rook_cluster.py
@@ -1011,6 +1011,10 @@ class RookCluster(object):
         objpath = "{0}/{1}".format(rooktype, service_id)
         return f'Removed {objpath}'

+    def get_resource(self, resource_type: str) -> Iterable:
+        custom_objects: KubernetesCustomResource = KubernetesCustomResource(self.customObjects_api.list_namespaced_custom_object, group="ceph.rook.io", version="v1", namespace="rook-ceph", plural=resource_type)
+        return custom_objects.items
+
     def can_create_osd(self) -> bool:
         current_cluster = self.rook_api_get(
             "cephclusters/{0}".format(self.rook_env.cluster_name))
--
2.39.5