if ret.exitstatus == 0:
r = json.loads(ret.stdout.getvalue().decode('utf-8'))
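# 'r' is the parsed service listing (presumably the JSON output of a
# 'ceph orch ls' run captured in 'ret'), a list of service descriptions.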
for service in r:
- removal_name = None
- if service['service_type'] == 'rgw':
- removal_name = 'rgw.' + service['spec']['rgw_realm']
- elif service['service_type'] == 'mds':
- removal_name = service['service_name']
- elif service['service_type'] == 'nfs':
- removal_name = service['service_name']
- if removal_name != None:
- _shell(ctx, config, ['ceph', 'orch', 'rm', removal_name])
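+ # rgw, mds and nfs entries all carry a usable 'service_name' now, so a
+ # single branch replaces the three per-type cases removed above.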
+ if service['service_type'] in ['rgw', 'mds', 'nfs']:
+ _shell(ctx, config, ['ceph', 'orch', 'rm', service['service_name']])
to_remove.append(service['service_name'])
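# Removal is asynchronous; the loop below keeps polling until the
# services queued in 'to_remove' are actually gone.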
with safe_while(sleep=10, tries=90, action="waiting for service removal") as proceed:
while proceed():
if service_type == 'mds' or service_type is None:
# CephFilesystems
- all_fs = self.rook_cluster.rook_api_get(
- "cephfilesystems/")
- self.log.debug('CephFilesystems %s' % all_fs)
- for fs in all_fs.get('items', []):
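+ # get_resource() (added to rook_cluster.py below) lists the CRD and
+ # returns its items directly, replacing the raw rook_api_get() calls.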
+ all_fs = self.rook_cluster.get_resource("cephfilesystems")
+ for fs in all_fs:
svc = 'mds.' + fs['metadata']['name']
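# Skip filesystems whose 'mds.<name>' service is already in 'spec'.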
if svc in spec:
continue
if service_type == 'rgw' or service_type is None:
# CephObjectstores
- all_zones = self.rook_cluster.rook_api_get(
- "cephobjectstores/")
- self.log.debug('CephObjectstores %s' % all_zones)
- for zone in all_zones.get('items', []):
+ all_zones = self.rook_cluster.get_resource("cephobjectstores")
+ for zone in all_zones:
rgw_realm = zone['metadata']['name']
rgw_zone = rgw_realm
- svc = 'rgw.' + rgw_realm + '.' + rgw_zone
+ svc = 'rgw.' + rgw_realm
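+ # The service id now matches the CR name; the redundant '.<zone>'
+ # suffix (the zone always equals the realm here) is dropped.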
if svc in spec:
continue
active = zone['spec']['gateway']['instances']
port = zone['spec']['gateway']['port'] or 80
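# Gateway settings come straight from the CephObjectStore spec; the
# port falls back to 80 when the CR does not set one.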
spec[svc] = orchestrator.ServiceDescription(
spec=RGWSpec(
- service_id=rgw_realm + '.' + rgw_zone,
+ service_id=zone['metadata']['name'],
rgw_realm=rgw_realm,
rgw_zone=rgw_zone,
ssl=ssl,
if service_type == 'nfs' or service_type is None:
# CephNFSes
- all_nfs = self.rook_cluster.rook_api_get(
- "cephnfses/")
- self.log.warning('CephNFS %s' % all_nfs)
- for nfs in all_nfs.get('items', []):
+ all_nfs = self.rook_cluster.get_resource("cephnfses")
+ for nfs in all_nfs:
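+ # CephNFS services are keyed as 'nfs.<CR name>'.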
nfs_name = nfs['metadata']['name']
svc = 'nfs.' + nfs_name
if svc in spec:
objpath = "{0}/{1}".format(rooktype, service_id)
return f'Removed {objpath}'
+ def get_resource(self, resource_type: str) -> Iterable:
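+ """Return the items of a ceph.rook.io/v1 custom resource list.
+
+ 'resource_type' is the CRD plural, e.g. "cephfilesystems" or
+ "cephobjectstores" as used by describe_service above.
+ """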
+ custom_objects: KubernetesCustomResource = KubernetesCustomResource(
+     self.customObjects_api.list_namespaced_custom_object,
+     group="ceph.rook.io",
+     version="v1",
+     namespace="rook-ceph",
+     plural=resource_type)
+ return custom_objects.items
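+ # Hypothetical usage sketch, mirroring the describe_service callers:
+ #   for nfs in self.get_resource("cephnfses"):
+ #       svc = 'nfs.' + nfs['metadata']['name']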
+
def can_create_osd(self) -> bool:
current_cluster = self.rook_api_get(
"cephclusters/{0}".format(self.rook_env.cluster_name))