         num_nodes = len(self.rook_cluster.get_node_names())
         spec = {}
-        spec['mon'] = orchestrator.ServiceDescription(
-            spec=ServiceSpec(
-                'mon',
-                placement=PlacementSpec(
-                    count=cl['spec'].get('mon', {}).get('count', 1),
-                ),
-            ),
-            size=cl['spec'].get('mon', {}).get('count', 1),
-            container_image_name=image_name,
-            last_refresh=now,
-        )
-        spec['mgr'] = orchestrator.ServiceDescription(
-            spec=ServiceSpec(
-                'mgr',
-                placement=PlacementSpec.from_string('count:1'),
-            ),
-            size=1,
-            container_image_name=image_name,
-            last_refresh=now,
-        )
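+        # Only build a description for a service when it matches the
+        # requested service_type; a service_type of None preserves the
+        # previous "describe everything" behaviour.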
+        if service_type == 'mon' or service_type is None:
+            spec['mon'] = orchestrator.ServiceDescription(
+                spec=ServiceSpec(
+                    'mon',
+                    placement=PlacementSpec(
+                        count=cl['spec'].get('mon', {}).get('count', 1),
+                    ),
+                ),
+                size=cl['spec'].get('mon', {}).get('count', 1),
+                container_image_name=image_name,
+                last_refresh=now,
+            )
+        if service_type == 'mgr' or service_type is None:
+            spec['mgr'] = orchestrator.ServiceDescription(
+                spec=ServiceSpec(
+                    'mgr',
+                    placement=PlacementSpec.from_string('count:1'),
+                ),
+                size=1,
+                container_image_name=image_name,
+                last_refresh=now,
+            )
         if not cl['spec'].get('crashCollector', {}).get('disable', False):
             spec['crash'] = orchestrator.ServiceDescription(
                 spec=ServiceSpec(
                     'crash',
                     placement=PlacementSpec.from_string('*'),
                 ),
                 size=num_nodes,
                 container_image_name=image_name,
                 last_refresh=now,
             )
-        # CephFilesystems
-        all_fs = self.rook_cluster.rook_api_get(
-            "cephfilesystems/")
-        self.log.debug('CephFilesystems %s' % all_fs)
-        for fs in all_fs.get('items', []):
-            svc = 'mds.' + fs['metadata']['name']
-            if svc in spec:
-                continue
-            # FIXME: we are conflating active (+ standby) with count
-            active = fs['spec'].get('metadataServer', {}).get('activeCount', 1)
-            total_mds = active
-            if fs['spec'].get('metadataServer', {}).get('activeStandby', False):
-                total_mds = active * 2
-            spec[svc] = orchestrator.ServiceDescription(
-                spec=ServiceSpec(
-                    service_type='mds',
-                    service_id=fs['metadata']['name'],
-                    placement=PlacementSpec(count=active),
-                ),
-                size=total_mds,
-                container_image_name=image_name,
-                last_refresh=now,
-            )
-
-        # CephObjectstores
-        all_zones = self.rook_cluster.rook_api_get(
-            "cephobjectstores/")
-        self.log.debug('CephObjectstores %s' % all_zones)
-        for zone in all_zones.get('items', []):
-            rgw_realm = zone['metadata']['name']
-            rgw_zone = rgw_realm
-            svc = 'rgw.' + rgw_realm + '.' + rgw_zone
-            if svc in spec:
-                continue
-            active = zone['spec']['gateway']['instances'];
-            if 'securePort' in zone['spec']['gateway']:
-                ssl = True
-                port = zone['spec']['gateway']['securePort']
-            else:
-                ssl = False
-                port = zone['spec']['gateway']['port'] or 80
-            spec[svc] = orchestrator.ServiceDescription(
-                spec=RGWSpec(
-                    service_id=rgw_realm + '.' + rgw_zone,
-                    rgw_realm=rgw_realm,
-                    rgw_zone=rgw_zone,
-                    ssl=ssl,
-                    rgw_frontend_port=port,
-                    placement=PlacementSpec(count=active),
-                ),
-                size=active,
-                container_image_name=image_name,
-                last_refresh=now,
-            )
-
-        # CephNFSes
-        all_nfs = self.rook_cluster.rook_api_get(
-            "cephnfses/")
-        self.log.warning('CephNFS %s' % all_nfs)
-        for nfs in all_nfs.get('items', []):
-            nfs_name = nfs['metadata']['name']
-            svc = 'nfs.' + nfs_name
-            if svc in spec:
-                continue
-            active = nfs['spec'].get('server', {}).get('active')
-            spec[svc] = orchestrator.ServiceDescription(
-                spec=NFSServiceSpec(
-                    service_id=nfs_name,
-                    pool=nfs['spec']['rados']['pool'],
-                    namespace=nfs['spec']['rados'].get('namespace', None),
-                    placement=PlacementSpec(count=active),
-                ),
-                size=active,
-                container_image_name=image_name,
-                last_refresh=now,
-            )
+        if service_type == 'mds' or service_type is None:
+            # CephFilesystems
+            all_fs = self.rook_cluster.rook_api_get(
+                "cephfilesystems/")
+            self.log.debug('CephFilesystems %s' % all_fs)
+            for fs in all_fs.get('items', []):
+                svc = 'mds.' + fs['metadata']['name']
+                if svc in spec:
+                    continue
+                # FIXME: we are conflating active (+ standby) with count
+                active = fs['spec'].get('metadataServer', {}).get('activeCount', 1)
+                total_mds = active
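+                # Rook runs one standby daemon per active MDS when
+                # activeStandby is set, hence the doubling below.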
+                if fs['spec'].get('metadataServer', {}).get('activeStandby', False):
+                    total_mds = active * 2
+                spec[svc] = orchestrator.ServiceDescription(
+                    spec=ServiceSpec(
+                        service_type='mds',
+                        service_id=fs['metadata']['name'],
+                        placement=PlacementSpec(count=active),
+                    ),
+                    size=total_mds,
+                    container_image_name=image_name,
+                    last_refresh=now,
+                )
+
+        if service_type == 'rgw' or service_type is None:
+            # CephObjectstores
+            all_zones = self.rook_cluster.rook_api_get(
+                "cephobjectstores/")
+            self.log.debug('CephObjectstores %s' % all_zones)
+            for zone in all_zones.get('items', []):
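+                # The CephObjectStore name doubles as both the RGW realm
+                # and the RGW zone.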
+                rgw_realm = zone['metadata']['name']
+                rgw_zone = rgw_realm
+                svc = 'rgw.' + rgw_realm + '.' + rgw_zone
+                if svc in spec:
+                    continue
+                active = zone['spec']['gateway']['instances']
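+                # Prefer the TLS port when one is configured; otherwise fall
+                # back to the plain HTTP port, defaulting to 80.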
+                if 'securePort' in zone['spec']['gateway']:
+                    ssl = True
+                    port = zone['spec']['gateway']['securePort']
+                else:
+                    ssl = False
+                    port = zone['spec']['gateway']['port'] or 80
+                spec[svc] = orchestrator.ServiceDescription(
+                    spec=RGWSpec(
+                        service_id=rgw_realm + '.' + rgw_zone,
+                        rgw_realm=rgw_realm,
+                        rgw_zone=rgw_zone,
+                        ssl=ssl,
+                        rgw_frontend_port=port,
+                        placement=PlacementSpec(count=active),
+                    ),
+                    size=active,
+                    container_image_name=image_name,
+                    last_refresh=now,
+                )
+
+        if service_type == 'nfs' or service_type is None:
+            # CephNFSes
+            all_nfs = self.rook_cluster.rook_api_get(
+                "cephnfses/")
+            self.log.debug('CephNFS %s' % all_nfs)
+            for nfs in all_nfs.get('items', []):
+                nfs_name = nfs['metadata']['name']
+                svc = 'nfs.' + nfs_name
+                if svc in spec:
+                    continue
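+                # spec.server.active is the number of active ganesha servers
+                # Rook runs for this CephNFS.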
+                active = nfs['spec'].get('server', {}).get('active')
+                spec[svc] = orchestrator.ServiceDescription(
+                    spec=NFSServiceSpec(
+                        service_id=nfs_name,
+                        pool=nfs['spec']['rados']['pool'],
+                        namespace=nfs['spec']['rados'].get('namespace', None),
+                        placement=PlacementSpec(count=active),
+                    ),
+                    size=active,
+                    container_image_name=image_name,
+                    last_refresh=now,
+                )
         for dd in self._list_daemons():
             if dd.service_name() not in spec: