self.set_store(PrometheusService.PASS_CFG_KEY, password)
return 'prometheus credentials updated correctly'
- @handle_orch_error
- def set_prometheus_cert(self, cert: str) -> str:
- self.set_store(PrometheusService.PROMETHEUS_CERT_CFG_KEY, cert)
- return 'prometheus cert stored correctly'
-
- @handle_orch_error
- def get_prometheus_cert(self) -> str:
- prometheus_cert = self.get_store(PrometheusService.PROMETHEUS_CERT_CFG_KEY)
- if prometheus_cert is None:
- prometheus_cert = ''
- return prometheus_cert
-
@handle_orch_error
def set_custom_prometheus_alerts(self, alerts_file: str) -> str:
self.set_store('services/prometheus/alerting/custom_alerts.yml', alerts_file)
@handle_orch_error
def set_prometheus_target(self, url: str) -> str:
try:
- if url.startswith("http://") or url.startswith("https://"):
- return f"Invalid URL '{url}'. It should be in the format host_ip:port"
-
- parsed_url_with_scheme = urlparse(f'http://{url}')
- host = parsed_url_with_scheme.hostname
- port = parsed_url_with_scheme.port
-
- if not host or port is None:
- raise ValueError("Hostname or port is missing.")
-
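+ # Parse the target URL and require a literal IP host; a bad port or
+ # non-IP host raises ValueError, which is reported to the caller below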
+ parsed_url = urlparse(url)
+ host = parsed_url.hostname
+ port = parsed_url.port
+ if not host:
+ return 'Invalid URL. Hostname is missing.'
ipaddress.ip_address(host)
-
- except (ValueError, OSError) as e:
- return f"Invalid URL. {e}"
+ url = f"{host}:{port}" if port else host
+ except ValueError as e:
+ return f'Invalid URL. {e}'
prometheus_spec = cast(PrometheusSpec, self.spec_store['prometheus'].spec)
+ if not prometheus_spec:
+ return "Service prometheus not found\n"
+ # Add the target URL if it does not already exist
if url not in prometheus_spec.targets:
prometheus_spec.targets.append(url)
else:
return f"Target '{url}' already exists.\n"
- if not prometheus_spec:
- return "Service prometheus not found\n"
+ # Redeploy daemons after applying the configuration
daemons: List[orchestrator.DaemonDescription] = self.cache.get_daemons_by_type('prometheus')
spec = ServiceSpec.from_json(prometheus_spec.to_json())
self.apply([spec], no_overwrite=False)
'password': password,
'certificate': self.cert_mgr.get_root_ca()}
+ @handle_orch_error
+ def get_security_config(self) -> Dict[str, bool]:
+ security_enabled, mgmt_gw_enabled, _ = self._get_security_config()
+ return {'security_enabled': security_enabled,
+ 'mgmt_gw_enabled': mgmt_gw_enabled}
+
@handle_orch_error
def get_alertmanager_access_info(self) -> Dict[str, str]:
security_enabled, _, _ = self._get_security_config()
import errno
import logging
-import json
-import logging
import os
import socket
from typing import List, Any, Tuple, Dict, Optional, cast
DEFAULT_MGR_PROMETHEUS_PORT = 9283
USER_CFG_KEY = 'prometheus/web_user'
PASS_CFG_KEY = 'prometheus/web_password'
- PROMETHEUS_CERT_CFG_KEY = 'prometheus/cert'
def config(self, spec: ServiceSpec) -> None:
# make sure module is enabled
alertmanager_user, alertmanager_password = self.mgr._get_alertmanager_credentials()
prometheus_user, prometheus_password = self.mgr._get_prometheus_credentials()
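+ # Choose the federate scrape path that matches how the targets are exposed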
+ federate_path = self.get_target_cluster_federate_path(targets)
+ cluster_credentials: Dict[str, Any] = {}
+ cluster_credentials_files: Dict[str, Any] = {'files': {}}
FSID = self.mgr._cluster_fsid
-
- clusters_credentials = {}
- multi_cluster_config_str = str(self.mgr.get_module_option_ex('dashboard', 'MULTICLUSTER_CONFIG'))
- try:
- multi_cluster_config = json.loads(multi_cluster_config_str)
- except json.JSONDecodeError as e:
- multi_cluster_config = None
- logger.error(f'Invalid JSON format for multi-cluster config: {e}')
-
- if multi_cluster_config:
- for url in targets:
- credentials = self.find_prometheus_credentials(multi_cluster_config, url)
- if credentials:
- clusters_credentials[url] = credentials
- clusters_credentials[url]['cert_file_name'] = ''
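+ # Fetch the remote clusters' prometheus credentials and cert files from the dashboard module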
+ if targets:
+ if 'dashboard' in self.mgr.get('mgr_map')['modules']:
+ cluster_credentials_files, cluster_credentials = self.mgr.remote(
+ 'dashboard', 'get_cluster_credentials_files', targets
+ )
+ else:
+ logger.error("dashboard module not found")
# generate the prometheus configuration
context = {
'cluster_fsid': FSID,
'nfs_sd_url': nfs_sd_url,
'smb_sd_url': smb_sd_url,
- 'clusters_credentials': clusters_credentials
+ 'clusters_credentials': cluster_credentials,
+ 'federate_path': federate_path
}
ip_to_bind_to = ''
}
if security_enabled:
- r2: Dict[str, Any] = {'files': {}}
- unique_id_counter = 1
- for url, credentials in clusters_credentials.items():
- unique_id = unique_id_counter
- unique_id_counter += 1
- r2['files'][f'prometheus_{unique_id}_cert.crt'] = credentials['certificate']
- credentials['cert_file_name'] = f'prometheus_{unique_id}_cert.crt'
- context['clusters_credentials'] = clusters_credentials
# Following key/cert are needed for:
# 1- run the prometheus server (web.yml config)
# 2- use mTLS to scrape node-exporter (prometheus acts as client)
'web_config': '/etc/prometheus/web.yml',
'use_url_prefix': mgmt_gw_enabled
}
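+ # Ship the remote clusters' cert files alongside the local prometheus config files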
+ r['files'].update(cluster_credentials_files['files'])
else:
r = {
'files': {
return HandleCommandResult(-errno.EBUSY, '', warn_message)
return HandleCommandResult(0, warn_message, '')
- def find_prometheus_credentials(self, multicluster_config: Dict[str, Any], url: str) -> Optional[Dict[str, Any]]:
- for _, clusters in multicluster_config['config'].items():
- for cluster in clusters:
- prometheus_url = cluster.get('prometheus_url')
- if prometheus_url:
- valid_url = prometheus_url.replace("https://", "").replace("http://", "") # since target URLs are without scheme
- if valid_url == url: # check if the target URL matches with the prometheus URL (without scheme) in the config
- return cluster.get('prometheus_access_info')
- return None
+ def get_target_cluster_federate_path(self, targets: List[str]) -> str:
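+ # A target with an explicit host:port is scraped directly at '/federate';
+ # a bare host likely sits behind the mgmt-gateway, which serves '/prometheus/federate'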
+ for target in targets:
+ if ':' in target:
+ return '/federate'
+ return '/prometheus/federate'
class NodeExporterService(CephadmService):
- job_name: 'federate_{{ loop.index }}'
scrape_interval: 15s
honor_labels: true
- metrics_path: '/federate'
+ metrics_path: {{ federate_path }}
relabel_configs:
- source_labels: [__address__]
target_label: cluster
replacement: {{ cluster_fsid }}
-{% if secure_monitoring_stack %}
+{% if security_enabled %}
scheme: https
tls_config:
ca_file: {{ details['cert_file_name'] }}
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_valid_url(self, cephadm_module):
- # Test with valid URLs
+ # Test with valid IPv4 and IPv6 URLs
test_cases = [
- ("192.168.100.100:9090", "prometheus multi-cluster targets updated"),
- ("127.0.0.1:8080", "prometheus multi-cluster targets updated"),
+ ("http://192.168.100.100:9090", "prometheus multi-cluster targets updated"), # Valid IPv4
+ ("https://192.168.100.100/prometheus", "prometheus multi-cluster targets updated"), # Valid IPv4 without port
+ ("http://[2001:0db8:85a3::8a2e:0370:7334]:9090", "prometheus multi-cluster targets updated"), # Valid IPv6 with port
+ ("https://[2001:0db8:85a3::8a2e:0370:7334]/prometheus", "prometheus multi-cluster targets updated"), # Valid IPv6 without port
]
with with_host(cephadm_module, 'test'):
with with_service(cephadm_module, ServiceSpec(service_type='prometheus'), CephadmOrchestrator.apply_prometheus, 'test'):
for url, expected_output in test_cases:
c = cephadm_module.set_prometheus_target(url)
- assert wait(cephadm_module,
- c) == expected_output
+ assert wait(cephadm_module, c) == expected_output
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_invalid_url(self, cephadm_module):
- # Test with invalid URLs
+ # Test with invalid IPv4 and IPv6 URLs
test_cases = [
- ("http://example.com:9090", "Invalid URL 'http://example.com:9090'. It should be in the format host_ip:port"),
- ("127.0.0.1:67700", "Invalid URL. Port out of range 0-65535")
+ ("https://192.168.100.100:99999", "Invalid url. Port out of range 0-65535"), # Port out of range
+ ("http://[2001:0db8:85a3::8a2e:0370:7334]:99999", "Invalid url. Port out of range 0-65535"), # IPv6 with invalid port
+ ("https://192.168.100.999:9090", "Invalid url. '192.168.100.999' does not appear to be an IPv4 or IPv6 address"), # Invalid IPv4
+ ("http://[fe80:2030:31:24]:9090", "Invalid url. 'fe80:2030:31:24' does not appear to be an IPv4 or IPv6 address") # Invalid IPv6
]
with with_host(cephadm_module, 'test'):
with with_service(cephadm_module, ServiceSpec(service_type='prometheus'), CephadmOrchestrator.apply_prometheus, 'test'):
for url, expected_output in test_cases:
c = cephadm_module.set_prometheus_target(url)
- assert wait(cephadm_module,
- c) == expected_output
+ assert wait(cephadm_module, c) == expected_output
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_host(self, cephadm_module):
http_sd_configs:
- url: http://[::1]:8765/sd/prometheus/sd-config?service=smb
- - job_name: 'federate'
- scrape_interval: 15s
- honor_labels: true
- metrics_path: '/federate'
- params:
- 'match[]':
- - '{job="ceph"}'
- - '{job="node"}'
- - '{job="haproxy"}'
- - '{job="ceph-exporter"}'
- static_configs:
- - targets: []
""").lstrip()
_run_cephadm.assert_called_with(
import base64
import ipaddress
import json
-import tempfile
import logging
+import tempfile
import time
-from typing import Any, Dict
+from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urlparse
import requests
@APIRouter('/multi-cluster', Scope.CONFIG_OPT)
@APIDoc('Multi-cluster Management API', 'Multi-cluster')
+# pylint: disable=too-many-public-methods
class MultiCluster(RESTController):
def _proxy(self, method, base_url, path, params=None, payload=None, verify=False,
token=None, cert=None):
prometheus_url = self._proxy('GET', url, 'api/multi-cluster/get_prometheus_api_url',
token=cluster_token, verify=ssl_verify,
cert=ssl_certificate)
-
+ logger.info('prometheus_url: %s', prometheus_url)
prometheus_access_info = self._proxy('GET', url,
'ui-api/multi-cluster/get_prometheus_access_info', # noqa E501 #pylint: disable=line-too-long
token=cluster_token, verify=ssl_verify,
return cors_endpoints_string
def check_cluster_connection(self, url, payload, username, ssl_verify, ssl_certificate,
- action):
+ action, cluster_token=None):
try:
hub_cluster_version = mgr.version.split('ceph version ')[1]
multi_cluster_content = self._proxy('GET', url, 'api/multi-cluster/get_config',
cluster_token = content['token']
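+ # Validate the connection with whichever token we have (freshly issued or caller-supplied)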
+ if cluster_token:
+ self.check_connection_errors(url, cluster_token, ssl_verify, ssl_certificate, action)
+ return cluster_token
+
+ def check_connection_errors(self, url, cluster_token, ssl_verify, ssl_certificate, action):
managed_by_clusters_content = self._proxy('GET', url, 'api/settings/MANAGED_BY_CLUSTERS',
token=cluster_token, verify=ssl_verify,
cert=ssl_certificate)
raise DashboardException(msg='Cluster is already managed by another cluster',
code='cluster_managed_by_another_cluster',
component='multi-cluster')
- return cluster_token
+
+ self.check_security_config(url, cluster_token, ssl_verify, ssl_certificate)
+
+ def check_security_config(self, url, cluster_token, ssl_verify, ssl_certificate):
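+ # Compare the remote cluster's security configuration with the local one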
+ remote_security_cfg = self._proxy('GET', url,
+ 'api/multi-cluster/security_config',
+ token=cluster_token, verify=ssl_verify,
+ cert=ssl_certificate)
+ local_security_cfg = self._get_security_config()
+
+ if remote_security_cfg and local_security_cfg:
+ remote_security_enabled = remote_security_cfg['security_enabled']
+ local_security_enabled = local_security_cfg['security_enabled']
+
+ def raise_mismatch_exception(config_name, local_enabled):
+ enabled_on = "local" if local_enabled else "remote"
+ disabled_on = "remote" if local_enabled else "local"
+ raise DashboardException(
+ msg=f'{config_name} is enabled on the {enabled_on} cluster, but not on the {disabled_on} cluster. ' # noqa E501 #pylint: disable=line-too-long
+ f'Both clusters should either have {config_name} enabled or disabled.',
+ code=f'{config_name.lower()}_mismatch', component='multi-cluster'
+ )
+
+ if remote_security_enabled != local_security_enabled:
+ raise_mismatch_exception('Security', local_security_enabled)
def set_multi_cluster_config(self, fsid, username, url, cluster_alias, token,
prometheus_url=None, ssl_verify=False, ssl_certificate=None,
@UpdatePermission
# pylint: disable=W0613
def reconnect_cluster(self, url: str, username=None, password=None,
- ssl_verify=False, ssl_certificate=None, ttl=None):
+ ssl_verify=False, ssl_certificate=None, ttl=None,
+ cluster_token=None):
multicluster_config = self.load_multi_cluster_config()
- if username and password:
+ if username and password and cluster_token is None:
payload = {
'username': username,
'password': password,
cluster_token = self.check_cluster_connection(url, payload, username,
ssl_verify, ssl_certificate,
'reconnect')
+ else:
+ self.check_connection_errors(url, cluster_token, ssl_verify, ssl_certificate,
+ 'reconnect')
+ if cluster_token:
prometheus_url = self._proxy('GET', url, 'api/multi-cluster/get_prometheus_api_url',
token=cluster_token, verify=ssl_verify,
cert=ssl_certificate)
cluster['prometheus_access_info'] = prometheus_access_info
_remove_prometheus_targets(cluster['prometheus_url'])
time.sleep(5)
+ cluster['prometheus_url'] = prometheus_url
_set_prometheus_targets(prometheus_url)
Settings.MULTICLUSTER_CONFIG = json.dumps(multicluster_config)
return True
clusters_token_map = json.loads(clustersTokenMap)
return self.check_token_status_array(clusters_token_map)
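+ # Exposed so a peer cluster can compare security settings during connect/reconnect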
+ @Endpoint()
+ @ReadPermission
+ def security_config(self):
+ return self._get_security_config()
+
+ def _get_security_config(self):
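+ # Query the cephadm orchestrator for the security/mgmt-gateway state;
+ # returns None when another orchestrator backend is in use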
+ orch_backend = mgr.get_module_option_ex('orchestrator', 'orchestrator')
+ if orch_backend == 'cephadm':
+ cmd = {
+ 'prefix': 'orch get-security-config',
+ }
+ ret_status, out, _ = mgr.mon_command(cmd)
+ if ret_status == 0 and out is not None:
+ security_info = json.loads(out)
+ security_enabled = security_info['security_enabled']
+ mgmt_gw_enabled = security_info['mgmt_gw_enabled']
+ return {
+ 'security_enabled': bool(security_enabled),
+ 'mgmt_gw_enabled': bool(mgmt_gw_enabled)
+ }
+ return None
+
@Endpoint()
@ReadPermission
def get_prometheus_api_url(self):
+ security_content = self._get_security_config()
+ mgmt_gw_enabled = security_content['mgmt_gw_enabled'] if security_content else False
prometheus_url = Settings.PROMETHEUS_API_HOST
+
if prometheus_url is not None:
- # check if is url is already in IP format
+ if '.ceph-dashboard' in prometheus_url:
+ prometheus_url = prometheus_url.replace('.ceph-dashboard', '')
+ parsed_url = urlparse(prometheus_url)
+ scheme = parsed_url.scheme
+ hostname = parsed_url.hostname
try:
- url_parts = urlparse(prometheus_url)
- ipaddress.ip_address(url_parts.hostname)
+ # Check if the hostname is already an IP address
+ ipaddress.ip_address(hostname)
valid_ip_url = True
except ValueError:
valid_ip_url = False
- if not valid_ip_url:
- parsed_url = urlparse(prometheus_url)
- hostname = parsed_url.hostname
- orch = OrchClient.instance()
- inventory_hosts = [host.to_json() for host in orch.hosts.list()]
+
+ orch = OrchClient.instance()
+ inventory_hosts = (
+ [host.to_json() for host in orch.hosts.list()]
+ if not valid_ip_url
+ else []
+ )
+
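+ # Map the prometheus hostname back to a host IP using the orchestrator inventory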
+ def find_node_ip():
for host in inventory_hosts:
- if host['hostname'] == hostname or host['hostname'] in hostname:
- node_ip = host['addr']
- prometheus_url = prometheus_url.replace(hostname, node_ip)
+ if host['hostname'] == hostname or hostname in host['hostname']:
+ return host['addr']
+ return None
+
+ node_ip = find_node_ip() if not valid_ip_url else None
+ prometheus_url = prometheus_url.replace(hostname, node_ip) if node_ip else prometheus_url # noqa E501 #pylint: disable=line-too-long
+ if mgmt_gw_enabled:
+ prometheus_url = f"{scheme}://{node_ip if node_ip else hostname}"
return prometheus_url
+ def find_prometheus_credentials(self, multicluster_config: Dict[str, Any],
+ target: str) -> Optional[Dict[str, Any]]:
+ for _, clusters in multicluster_config['config'].items():
+ for cluster in clusters:
+ prometheus_url = cluster.get('prometheus_url')
+ if prometheus_url:
+ endpoint = (
+ prometheus_url.replace("https://", "").replace("http://", "")
+ ) # since target URLs are without scheme
+
+ if endpoint == target:
+ return cluster.get('prometheus_access_info')
+ return None
+
+ def get_cluster_credentials(self, targets: List[str]) -> Dict[str, Any]:
+ clusters_credentials: Dict[str, Dict[str, Any]] = {}
+ # Loading may parse stored JSON, so guard against a corrupt config here
+ try:
+ multi_cluster_config = self.load_multi_cluster_config()
+ except json.JSONDecodeError as e:
+ logger.error('Invalid JSON format for multi-cluster config: %s', e)
+ return clusters_credentials
+
+ # Return early if no multi_cluster_config is loaded
+ if not multi_cluster_config:
+ return clusters_credentials
+
+ for target in targets:
+ credentials = self.find_prometheus_credentials(multi_cluster_config, target)
+ if credentials:
+ clusters_credentials[target] = credentials
+ clusters_credentials[target]['cert_file_name'] = ''
+ else:
+ logger.error('Credentials not found for target: %s', target)
+
+ return clusters_credentials
+
+ def get_cluster_credentials_files(self, targets: List[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]: # noqa E501 #pylint: disable=line-too-long
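+ # Materialize each cluster's certificate as a numbered cert file that the scrape config references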
+ cluster_credentials_files: Dict[str, Any] = {'files': {}}
+ clusters_credentials = self.get_cluster_credentials(targets=targets)
+ for i, (_, credentials) in enumerate(clusters_credentials.items()):
+ cluster_credentials_files['files'][f'prometheus_{i+1}_cert.crt'] = credentials['certificate'] # noqa E501 #pylint: disable=line-too-long
+ credentials['cert_file_name'] = f'prometheus_{i+1}_cert.crt'
+ return cluster_credentials_files, clusters_credentials
+
@UIRouter('/multi-cluster', Scope.CONFIG_OPT)
class MultiClusterUi(RESTController):
@Endpoint('GET')
@ReadPermission
def get_prometheus_access_info(self):
- user = ''
- password = ''
- prometheus_cert = ''
orch_backend = mgr.get_module_option_ex('orchestrator', 'orchestrator')
if orch_backend == 'cephadm':
cmd = {
ret_status, out, _ = mgr.mon_command(cmd)
if ret_status == 0 and out is not None:
prom_access_info = json.loads(out)
- user = prom_access_info['user']
- password = prom_access_info['password']
-
- cert_cmd = {
- 'prefix': 'orch prometheus get-prometheus-cert',
- }
- ret, out, _ = mgr.mon_command(cert_cmd)
- if ret == 0 and out is not None:
- cert = json.loads(out)
- prometheus_cert = cert
-
+ user = prom_access_info.get('user', '')
+ password = prom_access_info.get('password', '')
+ certificate = prom_access_info.get('certificate', '')
return {
'user': user,
'password': password,
- 'certificate': prometheus_cert
+ 'certificate': certificate
}
return None
if orch_backend == 'cephadm':
cmd = {
'prefix': 'orch prometheus set-target',
- 'url': prometheus_url.replace('http://', '').replace('https://', '')
+ 'url': prometheus_url
}
mgr.mon_command(cmd)
except KeyError:
spacingClass="mb-3"
[showTitle]="false"
size="slim"
- *ngIf="prometheusConnectionError.length > 0"
+ *ngIf="prometheusConnectionErrors.length > 0"
(dismissed)="onDismissed()"
[dismissible]="true"
i18n>
- <div>
- <p>Couldn't fetch metrics from the following clusters. Please reconnect the respective clusters to re-establish the prometheus connection - <br></p>
- <span *ngFor="let cluster of prometheusConnectionError">
- {{ cluster['cluster_alias']}} - {{ cluster['cluster_name'] }}
- <button class="btn btn-primary btn-sm"
- type="button"
- (click)="openReconnectClusterForm(cluster)">Reconnect
- </button>
- </span>
+ <div>
+ <p><strong>Could not retrieve metrics from the following clusters:</strong></p>
+ <div *ngFor="let cluster of prometheusConnectionErrors">
+ <p>
+ <strong>Cluster Name:</strong> {{ cluster['cluster_alias'] }}<br>
+ <strong>Cluster ID:</strong> {{ cluster['cluster_name'] }}<br>
+ <strong>Issue:</strong> {{ cluster.reconnectionError ? cluster.reconnectionError : 'Security configuration error' }}<br>
+ </p>
</div>
+ </div>
</cd-alert-panel>
<cd-alert-panel type="info"
spacingClass="mb-3"
import { MultiClusterComponent } from './multi-cluster.component';
import { SharedModule } from '~/app/shared/shared.module';
import { DimlessBinaryPipe } from '~/app/shared/pipes/dimless-binary.pipe';
+import { ToastrModule } from 'ngx-toastr';
describe('MultiClusterComponent', () => {
let component: MultiClusterComponent;
beforeEach(async () => {
await TestBed.configureTestingModule({
- imports: [HttpClientTestingModule, SharedModule],
+ imports: [HttpClientTestingModule, SharedModule, ToastrModule.forRoot()],
declarations: [MultiClusterComponent],
providers: [NgbActiveModal, DimlessBinaryPipe]
}).compileComponents();
MultiClusterPromqlsForPoolUtilization as PoolUltilizationQueries
} from '~/app/shared/enum/dashboard-promqls.enum';
import { SettingsService } from '~/app/shared/api/settings.service';
+import { NotificationType } from '~/app/shared/enum/notification-type.enum';
+import { NotificationService } from '~/app/shared/services/notification.service';
@Component({
selector: 'cd-multi-cluster',
multiClusterQueries: any = {};
managedByConfig$: Observable<any>;
clusterDetailsArray: any[];
- prometheusConnectionError: any[] = [];
+ prometheusConnectionErrors: any[] = [];
+ reconnectionError: string;
constructor(
private multiClusterService: MultiClusterService,
private settingsService: SettingsService,
private modalService: ModalService,
private router: Router,
- private prometheusService: PrometheusService
+ private prometheusService: PrometheusService,
+ private notificationService: NotificationService
) {
this.multiClusterQueries = {
cluster: {
}
const clusters: ClusterInfo[] = [];
- this.queriesResults.TOTAL_CAPACITY?.forEach((totalCapacityMetric: any, index:number) => {
+ this.queriesResults.TOTAL_CAPACITY?.forEach((totalCapacityMetric: any, index: number) => {
const clusterName = totalCapacityMetric.metric.cluster;
const totalCapacity = parseInt(totalCapacityMetric.value[1]);
const getMgrMetadata = this.findCluster(this.queriesResults?.MGR_METADATA, clusterName);
);
}
- checkFederateMetricsStatus(federateMetrics: any) {
- this.prometheusConnectionError = [];
- federateMetrics.forEach((entry1: { metric: { instance: any }; value: any }) => {
- const instanceIpPort = entry1.metric.instance;
+ checkFederateMetricsStatus(federatedMetrics: any) {
+ if (!federatedMetrics || federatedMetrics.length === 0) {
+ return;
+ }
+
+ this.prometheusConnectionErrors = [];
+
+ federatedMetrics.forEach((metricEntry: { metric: { instance: string }; value: any }) => {
+ const instanceIpPort = metricEntry.metric.instance;
const instanceIp = instanceIpPort.split(':')[0];
const instancePort = instanceIpPort.split(':')[1];
- const prometheus_federation_status = entry1.value[1];
-
- this.clusterDetailsArray.forEach((entry2) => {
- if (entry2['name'] !== this.localClusterName) {
- const prometheusUrl = entry2['prometheus_url']
- .replace('http://', '')
- .replace('https://', '');
+ const federationStatus = metricEntry.value[1];
+
+ this.clusterDetailsArray?.forEach((clusterDetails) => {
+ if (clusterDetails.name !== this.localClusterName) {
+ const prometheusUrl = clusterDetails.prometheus_url.replace(
+ /^(http:\/\/|https:\/\/)/,
+ ''
+ );
const prometheusIp = prometheusUrl.split(':')[0];
- const prometheusPort = prometheusUrl.split(':')[1];
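+ // Fall back to 443 when the prometheus URL omits an explicit port (https default)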
+ const prometheusPort = prometheusUrl.split(':')[1] || '443';
+
+ const existingError = this.prometheusConnectionErrors.find(
+ (errorEntry) => errorEntry.url === clusterDetails.url
+ );
if (
+ !existingError &&
instanceIp === prometheusIp &&
instancePort === prometheusPort &&
- prometheus_federation_status === '0'
+ federationStatus === '0'
) {
- this.prometheusConnectionError.push({
- cluster_name: entry2.name,
- cluster_alias: entry2.cluster_alias,
- url: entry2.url,
- user: entry2.user,
- ssl_verify: entry2.ssl_verify,
- ssl_certificate: entry2.ssl_certificate
+ this.prometheusConnectionErrors.push({
+ cluster_name: clusterDetails.name,
+ cluster_alias: clusterDetails.cluster_alias,
+ url: clusterDetails.url
});
+
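+ // Attempt a silent token-based reconnect; on failure, record the reason on the error entry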
+ this.multiClusterService
+ .reConnectCluster(
+ clusterDetails.url,
+ clusterDetails.user,
+ null,
+ clusterDetails.ssl_verify,
+ clusterDetails.ssl_certificate,
+ clusterDetails.ttl,
+ clusterDetails.token
+ )
+ .subscribe({
+ error: (errorResponse: any) => {
+ const reconnectionError = errorResponse.error.detail;
+ const errorIndex = this.prometheusConnectionErrors.findIndex(
+ (errorEntry) => errorEntry.url === clusterDetails.url
+ );
+ if (errorIndex !== -1) {
+ this.prometheusConnectionErrors[
+ errorIndex
+ ].reconnectionError = reconnectionError;
+ }
+ },
+ next: (response: any) => {
+ if (response === true) {
+ const message = $localize`Cluster re-connected successfully`;
+ this.notificationService.show(NotificationType.success, message);
+
+ this.prometheusConnectionErrors = this.prometheusConnectionErrors.filter(
+ (errorEntry) => errorEntry.url !== clusterDetails.url
+ );
+ }
+ }
+ });
}
}
});
});
}
- openReconnectClusterForm(cluster: any) {
- const initialState = {
- action: 'reconnect',
- cluster: cluster
- };
- this.bsModalRef = this.modalService.show(MultiClusterFormComponent, initialState, {
- size: 'lg'
- });
- this.bsModalRef.componentInstance.submitAction.subscribe(() => {
- this.loading = true;
- setTimeout(() => {
- const currentRoute = this.router.url.split('?')[0];
- this.multiClusterService.refreshMultiCluster(currentRoute);
- this.getPrometheusData(this.prometheusService.lastHourDateObject);
- }, this.PROMETHEUS_DELAY);
- });
- }
-
findClusterData(metrics: any, clusterName: string) {
const clusterMetrics = this.findCluster(metrics, clusterName);
return parseInt(clusterMetrics?.value[1] || 0);
password: string,
ssl = false,
cert = '',
- ttl: number
+ ttl: number,
+ cluster_token?: string
) {
- return this.http.put('api/multi-cluster/reconnect_cluster', {
+ const requestBody: any = {
url,
username,
password,
ssl_verify: ssl,
ssl_certificate: cert,
ttl: ttl
- });
+ };
+
+ if (cluster_token) {
+ requestBody.cluster_token = cluster_token;
+ }
+
+ return this.http.put('api/multi-cluster/reconnect_cluster', requestBody);
}
private getClusterObserver() {
import tempfile
import threading
import time
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from urllib.parse import urlparse
+from .controllers.multi_cluster import MultiCluster
+
if TYPE_CHECKING:
if sys.version_info >= (3, 8):
from typing import Literal
self.set_store(item_key, inbuf)
return 0, f'SSL {item_label} updated', ''
+ def get_cluster_credentials_files(self, targets: List[str]) -> Tuple[Dict[str, Any], Dict[str, Any]]: # noqa E501 #pylint: disable=line-too-long
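+ # Called remotely by cephadm (via mgr.remote) to collect multi-cluster prometheus credentials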
+ multi_cluster_instance = MultiCluster()
+ cluster_credentials_files, clusters_credentials = multi_cluster_instance.get_cluster_credentials_files(targets) # noqa E501 #pylint: disable=line-too-long
+ return cluster_credentials_files, clusters_credentials
+
@CLIWriteCommand("dashboard set-ssl-certificate")
def set_ssl_certificate(self, mgr_id: Optional[str] = None, inbuf: Optional[str] = None):
return self._set_ssl_item('certificate', 'crt', mgr_id, inbuf)
application/json:
schema:
properties:
+ cluster_token:
+ type: string
password:
type: string
ssl_certificate:
- jwt: []
tags:
- Multi-cluster
+ /api/multi-cluster/security_config:
+ get:
+ parameters: []
+ responses:
+ '200':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: OK
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ tags:
+ - Multi-cluster
/api/multi-cluster/set_config:
put:
parameters: []
"""get prometheus access information"""
raise NotImplementedError()
- def set_alertmanager_access_info(self, user: str, password: str) -> OrchResult[str]:
- """set alertmanager access information"""
+ def get_security_config(self) -> OrchResult[Dict[str, bool]]:
+ """get security config"""
raise NotImplementedError()
- def get_prometheus_cert(self) -> OrchResult[str]:
- """get prometheus cert for multi-cluster"""
+ def set_alertmanager_access_info(self, user: str, password: str) -> OrchResult[str]:
+ """set alertmanager access information"""
raise NotImplementedError()
def set_prometheus_access_info(self, user: str, password: str) -> OrchResult[str]:
result = raise_if_exception(completion)
return HandleCommandResult(stdout=json.dumps(result))
- @_cli_write_command('orch prometheus get-prometheus-cert')
- def _get_prometheus_cert(self) -> HandleCommandResult:
- completion = self.get_prometheus_cert()
- result = raise_if_exception(completion)
- return HandleCommandResult(stdout=json.dumps(result))
-
@_cli_write_command('orch prometheus remove-target')
def _remove_prometheus_target(self, url: str) -> HandleCommandResult:
completion = self.remove_prometheus_target(url)
access_info = raise_if_exception(completion)
return HandleCommandResult(stdout=json.dumps(access_info))
+ @_cli_write_command('orch get-security-config')
+ def _get_security_config(self) -> HandleCommandResult:
+ completion = self.get_security_config()
+ result = raise_if_exception(completion)
+ return HandleCommandResult(stdout=json.dumps(result))
+
@_cli_write_command('orch alertmanager get-credentials')
def _get_alertmanager_access_info(self) -> HandleCommandResult:
completion = self.get_alertmanager_access_info()