- host2
spec:
port: 12345
+ monitoring_port: 567
+ monitoring_ip_addrs:
+ host1: 10.0.0.123
+ host2: 10.0.0.124
+ monitoring_networks:
+ - 192.168.124.0/24
+
In this example, we run the server on the non-default ``port`` of
12345 (instead of the default 2049) on ``host1`` and ``host2``.
+The default monitoring port can be customized using the ``monitoring_port``
+parameter. Additionally, you can specify the ``monitoring_ip_addrs`` or
+``monitoring_networks`` parameters to bind the monitoring port to a specific
+IP address or network. If ``monitoring_ip_addrs`` is provided and the specified
+IP address is assigned to the host, that IP address will be used. If the IP
+address is not present and ``monitoring_networks`` is specified, an IP address
+that matches one of the specified networks will be used. If neither condition
+is met, the monitoring port binds to all available network interfaces by default.
The specification can then be applied by running the following command:
return self.scheduled_daemon_actions.get(host, {}).get(daemon)
+ def get_host_network_ips(self, host: str) -> List[str]:
+ """Return every IP address recorded for ``host`` in the cached network
+ map, flattened across all subnets and all interfaces on each subnet.
+
+ Returns an empty list when the host has no cached network data.
+ """
+ return [
+ ip
+ # self.networks maps host -> {subnet: {iface: [ip, ...]}}
+ for net_details in self.networks.get(host, {}).values()
+ for ips in net_details.values()
+ for ip in ips
+ ]
+
class NodeProxyCache:
def __init__(self, mgr: 'CephadmOrchestrator') -> None:
self.set_health_warning('CEPHADM_FAILED_DAEMON', f'{len(failed_daemons)} failed cephadm daemon(s)', len(
failed_daemons), failed_daemons)
- def get_first_matching_network_ip(self, host: str, sspec: ServiceSpec) -> Optional[str]:
- sspec_networks = sspec.networks
+ def get_first_matching_network_ip(
+ self,
+ host: str,
+ sspec: ServiceSpec,
+ sspec_networks: Optional[List[str]] = None
+ ) -> Optional[str]:
+ if not sspec_networks:
+ sspec_networks = sspec.networks
for subnet, ifaces in self.cache.networks.get(host, {}).items():
host_network = ipaddress.ip_network(subnet)
for spec_network_str in sspec_networks:
self.create_rados_config_obj(spec)
port = daemon_spec.ports[0] if daemon_spec.ports else 2049
+ monitoring_port = spec.monitoring_port if spec.monitoring_port else 9587
# create the RGW keyring
rgw_user = f'{rados_user}-rgw'
else:
logger.debug("using haproxy bind address: %r", bind_addr)
+ # check whether the monitoring endpoint needs to be bound to a specific IP
+ monitoring_addr = spec.monitoring_ip_addrs.get(host) if spec.monitoring_ip_addrs else None
+ if monitoring_addr and monitoring_addr not in self.mgr.cache.get_host_network_ips(host):
+ logger.debug(f"Monitoring IP {monitoring_addr} is not configured on host {daemon_spec.host}.")
+ monitoring_addr = None
+ if not monitoring_addr and spec.monitoring_networks:
+ monitoring_addr = self.mgr.get_first_matching_network_ip(daemon_spec.host, spec, spec.monitoring_networks)
+ if not monitoring_addr:
+ logger.debug(f"No IP address found in the network {spec.monitoring_networks} on host {daemon_spec.host}.")
+ if monitoring_addr:
+ daemon_spec.port_ips.update({str(monitoring_port): monitoring_addr})
+
# generate the ganesha config
def get_ganesha_conf() -> str:
context: Dict[str, Any] = {
"url": f'rados://{POOL_NAME}/{spec.service_id}/{spec.rados_config_name()}',
# fall back to default NFS port if not present in daemon_spec
"port": port,
- "monitoring_port": spec.monitoring_port if spec.monitoring_port else 9587,
+ "monitoring_addr": monitoring_addr,
+ "monitoring_port": monitoring_port,
"bind_addr": bind_addr,
"haproxy_hosts": [],
"nfs_idmap_conf": nfs_idmap_conf,
{% endif %}
{% if haproxy_hosts %}
HAProxy_Hosts = {{ haproxy_hosts|join(", ") }};
+{% endif %}
+{% if monitoring_addr %}
+ Monitoring_Addr = {{ monitoring_addr }};
{% endif %}
Monitoring_Port = {{ monitoring_port }};
}
# check keepalived config
assert keepalived_generated_conf[0] == keepalived_expected_conf
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
+ def test_nfs_config_monitoring_ip(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ """Check that Monitoring_Addr is rendered into ganesha.conf both when
+ the spec gives an exact per-host IP (``monitoring_ip_addrs``) and when
+ it gives a subnet to match against (``monitoring_networks``).
+ """
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ with with_host(cephadm_module, 'test', addr='1.2.3.7'):
+ # Host 'test' is known to own 1.2.3.1 on subnet 1.2.3.0/24.
+ cephadm_module.cache.update_host_networks('test', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.1']
+ }
+ })
+
+ # Case 1: explicit host -> IP mapping selects that IP directly.
+ nfs_spec = NFSServiceSpec(service_id="foo", placement=PlacementSpec(hosts=['test']),
+ monitoring_ip_addrs={'test': '1.2.3.1'})
+ with with_service(cephadm_module, nfs_spec) as _:
+ nfs_generated_conf, _ = service_registry.get_service('nfs').generate_config(
+ CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=nfs_spec.service_name()))
+ ganesha_conf = nfs_generated_conf['files']['ganesha.conf']
+ assert "Monitoring_Addr = 1.2.3.1" in ganesha_conf
+
+ # Case 2: a network is given instead; the host IP inside that
+ # subnet (1.2.3.1) should be chosen.
+ nfs_spec = NFSServiceSpec(service_id="foo", placement=PlacementSpec(hosts=['test']),
+ monitoring_networks=['1.2.3.0/24'])
+ with with_service(cephadm_module, nfs_spec) as _:
+ nfs_generated_conf, _ = service_registry.get_service('nfs').generate_config(
+ CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=nfs_spec.service_name()))
+ ganesha_conf = nfs_generated_conf['files']['ganesha.conf']
+ assert "Monitoring_Addr = 1.2.3.1" in ganesha_conf
+
+
@patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
@patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
@patch("cephadm.services.nfs.NFSService.purge", MagicMock())
config: Optional[Dict[str, str]] = None,
networks: Optional[List[str]] = None,
port: Optional[int] = None,
+ monitoring_networks: Optional[List[str]] = None,
+ monitoring_ip_addrs: Optional[Dict[str, str]] = None,
monitoring_port: Optional[int] = None,
virtual_ip: Optional[str] = None,
enable_nlm: bool = False,
extra_entrypoint_args=extra_entrypoint_args, custom_configs=custom_configs)
self.port = port
+
+ # monitoring_ip_addrs is a dictionary where each key is a hostname and the corresponding
+ # value is the IP address {hostname: ip} that the monitor should bind to on that host.
+ # monitoring_networks is a list of networks within which the monitor may bind.
+ # Either parameter may be supplied to bind the monitor to a specific IP.
+ self.monitoring_ip_addrs = monitoring_ip_addrs
+ self.monitoring_networks = monitoring_networks
self.monitoring_port = monitoring_port
+
self.virtual_ip = virtual_ip
self.enable_haproxy_protocol = enable_haproxy_protocol
self.idmap_conf = idmap_conf