frontend_port: 2049
monitor_port: 9000
virtual_ip: 10.0.0.123/24
+ haproxy_peer_communication_port: <integer> # optional: NFS ingress only; HAProxy peer TCP port (default 1024)
A few notes:
.. prompt:: bash #
ceph config-key get mgr/cephadm/ingress.nfs.myfoo/monitor_password
-
+
+ * The optional ``haproxy_peer_communication_port`` is used when ``backend_service``
+ refers to an NFS service. HAProxy uses this TCP port for peer communication
+ (stick-table synchronization between HAProxy instances on different hosts). The
+ default is *1024*. Cephadm reserves this port alongside other ingress ports when
+ scheduling daemons. Set a different value if *1024* is already in use or blocked.
+ For RGW backends, HAProxy does not use a peers section, so this field is not applicable.
+
* The backend service (``nfs.mynfs`` in this example) should include
a *port* property that is not 2049 to avoid conflicting with the
ingress service, which could be placed on the same host(s).
assert gen_config_lines == exp_config_lines
+ @patch("cephadm.inventory.Inventory.get_addr")
+ @patch("cephadm.utils.resolve_ip")
+ @patch("cephadm.inventory.HostCache.get_daemons_by_service")
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_ingress_haproxy_peer_communication_port(
+ self,
+ _run_cephadm,
+ _get_daemons_by_service,
+ _resolve_ip,
+ _get_addr,
+ cephadm_module: CephadmOrchestrator,
+ ):
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ def fake_resolve_ip(hostname: str) -> str:
+ if hostname == 'host1':
+ return '192.168.122.111'
+ elif hostname == 'host2':
+ return '192.168.122.222'
+ else:
+ return 'xxx.xxx.xxx.xxx'
+
+ _resolve_ip.side_effect = fake_resolve_ip
+
+ def fake_get_addr(hostname: str) -> str:
+ return hostname
+
+ _get_addr.side_effect = fake_get_addr
+
+ nfs_service = NFSServiceSpec(
+ service_id="foo",
+ placement=PlacementSpec(
+ count=1,
+ hosts=['host1', 'host2']),
+ port=12049,
+ )
+
+ ispec = IngressSpec(
+ service_type='ingress',
+ service_id='nfs.foo',
+ backend_service='nfs.foo',
+ frontend_port=2049,
+ monitor_port=9049,
+ virtual_ip='192.168.122.100/24',
+ monitor_user='admin',
+ monitor_password='12345',
+ keepalived_password='12345',
+ enable_stats=True,
+ haproxy_peer_communication_port=5000,
+ placement=PlacementSpec(
+ hosts=['host1'])
+ )
+
+ cephadm_module.spec_store._specs = {
+ 'nfs.foo': nfs_service,
+ 'ingress.nfs.foo': ispec
+ }
+ cephadm_module.spec_store.spec_created = {
+ 'nfs.foo': datetime_now(),
+ 'ingress.nfs.foo': datetime_now()
+ }
+
+ haproxy_txt = (
+ '# This file is generated by cephadm.\n'
+ 'global\n'
+ ' log 127.0.0.1 local2\n'
+ ' chroot /var/lib/haproxy\n'
+ ' pidfile /var/lib/haproxy/haproxy.pid\n'
+ ' maxconn 8000\n'
+ ' daemon\n'
+ ' stats socket /var/lib/haproxy/stats\n\n'
+ 'defaults\n'
+ ' mode tcp\n'
+ ' log global\n'
+ ' timeout queue 1m\n'
+ ' timeout connect 10s\n'
+ ' timeout client 1m\n'
+ ' timeout server 1m\n'
+ ' timeout check 10s\n'
+ ' maxconn 8000\n\n'
+ 'frontend stats\n'
+ ' mode http\n'
+ ' bind 192.168.122.100:9049\n'
+ ' bind host1:9049\n'
+ ' stats enable\n'
+ ' stats uri /stats\n'
+ ' stats refresh 10s\n'
+ ' stats auth admin:12345\n'
+ ' http-request use-service prometheus-exporter if { path /metrics }\n'
+ ' monitor-uri /health\n\n'
+ 'frontend frontend\n'
+ ' bind 192.168.122.100:2049\n'
+ ' option tcplog\n'
+ ' default_backend backend\n\n'
+ 'peers haproxy_peers\n'
+ ' peer host1 host1:5000\n\n'
+ 'backend backend\n'
+ ' mode tcp\n'
+ ' balance roundrobin\n'
+ ' stick-table type ip size 200k expire 30m peers haproxy_peers\n'
+ ' stick on src\n'
+ ' hash-type consistent\n'
+ ' server nfs.foo.0 192.168.122.111:12049 check\n'
+ )
+ haproxy_expected_conf = {
+ 'files': {'haproxy.cfg': haproxy_txt}
+ }
+
+ nfs_daemons = [
+ DaemonDescription(daemon_type='nfs', daemon_id='foo.0.1.host1.qwerty', hostname='host1', rank=0, rank_generation=1, ports=[12049]),
+ DaemonDescription(daemon_type='nfs', daemon_id='foo.0.0.host2.abcdef', hostname='host2', rank=0, rank_generation=0, ports=[12049])
+ ]
+ _get_daemons_by_service.return_value = nfs_daemons
+
+ haproxy_generated_conf = service_registry.get_service('ingress').haproxy_generate_config(
+ CephadmDaemonDeploySpec(host='host1', daemon_id='ingress', service_name=ispec.service_name()))
+
+ haproxy_generated_conf = haproxy_generated_conf[0]
+ gen_config_lines = [line.rstrip() for line in haproxy_generated_conf['files']['haproxy.cfg'].splitlines()]
+ exp_config_line = [line.rstrip() for line in haproxy_expected_conf['files']['haproxy.cfg'].splitlines()]
+
+ assert gen_config_lines == exp_config_line
+
@patch("cephadm.serve.CephadmServe._run_cephadm")
def test_ingress_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
_run_cephadm.side_effect = async_side_effect(('{}', '', 0))
monitor_networks: Optional[List[str]] = None,
monitor_ip_addrs: Optional[Dict[str, str]] = None,
use_tcp_mode_over_rgw: bool = False,
+ haproxy_peer_communication_port: Optional[int] = None,
):
assert service_type == 'ingress'
self.monitor_networks = monitor_networks
self.monitor_ip_addrs = monitor_ip_addrs
self.use_tcp_mode_over_rgw = use_tcp_mode_over_rgw
+ self.haproxy_peer_communication_port = haproxy_peer_communication_port
def get_port_start(self) -> List[int]:
    """Return the ports cephadm must reserve for this ingress service.

    The list always contains the frontend port, plus the monitor port when
    one is configured.  For an NFS backend (or whenever a peer port is
    explicitly configured) the HAProxy peer-communication port is reserved
    as well, defaulting to 1024 — HAProxy instances on different hosts use
    it to synchronize their stick-tables.
    """
    ports: List[int] = [cast(int, self.frontend_port)]
    if self.monitor_port is not None:
        ports.append(cast(int, self.monitor_port))
    is_nfs_backend = bool(
        self.backend_service and self.backend_service.split('.')[0] == 'nfs'
    )
    # NOTE: the original wrapped the whole fallback in cast(int, ...) which
    # misleadingly cast None; the runtime behavior (fall back to 1024) is
    # unchanged here, just expressed directly.
    if self.haproxy_peer_communication_port or is_nfs_backend:
        ports.append(self.haproxy_peer_communication_port or 1024)
    return ports
def get_virtual_ip(self) -> Optional[str]:
raise SpecValidationError(
f'Cannot add ingress: Invalid health_check_interval specified. '
f'Valid units are: {valid_units}')
+ if self.haproxy_peer_communication_port and self.backend_service.split('.')[0] != 'nfs':
+ raise SpecValidationError(
+ 'The haproxy_peer_communication_port is valid only for NFS backend.'
+ )
# validate SSL parametes
if self.monitor_ssl:
CustomContainerSpec,
GrafanaSpec,
HostPlacementSpec,
+ IngressSpec,
IscsiServiceSpec,
NFSServiceSpec,
PlacementSpec,
assert spec.monitor_port == 8081
def test_ingress_spec_haproxy_peer_communication_port():
    """An NFS-backed ingress reserves peer port 1024 by default; an explicit
    haproxy_peer_communication_port overrides it; RGW backends reserve none."""
    def _ingress(prefix, frontend, monitor, **extra):
        # Small factory so each case only states what differs.
        return IngressSpec(
            service_type='ingress',
            service_id=f'{prefix}.foo',
            backend_service=f'{prefix}.foo',
            frontend_port=frontend,
            monitor_port=monitor,
            virtual_ip='192.168.1.1/24',
            **extra,
        )

    # NFS backend with no explicit setting: implicit default peer port 1024.
    assert _ingress('nfs', 2049, 9049).get_port_start() == [2049, 9049, 1024]

    # Explicitly configured peer port wins over the default.
    custom = _ingress('nfs', 2049, 9049, haproxy_peer_communication_port=5000)
    assert custom.get_port_start() == [2049, 9049, 5000]

    # RGW ingress has no peers section, hence no peer port to reserve.
    assert _ingress('rgw', 8080, 8081).get_port_start() == [8080, 8081]

    # The field round-trips through YAML/JSON spec loading.
    yaml_spec = """service_type: ingress
service_id: nfs.foo
placement:
  hosts:
  - host1
spec:
  virtual_ip: 192.168.20.1/24
  backend_service: nfs.foo
  frontend_port: 2049
  monitor_port: 9049
  haproxy_peer_communication_port: 5000
"""
    parsed = ServiceSpec.from_json(yaml.safe_load(yaml_spec))
    assert isinstance(parsed, IngressSpec)
    assert parsed.haproxy_peer_communication_port == 5000
    assert parsed.get_port_start() == [2049, 9049, 5000]


+
@pytest.mark.parametrize("y, error_match", [
("""
service_type: rgw