    - host2
    - host3
spec:
-  backend_service: rgw.something # adjust to match your existing RGW service
-  virtual_ip: <string>/<string> # ex: 192.168.20.1/24
-  frontend_port: <integer> # ex: 8080
-  monitor_port: <integer> # ex: 1967, used by haproxy for load balancer status
-  virtual_interface_networks: [ ... ] # optional: list of CIDR networks
-  ssl_cert: | # optional: SSL certificate and key
+  backend_service: rgw.something # adjust to match your existing RGW service
+  virtual_ip: <string>/<string> # ex: 192.168.20.1/24
+  frontend_port: <integer> # ex: 8080
+  monitor_port: <integer> # ex: 1967, used by haproxy for load balancer status
+  virtual_interface_networks: [ ... ] # optional: list of CIDR networks
+  use_keepalived_multicast: <bool> # optional: Default is False.
+  vrrp_interface_network: <string>/<string> # optional: ex: 192.168.20.0/24
+  ssl_cert: | # optional: SSL certificate and key
    -----BEGIN CERTIFICATE-----
    ...
    -----END CERTIFICATE-----
* ``ssl_cert``:
    SSL certificate, if SSL is to be enabled. This must contain both the certificate and
    private key blocks in .pem format.
+* ``use_keepalived_multicast``:
+    Default is False. By default, cephadm deploys a keepalived configuration that uses
+    unicast between the instances; the peer IPs chosen are the same IPs cephadm uses to
+    connect to the hosts. If multicast is preferred instead, set ``use_keepalived_multicast``
+    to ``True`` and keepalived will use the multicast IP (224.0.0.18) to communicate between
+    instances, on the same interfaces where the VIPs are configured.
+* ``vrrp_interface_network``:
+    By default, cephadm configures keepalived to send VRRP traffic over the same interface
+    that carries the VIPs. If a different interface is needed, set ``vrrp_interface_network``
+    to a network (in CIDR notation) that identifies which ethernet interface to use, as in
+    the example below.
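+
+    For example, assuming the usual ingress service spec layout and illustrative
+    addresses (the dedicated VRRP network ``10.10.10.0/24`` here is hypothetical), a
+    spec that keeps keepalived on unicast and pins VRRP traffic to a separate network
+    could look like this:
+
+    .. code-block:: yaml
+
+        service_type: ingress
+        service_id: rgw.something
+        placement:
+          hosts:
+            - host1
+            - host2
+            - host3
+        spec:
+          backend_service: rgw.something
+          virtual_ip: 192.168.20.1/24
+          frontend_port: 8080
+          monitor_port: 1967
+          use_keepalived_multicast: false
+          vrrp_interface_network: 10.10.10.0/24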
.. _ingress-virtual-ip:
f"Unable to identify interface for {spec.virtual_ip} on {host}"
)
+ # Use interface as vrrp_interface for vrrp traffic if vrrp_interface_network not set on the spec
+ vrrp_interface = None
+ if not spec.vrrp_interface_network:
+ vrrp_interface = interface
+ else:
+ for subnet, ifaces in self.mgr.cache.networks.get(host, {}).items():
+ if subnet == spec.vrrp_interface_network:
+ vrrp_interface = list(ifaces.keys())[0]
+ logger.info(
+ f'vrrp will be configured on {host} interface '
+ f'{vrrp_interface} (which has guiding subnet {subnet})'
+ )
+ break
+ else:
+ raise OrchestratorError(
+ f"Unable to identify vrrp interface for {spec.vrrp_interface_network} on {host}"
+ )
+
# script to monitor health
script = '/usr/bin/false'
for d in daemons:
'script': script,
'password': password,
'interface': interface,
+ 'vrrp_interface': vrrp_interface,
'virtual_ips': virtual_ips,
'states': states,
'priorities': priorities,
vrrp_instance VI_{{ x }} {
  state {{ states[x] }}
  priority {{ priorities[x] }}
-  interface {{ interface }}
+  interface {{ vrrp_interface }}
  virtual_router_id {{ 50 + x }}
  advert_int 1
  authentication {
      auth_type PASS
      auth_pass {{ password }}
  }
+{% if not spec.use_keepalived_multicast %}
  unicast_src_ip {{ host_ip }}
  unicast_peer {
    {% for ip in other_ips %}
    {{ ip }}
    {% endfor %}
  }
+{% endif %}
  virtual_ipaddress {
    {{ virtual_ips[x] }} dev {{ interface }}
  }
                 virtual_ip: Optional[str] = None,
                 virtual_ips_list: Optional[List[str]] = None,
                 virtual_interface_networks: Optional[List[str]] = [],
+                 use_keepalived_multicast: Optional[bool] = False,
+                 vrrp_interface_network: Optional[str] = None,
                 unmanaged: bool = False,
                 ssl: bool = False,
                 keepalive_only: bool = False,
        self.virtual_ip = virtual_ip
        self.virtual_ips_list = virtual_ips_list
        self.virtual_interface_networks = virtual_interface_networks or []
+        self.use_keepalived_multicast = use_keepalived_multicast
+        self.vrrp_interface_network = vrrp_interface_network
        self.unmanaged = unmanaged
        self.ssl = ssl
        self.keepalive_only = keepalive_only