@patch("cephadm.utils.resolve_ip")
@patch("cephadm.inventory.HostCache.get_daemons_by_service")
@patch("cephadm.serve.CephadmServe._run_cephadm")
- def test_ingress_config_nfs_multiple_nfs_same_rank(self, _run_cephadm, _get_daemons_by_service, _resolve_ip, _get_addr, cephadm_module: CephadmOrchestrator):
+ def test_ingress_config_nfs_multiple_nfs_same_rank(
+ self,
+ _run_cephadm,
+ _get_daemons_by_service,
+            _resolve_ip,
+            _get_addr,
+ cephadm_module: CephadmOrchestrator
+ ):
_run_cephadm.side_effect = async_side_effect(('{}', '', 0))
def fake_resolve_ip(hostname: str) -> str:
return hostname
_get_addr.side_effect = fake_get_addr
- nfs_service = NFSServiceSpec(service_id="foo", placement=PlacementSpec(count=1, hosts=['host1', 'host2']),
- port=12049)
-
- ispec = IngressSpec(service_type='ingress',
- service_id='nfs.foo',
- backend_service='nfs.foo',
- frontend_port=2049,
- monitor_port=9049,
- virtual_ip='192.168.122.100/24',
- monitor_user='admin',
- monitor_password='12345',
- keepalived_password='12345')
+ nfs_service = NFSServiceSpec(
+ service_id="foo",
+ placement=PlacementSpec(
+ count=1,
+                hosts=['host1', 'host2'],
+            ),
+ port=12049,
+ )
+
+ ispec = IngressSpec(
+ service_type='ingress',
+ service_id='nfs.foo',
+ backend_service='nfs.foo',
+ frontend_port=2049,
+ monitor_port=9049,
+ virtual_ip='192.168.122.100/24',
+ monitor_user='admin',
+ monitor_password='12345',
+ keepalived_password='12345',
+ )
cephadm_module.spec_store._specs = {
'nfs.foo': nfs_service,
# for the host1 nfs daemon as we'll end up giving that
# one higher rank_generation but the same rank as the one
# on host2
+ haproxy_txt = (
+ '# This file is generated by cephadm.\n'
+ 'global\n'
+ ' log 127.0.0.1 local2\n'
+ ' chroot /var/lib/haproxy\n'
+ ' pidfile /var/lib/haproxy/haproxy.pid\n'
+ ' maxconn 8000\n'
+ ' daemon\n'
+ ' stats socket /var/lib/haproxy/stats\n\n'
+ 'defaults\n'
+ ' mode tcp\n'
+ ' log global\n'
+ ' timeout queue 1m\n'
+ ' timeout connect 10s\n'
+ ' timeout client 1m\n'
+ ' timeout server 1m\n'
+ ' timeout check 10s\n'
+ ' maxconn 8000\n\n'
+ 'frontend stats\n'
+ ' mode http\n'
+ ' bind 192.168.122.100:9049\n'
+ ' bind host1:9049\n'
+ ' stats enable\n'
+ ' stats uri /stats\n'
+ ' stats refresh 10s\n'
+ ' stats auth admin:12345\n'
+ ' http-request use-service prometheus-exporter if { path /metrics }\n'
+ ' monitor-uri /health\n\n'
+ 'frontend frontend\n'
+ ' bind 192.168.122.100:2049\n'
+ ' default_backend backend\n\n'
+ 'backend backend\n'
+ ' mode tcp\n'
+ ' balance source\n'
+ ' hash-type consistent\n'
+ ' server nfs.foo.0 192.168.122.111:12049\n'
+ )
haproxy_expected_conf = {
- 'files':
- {
- 'haproxy.cfg':
- '# This file is generated by cephadm.\n'
- 'global\n'
- ' log 127.0.0.1 local2\n'
- ' chroot /var/lib/haproxy\n'
- ' pidfile /var/lib/haproxy/haproxy.pid\n'
- ' maxconn 8000\n'
- ' daemon\n'
- ' stats socket /var/lib/haproxy/stats\n\n'
- 'defaults\n'
- ' mode tcp\n'
- ' log global\n'
- ' timeout queue 1m\n'
- ' timeout connect 10s\n'
- ' timeout client 1m\n'
- ' timeout server 1m\n'
- ' timeout check 10s\n'
- ' maxconn 8000\n\n'
- 'frontend stats\n'
- ' mode http\n'
- ' bind 192.168.122.100:9049\n'
- ' bind host1:9049\n'
- ' stats enable\n'
- ' stats uri /stats\n'
- ' stats refresh 10s\n'
- ' stats auth admin:12345\n'
- ' http-request use-service prometheus-exporter if { path /metrics }\n'
- ' monitor-uri /health\n\n'
- 'frontend frontend\n'
- ' bind 192.168.122.100:2049\n'
- ' default_backend backend\n\n'
- 'backend backend\n'
- ' mode tcp\n'
- ' balance source\n'
- ' hash-type consistent\n'
- ' server nfs.foo.0 192.168.122.111:12049\n'
- }
+ 'files': {'haproxy.cfg': haproxy_txt}
}
# verify we get the same cfg regardless of the order in which the nfs daemons are returned