that matches one of the specified networks will be used. If neither condition
is met, the default binding will happen on all available network interfaces.
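+
+For example, to restrict binding to addresses on a particular subnet, the
+``networks`` field can be set in the spec. The following is a minimal sketch
+(the host name and subnet are placeholders):
+
+.. code-block:: yaml
+
+    service_type: nfs
+    service_id: mynfs
+    placement:
+      hosts:
+        - ceph-node-0
+    networks:
+      - 192.168.122.0/24
+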
+TLS/SSL Example
+---------------
+
+Here's an example NFS service specification with TLS/SSL configuration:
+
+.. code-block:: yaml
+
+    service_type: nfs
+    service_id: mynfs
+    placement:
+      hosts:
+        - ceph-node-0
+    spec:
+      port: 12345
+      ssl: true
+      certificate_source: inline
+      ssl_cert: |
+        -----BEGIN CERTIFICATE-----
+        (PEM cert contents here)
+        -----END CERTIFICATE-----
+      ssl_key: |
+        -----BEGIN PRIVATE KEY-----
+        (PEM key contents here)
+        -----END PRIVATE KEY-----
+      ssl_ca_cert: |
+        -----BEGIN CERTIFICATE-----
+        (PEM CA cert contents here)
+        -----END CERTIFICATE-----
+      tls_ktls: true
+      tls_debug: true
+      tls_min_version: TLSv1.3
+      tls_ciphers: AES-256
+
+This example configures an NFS service with TLS encryption enabled using
+inline certificates.
+
+TLS/SSL Parameters
+~~~~~~~~~~~~~~~~~~
+
+The following parameters can be used to configure TLS/SSL encryption for the NFS service:
+
+* ``ssl`` (boolean): Enable or disable SSL/TLS encryption. Default is ``false``.
+
+* ``certificate_source`` (string): Specifies the source of the TLS certificates.
+  Options include:
+
+  - ``cephadm-signed``: Use certificates generated and signed by cephadm's
+    internal CA (see the example after this parameter list)
+  - ``inline``: Provide certificates directly in the specification using the
+    ``ssl_cert``, ``ssl_key``, and ``ssl_ca_cert`` fields
+  - ``reference``: Use a certificate and key that have already been registered
+    with certmgr; set ``certificate_source`` to ``reference`` in the spec to
+    reference them.
+
+* ``ssl_cert`` (string): The SSL certificate in PEM format. Required when using
+  the ``inline`` certificate source.
+
+* ``ssl_key`` (string): The SSL private key in PEM format. Required when using
+  the ``inline`` certificate source.
+
+* ``ssl_ca_cert`` (string): The SSL CA certificate in PEM format. Required when
+  using the ``inline`` certificate source.
+
+* ``custom_sans`` (list): List of custom Subject Alternative Names (SANs) to
+  include in the certificate.
+
+* ``tls_ktls`` (boolean): Enable kernel TLS (kTLS) for improved performance when
+  available. Default is ``false``.
+
+* ``tls_debug`` (boolean): Enable TLS debugging output. Useful for troubleshooting
+  TLS issues. Default is ``false``.
+
+* ``tls_min_version`` (string): Specify the minimum TLS version to accept.
+  Examples: ``TLSv1.3``, ``TLSv1.2``
+
+* ``tls_ciphers`` (string): Specify the allowed cipher suites for TLS connections.
+  Example: ``-CIPHER-ALL:+AES-256-GCM``
+
+.. note:: When ``ssl`` is enabled, a ``certificate_source`` must be specified.
+   If using ``inline`` certificates, all three certificate fields (``ssl_cert``,
+   ``ssl_key``, ``ssl_ca_cert``) must be provided.
+
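+If you prefer not to manage the certificates yourself, cephadm can generate
+and sign them. The following is a minimal sketch (the host name and SAN are
+placeholders) of the same service using ``cephadm-signed`` certificates with a
+custom SAN:
+
+.. code-block:: yaml
+
+    service_type: nfs
+    service_id: mynfs
+    placement:
+      hosts:
+        - ceph-node-0
+    spec:
+      port: 12345
+      ssl: true
+      certificate_source: cephadm-signed
+      custom_sans:
+        - nfs.example.com
+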
The specification can then be applied by running the following command:
.. prompt:: bash #
use the orchestrator interface to update the NFS service spec. The safest way
to do that is to export the current spec, modify it, and then re-apply it. For
example, to modify the ``nfs.foo`` service, run commands of the following
-forms:
+forms:
.. prompt:: bash #
permissible values.
``<sectype>`` specifies which authentication methods will be used when
-connecting to the export. Valid values include "krb5p", "krb5i", "krb5", "sys",
+connecting to the export. Valid values include "krb5p", "krb5i", "krb5", "sys", "tls", "mtls",
and "none". More than one value can be supplied. The flag may be specified
multiple times (example: ``--sectype=krb5p --sectype=krb5i``) or multiple
values may be separated by a comma (example: ``--sectype krb5p,krb5i``). The
RGW bucket export
^^^^^^^^^^^^^^^^^
-
+
To export a *bucket*:
.. prompt:: bash #
# create the ganesha conf dir
config_dir = os.path.join(data_dir, 'etc/ganesha')
+ tls_dir = os.path.join(data_dir, 'etc/ganesha/tls')
makedirs(config_dir, uid, gid, 0o755)
-
+ makedirs(tls_dir, uid, gid, 0o755)
+
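+ # split the config-json files: the ganesha/idmap configuration goes into
+ # the config dir, the TLS material into the dedicated tls dir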
+ config_files = {
+ fname: content
+ for fname, content in self.files.items()
+ if fname in ['ganesha.conf', 'idmap.conf']
+ }
+ tls_files = {
+ fname: content
+ for fname, content in self.files.items()
+ if fname.startswith('tls')
+ }
# populate files from the config-json
- populate_files(config_dir, self.files, uid, gid)
+ populate_files(config_dir, config_files, uid, gid)
+ populate_files(tls_dir, tls_files, uid, gid)
# write the RGW keyring
if self.rgw:
# the daemon is written, which we rewrite on redeploy, but not
# on reconfig.
action = 'redeploy'
-
+ elif dd.daemon_type == 'nfs':
+ # TLS cert/key files are only written at (re)deploy time, not on reconfig,
+ # so a change to any of the ssl/tls dependencies requires a redeploy
+ tls_updated = any(s.startswith(('ssl', 'tls')) for s in sym_diff)
+ if tls_updated:
+ action = 'redeploy'
elif spec is not None and hasattr(spec, 'extra_container_args') and dd.extra_container_args != spec.extra_container_args:
self.log.debug(
f'{dd.name()} container cli args {dd.extra_container_args} -> {spec.extra_container_args}')
import os
import subprocess
import tempfile
-from typing import Dict, Tuple, Any, List, cast, Optional
+from typing import Dict, Tuple, Any, List, cast, Optional, TYPE_CHECKING
from configparser import ConfigParser
from io import StringIO
from .service_registry import register_cephadm_service
from orchestrator import DaemonDescription, OrchestratorError
-
+from cephadm import utils
from cephadm.services.cephadmservice import AuthEntity, CephadmDaemonDeploySpec, CephService
+if TYPE_CHECKING:
+ from ..module import CephadmOrchestrator
logger = logging.getLogger(__name__)
assert self.TYPE == spec.service_type
create_ganesha_pool(self.mgr)
+ @classmethod
+ def get_dependencies(
+ cls,
+ mgr: "CephadmOrchestrator",
+ spec: Optional[ServiceSpec] = None,
+ daemon_type: Optional[str] = None
+ ) -> List[str]:
+ assert spec
+ deps: List[str] = []
+ nfs_spec = cast(NFSServiceSpec, spec)
+ # track the TLS-related fields as dependencies so changes to them are detected
+ if nfs_spec.ssl and nfs_spec.ssl_cert and nfs_spec.ssl_key and nfs_spec.ssl_ca_cert:
+ deps.append(f'ssl_cert: {utils.md5_hash(nfs_spec.ssl_cert)}')
+ deps.append(f'ssl_key: {utils.md5_hash(nfs_spec.ssl_key)}')
+ deps.append(f'ssl_ca_cert: {utils.md5_hash(nfs_spec.ssl_ca_cert)}')
+ deps.append(f'tls_ktls: {nfs_spec.tls_ktls}')
+ deps.append(f'tls_debug: {nfs_spec.tls_debug}')
+ deps.append(f'tls_min_version: {nfs_spec.tls_min_version}')
+ deps.append(f'tls_ciphers: {nfs_spec.tls_ciphers}')
+ return sorted(deps)
+
def prepare_create(self, daemon_spec: CephadmDaemonDeploySpec) -> CephadmDaemonDeploySpec:
assert self.TYPE == daemon_spec.daemon_type
daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
assert self.TYPE == daemon_spec.daemon_type
+ super().register_for_certificates(daemon_spec)
daemon_type = daemon_spec.daemon_type
daemon_id = daemon_spec.daemon_id
host = daemon_spec.host
spec = cast(NFSServiceSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
- deps: List[str] = []
-
nodeid = f'{daemon_spec.rank}'
nfs_idmap_conf = '/etc/ganesha/idmap.conf'
"nfs_idmap_conf": nfs_idmap_conf,
"enable_nlm": str(spec.enable_nlm).lower(),
"cluster_id": self.mgr._cluster_fsid,
+ "tls_add": spec.ssl,
+ "tls_ciphers": spec.tls_ciphers,
+ "tls_min_version": spec.tls_min_version,
+ "tls_ktls": spec.tls_ktls,
+ "tls_debug": spec.tls_debug,
}
if spec.enable_haproxy_protocol:
context["haproxy_hosts"] = self._haproxy_hosts()
'ganesha.conf': get_ganesha_conf(),
'idmap.conf': get_idmap_conf()
}
+ if spec.ssl:
+ tls_creds = self.get_certificates(daemon_spec, ca_cert_required=True)
+ config['files'].update({
+ 'tls_cert.pem': tls_creds.cert,
+ 'tls_key.pem': tls_creds.key,
+ 'tls_ca_cert.pem': tls_creds.ca_cert,
+ })
config.update(
self.get_config_and_keyring(
daemon_type, daemon_id,
logger.debug('Generated cephadm config-json: %s' % config)
return config
- return get_cephadm_config(), deps
+ return get_cephadm_config(), self.get_dependencies(self.mgr, spec)
def create_rados_config_obj(self,
spec: NFSServiceSpec,
name = "client.{{ rgw_user }}";
}
+{% if tls_add %}
+TLS_CONFIG{
+ Enable_TLS = {{ tls_add }};
+ TLS_Cert_File = /etc/ganesha/tls/tls_cert.pem;
+ TLS_Key_File = /etc/ganesha/tls/tls_key.pem;
+ TLS_CA_File = /etc/ganesha/tls/tls_ca_cert.pem;
+ {% if tls_ciphers %}
+ TLS_Ciphers = "{{ tls_ciphers }}";
+ {% endif %}
+ {% if tls_min_version %}
+ TLS_Min_Version = "{{ tls_min_version }}";
+ {% endif %}
+ {% if tls_ktls %}
+ Enable_KTLS = {{ tls_ktls }};
+ {% endif %}
+ {% if tls_debug %}
+ Enable_debug = {{ tls_debug }};
+ {% endif %}
+}
+
+{% endif %}
%url {{ url }}
nvmeof_root_ca_cert = 'fake-nvmeof-root-ca-cert'
grafana_cert_host_1 = 'grafana-cert-host-1'
grafana_cert_host_2 = 'grafana-cert-host-2'
+ nfs_ssl_cert = 'nfs-ssl-cert'
+ nfs_ssl_ca_cert = 'nfs-ssl-ca-cert'
cephadm_module.cert_mgr.save_cert('rgw_ssl_cert', rgw_frontend_rgw_foo_host2_cert, service_name='rgw.foo', user_made=True)
cephadm_module.cert_mgr.save_cert('nvmeof_ssl_cert', nvmeof_ssl_cert, service_name='nvmeof.self-signed.foo', user_made=False)
cephadm_module.cert_mgr.save_cert('nvmeof_client_cert', nvmeof_client_cert, service_name='nvmeof.foo', user_made=True)
cephadm_module.cert_mgr.save_cert('nvmeof_root_ca_cert', nvmeof_root_ca_cert, service_name='nvmeof.foo', user_made=True)
cephadm_module.cert_mgr.save_cert('grafana_ssl_cert', grafana_cert_host_1, host='host-1', user_made=True)
cephadm_module.cert_mgr.save_cert('grafana_ssl_cert', grafana_cert_host_2, host='host-2', user_made=True)
+ cephadm_module.cert_mgr.save_cert('nfs_ssl_cert', nfs_ssl_cert, service_name='nfs.foo', user_made=True)
+ cephadm_module.cert_mgr.save_cert('nfs_ssl_ca_cert', nfs_ssl_ca_cert, service_name='nfs.foo', user_made=True)
expected_calls = [
mock.call(f'{TLSOBJECT_STORE_CERT_PREFIX}rgw_ssl_cert', json.dumps({'rgw.foo': Cert(rgw_frontend_rgw_foo_host2_cert, True).to_json()})),
mock.call(f'{TLSOBJECT_STORE_CERT_PREFIX}nvmeof_root_ca_cert', json.dumps({'nvmeof.foo': Cert(nvmeof_root_ca_cert, True).to_json()})),
mock.call(f'{TLSOBJECT_STORE_CERT_PREFIX}grafana_ssl_cert', json.dumps({'host-1': Cert(grafana_cert_host_1, True).to_json()})),
mock.call(f'{TLSOBJECT_STORE_CERT_PREFIX}grafana_ssl_cert', json.dumps({'host-1': Cert(grafana_cert_host_1, True).to_json(),
- 'host-2': Cert(grafana_cert_host_2, True).to_json()}))
+ 'host-2': Cert(grafana_cert_host_2, True).to_json()})),
+ mock.call(f'{TLSOBJECT_STORE_CERT_PREFIX}nfs_ssl_cert', json.dumps({'nfs.foo': Cert(nfs_ssl_cert, True).to_json()})),
+ mock.call(f'{TLSOBJECT_STORE_CERT_PREFIX}nfs_ssl_ca_cert', json.dumps({'nfs.foo': Cert(nfs_ssl_ca_cert, True).to_json()})),
]
_set_store.assert_has_calls(expected_calls)
}
compare_certls_dicts(expected_ls)
+ cephadm_module.cert_mgr.save_cert('nfs_ssl_cert', CEPHADM_SELF_GENERATED_CERT_1, service_name='nfs.foo', user_made=True)
+ expected_ls["nfs_ssl_cert"] = {
+ "scope": "service",
+ "certificates": {
+ "nfs.foo": get_generated_cephadm_cert_info_1(),
+ },
+ }
+ compare_certls_dicts(expected_ls)
+
+ cephadm_module.cert_mgr.save_cert('nfs_ssl_ca_cert', CEPHADM_SELF_GENERATED_CERT_2, service_name='nfs.foo', user_made=True)
+ expected_ls["nfs_ssl_ca_cert"] = {
+ "scope": "service",
+ "certificates": {
+ "nfs.foo": get_generated_cephadm_cert_info_2(),
+ },
+ }
+ compare_certls_dicts(expected_ls)
+
# Services with host target/scope
cephadm_module.cert_mgr.save_cert('grafana_ssl_cert', CEPHADM_SELF_GENERATED_CERT_1, host='host1', user_made=True)
cephadm_module.cert_mgr.save_cert('grafana_ssl_cert', CEPHADM_SELF_GENERATED_CERT_2, host='host2', user_made=True)
'grafana_ssl_cert': ('host1', 'grafana-cert', TLSObjectScope.HOST),
'oauth2_proxy_ssl_cert': ('host1', 'oauth2-proxy', TLSObjectScope.HOST),
'mgmt_gateway_ssl_cert': ('mgmt-gateway', 'mgmt-gw-cert', TLSObjectScope.GLOBAL),
+ 'nfs_ssl_cert': ('nfs.foo', 'nfs-ssl-cert', TLSObjectScope.SERVICE),
+ 'nfs_ssl_ca_cert': ('nfs.foo', 'nfs-ssl-ca-cert', TLSObjectScope.SERVICE),
}
unknown_certs = {
'unknown_per_service_cert': ('unknown-svc.foo', 'unknown-cert', TLSObjectScope.SERVICE),
'oauth2_proxy_ssl_key': ('host1', 'oauth2-proxy', TLSObjectScope.HOST),
'ingress_ssl_key': ('ingress', 'ingress-ssl-key', TLSObjectScope.SERVICE),
'iscsi_ssl_key': ('iscsi', 'iscsi-ssl-key', TLSObjectScope.SERVICE),
+ 'nfs_ssl_key': ('nfs.foo', 'nfs-ssl-key', TLSObjectScope.SERVICE),
}
unknown_keys = {
'unknown_per_service_key': ('unknown-svc.foo', 'unknown-key', TLSObjectScope.SERVICE),
good_certs = {
'rgw_ssl_cert': ('rgw.foo', 'good-cert', TLSObjectScope.SERVICE),
'mgmt_gateway_ssl_cert': ('mgmt-gateway', 'good-global-cert', TLSObjectScope.GLOBAL),
+ 'nfs_ssl_cert': ('nfs.foo', 'nfs-ssl-cert', TLSObjectScope.SERVICE),
+ 'nfs_ssl_ca_cert': ('nfs.foo', 'nfs-ssl-ca-cert', TLSObjectScope.SERVICE),
}
good_keys = {
'rgw_ssl_key': ('rgw.foo', 'good-key', TLSObjectScope.SERVICE),
+ 'nfs_ssl_key': ('nfs.foo', 'nfs-ssl-key', TLSObjectScope.SERVICE),
}
# Helpers to dump valid JSON structures
# Good entries loaded correctly
assert 'rgw_ssl_cert' in cert_store
assert cert_store['rgw_ssl_cert']['rgw.foo'] == Cert('good-cert', True)
+ assert 'nfs_ssl_cert' in cert_store
+ assert cert_store['nfs_ssl_cert']['nfs.foo'] == Cert('nfs-ssl-cert', True)
+ assert 'nfs_ssl_ca_cert' in cert_store
+ assert cert_store['nfs_ssl_ca_cert']['nfs.foo'] == Cert('nfs-ssl-ca-cert', True)
assert 'mgmt_gateway_ssl_cert' in cert_store
assert cert_store['mgmt_gateway_ssl_cert'] == Cert('good-global-cert', True)
assert 'rgw_ssl_key' in key_store
assert key_store['rgw_ssl_key']['rgw.foo'] == PrivKey('good-key')
+ assert 'nfs_ssl_key' in key_store
+ assert key_store['nfs_ssl_key']['nfs.foo'] == PrivKey('nfs-ssl-key')
# Bad ones: object names exist (pre-registered), but **no targets** were added
# Service / Host scoped => dict should be empty
assert gen_config_lines == exp_config_lines
+ @patch("cephadm.serve.CephadmServe._run_cephadm")
+ @patch("cephadm.services.nfs.NFSService.fence_old_ranks", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.run_grace_tool", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.purge", MagicMock())
+ @patch("cephadm.services.nfs.NFSService.create_rados_config_obj", MagicMock())
+ def test_nfs_tls(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ _run_cephadm.side_effect = async_side_effect(('{}', '', 0))
+
+ with with_host(cephadm_module, 'test', addr='1.2.3.7'):
+ cephadm_module.cache.update_host_networks('test', {
+ '1.2.3.0/24': {
+ 'if0': ['1.2.3.1']
+ }
+ })
+
+ nfs_spec = NFSServiceSpec(service_id="foo", placement=PlacementSpec(hosts=['test']),
+ ssl=True, ssl_cert=ceph_generated_cert, ssl_key=ceph_generated_key,
+ ssl_ca_cert=cephadm_root_ca, certificate_source='inline', tls_ktls=True,
+ tls_debug=True, tls_min_version='TLSv1.3',
+ tls_ciphers='ECDHE-ECDSA-AES256')
+ with with_service(cephadm_module, nfs_spec) as _:
+ nfs_generated_conf, _ = service_registry.get_service('nfs').generate_config(
+ CephadmDaemonDeploySpec(host='test', daemon_id='foo.test.0.0', service_name=nfs_spec.service_name()))
+ ganesha_conf = nfs_generated_conf['files']['ganesha.conf']
+ expected_tls_block = (
+ 'TLS_CONFIG{\n'
+ ' Enable_TLS = True;\n'
+ ' TLS_Cert_File = /etc/ganesha/tls/tls_cert.pem;\n'
+ ' TLS_Key_File = /etc/ganesha/tls/tls_key.pem;\n'
+ ' TLS_CA_File = /etc/ganesha/tls/tls_ca_cert.pem;\n'
+ ' TLS_Ciphers = "ECDHE-ECDSA-AES256";\n'
+ ' TLS_Min_Version = "TLSv1.3";\n'
+ ' Enable_KTLS = True;\n'
+ ' Enable_debug = True;\n'
+ '}\n'
+ )
+ assert expected_tls_block in ganesha_conf
+
class TestCephFsMirror:
@patch("cephadm.serve.CephadmServe._run_cephadm")
virtual_ip: Optional[str] = None,
ingress_mode: Optional[IngressType] = None,
port: Optional[int] = None,
+ ssl: bool = False,
+ ssl_cert: Optional[str] = None,
+ ssl_key: Optional[str] = None,
+ ssl_ca_cert: Optional[str] = None,
+ tls_ktls: bool = False,
+ tls_debug: bool = False,
+ tls_min_version: Optional[str] = None,
+ tls_ciphers: Optional[str] = None,
) -> None:
if not port:
port = 2049 # default nfs port
# use non-default port so we don't conflict with ingress
port=ganesha_port,
virtual_ip=virtual_ip_for_ganesha,
- enable_haproxy_protocol=enable_haproxy_protocol)
+ enable_haproxy_protocol=enable_haproxy_protocol,
+ ssl=ssl,
+ ssl_cert=ssl_cert,
+ ssl_key=ssl_key,
+ ssl_ca_cert=ssl_ca_cert,
+ tls_ktls=tls_ktls,
+ tls_debug=tls_debug,
+ tls_min_version=tls_min_version,
+ tls_ciphers=tls_ciphers)
completion = self.mgr.apply_nfs(spec)
orchestrator.raise_if_exception(completion)
ispec = IngressSpec(service_type='ingress',
# standalone nfs
spec = NFSServiceSpec(service_type='nfs', service_id=cluster_id,
placement=PlacementSpec.from_string(placement),
- port=port)
+ port=port,
+ ssl=ssl,
+ ssl_cert=ssl_cert,
+ ssl_key=ssl_key,
+ ssl_ca_cert=ssl_ca_cert,
+ tls_ktls=tls_ktls,
+ tls_debug=tls_debug,
+ tls_min_version=tls_min_version,
+ tls_ciphers=tls_ciphers)
completion = self.mgr.apply_nfs(spec)
orchestrator.raise_if_exception(completion)
log.debug("Successfully deployed nfs daemons with cluster id %s and placement %s",
ingress: Optional[bool] = None,
ingress_mode: Optional[IngressType] = None,
port: Optional[int] = None,
+ ssl: bool = False,
+ ssl_cert: Optional[str] = None,
+ ssl_key: Optional[str] = None,
+ ssl_ca_cert: Optional[str] = None,
+ tls_ktls: bool = False,
+ tls_debug: bool = False,
+ tls_min_version: Optional[str] = None,
+ tls_ciphers: Optional[str] = None,
) -> None:
try:
if virtual_ip:
self.create_empty_rados_obj(cluster_id)
if cluster_id not in available_clusters(self.mgr):
- self._call_orch_apply_nfs(cluster_id, placement, virtual_ip, ingress_mode, port)
+ self._call_orch_apply_nfs(cluster_id, placement, virtual_ip, ingress_mode, port,
+ ssl, ssl_cert, ssl_key, ssl_ca_cert, tls_ktls, tls_debug,
+ tls_min_version, tls_ciphers)
return
raise NonFatalError(f"{cluster_id} cluster already exists")
except Exception as e:
def _validate_sec_type(sec_type: str) -> None:
- valid_sec_types = ["none", "sys", "krb5", "krb5i", "krb5p"]
+ valid_sec_types = ["none", "sys", "krb5", "krb5i", "krb5p", "tls", "mtls"]
if not isinstance(sec_type, str) or sec_type not in valid_sec_types:
raise NFSInvalidOperation(
f"SecType {sec_type} invalid, valid types are {valid_sec_types}")
import logging
import threading
from typing import Tuple, Optional, List, Dict, Any
+import yaml
from mgr_module import MgrModule, CLICommand, Option, CLICheckNonemptyFileInput
import object_format
ingress: Optional[bool] = None,
virtual_ip: Optional[str] = None,
ingress_mode: Optional[IngressType] = None,
- port: Optional[int] = None) -> None:
+ port: Optional[int] = None,
+ inbuf: Optional[str] = None) -> None:
"""Create an NFS Cluster"""
+ ssl_cert = ssl_key = ssl_ca_cert = tls_min_version = tls_ciphers = None
+ ssl = tls_ktls = tls_debug = False
+ if inbuf:
+ config = yaml.safe_load(inbuf)
+ ssl = config.get('ssl', False)
+ ssl_cert = config.get('ssl_cert')
+ ssl_key = config.get('ssl_key')
+ ssl_ca_cert = config.get('ssl_ca_cert')
+ tls_min_version = config.get('tls_min_version')
+ tls_ktls = config.get('tls_ktls', False)
+ tls_debug = config.get('tls_debug', False)
+ tls_ciphers = config.get('tls_ciphers')
+
return self.nfs.create_nfs_cluster(cluster_id=cluster_id, placement=placement,
virtual_ip=virtual_ip, ingress=ingress,
- ingress_mode=ingress_mode, port=port)
+ ingress_mode=ingress_mode, port=port,
+ ssl=ssl,
+ ssl_cert=ssl_cert,
+ ssl_key=ssl_key,
+ ssl_ca_cert=ssl_ca_cert,
+ tls_ktls=tls_ktls,
+ tls_debug=tls_debug,
+ tls_min_version=tls_min_version,
+ tls_ciphers=tls_ciphers)
@CLICommand('nfs cluster rm', perm='rw')
@object_format.EmptyResponder()
'access_type': None,
'squash': None
}],
- 'sectype': ["krb5p", "krb5i", "sys"],
+ 'sectype': ["krb5p", "krb5i", "sys", "mtls", "tls"],
'fsal': {
'name': 'RGW',
'user_id': 'nfs.foo.bucket',
info = conf._get_export_dict(self.cluster_id, "/rgw/bucket")
assert info["export_id"] == 2
assert info["path"] == "bucket"
- assert info["sectype"] == ["krb5p", "krb5i", "sys"]
+ assert info["sectype"] == ["krb5p", "krb5i", "sys", "mtls", "tls"]
def test_update_export_with_ganesha_conf(self):
self._do_mock_test(self._do_test_update_export_with_ganesha_conf)
extra_entrypoint_args: Optional[GeneralArgList] = None,
idmap_conf: Optional[Dict[str, Dict[str, str]]] = None,
custom_configs: Optional[List[CustomConfig]] = None,
+ ssl: bool = False,
+ ssl_cert: Optional[str] = None,
+ ssl_key: Optional[str] = None,
+ ssl_ca_cert: Optional[str] = None,
+ certificate_source: Optional[str] = None,
+ custom_sans: Optional[List[str]] = None,
+ tls_ktls: bool = False,
+ tls_debug: bool = False,
+ tls_min_version: Optional[str] = None,
+ tls_ciphers: Optional[str] = None,
):
assert service_type == 'nfs'
super(NFSServiceSpec, self).__init__(
placement=placement, unmanaged=unmanaged, preview_only=preview_only,
config=config, networks=networks, extra_container_args=extra_container_args,
extra_entrypoint_args=extra_entrypoint_args, custom_configs=custom_configs,
- ip_addrs=ip_addrs)
+ ip_addrs=ip_addrs, ssl=ssl, ssl_cert=ssl_cert, ssl_key=ssl_key, ssl_ca_cert=ssl_ca_cert,
+ certificate_source=certificate_source, custom_sans=custom_sans)
self.port = port
self.idmap_conf = idmap_conf
self.enable_nlm = enable_nlm
+ # TLS fields
+ self.tls_ciphers = tls_ciphers
+ self.tls_ktls = tls_ktls
+ self.tls_debug = tls_debug
+ self.tls_min_version = tls_min_version
+
def get_port_start(self) -> List[int]:
if self.port:
return [self.port]
raise SpecValidationError("Invalid NFS spec: Cannot set virtual_ip and "
f"{'ip_addrs' if self.ip_addrs else 'networks'} fields")
+ # TLS certificate validation
+ if self.ssl and not self.certificate_source:
+ raise SpecValidationError('If SSL is enabled, a certificate source must be provided.')
+ if self.certificate_source == CertificateSource.INLINE.value:
+ tls_field_names = [
+ 'ssl_cert',
+ 'ssl_key',
+ 'ssl_ca_cert',
+ ]
+ tls_fields = [getattr(self, tls_field) for tls_field in tls_field_names]
+ if any(tls_fields) and not all(tls_fields):
+ raise SpecValidationError(
+ f'Either none or all of {tls_field_names} attributes must be set'
+ )
+
yaml.add_representer(NFSServiceSpec, ServiceSpec.yaml_representer)