import logging
from typing import TYPE_CHECKING, Iterator, Optional, Dict, Any, List
-from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec, RGWSpec
+from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec, RGWSpec, CertificateSource
from cephadm.schedule import HostAssignment
from cephadm.utils import SpecialHostLabels
import rados
+from mgr_util import parse_combined_pem_file, get_cert_issuer_info
+from cephadm.tlsobject_types import CertKeyPair
from mgr_module import NFS_POOL_NAME
from orchestrator import OrchestratorError, DaemonDescription
if TYPE_CHECKING:
from .module import CephadmOrchestrator
-LAST_MIGRATION = 7
+LAST_MIGRATION = 8
logger = logging.getLogger(__name__)
r = mgr.get_store('rgw_migration_queue')
self.rgw_migration_queue = json.loads(r) if r else []
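+ # queue populated by queue_migrate_rgw_ssl_spec() and drained by migrate_7_8()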
+ r = mgr.get_store('rgw_ssl_migration_queue')
+ self.rgw_ssl_migration_queue = json.loads(r) if r else []
+
# for some migrations, we don't need to do anything except for
# incrementing migration_current.
# let's try to shortcut things here.
"cephadm migration still ongoing. Please wait, until the migration is complete.")
def migrate(self, startup: bool = False) -> None:
+
+ logger.info('running migrations')
+
if self.mgr.migration_current == 0:
if self.migrate_0_1():
self.set(1)
if self.migrate_6_7():
self.set(7)
+ if self.mgr.migration_current == 7:
+ if self.migrate_7_8():
+ self.set(8)
+
def migrate_0_1(self) -> bool:
"""
Migration 0 -> 1
Any extra arguments detected on rgw_frontend_type field will be parsed and passed in the
new spec field rgw_frontend_extra_args.
"""
- self.mgr.log.debug(f'Starting rgw migration (queue length is {len(self.rgw_migration_queue)})')
+ logger.info(f'Starting rgw migration (queue length is {len(self.rgw_migration_queue)})')
for s in self.rgw_migration_queue:
spec = s['spec']
if self.rgw_spec_needs_migration(spec):
self.mgr.spec_store.save(rgw_spec)
else:
logger.info(f"No Migration is needed for rgw spec: {spec}")
+
self.rgw_migration_queue = []
return True
logger.info(f'Migrating certs/keys for {spec.service_name()} spec to cert store')
self.mgr.spec_store._save_certs_and_keys(spec)
- # grafana certs are stored based on the host they are placed on
+ # Grafana certs are stored based on the host they are placed on
+ grafana_cephadm_signed_certs = True
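+ # Flipped to False as soon as one host carries a cert that was not issued by 'Ceph';
+ # in that case the grafana spec is updated below to use CertificateSource.REFERENCE.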
for grafana_daemon in self.mgr.cache.get_daemons_by_type('grafana'):
logger.info(f'Checking for cert/key for {grafana_daemon.name()}')
hostname = grafana_daemon.hostname
grafana_cert_path = f'{hostname}/grafana_crt'
grafana_key_path = f'{hostname}/grafana_key'
grafana_cert = self.mgr.get_store(grafana_cert_path)
- if grafana_cert:
- logger.info(f'Migrating {grafana_daemon.name()} cert to cert store')
- self.mgr.cert_mgr.save_cert('grafana_cert', grafana_cert, host=hostname)
grafana_key = self.mgr.get_store(grafana_key_path)
- if grafana_key:
- logger.info(f'Migrating {grafana_daemon.name()} key to cert store')
- self.mgr.cert_mgr.save_key('grafana_key', grafana_key, host=hostname)
+ if grafana_cert:
+ (org, cn) = get_cert_issuer_info(grafana_cert)
+ if org == 'Ceph':
+ logger.info(f'Migrating {grafana_daemon.name()}/{hostname} cert/key to cert store (as cephadm-signed certs)')
+ self.mgr.cert_mgr.register_self_signed_cert_key_pair('grafana')
+ self.mgr.cert_mgr.save_self_signed_cert_key_pair('grafana', CertKeyPair(grafana_cert, grafana_key), host=hostname)
+ else:
+ logger.info(f'Migrating {grafana_daemon.name()}/{hostname} cert/key to cert store (as custom certs)')
+ grafana_cephadm_signed_certs = False
+ self.mgr.cert_mgr.save_cert('grafana_ssl_cert', grafana_cert, host=hostname)
+ self.mgr.cert_mgr.save_key('grafana_ssl_key', grafana_key, host=hostname)
+
+ if not grafana_cephadm_signed_certs:
+ # Update the spec to specify the right certificate source
+ grafana_spec = self.mgr.spec_store['grafana'].spec
+ grafana_spec.certificate_source = CertificateSource.REFERENCE.value
+ self.mgr.spec_store.save(grafana_spec)
# NOTE: prometheus, alertmanager, and node-exporter certs were not stored
# and appeared to just be generated at daemon deploy time if secure_monitoring_stack
# was set to true. Therefore we have nothing to migrate for those daemons
return True
+ def migrate_7_8(self) -> bool:
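+ """
+ Migration 7 -> 8
+
+ RGW specs that embedded a combined certificate + private key PEM in
+ rgw_frontend_ssl_certificate are split into the new ssl_cert/ssl_key
+ fields, with certificate_source set to CertificateSource.INLINE.
+ """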
+ logger.info(f'Starting rgw SSL/TLS migration (queue length is {len(self.rgw_ssl_migration_queue)})')
+ for s in self.rgw_ssl_migration_queue:
+
+ svc_spec = s['spec']  # the queued RGWSpec, serialized to a JSON dict
+
+ if 'spec' not in svc_spec:
+ logger.info(f"No SSL/TLS fields migration is needed for rgw spec: {svc_spec}")
+ continue
+
+ cert_field = svc_spec['spec'].get('rgw_frontend_ssl_certificate')
+ if not cert_field:
+ logger.info(f"No SSL/TLS fields migration is needed for rgw spec: {svc_spec}")
+ continue
+
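+ # The legacy field may be stored either as a list of PEM lines or as a single
+ # string; normalize it before splitting the certificate from the private key.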
+ cert_str = '\n'.join(cert_field) if isinstance(cert_field, list) else cert_field
+ ssl_cert, ssl_key = parse_combined_pem_file(cert_str)
+ # Copy the nested 'spec' section as well so the queued entry is not mutated in place
+ new_spec = {**svc_spec, 'spec': dict(svc_spec['spec'])}
+ new_spec['spec'].update({
+ 'rgw_frontend_ssl_certificate': None,
+ 'certificate_source': CertificateSource.INLINE.value,
+ 'ssl_cert': ssl_cert,
+ 'ssl_key': ssl_key,
+ })
+
+ # Log only the service name: the spec dicts now carry the raw certificate and key
+ logger.info(f"Migrating rgw.{svc_spec.get('service_id')} spec to the new RGW SSL/TLS format")
+ self.mgr.spec_store.save(RGWSpec.from_json(new_spec))
+
+ self.rgw_ssl_migration_queue = []
+ return True
+
def queue_migrate_rgw_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
"""
ls = json.loads(queued)
ls.append(spec_dict)
mgr.set_store('rgw_migration_queue', json.dumps(ls))
- mgr.log.info(f'Queued rgw.{service_id} for migration')
+ logger.info(f'Queued rgw.{service_id} for migration')
+
+
+def queue_migrate_rgw_ssl_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
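+ """
+ Queue an RGW spec that still uses the legacy rgw_frontend_ssl_certificate
+ field so that migrate_7_8 can rewrite it into the new SSL/TLS fields.
+ """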
+ service_id = spec_dict['spec']['service_id']
+ queued = mgr.get_store('rgw_ssl_migration_queue') or '[]'
+ ls = json.loads(queued)
+ ls.append(spec_dict)
+ mgr.set_store('rgw_ssl_migration_queue', json.dumps(ls))
+ logger.info(f'Queued rgw.{service_id} for TLS migration')
def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
HostPlacementSpec,
RGWSpec,
IngressSpec,
- IscsiServiceSpec
+ IscsiServiceSpec,
+ GrafanaSpec
)
from ceph.utils import datetime_to_str, datetime_now
from cephadm import CephadmOrchestrator
from orchestrator import DaemonDescription
from tests import mock
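+# Throwaway self-signed certificate and matching private key in one PEM blob,
+# used only as a fixture to exercise the combined cert+key handling in the migrations.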
+COMBINED_CERT_KEY = """
+-----BEGIN CERTIFICATE-----
+MIIDZTCCAk2gAwIBAgIUcf+7lpo2INwTIulhXOb78i4PL7gwDQYJKoZIhvcNAQEL
+BQAwQjELMAkGA1UEBhMCWFgxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoGA1UE
+CgwTRGVmYXVsdCBDb21wYW55IEx0ZDAeFw0yNTAxMjgxNDE0MzlaFw0yNTA1MDgx
+NDE0MzlaMEIxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAa
+BgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQwggEiMA0GCSqGSIb3DQEBAQUAA4IB
+DwAwggEKAoIBAQC5xpfgFsX7I19HGW2YE6vz0TNni2dM1ItQoP0WaX55bNEwLsj9
+hHTZ7vgTH6ZkaNp0U73Mq+0tM8UPRrNFBKhy5cE/D+l7aV5KUr4mgPK6Tgrgk0iS
+83nymladgSKRjN75HH8SMg2lLVoivfrAAMh58JA2zFUFZaZQnD1eL/+waht9qpCd
+ilsY3MVKuElZ3ndxSaTuISLhPS8GO7jkCbCThfkrnk5IeCd5trN8ho55Ev5U5Axg
+bUgHlJxzUr9wLTzKW0x9D5qbLTvaC9VsUN+SdQW01pTs4MLPuKsnjLGaG91sEbZl
+n4Ub7bXvNey9z0heGE/NJX+Q5EkkhFV5TLvZAgMBAAGjUzBRMB0GA1UdDgQWBBSz
+OgD/EZsfAuDpt4wv1qVMcNlbajAfBgNVHSMEGDAWgBSzOgD/EZsfAuDpt4wv1qVM
+cNlbajAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBJ/PMFQFn2
+6PeHEneLnxQqggg2FulM6tYc+XHuRCUW9koNBHVn5/CTw6MZ6oxRwVtY4w9GHZSk
+TvL6xAwk5exIwYJFdLe5XMNXtIy6Hz9BVVLRkL9u/yDXh0Bsi5vVwF14rL7956K4
+XQQXdUCuT5GF3u+2g+nnbYz1N00XG8YMiT0a8ZKrVUFi3l12muULzrw5YsBWenGC
+DdVBRQEsl2ZJYN+/01TO9fScbv9ANQFUJpvtVCQjTWj4WOIhnhm8dHXD3ppMdccT
+y7jEpinQvVQxfGIshLMi4rtK5sMpS4Qx5gzyU4ccHSDgdSrIC7zjNY9YdS0X7+je
+QTkccglYXmZ6
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5xpfgFsX7I19H
+GW2YE6vz0TNni2dM1ItQoP0WaX55bNEwLsj9hHTZ7vgTH6ZkaNp0U73Mq+0tM8UP
+RrNFBKhy5cE/D+l7aV5KUr4mgPK6Tgrgk0iS83nymladgSKRjN75HH8SMg2lLVoi
+vfrAAMh58JA2zFUFZaZQnD1eL/+waht9qpCdilsY3MVKuElZ3ndxSaTuISLhPS8G
+O7jkCbCThfkrnk5IeCd5trN8ho55Ev5U5AxgbUgHlJxzUr9wLTzKW0x9D5qbLTva
+C9VsUN+SdQW01pTs4MLPuKsnjLGaG91sEbZln4Ub7bXvNey9z0heGE/NJX+Q5Ekk
+hFV5TLvZAgMBAAECggEACCGMWi871/X3YJn9mdiISSjsLcS7OEwTgOt/fyd7vhCD
+7IoY0j6lwqXazzN3ksgRONAzNOTPyyH5XZyD207DmT4XHVbFGFmQbILsmtDSTuTq
+IK1WLSBhjHJW4irHerKGcrNdmHC101MYH0lxHATRU8PW/Ay7c1cqVoCZRnHvFgLQ
+YZHxhskDnMTaXX0lw+CCq7ajUg2Su2u7tC7LiG/n4cjBNTblB7vmyAiFo1xoYqam
+GuwtkLGZW1RxvCi13HGIKAU9VnwKOyzhJp9ZBcx1Xshiaqazwhpf8PhP8mT2kLFg
+ti5NVxadbD78VGMC5bfH6lZdm4/MLlaqMejb6QXCRQKBgQDcd72c4FJpXpXWMR6g
+ROw60tn6qjSpH0YJ96bf19UGgNcYVUCiZrgG7ENx6SabjUJwqxi3qCxneD+J7caL
+Befd2Can4vf6U3o3DV/a86Dz6Qd4n7n6MU39aOg2jsCriknfOUkWfnGgvMaPzduU
+O1rFF0xpezIQkU3HjaN4aLGSswKBgQDXt3/EsRIk8xYQvcUTaWAQdaxtRewS9Tc2
+m6MdU6der8C6fTydggUBdkURawFehdpNmKiymBJJFniCs/EuGmKKHjupW04Kmwin
+isaA+tSwLQ01tL1G7xhydb85sbfBXzel4fztmk2OB+IpB4rvTFlP8t2z/bQQumjN
+WPLUwz7NQwKBgFZ4AD5PHQOGvW3Mxh5F6gEIQcY2i4Dpaybtot2YYUyzq6k3hqor
+b3IHqEw9DY9kz/IwqPkfVIsgdos6XuyX3GD+Lesa8feUVhLRhA70DuSbOPruapre
+S6BgTPNY+ehNzLtoVGomHZrVb2tnaf+xZ+B1Str0Hqaw1ri1rK/FICBRAoGBALbn
+T95mhQvvUPZA8ajT4DAUlm7QqqooYPhcXqGvHGqcer2lEpA6fiQPM+Dg6fhLZh4F
+IoTLjDWMaAHqsMR2erbBi7S9Rh6X9W6ZrFYQV+ZJTLoM1bAfaosia1Fv7m53Xae5
+Rcvw2XFkHc7MJnFgOxoewvyqUNMeO15h3QOpyMYhAoGABm6bQcIdmv3e+GVoraXA
+lsmM4/lRi/HmRHGtQ7kjKvT09YBQ3/qm04QwvwQtik7ws7t8VODQSgZC6re0TU7Y
+RPw+RGrt0nnmMUP2jJ6SKPCXmw55tW7FcvBJeAM4komEUoLrnKfwkaRy8SKSt8a0
+HlBxebJND7cfu20WpwErmhU=
+-----END PRIVATE KEY-----
+"""
+
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
assert 'rgw.foo' not in cephadm_module.spec_store.all_specs
+@mock.patch('cephadm.migrations.get_cert_issuer_info')
+def test_migrate_grafana_cephadm_signed(mock_get_cert_issuer_info, cephadm_module: CephadmOrchestrator):
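+ # An issuer org of 'Ceph' makes migrate_6_7 treat every grafana cert as cephadm-signed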
+ mock_get_cert_issuer_info.return_value = ('Ceph', 'MockCephCN')
+
+ cephadm_module.set_store('host1/grafana_crt', 'grafana_cert1')
+ cephadm_module.set_store('host1/grafana_key', 'grafana_key1')
+ cephadm_module.set_store('host2/grafana_crt', 'grafana_cert2')
+ cephadm_module.set_store('host2/grafana_key', 'grafana_key2')
+ cephadm_module.cache.daemons = {'host1': {'grafana.host1': DaemonDescription('grafana', 'host1', 'host1')},
+ 'host2': {'grafana.host2': DaemonDescription('grafana', 'host2', 'host2')}}
+
+ cephadm_module.migration.migrate_6_7()
+
+ assert cephadm_module.cert_mgr.get_cert('cephadm-signed_grafana_cert', host='host1')
+ assert cephadm_module.cert_mgr.get_cert('cephadm-signed_grafana_cert', host='host2')
+ assert cephadm_module.cert_mgr.get_key('cephadm-signed_grafana_key', host='host1')
+ assert cephadm_module.cert_mgr.get_key('cephadm-signed_grafana_key', host='host2')
+
+
+@mock.patch('cephadm.migrations.get_cert_issuer_info')
+def test_migrate_grafana_custom_certs(mock_get_cert_issuer_info, cephadm_module: CephadmOrchestrator):
+ from datetime import datetime, timezone
+
+ grafana_spec = GrafanaSpec(service_id='grafana', ssl=True)
+ cephadm_module.spec_store._specs = {
+ 'grafana': grafana_spec,
+ }
+ cephadm_module.spec_store.spec_created['grafana'] = datetime.now(timezone.utc)
+
+ cephadm_module.set_store('host1/grafana_crt', 'grafana_cert1')
+ cephadm_module.set_store('host1/grafana_key', 'grafana_key1')
+ cephadm_module.set_store('host2/grafana_crt', 'grafana_cert2')
+ cephadm_module.set_store('host2/grafana_key', 'grafana_key2')
+ cephadm_module.cache.daemons = {'host1': {'grafana.host1': DaemonDescription('grafana', 'host1', 'host1')},
+ 'host2': {'grafana.host2': DaemonDescription('grafana', 'host2', 'host2')}}
+
+ mock_get_cert_issuer_info.return_value = ('CustomOrg', 'MockCustomOrg') # Force grafana certs to be custom
+ cephadm_module.migration.migrate_6_7()
+
+ assert cephadm_module.cert_mgr.get_cert('grafana_ssl_cert', host='host1')
+ assert cephadm_module.cert_mgr.get_cert('grafana_ssl_cert', host='host2')
+ assert cephadm_module.cert_mgr.get_key('grafana_ssl_key', host='host1')
+ assert cephadm_module.cert_mgr.get_key('grafana_ssl_key', host='host2')
+
+
def test_migrate_cert_store(cephadm_module: CephadmOrchestrator):
- rgw_spec = RGWSpec(service_id='foo', rgw_frontend_ssl_certificate='rgw_cert', ssl=True)
+ rgw_spec = RGWSpec(service_id='foo', rgw_frontend_ssl_certificate=COMBINED_CERT_KEY, ssl=True)
iscsi_spec = IscsiServiceSpec(service_id='foo', pool='foo', ssl_cert='iscsi_cert', ssl_key='iscsi_key')
ingress_spec = IngressSpec(service_id='rgw.foo', ssl_cert='ingress_cert', ssl_key='ingress_key', ssl=True)
cephadm_module.spec_store._specs = {
cephadm_module.set_store('service_discovery/root/cert', 'service_discovery_cert')
cephadm_module.set_store('service_discovery/root/key', 'service_discovery_key')
- cephadm_module.set_store('host1/grafana_crt', 'grafana_cert1')
- cephadm_module.set_store('host1/grafana_key', 'grafana_key1')
- cephadm_module.set_store('host2/grafana_crt', 'grafana_cert2')
- cephadm_module.set_store('host2/grafana_key', 'grafana_key2')
- cephadm_module.cache.daemons = {'host1': {'grafana.host1': DaemonDescription('grafana', 'host1', 'host1')},
- 'host2': {'grafana.host2': DaemonDescription('grafana', 'host2', 'host2')}}
-
cephadm_module.migration.migrate_6_7()
- assert cephadm_module.cert_mgr.get_cert('rgw_frontend_ssl_cert', service_name='rgw.foo')
+ assert cephadm_module.cert_mgr.get_cert('rgw_ssl_cert', service_name='rgw.foo')
assert cephadm_module.cert_mgr.get_cert('iscsi_ssl_cert', service_name='iscsi.foo')
assert cephadm_module.cert_mgr.get_key('iscsi_ssl_key', service_name='iscsi.foo')
assert cephadm_module.cert_mgr.get_cert('ingress_ssl_cert', service_name='ingress.rgw.foo')
assert cephadm_module.cert_mgr.get_key('ingress_ssl_key', service_name='ingress.rgw.foo')
-
- assert cephadm_module.cert_mgr.get_cert('grafana_cert', host='host1')
- assert cephadm_module.cert_mgr.get_cert('grafana_cert', host='host2')
- assert cephadm_module.cert_mgr.get_key('grafana_key', host='host1')
- assert cephadm_module.cert_mgr.get_key('grafana_key', host='host2')