spec:
rgw_frontend_port: 8080
+Passing Frontend Extra Arguments
+--------------------------------
+
+The RGW service specification can be used to pass extra arguments to the RGW frontend
+by using the `rgw_frontend_extra_args` list.
+
+example spec file:
+
+.. code-block:: yaml
+
+ service_type: rgw
+ service_id: foo
+ placement:
+ label: rgw
+ count_per_host: 2
+ spec:
+ rgw_realm: myrealm
+ rgw_zone: myzone
+ rgw_frontend_type: "beast"
+ rgw_frontend_port: 5000
+ rgw_frontend_extra_args:
+ - "tcp_nodelay=1"
+ - "max_header_size=65536"
+
+.. note:: cephadm combines the arguments from the `spec` section and the ones from
+ the `rgw_frontend_extra_args` into a single space-separated arguments list
+ which is used to set the value of `rgw_frontends` configuration parameter.
Multisite zones
---------------
- ceph nfs cluster ls | grep foo
- ceph nfs export ls foo --detailed
- rados -p .nfs --all ls -
- - ceph config get mgr mgr/cephadm/migration_current | grep 5
+ - ceph config get mgr mgr/cephadm/migration_current | grep 6
from cephadm.services.cephadmservice import CephadmDaemonDeploySpec
from .utils import resolve_ip
-from .migrations import queue_migrate_nfs_spec
+from .migrations import queue_migrate_nfs_spec, queue_migrate_rgw_spec
if TYPE_CHECKING:
from .module import CephadmOrchestrator
):
self.mgr.log.debug(f'found legacy nfs spec {j}')
queue_migrate_nfs_spec(self.mgr, j)
+
+ if (
+ (self.mgr.migration_current or 0) < 6
+ and j['spec'].get('service_type') == 'rgw'
+ ):
+ queue_migrate_rgw_spec(self.mgr, j)
+
spec = ServiceSpec.from_json(j['spec'])
created = str_to_datetime(cast(str, j['created']))
self._specs[service_name] = spec
import json
+import re
import logging
-from typing import TYPE_CHECKING, Iterator, Optional, Dict, Any
+from typing import TYPE_CHECKING, Iterator, Optional, Dict, Any, List
-from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
+from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec, RGWSpec
from cephadm.schedule import HostAssignment
import rados
if TYPE_CHECKING:
from .module import CephadmOrchestrator
-LAST_MIGRATION = 5
+LAST_MIGRATION = 6
logger = logging.getLogger(__name__)
v = mgr.get_store('nfs_migration_queue')
self.nfs_migration_queue = json.loads(v) if v else []
+ r = mgr.get_store('rgw_migration_queue')
+ self.rgw_migration_queue = json.loads(r) if r else []
+
# for some migrations, we don't need to do anything except for
# incrementing migration_current.
# let's try to shortcut things here.
if self.migrate_4_5():
self.set(5)
+ if self.mgr.migration_current == 5:
+ if self.migrate_5_6():
+ self.set(6)
+
def migrate_0_1(self) -> bool:
"""
Migration 0 -> 1
self.mgr.log.info('Done migrating registry login info')
return True
def migrate_rgw_spec(self, spec: Dict[Any, Any]) -> Optional[RGWSpec]:
    """Migrate an old rgw spec to the new format.

    Pre-17.2.6 specs could carry extra frontend arguments inside the
    ``rgw_frontend_type`` field (e.g. ``"beast tcp_nodelay=1"``).  Split
    that field into a bare frontend type ('beast'/'civetweb') plus a
    ``rgw_frontend_extra_args`` list.

    :param spec: stored service-spec dict (``spec['spec']`` holds the RGW fields).
    :return: the migrated spec parsed through ``RGWSpec.from_json``.
    """
    # Copy the nested 'spec' dict too: a plain .copy() is shallow and
    # would mutate the caller's dict through the shared inner mapping.
    new_spec = spec.copy()
    new_spec['spec'] = spec['spec'].copy()
    # The legacy field holds the frontend type and optional extra args,
    # separated by one or more spaces.
    field_content: List[str] = re.split(' +', new_spec['spec']['rgw_frontend_type'])
    valid_spec = False
    if 'beast' in field_content:
        new_spec['spec']['rgw_frontend_type'] = 'beast'
        field_content.remove('beast')
        valid_spec = True
    elif 'civetweb' in field_content:
        new_spec['spec']['rgw_frontend_type'] = 'civetweb'
        field_content.remove('civetweb')
        valid_spec = True
    else:
        # Error: Should not happen as that would be an invalid RGW spec. In that case
        # we keep the spec as it is, mark it as unmanaged to avoid the daemons being
        # deleted and raise a health warning so the user can fix the issue manually later.
        # NOTE(review): the unmanaged/health-warning handling described above is not
        # visible here -- confirm it happens in the caller.
        # Bug fix: the message was missing the f-prefix, so the placeholder text was
        # logged literally instead of the offending value.
        self.mgr.log.error(
            f"Cannot migrate RGW spec, bad rgw_frontend_type value: {spec['spec']['rgw_frontend_type']}.")

    if valid_spec:
        # Everything left over after removing the frontend type becomes
        # the new extra-args list (may be empty).
        new_spec['spec']['rgw_frontend_extra_args'] = []
        new_spec['spec']['rgw_frontend_extra_args'].extend(field_content)

    return RGWSpec.from_json(new_spec)
+
def rgw_spec_needs_migration(self, spec: Dict[Any, Any]) -> bool:
    """Return True when the stored RGW spec uses the legacy combined
    rgw_frontend_type field, i.e. anything other than a bare
    'beast' or 'civetweb' (ignoring surrounding whitespace)."""
    frontend = spec['spec'].get('rgw_frontend_type')
    if frontend is None:
        # Field absent or explicitly null: nothing to migrate.
        return False
    return frontend.strip() not in ('beast', 'civetweb')
+
def migrate_5_6(self) -> bool:
    """
    Migration 5 -> 6

    Old RGW specs allowed 'bad' values in the rgw_frontend_type field, e.g.:

        rgw_frontend_type: "beast endpoint=10.16.96.54:8043 tcp_nodelay=1"

    As of the 17.2.6 release a stricter check only accepts 'beast' or
    'civetweb' there.  This migration detects such legacy values and rewrites
    them: the frontend type is reduced to its bare name and any extra tokens
    are moved into the new rgw_frontend_extra_args spec field.
    """
    self.mgr.log.debug(f'Starting rgw migration (queue length is {len(self.rgw_migration_queue)})')
    for entry in self.rgw_migration_queue:
        spec = entry['spec']
        if not self.rgw_spec_needs_migration(spec):
            logger.info(f"No Migration is needed for rgw spec: {spec}")
            continue
        rgw_spec = self.migrate_rgw_spec(spec)
        if rgw_spec is not None:
            logger.info(f"Migrating {spec} to new RGW with extra args format {rgw_spec}")
            self.mgr.spec_store.save(rgw_spec)
    # Every queued entry has been handled (or needed no change): drop the queue.
    self.rgw_migration_queue = []
    return True
+
+
def queue_migrate_rgw_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
    """
    As part of 17.2.6 a stricter RGW spec validation has been added so the field
    rgw_frontend_type cannot be used to pass rgw-frontends parameters.

    Append the legacy spec to the persisted 'rgw_migration_queue' store so the
    5 -> 6 migration can rewrite it later.

    :param mgr: orchestrator module, used for persistent store access and logging.
    :param spec_dict: raw stored spec dict; must contain spec_dict['spec']['service_id'].
    """
    service_id = spec_dict['spec']['service_id']
    # The queue is stored as a JSON list; an absent key means an empty queue.
    queued = mgr.get_store('rgw_migration_queue') or '[]'
    ls = json.loads(queued)
    ls.append(spec_dict)
    mgr.set_store('rgw_migration_queue', json.dumps(ls))
    mgr.log.info(f'Queued rgw.{service_id} for migration')
+
def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
"""
args.append(f"port={build_url(host=daemon_spec.ip, port=port).lstrip('/')}")
else:
args.append(f"port={port}")
+ else:
+ raise OrchestratorError(f'Invalid rgw_frontend_type parameter: {ftype}. Valid values are: beast, civetweb.')
+
+ if spec.rgw_frontend_extra_args is not None:
+ args.extend(spec.rgw_frontend_extra_args)
+
frontend = f'{ftype} {" ".join(args)}'
ret, out, err = self.mgr.check_mon_command({
ongoing = cephadm_module.migration.is_migration_ongoing()
assert ongoing
assert cephadm_module.migration_current == 0
+
+
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
def test_migrate_rgw_spec(cephadm_module: CephadmOrchestrator):
    # End-to-end check of the 5 -> 6 RGW spec migration: a legacy spec whose
    # rgw_frontend_type carries extra space-separated arguments is queued on
    # spec-store load, migrated, and re-saved with those arguments moved into
    # the new rgw_frontend_extra_args list.
    with with_host(cephadm_module, 'host1'):
        # Store a legacy-format spec (invalid as of 17.2.6).
        cephadm_module.set_store(
            SPEC_STORE_PREFIX + 'rgw',
            json.dumps({
                'spec': {
                    'service_type': 'rgw',
                    'service_name': 'rgw.foo',
                    'service_id': 'foo',
                    'placement': {
                        'hosts': ['host1']
                    },
                    'spec': {
                        'rgw_frontend_type': 'beast tcp_nodelay=1 request_timeout_ms=65000 rgw_thread_pool_size=512',
                        'rgw_frontend_port': '5000',
                    },
                },
                'created': datetime_to_str(datetime_now()),
            }, sort_keys=True),
        )

        # make sure rgw_migration_queue is populated accordingly
        cephadm_module.migration_current = 1
        cephadm_module.spec_store.load()
        ls = json.loads(cephadm_module.get_store('rgw_migration_queue'))
        assert 'rgw' == ls[0]['spec']['service_type']

        # shortcut rgw_migration_queue loading by directly assigning
        # ls output to rgw_migration_queue list
        cephadm_module.migration.rgw_migration_queue = ls

        # skip other migrations and go directly to 5_6 migration (RGW spec)
        cephadm_module.migration_current = 5
        cephadm_module.migration.migrate()
        assert cephadm_module.migration_current == LAST_MIGRATION

        # make sure the spec has been migrated and the param=value entries
        # that were part of the rgw_frontend_type are now in the new
        # 'rgw_frontend_extra_args' list
        assert 'rgw.foo' in cephadm_module.spec_store.all_specs
        rgw_spec = cephadm_module.spec_store.all_specs['rgw.foo']
        assert dict(rgw_spec.to_json()) == {'service_type': 'rgw',
                                            'service_id': 'foo',
                                            'service_name': 'rgw.foo',
                                            'placement': {'hosts': ['host1']},
                                            'spec': {
                                                'rgw_frontend_extra_args': ['tcp_nodelay=1',
                                                                            'request_timeout_ms=65000',
                                                                            'rgw_thread_pool_size=512'],
                                                'rgw_frontend_port': '5000',
                                                'rgw_frontend_type': 'beast',
                                            }}
class TestRGWService:
@pytest.mark.parametrize(
- "frontend, ssl, expected",
+ "frontend, ssl, extra_args, expected",
[
- ('beast', False, 'beast endpoint=[fd00:fd00:fd00:3000::1]:80'),
- ('beast', True,
- 'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo'),
- ('civetweb', False, 'civetweb port=[fd00:fd00:fd00:3000::1]:80'),
- ('civetweb', True,
+ ('beast', False, ['tcp_nodelay=1'],
+ 'beast endpoint=[fd00:fd00:fd00:3000::1]:80 tcp_nodelay=1'),
+ ('beast', True, ['tcp_nodelay=0', 'max_header_size=65536'],
+ 'beast ssl_endpoint=[fd00:fd00:fd00:3000::1]:443 ssl_certificate=config://rgw/cert/rgw.foo tcp_nodelay=0 max_header_size=65536'),
+ ('civetweb', False, [], 'civetweb port=[fd00:fd00:fd00:3000::1]:80'),
+ ('civetweb', True, None,
'civetweb port=[fd00:fd00:fd00:3000::1]:443s ssl_certificate=config://rgw/cert/rgw.foo'),
]
)
@patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
- def test_rgw_update(self, frontend, ssl, expected, cephadm_module: CephadmOrchestrator):
+ def test_rgw_update(self, frontend, ssl, extra_args, expected, cephadm_module: CephadmOrchestrator):
with with_host(cephadm_module, 'host1'):
cephadm_module.cache.update_host_networks('host1', {
'fd00:fd00:fd00:3000::/64': {
s = RGWSpec(service_id="foo",
networks=['fd00:fd00:fd00:3000::/64'],
ssl=ssl,
- rgw_frontend_type=frontend)
+ rgw_frontend_type=frontend,
+ rgw_frontend_extra_args=extra_args)
with with_service(cephadm_module, s) as dds:
_, f, _ = cephadm_module.check_mon_command({
'prefix': 'config get',
rgw_frontend_port: Optional[int] = None,
rgw_frontend_ssl_certificate: Optional[List[str]] = None,
rgw_frontend_type: Optional[str] = None,
+ rgw_frontend_extra_args: Optional[List[str]] = None,
unmanaged: bool = False,
ssl: bool = False,
preview_only: bool = False,
self.rgw_frontend_ssl_certificate: Optional[List[str]] = rgw_frontend_ssl_certificate
#: civetweb or beast (default: beast). See :ref:`rgw_frontends`
self.rgw_frontend_type: Optional[str] = rgw_frontend_type
+ #: List of extra arguments for rgw_frontend in the form opt=value. See :ref:`rgw_frontends`
+ self.rgw_frontend_extra_args: Optional[List[str]] = rgw_frontend_extra_args
#: enable SSL
self.ssl = ssl
self.rgw_realm_token = rgw_realm_token
if self.rgw_zone and not self.rgw_realm:
raise SpecValidationError('Cannot add RGW: Zone specified but no realm specified')
+ if self.rgw_frontend_type is not None:
+ if self.rgw_frontend_type not in ['beast', 'civetweb']:
+ raise SpecValidationError(
+ 'Invalid rgw_frontend_type value. Valid values are: beast, civetweb.\n'
+ 'Additional rgw type parameters can be passed using rgw_frontend_extra_args.'
+ )
+
yaml.add_representer(RGWSpec, ServiceSpec.yaml_representer)
assert spec.validate() is None
ServiceSpec.from_json(spec.to_json())
+
@pytest.mark.parametrize(
    "realm, zone, frontend_type, raise_exception, msg",
    [
        ('realm', 'zone1', 'beast', False, ''),
        ('realm', 'zone2', 'civetweb', False, ''),
        ('realm', None, 'beast', True, 'Cannot add RGW: Realm specified but no zone specified'),
        (None, 'zone1', 'beast', True, 'Cannot add RGW: Zone specified but no realm specified'),
        ('realm', 'zone', 'invalid-beast', True, '^Invalid rgw_frontend_type value'),
        ('realm', 'zone', 'invalid-civetweb', True, '^Invalid rgw_frontend_type value'),
    ])
def test_rgw_servicespec_parse(realm, zone, frontend_type, raise_exception, msg):
    # RGWSpec.validate() must reject mismatched realm/zone combinations and
    # any rgw_frontend_type other than the bare 'beast'/'civetweb' values.
    # `msg` is a regex matched against the error text via pytest.raises(match=...).
    spec = RGWSpec(service_id='foo',
                   rgw_realm=realm,
                   rgw_zone=zone,
                   rgw_frontend_type=frontend_type)
    if raise_exception:
        with pytest.raises(SpecValidationError, match=msg):
            spec.validate()
    else:
        spec.validate()
+
def test_osd_unmanaged():
osd_spec = {"placement": {"host_pattern": "*"},
"service_id": "all-available-devices",