while ! bin/ceph dashboard get-grafana-api-url | grep $host ; do sleep 1 ; done
-bin/ceph orch apply rgw myrealm myzone 1
+bin/ceph orch apply rgw foo --placement=1
bin/ceph orch ps
bin/ceph orch ls
def config(self, spec: RGWSpec, rgw_id: str) -> None: # type: ignore
assert self.TYPE == spec.service_type
- # ensure rgw_realm and rgw_zone is set for these daemons
- ret, out, err = self.mgr.check_mon_command({
- 'prefix': 'config set',
- 'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
- 'name': 'rgw_zone',
- 'value': spec.rgw_zone,
- })
- ret, out, err = self.mgr.check_mon_command({
- 'prefix': 'config set',
- 'who': f"{utils.name_to_config_section('rgw')}.{spec.rgw_realm}",
- 'name': 'rgw_realm',
- 'value': spec.rgw_realm,
- })
+ # set rgw_realm and rgw_zone, if present
+ if spec.rgw_realm:
+ ret, out, err = self.mgr.check_mon_command({
+ 'prefix': 'config set',
+ 'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
+ 'name': 'rgw_realm',
+ 'value': spec.rgw_realm,
+ })
+ if spec.rgw_zone:
+ ret, out, err = self.mgr.check_mon_command({
+ 'prefix': 'config set',
+ 'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
+ 'name': 'rgw_zone',
+ 'value': spec.rgw_zone,
+ })
+
ret, out, err = self.mgr.check_mon_command({
'prefix': 'config set',
'who': f"{utils.name_to_config_section('rgw')}.{spec.service_id}",
'count': 1,
'hosts': ["test"]
},
- 'spec': {
- 'rgw_realm': 'r',
- 'rgw_zone': 'z',
- },
'service_id': 'r.z',
'service_name': 'rgw.r.z',
'service_type': 'rgw',
def test_rgw_update(self, cephadm_module):
with with_host(cephadm_module, 'host1'):
with with_host(cephadm_module, 'host2'):
- with with_service(cephadm_module, RGWSpec(rgw_realm='realm', rgw_zone='zone1', unmanaged=True)):
+ with with_service(cephadm_module, RGWSpec(service_id="foo", unmanaged=True)):
ps = PlacementSpec(hosts=['host1'], count=1)
c = cephadm_module.add_rgw(
- RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
+ RGWSpec(service_id="foo", placement=ps))
[out] = wait(cephadm_module, c)
- match_glob(out, "Deployed rgw.realm.zone1.host1.* on host 'host1'")
+ match_glob(out, "Deployed rgw.foo.* on host 'host1'")
ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
r = CephadmServe(cephadm_module)._apply_service(
- RGWSpec(rgw_realm='realm', rgw_zone='zone1', placement=ps))
+ RGWSpec(service_id="foo", placement=ps))
assert r
- assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host1')
- assert_rm_daemon(cephadm_module, 'rgw.realm.zone1', 'host2')
+ assert_rm_daemon(cephadm_module, 'rgw.foo', 'host1')
+ assert_rm_daemon(cephadm_module, 'rgw.foo', 'host2')
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm(
json.dumps([
(ServiceSpec('alertmanager'), CephadmOrchestrator.add_alertmanager),
(ServiceSpec('rbd-mirror'), CephadmOrchestrator.add_rbd_mirror),
(ServiceSpec('mds', service_id='fsname'), CephadmOrchestrator.add_mds),
- (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.add_rgw),
+ (RGWSpec(service_id="foo"), CephadmOrchestrator.add_rgw),
(ServiceSpec('cephadm-exporter'), CephadmOrchestrator.add_cephadm_exporter),
]
)
)]
)
), CephadmOrchestrator.apply_mds),
- (RGWSpec(rgw_realm='realm', rgw_zone='zone'), CephadmOrchestrator.apply_rgw),
+ (RGWSpec(service_id='foo'), CephadmOrchestrator.apply_rgw),
(RGWSpec(
+ service_id='bar',
rgw_realm='realm', rgw_zone='zone',
placement=PlacementSpec(
hosts=[HostPlacementSpec(
hostname='test',
- name='realm.zone.a',
+ name='bar',
network=''
)]
)
"service_type": "rgw",
"service_id": "default-rgw-realm.eu-central-1.1",
"rgw_realm": "default-rgw-realm",
- "rgw_zone": "eu-central-1",
- "subcluster": "1"
+ "rgw_zone": "eu-central-1"
},
{
"service_type": "osd",
# https://tracker.ceph.com/issues/44934
(
RGWSpec(
+ service_id="foo",
rgw_realm="default-rgw-realm",
rgw_zone="eu-central-1",
- subcluster='1',
),
DaemonDescription(
daemon_type='rgw',
- daemon_id="default-rgw-realm.eu-central-1.1.ceph-001.ytywjo",
+ daemon_id="foo.ceph-001.ytywjo",
hostname="ceph-001",
),
True
),
(
- # no subcluster
+ # no realm
RGWSpec(
- rgw_realm="default-rgw-realm",
+ service_id="foo.bar",
rgw_zone="eu-central-1",
),
DaemonDescription(
daemon_type='rgw',
- daemon_id="default-rgw-realm.eu-central-1.ceph-001.ytywjo",
+ daemon_id="foo.bar.ceph-001.ytywjo",
hostname="ceph-001",
),
True
),
(
- # with tld
+ # no realm or zone
RGWSpec(
- rgw_realm="default-rgw-realm",
- rgw_zone="eu-central-1",
- subcluster='1',
+ service_id="bar",
),
DaemonDescription(
daemon_type='rgw',
- daemon_id="default-rgw-realm.eu-central-1.1.host.domain.tld.ytywjo",
+ daemon_id="bar.host.domain.tld.ytywjo",
hostname="host.domain.tld",
),
True
(
# explicit naming
RGWSpec(
- rgw_realm="realm",
- rgw_zone="zone",
+ service_id="realm.zone",
),
DaemonDescription(
daemon_type='rgw',
# without host
RGWSpec(
service_type='rgw',
- rgw_realm="default-rgw-realm",
- rgw_zone="eu-central-1",
- subcluster='1',
+ service_id="foo",
+ ),
+ DaemonDescription(
+ daemon_type='rgw',
+ daemon_id="foo.hostname.ytywjo",
+ hostname=None,
+ ),
+ False
+ ),
+ (
+ # without host (2)
+ RGWSpec(
+ service_type='rgw',
+ service_id="default-rgw-realm.eu-central-1.1",
),
DaemonDescription(
daemon_type='rgw',
False
),
(
- # zone contains hostname
- # https://tracker.ceph.com/issues/45294
+ # service_id contains hostname
+ # (sort of) https://tracker.ceph.com/issues/45294
RGWSpec(
- rgw_realm="default.rgw.realm",
- rgw_zone="ceph.001",
- subcluster='1',
+ service_id="default.rgw.realm.ceph.001",
),
DaemonDescription(
daemon_type='rgw',
- daemon_id="default.rgw.realm.ceph.001.1.ceph.001.ytywjo",
+ daemon_id="default.rgw.realm.ceph.001.ceph.001.ytywjo",
hostname="ceph.001",
),
True
@_cli_write_command('orch daemon add rgw')
def _rgw_add(self,
- realm_name: str,
- zone_name: str,
- subcluster: Optional[str] = None,
+ svc_id: str,
port: Optional[int] = None,
ssl: bool = False,
placement: Optional[str] = None,
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = RGWSpec(
- rgw_realm=realm_name,
- rgw_zone=zone_name,
- subcluster=subcluster,
+ service_id=svc_id,
rgw_frontend_port=port,
ssl=ssl,
placement=PlacementSpec.from_string(placement),
@_cli_write_command('orch apply rgw')
def _apply_rgw(self,
- realm_name: str,
- zone_name: str,
- subcluster: Optional[str] = None,
+ svc_id: str,
+ realm_name: Optional[str] = None,
+ zone_name: Optional[str] = None,
port: Optional[int] = None,
ssl: bool = False,
placement: Optional[str] = None,
raise OrchestratorValidationError('unrecognized command -i; -h or --help for usage')
spec = RGWSpec(
+ service_id=svc_id,
rgw_realm=realm_name,
rgw_zone=zone_name,
- subcluster=subcluster,
rgw_frontend_port=port,
ssl=ssl,
placement=PlacementSpec.from_string(placement),
_update_fs, _create_fs)
def apply_objectstore(self, spec: RGWSpec) -> str:
-
- # FIXME: service_id is $realm.$zone, but rook uses realm
- # $crname and zone $crname. The '.' will confuse kubernetes.
- # For now, assert that realm==zone.
assert spec.service_id is not None
- (realm, zone) = spec.service_id.split('.', 1)
- assert realm == zone
- assert spec.subcluster is None
- name = realm
+
+ name = spec.service_id
+
+ if '.' in spec.service_id:
+        # rook does not like . in the name. It could be
+        # there because it is a legacy rgw spec that was named
+        # like $realm.$zone, except that I doubt there were any
+        # users of this code. Instead, focus on future users and
+        # translate . to - (fingers crossed!) instead.
+ name = spec.service_id.replace('.', '-')
+
+ # FIXME: pass realm and/or zone through to the CR
def _create_zone() -> cos.CephObjectStore:
port = None
placement: Optional[PlacementSpec] = None,
rgw_realm: Optional[str] = None,
rgw_zone: Optional[str] = None,
- subcluster: Optional[str] = None,
rgw_frontend_port: Optional[int] = None,
rgw_frontend_ssl_certificate: Optional[List[str]] = None,
rgw_frontend_ssl_key: Optional[List[str]] = None,
config: Optional[Dict[str, str]] = None,
):
assert service_type == 'rgw', service_type
- if service_id:
- a = service_id.split('.', 2)
- rgw_realm = a[0]
- if len(a) > 1:
- rgw_zone = a[1]
- if len(a) > 2:
- subcluster = a[2]
- else:
- if subcluster:
- service_id = '%s.%s.%s' % (rgw_realm, rgw_zone, subcluster)
- else:
- service_id = '%s.%s' % (rgw_realm, rgw_zone)
super(RGWSpec, self).__init__(
'rgw', service_id=service_id,
placement=placement, unmanaged=unmanaged,
self.rgw_realm = rgw_realm
self.rgw_zone = rgw_zone
- self.subcluster = subcluster
self.rgw_frontend_port = rgw_frontend_port
self.rgw_frontend_ssl_certificate = rgw_frontend_ssl_certificate
self.rgw_frontend_ssl_key = rgw_frontend_ssl_key
ports.append(f"port={self.get_port()}")
return f'beast {" ".join(ports)}'
- def validate(self) -> None:
- super(RGWSpec, self).validate()
-
- if not self.rgw_realm:
- raise ServiceSpecValidationError(
- 'Cannot add RGW: No realm specified')
- if not self.rgw_zone:
- raise ServiceSpecValidationError(
- 'Cannot add RGW: No zone specified')
-
yaml.add_representer(RGWSpec, ServiceSpec.yaml_representer)
spec:
rgw_realm: default-rgw-realm
rgw_zone: eu-central-1
- subcluster: '1'
---
service_type: osd
service_id: osd_spec_default
),
(
ServiceSpec(
- service_type='rgw'
+ service_type='rgw',
+ service_id='foo',
),
- RGWSpec(),
+ RGWSpec(service_id='foo'),
True
),
])