mgr/cephadm: streamline rgw deployment
author     Daniel-Pivonka <dpivonka@redhat.com>
           Thu, 16 Jul 2020 12:24:47 +0000 (08:24 -0400)
committer  Daniel-Pivonka <dpivonka@redhat.com>
           Wed, 29 Jul 2020 18:23:07 +0000 (14:23 -0400)
cephadm will create the realm, zonegroup, and zone if needed before creating the rgw service

fixes: https://tracker.ceph.com/issues/43681
Signed-off-by: Daniel-Pivonka <dpivonka@redhat.com>
doc/cephadm/install.rst
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/services/cephadmservice.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/pybind/mgr/cephadm/tests/test_migration.py
src/pybind/mgr/cephadm/utils.py

index fe60972923bc351ea4959aaa45d25d31e3a5e56f..4156dc6fa4c70ab6643eb50f0522a91105dd2f24 100644 (file)
@@ -382,18 +382,6 @@ that configuration isn't already in place (usually in the
 daemons will start up with default settings (e.g., binding to port
 80).
 
-If a realm has not been created yet, first create a realm::
-
-  # radosgw-admin realm create --rgw-realm=<realm-name> --default
-
-Next create a new zonegroup::
-
-  # radosgw-admin zonegroup create --rgw-zonegroup=<zonegroup-name>  --master --default
-
-Next create a zone::
-
-  # radosgw-admin zone create --rgw-zonegroup=<zonegroup-name> --rgw-zone=<zone-name> --master --default
-
 To deploy a set of radosgw daemons for a particular realm and zone::
 
   # ceph orch apply rgw *<realm-name>* *<zone-name>* --placement="*<num-daemons>* [*<host1>* ...]"
@@ -401,11 +389,18 @@ To deploy a set of radosgw daemons for a particular realm and zone::
 For example, to deploy 2 rgw daemons serving the *myorg* realm and the *us-east-1*
 zone on *myhost1* and *myhost2*::
 
-  # radosgw-admin realm create --rgw-realm=myorg --default
-  # radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
-  # radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=us-east-1 --master --default
   # ceph orch apply rgw myorg us-east-1 --placement="2 myhost1 myhost2"
 
+Before deploying the rgw daemon(s), cephadm will wait for a healthy cluster and automatically create the supplied realm and zone (along with a default zonegroup) if they do not exist.
+
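+Note that cephadm will only create the realm and zone once the cluster reports
+``HEALTH_OK``; the current status can be checked with::
+
+  # ceph health
+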
+Alternatively, the realm, zonegroup, and zone can be manually created using ``radosgw-admin`` commands::
+
+  # radosgw-admin realm create --rgw-realm=<realm-name> --default
+
+  # radosgw-admin zonegroup create --rgw-zonegroup=<zonegroup-name> --master --default
+
+  # radosgw-admin zone create --rgw-zonegroup=<zonegroup-name> --rgw-zone=<zone-name> --master --default
+
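+To verify that the realm, zonegroup, and zone exist, list them (cephadm runs the
+same ``list`` subcommands internally to decide whether creation is needed)::
+
+  # radosgw-admin realm list
+  # radosgw-admin zonegroup list
+  # radosgw-admin zone list
+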
 See :ref:`orchestrator-cli-placement-spec` for details of the placement specification.
 
 Deploying NFS ganesha
index d88966c79268f68d7946771287dffc99edb96b5f..0679f4f41f6170222f98c5a1f06cd7ca4968df5e 100644 (file)
@@ -22,7 +22,7 @@ import subprocess
 from ceph.deployment import inventory
 from ceph.deployment.drive_group import DriveGroupSpec
 from ceph.deployment.service_spec import \
-    NFSServiceSpec, ServiceSpec, PlacementSpec, assert_valid_host
+    NFSServiceSpec, RGWSpec, ServiceSpec, PlacementSpec, assert_valid_host
 from cephadm.services.cephadmservice import CephadmDaemonSpec
 
 from mgr_module import MgrModule, HandleCommandResult
@@ -1979,12 +1979,18 @@ you may want to run:
         self.log.debug('Hosts that will loose daemons: %s' % remove_daemon_hosts)
 
         for host, network, name in add_daemon_hosts:
-            if not did_config and config_func:
-                config_func(spec)
-                did_config = True
             daemon_id = self.get_unique_name(daemon_type, host, daemons,
                                              prefix=spec.service_id,
                                              forcename=name)
+
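+            # run the per-service config hook once; the rgw hook also needs the
+            # daemon id so it can bootstrap the realm/zonegroup/zone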
+            if not did_config and config_func:
+                if daemon_type == 'rgw':
+                    rgw_config_func = cast(Callable[[RGWSpec, str], None], config_func)
+                    rgw_config_func(cast(RGWSpec, spec), daemon_id)
+                else:
+                    config_func(spec)
+                did_config = True
+
             daemon_spec = self.cephadm_services[daemon_type].make_daemon_spec(host, daemon_id, network, spec)
             self.log.debug('Placing %s.%s on host %s' % (
                 daemon_type, daemon_id, host))
@@ -2119,14 +2125,21 @@ you may want to run:
             raise OrchestratorError('too few hosts: want %d, have %s' % (
                 count, hosts))
 
-        if config_func:
-            config_func(spec)
+        did_config = False
 
         args = []  # type: List[CephadmDaemonSpec]
         for host, network, name in hosts:
             daemon_id = self.get_unique_name(daemon_type, host, daemons,
                                              prefix=spec.service_id,
                                              forcename=name)
+
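+            # as above: run the config hook once, passing the daemon id for rgw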
+            if not did_config and config_func:
+                if daemon_type == 'rgw':
+                    rgw_config_func = cast(Callable[[RGWSpec, str], None], config_func)
+                    rgw_config_func(cast(RGWSpec, spec), daemon_id)
+                else:
+                    config_func(spec)
+                did_config = True
+
             daemon_spec = self.cephadm_services[daemon_type].make_daemon_spec(host, daemon_id, network, spec)
             self.log.debug('Placing %s.%s on host %s' % (
                 daemon_type, daemon_id, host))
index 6a1bda7c49ff8003e7adaeb08876a46db62c5ef0..11f01d5aa1a0a17ddea2f23939811f474057f8e6 100644 (file)
@@ -1,5 +1,6 @@
 import json
 import logging
+import subprocess
 from abc import ABCMeta, abstractmethod
 from typing import TYPE_CHECKING, List, Callable, Any, TypeVar, Generic,  Optional, Dict, Any, Tuple
 
@@ -353,9 +354,13 @@ class MdsService(CephadmService):
 class RgwService(CephadmService):
     TYPE = 'rgw'
 
-    def config(self, spec: RGWSpec) -> None:
+    def config(self, spec: RGWSpec, rgw_id: str) -> None:
         assert self.TYPE == spec.service_type
 
+        # create realm, zonegroup, and zone if needed
+        self.create_realm_zonegroup_zone(spec, rgw_id)
+
         # ensure rgw_realm and rgw_zone is set for these daemons
         ret, out, err = self.mgr.check_mon_command({
             'prefix': 'config set',
@@ -414,6 +419,13 @@ class RgwService(CephadmService):
         assert self.TYPE == daemon_spec.daemon_type
         rgw_id, host = daemon_spec.daemon_id, daemon_spec.host
 
+        keyring = self.get_keyring(rgw_id)
+
+        daemon_spec.keyring = keyring
+
+        return self.mgr._create_daemon(daemon_spec)
+
+    def get_keyring(self, rgw_id: str) -> str:
         ret, keyring, err = self.mgr.check_mon_command({
             'prefix': 'auth get-or-create',
             'entity': f"{utils.name_to_config_section('rgw')}.{rgw_id}",
@@ -421,10 +433,94 @@ class RgwService(CephadmService):
                      'mgr', 'allow rw',
                      'osd', 'allow rwx'],
         })
-
-        daemon_spec.keyring = keyring
-
-        return self.mgr._create_daemon(daemon_spec)
+        return keyring
+
+    def create_realm_zonegroup_zone(self, spec: RGWSpec, rgw_id: str) -> None:
+        if utils.get_cluster_health(self.mgr) != 'HEALTH_OK':
+            raise OrchestratorError('Health not ok, will try again when health ok')
+
+        # get the keyring needed to run radosgw-admin commands and strip it down to just the key
+        keyring = self.get_keyring(rgw_id).split('key = ', 1)[1].rstrip()
+
+        # We can call radosgw-admin within the container because cephadm gives the MGR the required keyring permissions
+        # get realms
+        cmd = ['radosgw-admin',
+               '--key=%s'%keyring,
+               '--user', 'rgw.%s'%rgw_id,
+               'realm', 'list',
+               '--format=json']
+        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # create realm if needed
+        cmd = ['radosgw-admin',
+               '--key=%s'%keyring,
+               '--user', 'rgw.%s'%rgw_id,
+               'realm', 'create',
+               '--rgw-realm=%s'%spec.rgw_realm,
+               '--default']
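+        # 'realm list' produced no output (e.g. a fresh cluster): create the realm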
+        if not result.stdout:
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            self.mgr.log.info('created realm: %s'%spec.rgw_realm)
+        else:
+            try:
+                j = json.loads(result.stdout)
+                if 'realms' not in j or spec.rgw_realm not in j['realms']:
+                    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                    self.mgr.log.info('created realm: %s'%spec.rgw_realm)
+            except Exception:
+                raise OrchestratorError('failed to parse realm info')
+
+        # get zonegroup
+        cmd = ['radosgw-admin',
+               '--key=%s'%keyring,
+               '--user', 'rgw.%s'%rgw_id,
+               'zonegroup', 'list',
+               '--format=json']
+        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # create zonegroup if needed
+        cmd = ['radosgw-admin',
+               '--key=%s'%keyring,
+               '--user', 'rgw.%s'%rgw_id,
+               'zonegroup', 'create',
+               '--rgw-zonegroup=default',
+               '--master', '--default']
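+        # 'zonegroup list' produced no output (e.g. a fresh cluster): create the default zonegroup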
+        if not result.stdout:
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            self.mgr.log.info('created zonegroup: default')
+        else:
+            try:
+                j = json.loads(result.stdout)
+                if 'zonegroups' not in j or 'default' not in j['zonegroups']:
+                    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                    self.mgr.log.info('created zonegroup: default')
+            except Exception:
+                raise OrchestratorError('failed to parse zonegroup info')
+
+        # get zones
+        cmd = ['radosgw-admin',
+               '--key=%s'%keyring,
+               '--user', 'rgw.%s'%rgw_id,
+               'zone', 'list',
+               '--format=json']
+        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        # create zone if needed
+        cmd = ['radosgw-admin',
+               '--key=%s'%keyring,
+               '--user', 'rgw.%s'%rgw_id,
+               'zone', 'create',
+               '--rgw-zonegroup=default',
+               '--rgw-zone=%s'%spec.rgw_zone,
+               '--master', '--default']
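+        # 'zone list' produced no output (e.g. a fresh cluster): create the zone in the default zonegroup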
+        if not result.stdout:
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            self.mgr.log.info('created zone: %s'%spec.rgw_zone)
+        else:
+            try:
+                j = json.loads(result.stdout)
+                if 'zones' not in j or spec.rgw_zone not in j['zones']:
+                    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                    self.mgr.log.info('created zone: %s'%spec.rgw_zone)
+            except Exception:
+                raise OrchestratorError('failed to parse zone info')
 
 
 class RbdMirrorService(CephadmService):
index e09984fa4fdfc71df05dff97d68d3d4f60f55226..3ffe5fe8b5e5c7664e920c7a712d8c4b4768f0ea 100644 (file)
@@ -377,6 +377,7 @@ class TestCephadm(object):
             assert out == set()
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
     def test_rgw_update(self, cephadm_module):
         with with_host(cephadm_module, 'host1'):
             with with_host(cephadm_module, 'host2'):
@@ -426,6 +427,7 @@ class TestCephadm(object):
         ]
     )
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    @mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
     def test_daemon_add(self, spec: ServiceSpec, meth, cephadm_module):
         with with_host(cephadm_module, 'test'):
             spec.placement = PlacementSpec(hosts=['test'], count=1)
index 0a5c4a44ec15ae5f27a27fbe04474cb90f349514..3a8674e70935083cb0ea4b28878524019b7741b3 100644 (file)
@@ -7,6 +7,7 @@ from orchestrator import ServiceDescription
 from tests import mock
 
 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
+@mock.patch("cephadm.services.cephadmservice.RgwService.create_realm_zonegroup_zone", lambda _,__,___: None)
 def test_service_ls(cephadm_module: CephadmOrchestrator):
     with with_host(cephadm_module, 'host1'):
         with with_host(cephadm_module, 'host2'):
index a933fbc6dd76ca4a3b0a1e56d949685d816743da..91f36de0da8474f553688858126113a28d80a5ac 100644 (file)
@@ -1,7 +1,8 @@
 import logging
+import re
+import json
 from functools import wraps
 from typing import Optional, Callable, TypeVar, List
-
 from orchestrator import OrchestratorError
 
 
@@ -43,7 +44,6 @@ def name_to_auth_entity(daemon_type,  # type: str
     else:
         raise OrchestratorError("unknown auth entity name")
 
-
 def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]:
     @wraps(f)
     def forall_hosts_wrapper(*args) -> List[T]:
@@ -73,4 +73,17 @@ def forall_hosts(f: Callable[..., T]) -> Callable[..., List[T]]:
         return CephadmOrchestrator.instance._worker_pool.map(do_work, vals)
 
 
-    return forall_hosts_wrapper
\ No newline at end of file
+    return forall_hosts_wrapper
+
+def get_cluster_health(mgr) -> str:
+    # check cluster health
+    ret, out, err = mgr.check_mon_command({
+        'prefix': 'health',
+        'format': 'json',
+    })
+    try:
+        j = json.loads(out)
+    except Exception:
+        raise OrchestratorError('failed to parse health status')
+
+    return j['status']