git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tasks: update to run ceph-mgr daemons
author John Spray <john.spray@redhat.com>
Tue, 27 Sep 2016 11:22:45 +0000 (12:22 +0100)
committer John Spray <john.spray@redhat.com>
Tue, 1 Nov 2016 11:21:51 +0000 (12:21 +0100)
Signed-off-by: John Spray <john.spray@redhat.com>
tasks/ceph.py
tasks/ceph_manager.py
tasks/systemd.py

index 1817213ed3aa94a6f74fb38a73d1d6db5aeccfcd..a829b4bfb2f390125b2ae532707371a50968003a 100644 (file)
@@ -23,7 +23,7 @@ from teuthology.orchestra import run
 import ceph_client as cclient
 from teuthology.orchestra.daemon import DaemonGroup
 
-CEPH_ROLE_TYPES = ['mon', 'osd', 'mds', 'rgw']
+CEPH_ROLE_TYPES = ['mon', 'mgr', 'osd', 'mds', 'rgw']
 
 log = logging.getLogger(__name__)
 
@@ -39,6 +39,9 @@ def generate_caps(type_):
             mon='allow *',
             osd='allow *',
         ),
+        mgr=dict(
+            mon='allow *',
+        ),
         mds=dict(
             mon='allow *',
             osd='allow *',
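For context, generate_caps turns these per-daemon dicts into '--cap' arguments for ceph-authtool. A reduced, self-contained sketch of that behaviour with the new mgr entry (the yield structure is paraphrased from the surrounding function, not quoted verbatim):

def generate_caps(type_):
    # Reduced sketch of the function this hunk modifies: emit
    # '--cap <subsystem> <capability>' triples for ceph-authtool.
    defaults = dict(
        mgr=dict(mon='allow *'),  # the entry this commit adds
    )
    for subsystem, capability in defaults[type_].items():
        yield '--cap'
        yield subsystem
        yield capability

print(list(generate_caps('mgr')))
# ['--cap', 'mon', 'allow *']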
@@ -598,6 +601,35 @@ def cluster(ctx, config):
         ),
     )
 
+    log.info('Setting up mgr nodes...')
+    mgrs = ctx.cluster.only(teuthology.is_type('mgr', cluster_name))
+    for remote, roles_for_host in mgrs.remotes.iteritems():
+        for role in teuthology.cluster_roles_of_type(roles_for_host, 'mgr',
+                                                     cluster_name):
+            _, _, id_ = teuthology.split_role(role)
+            mgr_dir = '/var/lib/ceph/mgr/{cluster}-{id}'.format(
+                cluster=cluster_name,
+                id=id_,
+            )
+            remote.run(
+                args=[
+                    'sudo',
+                    'mkdir',
+                    '-p',
+                    mgr_dir,
+                    run.Raw('&&'),
+                    'sudo',
+                    'adjust-ulimits',
+                    'ceph-coverage',
+                    coverage_dir,
+                    'ceph-authtool',
+                    '--create-keyring',
+                    '--gen-key',
+                    '--name=mgr.{id}'.format(id=id_),
+                    mgr_dir + '/keyring',
+                ],
+            )
+
     log.info('Setting up mds nodes...')
     mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
     for remote, roles_for_host in mdss.remotes.iteritems():
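The remote.run call in the hunk above renders to one shell invocation per mgr role: create the data directory, then generate a keyring inside it. A self-contained sketch of the naming it produces, for a hypothetical cluster 'ceph', id 'x', and an illustrative coverage dir:

def mgr_keyring_cmd(cluster_name, id_, coverage_dir):
    # Mirror of the args list above: create the mgr data dir, then
    # generate a fresh keyring for mgr.<id> inside it.
    mgr_dir = '/var/lib/ceph/mgr/{0}-{1}'.format(cluster_name, id_)
    return [
        'sudo', 'mkdir', '-p', mgr_dir, '&&',
        'sudo', 'adjust-ulimits', 'ceph-coverage', coverage_dir,
        'ceph-authtool', '--create-keyring', '--gen-key',
        '--name=mgr.{0}'.format(id_), mgr_dir + '/keyring',
    ]

print(' '.join(mgr_keyring_cmd('ceph', 'x', '/tmp/coverage')))
# sudo mkdir -p /var/lib/ceph/mgr/ceph-x && sudo adjust-ulimits
# ceph-coverage /tmp/coverage ceph-authtool --create-keyring --gen-key
# --name=mgr.x /var/lib/ceph/mgr/ceph-x/keyring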
@@ -753,7 +785,7 @@ def cluster(ctx, config):
     keys_fp = StringIO()
     keys = []
     for remote, roles_for_host in ctx.cluster.remotes.iteritems():
-        for type_ in ['mds', 'osd']:
+        for type_ in ['mgr', 'mds', 'osd']:
             for role in teuthology.cluster_roles_of_type(roles_for_host, type_, cluster_name):
                 _, _, id_ = teuthology.split_role(role)
                 data = teuthology.get_file(
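This loop collects one keyring per daemon role, now including mgr. teuthology's split_role separates a role string into its parts; a sketch of that convention (paraphrased — the real helper also defaults the cluster prefix when it is absent):

def split_role(role):
    # Roles look like '<cluster>.<type>.<id>', e.g. 'ceph.mgr.x'.
    cluster, type_, id_ = role.split('.', 2)
    return cluster, type_, id_

print(split_role('ceph.mgr.x'))
# ('ceph', 'mgr', 'x')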
@@ -1516,6 +1548,7 @@ def task(ctx, config):
             cluster=config['cluster'],
         )),
         lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
+        lambda: run_daemon(ctx=ctx, config=config, type_='mgr'),
         lambda: crush_setup(ctx=ctx, config=config),
         lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
         lambda: cephfs_setup(ctx=ctx, config=config),
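These lambdas run under teuthology's nested context-manager helper, which enters them in order and unwinds them in reverse; the new entry therefore starts ceph-mgr daemons right after the mons and before CRUSH setup and the OSDs. A minimal Python 2 sketch of that enter-in-order, exit-in-reverse pattern:

import contextlib

@contextlib.contextmanager
def run_daemon(type_):
    # Stand-in for the real run_daemon: bring daemons up on entry,
    # tear them down on exit.
    print('start {0}'.format(type_))
    yield
    print('stop {0}'.format(type_))

# contextlib.nested is the Python 2 analogue of contextutil.nested:
with contextlib.nested(run_daemon('mon'), run_daemon('mgr'),
                       run_daemon('osd')):
    pass
# start mon, start mgr, start osd ... stop osd, stop mgr, stop mon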
index 876d025457624e5c298c3c84868392f53ec59958..436d60d932a67ed151f28745658fe05421ae08c5 100644 (file)
@@ -888,6 +888,9 @@ class CephManager:
     """
     Ceph manager object.
     Contains several local functions that form a bulk of this module.
+
+    Note: despite the name, this class is unrelated to the ceph-mgr
+    daemon.
     """
 
     REPLICATED_POOL = 1
index 1eb9dcf2051ba8e82fa7374848faf1e7b7cefa84..6963a12e2b9c55d92b1391604330f1cc04f464d6 100644 (file)
@@ -66,8 +66,10 @@ def task(ctx, config):
         name = remote.shortname
         mon_name = 'ceph-mon@' + name + '.service'
         mds_name = 'ceph-mds@' + name + '.service'
+        mgr_name = 'ceph-mgr@' + name + '.service'
         mon_role_name = 'mon.' + name
         mds_role_name = 'mds.' + name
+        mgr_role_name = 'mgr.' + name
         m_osd = re.search('--id (\d+) --setuser ceph', r.stdout.getvalue())
         if m_osd:
             osd_service = 'ceph-osd@{m}.service'.format(m=m_osd.group(1))
@@ -98,6 +100,18 @@ def task(ctx, config):
                 log.info("Failed to stop ceph mon service")
             remote.run(args=['sudo', 'systemctl', 'start', mon_name])
             time.sleep(4)
+        if mgr_role_name in roles:
+            remote.run(args=['sudo', 'systemctl', 'status', mgr_name])
+            remote.run(args=['sudo', 'systemctl', 'stop', mgr_name])
+            time.sleep(4)  # an immediate status check can still show 'deactivating'
+            r = remote.run(args=['sudo', 'systemctl', 'status', mgr_name],
+                           stdout=StringIO(), check_status=False)
+            if 'Active: inactive' in r.stdout.getvalue():
+                log.info("Successfully stopped single ceph mgr service")
+            else:
+                log.info("Failed to stop ceph mgr service")
+            remote.run(args=['sudo', 'systemctl', 'start', mgr_name])
+            time.sleep(4)
         if mds_role_name in roles:
             remote.run(args=['sudo', 'systemctl', 'status', mds_name])
             remote.run(args=['sudo', 'systemctl', 'stop', mds_name])
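Finally, a condensed, self-contained sketch of the stop/verify/restart cycle this hunk adds for the mgr unit. remote is assumed to be a teuthology Remote; the membership test sidesteps str.find, whose -1 'not found' return is truthy:

import time
from StringIO import StringIO  # Python 2, as in this module

def bounce_unit(remote, unit):
    """Stop a systemd unit, confirm it went inactive, then restart it."""
    remote.run(args=['sudo', 'systemctl', 'stop', unit])
    time.sleep(4)  # an immediate status check can still show 'deactivating'
    r = remote.run(args=['sudo', 'systemctl', 'status', unit],
                   stdout=StringIO(), check_status=False)
    stopped = 'Active: inactive' in r.stdout.getvalue()
    remote.run(args=['sudo', 'systemctl', 'start', unit])
    return stopped

# e.g. bounce_unit(remote, 'ceph-mgr@' + remote.shortname + '.service')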