git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/tasks/ceph2: set up managers
author Sage Weil <sage@redhat.com>
Tue, 12 Nov 2019 22:15:32 +0000 (22:15 +0000)
committer Sage Weil <sage@redhat.com>
Thu, 21 Nov 2019 16:46:54 +0000 (10:46 -0600)
Signed-off-by: Sage Weil <sage@redhat.com>
qa/tasks/ceph2.py
qa/tasks/ceph_manager.py

index a36de3c17f7bae78fe9e86d82de989fb2a6fa68f..77982304dd29f7e40f46f7a8b20f1020b7ecaca4 100644 (file)
@@ -642,6 +642,7 @@ def task(ctx, config):
         ctx.daemons = DaemonGroup(use_ceph_daemon=True)
     if not hasattr(ctx, 'ceph'):
         ctx.ceph = {}
+        ctx.managers = {}
     if 'cluster' not in config:
         config['cluster'] = 'ceph'
     cluster_name = config['cluster']
@@ -690,6 +691,14 @@ def task(ctx, config):
             lambda: ceph_mdss(ctx=ctx, config=config),
             lambda: distribute_config_and_admin_keyring(ctx=ctx, config=config),
     ):
+        ctx.managers[cluster_name] = CephManager(
+            ctx.ceph[cluster_name].bootstrap_remote,
+            ctx=ctx,
+            logger=log.getChild('ceph_manager.' + cluster_name),
+            cluster=cluster_name,
+            ceph_daemon=True,
+        )
+
         try:
             log.info('Setup complete, yielding')
             yield
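
With the two hunks above applied, the ceph2 task registers a CephManager per cluster in ctx.managers, so later tasks in the same teuthology job can look it up by cluster name. A minimal sketch of such a follow-on task (hypothetical, not part of this commit; assumes the default 'ceph' cluster name):

    import logging

    log = logging.getLogger(__name__)

    def task(ctx, config):
        # Hypothetical follow-on task: the manager used below was registered
        # by the ceph2 task earlier in the job.
        cluster_name = (config or {}).get('cluster', 'ceph')
        manager = ctx.managers[cluster_name]
        # raw_cluster_cmd() returns the command's captured stdout as a string.
        log.info('cluster status: %s', manager.raw_cluster_cmd('status'))
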
index 029cb9bfdfa19d077369310523058208616c7606..d78302eaee41609c84a9923a60c77ecc8d63df6f 100644 (file)
@@ -33,6 +33,22 @@ DEFAULT_CONF_PATH = '/etc/ceph/ceph.conf'
 
 log = logging.getLogger(__name__)
 
+# this is for ceph-daemon clusters
+def shell(ctx, cluster_name, remote, args, **kwargs):
+    testdir = teuthology.get_testdir(ctx)
+    return remote.run(
+        args=[
+            'sudo',
+            '{}/ceph-daemon'.format(testdir),
+            '--image', ctx.image,
+            'shell',
+            '-c', '{}/{}.conf'.format(testdir, cluster_name),
+            '-k', '{}/{}.keyring'.format(testdir, cluster_name),
+            '--fsid', ctx.ceph[cluster_name].fsid,
+            '--',
+            ] + args,
+        **kwargs
+    )
 
 def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'):
     conf_fp = StringIO()
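
The shell() helper added above wraps a command in a 'ceph-daemon ... shell' invocation on the given remote. A hypothetical usage sketch (not part of this commit), assuming ctx and the cluster's bootstrap_remote were already populated by the ceph2 task:

    def show_cluster_status(ctx, cluster_name='ceph'):
        # Hypothetical helper: run 'ceph status' inside the ceph-daemon
        # shell container on the cluster's bootstrap remote.
        remote = ctx.ceph[cluster_name].bootstrap_remote
        return shell(ctx, cluster_name, remote, args=['ceph', 'status'])

    # The call above ends up executing roughly:
    #   sudo {testdir}/ceph-daemon --image {ctx.image} shell \
    #       -c {testdir}/ceph.conf -k {testdir}/ceph.keyring \
    #       --fsid {fsid} -- ceph status
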
@@ -1200,13 +1216,14 @@ class CephManager:
     """
 
     def __init__(self, controller, ctx=None, config=None, logger=None,
-                 cluster='ceph'):
+                 cluster='ceph', ceph_daemon=False):
         self.lock = threading.RLock()
         self.ctx = ctx
         self.config = config
         self.controller = controller
         self.next_pool_id = 0
         self.cluster = cluster
+        self.ceph_daemon = ceph_daemon
         if (logger):
             self.log = lambda x: logger.info(x)
         else:
@@ -1231,22 +1248,27 @@ class CephManager:
         """
         Start ceph on a raw cluster.  Return count
         """
-        testdir = teuthology.get_testdir(self.ctx)
-        ceph_args = [
-            'sudo',
-            'adjust-ulimits',
-            'ceph-coverage',
-            '{tdir}/archive/coverage'.format(tdir=testdir),
-            'timeout',
-            '120',
-            'ceph',
-            '--cluster',
-            self.cluster,
-        ]
-        ceph_args.extend(args)
-        proc = self.controller.run(
-            args=ceph_args,
-            stdout=StringIO(),
+        if self.ceph_daemon:
+            proc = shell(self.ctx, self.cluster, self.controller,
+                         args=['ceph'] + list(args),
+                         stdout=StringIO())
+        else:
+            testdir = teuthology.get_testdir(self.ctx)
+            ceph_args = [
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'timeout',
+                '120',
+                'ceph',
+                '--cluster',
+                self.cluster,
+            ]
+            ceph_args.extend(args)
+            proc = self.controller.run(
+                args=ceph_args,
+                stdout=StringIO(),
             )
         return proc.stdout.getvalue()
 
@@ -1254,22 +1276,27 @@ class CephManager:
         """
         Start ceph on a cluster.  Return success or failure information.
         """
-        testdir = teuthology.get_testdir(self.ctx)
-        ceph_args = [
-            'sudo',
-            'adjust-ulimits',
-            'ceph-coverage',
-            '{tdir}/archive/coverage'.format(tdir=testdir),
-            'timeout',
-            '120',
-            'ceph',
-            '--cluster',
-            self.cluster,
-        ]
-        ceph_args.extend(args)
-        kwargs['args'] = ceph_args
-        kwargs['check_status'] = False
-        proc = self.controller.run(**kwargs)
+        if self.ceph_daemon:
+            proc = shell(self.ctx, self.cluster, self.controller,
+                         args=['ceph'] + list(args),
+                         check_status=False)
+        else:
+            testdir = teuthology.get_testdir(self.ctx)
+            ceph_args = [
+                'sudo',
+                'adjust-ulimits',
+                'ceph-coverage',
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                'timeout',
+                '120',
+                'ceph',
+                '--cluster',
+                self.cluster,
+            ]
+            ceph_args.extend(args)
+            kwargs['args'] = ceph_args
+            kwargs['check_status'] = False
+            proc = self.controller.run(**kwargs)
         return proc.exitstatus
 
     def run_ceph_w(self, watch_channel=None):
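
Both hunks above switch on the new ceph_daemon flag: when it is set, commands are routed through shell() instead of the adjust-ulimits/ceph-coverage wrapper. raw_cluster_cmd() still returns the captured stdout, while the second method (raw_cluster_cmd_result() in the upstream file) returns only the exit status and does not raise on failure. A hypothetical sketch (not part of this commit) of using both on a manager created with ceph_daemon=True:

    def check_cluster(ctx, cluster_name='ceph'):
        # Hypothetical example: ctx.managers was populated by the ceph2 task
        # above with ceph_daemon=True.
        manager = ctx.managers[cluster_name]

        # Dispatches through shell() and returns the command's stdout.
        osd_dump = manager.raw_cluster_cmd('osd', 'dump')

        # Returns only the exit status; check_status=False means a failing
        # command does not raise.
        rc = manager.raw_cluster_cmd_result('osd', 'pool', 'stats')
        return osd_dump, rc
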