tasks/ceph: only run ceph_log and valgrind_post once
author Josh Durgin <jdurgin@redhat.com>
Tue, 29 Mar 2016 23:46:11 +0000 (16:46 -0700)
committer Josh Durgin <jdurgin@redhat.com>
Fri, 20 May 2016 18:19:56 +0000 (11:19 -0700)
These tasks set up and parse logs on all hosts, so they should be run
only for the first cluster setup. That cluster will be torn down last,
so the cleanup happens after all clusters are shut down as well.

Signed-off-by: Josh Durgin <jdurgin@redhat.com>
(cherry picked from commit 3203b76792e6ab7f35d465cebe602243681d5d9e)
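For context, here is a minimal sketch of the pattern the diff below introduces, not the teuthology code itself: the once-per-run subtasks are prepended only when this is the first ceph cluster in the run, and the whole list is then entered as one nested stack of context managers. ceph_log, cluster, and build_subtasks here are hypothetical stand-ins, and contextlib.ExitStack stands in for teuthology's contextutil.nested.

import contextlib

@contextlib.contextmanager
def ceph_log():
    # hypothetical stand-in for the real ceph_log subtask
    print('setup: ceph_log')
    yield
    print('teardown: ceph_log')

@contextlib.contextmanager
def cluster(name):
    # hypothetical stand-in for the real cluster subtask
    print('setup: cluster ' + name)
    yield
    print('teardown: cluster ' + name)

def build_subtasks(first_ceph_cluster, cluster_name):
    subtasks = []
    if first_ceph_cluster:
        # log setup/parsing touches every host, so only the first cluster adds it
        subtasks.append(lambda: ceph_log())
    subtasks.append(lambda: cluster(cluster_name))
    return subtasks

# ExitStack plays the role of contextutil.nested(*subtasks) in this sketch
with contextlib.ExitStack() as stack:
    for subtask in build_subtasks(True, 'ceph'):
        stack.enter_context(subtask())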

tasks/ceph.py

index 9ac7983d237a55f145479889e7f06501ff191afa..ef60243adcaf0ac2ea7957eccf65c4b7eb899ce0 100644 (file)
@@ -1414,7 +1414,9 @@ def task(ctx, config):
     overrides = ctx.config.get('overrides', {})
     teuthology.deep_merge(config, overrides.get('ceph', {}))
 
+    first_ceph_cluster = False
     if not hasattr(ctx, 'daemons'):
+        first_ceph_cluster = True
         ctx.daemons = DaemonGroup()
 
     testdir = teuthology.get_testdir(ctx)
@@ -1436,26 +1438,35 @@ def task(ctx, config):
 
     validate_config(ctx, config)
 
-    with contextutil.nested(
+    subtasks = []
+    if first_ceph_cluster:
+        # these tasks handle general log setup and parsing on all hosts,
+        # so they should only be run once
+        subtasks = [
             lambda: ceph_log(ctx=ctx, config=None),
             lambda: valgrind_post(ctx=ctx, config=config),
-            lambda: cluster(ctx=ctx, config=dict(
-                conf=config.get('conf', {}),
-                fs=config.get('fs', None),
-                mkfs_options=config.get('mkfs_options', None),
-                mount_options=config.get('mount_options', None),
-                block_journal=config.get('block_journal', None),
-                tmpfs_journal=config.get('tmpfs_journal', None),
-                log_whitelist=config.get('log-whitelist', []),
-                cpu_profile=set(config.get('cpu_profile', []),),
-                cluster=config['cluster'],
-            )),
-            lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
-            lambda: crush_setup(ctx=ctx, config=config),
-            lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
-            lambda: cephfs_setup(ctx=ctx, config=config),
-            lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
-    ):
+        ]
+
+    subtasks += [
+        lambda: cluster(ctx=ctx, config=dict(
+            conf=config.get('conf', {}),
+            fs=config.get('fs', None),
+            mkfs_options=config.get('mkfs_options', None),
+            mount_options=config.get('mount_options', None),
+            block_journal=config.get('block_journal', None),
+            tmpfs_journal=config.get('tmpfs_journal', None),
+            log_whitelist=config.get('log-whitelist', []),
+            cpu_profile=set(config.get('cpu_profile', []),),
+            cluster=config['cluster'],
+        )),
+        lambda: run_daemon(ctx=ctx, config=config, type_='mon'),
+        lambda: crush_setup(ctx=ctx, config=config),
+        lambda: run_daemon(ctx=ctx, config=config, type_='osd'),
+        lambda: cephfs_setup(ctx=ctx, config=config),
+        lambda: run_daemon(ctx=ctx, config=config, type_='mds'),
+    ]
+
+    with contextutil.nested(*subtasks):
         try:
             if config.get('wait-for-healthy', True):
                 healthy(ctx=ctx, config=dict(cluster=config['cluster']))
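A hedged illustration of why the teardown ordering in the commit message works out: context managers entered first are exited last, so with ceph_log and valgrind_post placed at the front of the first cluster's subtask list, their cleanup (log parsing, valgrind checks) only runs after everything entered later has been torn down. The step names below are illustrative, and teuthology's contextutil.nested is assumed to unwind in the same LIFO order as contextlib.ExitStack does here.

import contextlib

@contextlib.contextmanager
def step(name):
    print('enter ' + name)
    yield
    print('exit  ' + name)

with contextlib.ExitStack() as stack:
    for name in ('ceph_log', 'valgrind_post', 'cluster.ceph', 'run_daemon.mon'):
        stack.enter_context(step(name))

# exits print in reverse order: run_daemon.mon, cluster.ceph, valgrind_post, ceph_log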