qa/tasks/ceph2: add restart
author Sage Weil <sage@redhat.com>
Tue, 12 Nov 2019 23:59:24 +0000 (23:59 +0000)
committer Sage Weil <sage@redhat.com>
Thu, 21 Nov 2019 16:46:54 +0000 (10:46 -0600)
Signed-off-by: Sage Weil <sage@redhat.com>
qa/tasks/ceph2.py

index 059805ed8a8a5efa2403cf0d6f9188b9dfdfd3df..2c54cb31212707d2f2e3a3217cf051a7d16f2146 100644 (file)
@@ -93,9 +93,9 @@ def normalize_hostnames(ctx):
 def download_ceph_daemon(ctx, config):
     cluster_name = config['cluster']
     testdir = teuthology.get_testdir(ctx)
-    branch = config.get('ceph-daemon-branch', 'master')
+    branch = config.get('ceph_daemon_branch', 'master')
 
-    log.info('Downloading ceph-daemon...')
+    log.info('Downloading ceph-daemon (branch %s)...' % branch)
     ctx.cluster.run(
         args=[
             'curl', '--silent',
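
With the key renamed, a job's task config selects the branch with the underscore form. A minimal sketch (assuming the task is registered as ``ceph2`` after qa/tasks/ceph2.py; ``wip-my-branch`` is a placeholder)::

    tasks:
    - ceph2:
        ceph_daemon_branch: wip-my-branch

Omitting the key keeps the default of 'master'.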
@@ -597,6 +597,91 @@ def stop(ctx, config):
 
     yield
 
+@contextlib.contextmanager
+def tweaked_option(ctx, config):
+    """
+    Set an option, and then restore it to its original value.
+
+    Note: due to the way tasks are executed/nested, it is not advisable to
+    use this method as a standalone task; otherwise it is likely to restore
+    the tweaked option at the /end/ of the 'tasks' block.
+    """
+    saved_options = {}
+    # this list can be extended when necessary
+    options = ['mon-health-to-clog']
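+    # the tweak targets mon daemons; id '*' injects the option into every mon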
+    type_, id_ = 'mon', '*'
+    cluster = config.get('cluster', 'ceph')
+    manager = ctx.managers[cluster]
+    if id_ == '*':
+        get_from = next(teuthology.all_roles_of_type(ctx.cluster, type_))
+    else:
+        get_from = id_
+    for option in options:
+        if option not in config:
+            continue
+        value = 'true' if config[option] else 'false'
+        option = option.replace('-', '_')
+        old_value = manager.get_config(type_, get_from, option)
+        if value != old_value:
+            saved_options[option] = old_value
+            manager.inject_args(type_, id_, option, value)
+    try:
+        yield
+    finally:
+        # restore the original values even if the wrapped block raises
+        for option, value in saved_options.items():
+            manager.inject_args(type_, id_, option, value)
+
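
Since restart() below passes its whole config through tweaked_option(), a boolean ``mon-health-to-clog`` key in a restart config is applied for the duration of the restart and then restored, presumably to keep mon health messages out of the cluster log while daemons bounce. A sketch of such a config::

    tasks:
    - ceph.restart:
        daemons: [mon.1, osd.0]
        mon-health-to-clog: false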
+@contextlib.contextmanager
+def restart(ctx, config):
+    """
+    Restart ceph daemons.
+
+    For example::
+
+      tasks:
+      - ceph.restart: [all]
+
+    For example::
+
+      tasks:
+      - ceph.restart: [osd.0, mon.1, mds.*]
+
+    or::
+
+      tasks:
+      - ceph.restart:
+          daemons: [osd.0, mon.1]
+          wait-for-healthy: false
+          wait-for-osds-up: true
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    if config is None:
+        config = {}
+    elif isinstance(config, list):
+        config = {'daemons': config}
+
+    daemons = ctx.daemons.resolve_role_list(
+        config.get('daemons', None), CEPH_ROLE_TYPES, True)
+    clusters = set()
+
+    log.info('Restarting daemons: %s' % daemons)
+    with tweaked_option(ctx, config):
+        for role in daemons:
+            cluster, type_, id_ = teuthology.split_role(role)
+            d = ctx.daemons.get_daemon(type_, id_, cluster)
+            assert d, 'daemon %s does not exist' % role
+            d.stop()
+            if type_ == 'osd':
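+                # proactively mark the osd down instead of waiting for
+                # the cluster's failure detection to notice it is gone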
+                ctx.managers[cluster].mark_down_osd(id_)
+            d.restart()
+            clusters.add(cluster)
+
+    if config.get('wait-for-healthy', True):
+        for cluster in clusters:
+            healthy(ctx=ctx, config=dict(cluster=cluster))
+    if config.get('wait-for-osds-up', False):
+        for cluster in clusters:
+            wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
+    yield
+
 @contextlib.contextmanager
 def distribute_config_and_admin_keyring(ctx, config):
     """