From 5bc37a22054e6858f616b7e6db5988042ecc22b5 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Tue, 12 Nov 2019 23:59:24 +0000
Subject: [PATCH] qa/tasks/ceph2: add restart

Signed-off-by: Sage Weil
---
 qa/tasks/ceph2.py | 89 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 87 insertions(+), 2 deletions(-)

diff --git a/qa/tasks/ceph2.py b/qa/tasks/ceph2.py
index 059805ed8a8..2c54cb31212 100644
--- a/qa/tasks/ceph2.py
+++ b/qa/tasks/ceph2.py
@@ -93,9 +93,9 @@ def normalize_hostnames(ctx):
 def download_ceph_daemon(ctx, config):
     cluster_name = config['cluster']
     testdir = teuthology.get_testdir(ctx)
-    branch = config.get('ceph-daemon-branch', 'master')
+    branch = config.get('ceph_daemon_branch', 'master')
 
-    log.info('Downloading ceph-daemon...')
+    log.info('Downloading ceph-daemon (branch %s)...' % branch)
     ctx.cluster.run(
         args=[
             'curl', '--silent',
@@ -597,6 +597,91 @@ def stop(ctx, config):
     yield
 
 
+@contextlib.contextmanager
+def tweaked_option(ctx, config):
+    """
+    Set an option, and then restore its original value on exit.
+
+    Note: due to the way tasks are executed/nested, it is not recommended
+    to use this method as a standalone task; otherwise it will likely
+    restore the tweaked option at the /end/ of the 'tasks' block.
+    """
+    saved_options = {}
+    # we can complicate this when necessary
+    options = ['mon-health-to-clog']
+    type_, id_ = 'mon', '*'
+    cluster = config.get('cluster', 'ceph')
+    manager = ctx.managers[cluster]
+    if id_ == '*':
+        get_from = next(teuthology.all_roles_of_type(ctx.cluster, type_))
+    else:
+        get_from = id_
+    for option in options:
+        if option not in config:
+            continue
+        value = 'true' if config[option] else 'false'
+        option = option.replace('-', '_')
+        old_value = manager.get_config(type_, get_from, option)
+        if value != old_value:
+            saved_options[option] = old_value
+            manager.inject_args(type_, id_, option, value)
+    yield
+    for option, value in saved_options.items():
+        manager.inject_args(type_, id_, option, value)
+
+@contextlib.contextmanager
+def restart(ctx, config):
+    """
+    Restart ceph daemons.
+
+    For example::
+      tasks:
+      - ceph.restart: [all]
+
+    For example::
+      tasks:
+      - ceph.restart: [osd.0, mon.1, mds.*]
+
+    or::
+
+      tasks:
+      - ceph.restart:
+          daemons: [osd.0, mon.1]
+          wait-for-healthy: false
+          wait-for-osds-up: true
+
+    :param ctx: Context
+    :param config: Configuration
+    """
+    if config is None:
+        config = {}
+    elif isinstance(config, list):
+        config = {'daemons': config}
+
+    daemons = ctx.daemons.resolve_role_list(
+        config.get('daemons', None), CEPH_ROLE_TYPES, True)
+    clusters = set()
+
+    log.info('daemons %s' % daemons)
+    with tweaked_option(ctx, config):
+        for role in daemons:
+            cluster, type_, id_ = teuthology.split_role(role)
+            d = ctx.daemons.get_daemon(type_, id_, cluster)
+            assert d, 'daemon %s does not exist' % role
+            d.stop()
+            if type_ == 'osd':
+                ctx.managers[cluster].mark_down_osd(id_)
+            d.restart()
+            clusters.add(cluster)
+
+    if config.get('wait-for-healthy', True):
+        for cluster in clusters:
+            healthy(ctx=ctx, config=dict(cluster=cluster))
+    if config.get('wait-for-osds-up', False):
+        for cluster in clusters:
+            wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
+    yield
+
 @contextlib.contextmanager
 def distribute_config_and_admin_keyring(ctx, config):
     """
-- 
2.39.5
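
A note on the save/restore shape used by tweaked_option() above: the
overrides are injected before the yield and the saved originals are
re-injected after it, so the override is scoped to the body of the
"with tweaked_option(ctx, config):" block in restart(). Below is a
minimal, self-contained sketch of the same pattern; FakeManager and
tweaked() are illustrative stand-ins, not the real CephManager API:

    import contextlib

    class FakeManager:
        """Stand-in for CephManager: daemon config options in a dict."""
        def __init__(self):
            self.config = {'mon_health_to_clog': 'true'}

        def get_config(self, option):
            return self.config[option]

        def inject_args(self, option, value):
            self.config[option] = value

    @contextlib.contextmanager
    def tweaked(manager, option, value):
        # Save the current value, apply the override, and put the saved
        # value back after the with-block body finishes.
        saved = manager.get_config(option)
        if saved != value:
            manager.inject_args(option, value)
        yield
        if saved != value:
            manager.inject_args(option, saved)

    mgr = FakeManager()
    with tweaked(mgr, 'mon_health_to_clog', 'false'):
        assert mgr.get_config('mon_health_to_clog') == 'false'  # override active
    assert mgr.get_config('mon_health_to_clog') == 'true'       # restored

As in the patch code, the restore only runs on a clean exit: there is no
try/finally around the yield, so an exception raised while the daemons
are restarting would skip the re-injection.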
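
restart() accepts either a bare list of roles or a dict, and its first
few lines normalize both spellings onto the dict form; that is why the
list examples and the daemons/wait-for-* example in the docstring are
equivalent. A sketch of just that normalization, with a hypothetical
helper name:

    def normalize_restart_config(config):
        # Mirrors the top of restart(): accept None, a bare role list,
        # or an explicit dict of options.
        if config is None:
            return {}
        if isinstance(config, list):
            return {'daemons': config}
        return config

    assert normalize_restart_config(None) == {}
    assert normalize_restart_config(['osd.0', 'mon.1']) == \
        {'daemons': ['osd.0', 'mon.1']}
    assert normalize_restart_config({'daemons': ['osd.0'],
                                     'wait-for-healthy': False}) == \
        {'daemons': ['osd.0'], 'wait-for-healthy': False}

Options left unspecified keep their defaults when read back with
config.get() later in the function: wait-for-healthy defaults to true,
wait-for-osds-up to false.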