From f80efc33f53c2b1aedcdbf326962d67d74c7c633 Mon Sep 17 00:00:00 2001
From: Samuel Just
Date: Fri, 1 May 2015 09:13:27 -0700
Subject: [PATCH] suites/rados: add test for 11429

This patch also adds some convenience facilities for making some of the
ceph_manager methods into tasks usable from a yaml file.

Signed-off-by: Samuel Just
(cherry picked from commit 015ed70f8a46a44e35433d1f701e37f68af31cf9)

Conflicts:
	tasks/radosbench.py
	    the pool creation call changed; the conflict is because the changes
	    were intended to implement the option to not create a pool
---
 suites/rados/singleton-nomsgr/all/11429.yaml | 105 +++++++++++++++++++
 tasks/ceph_manager.py                        |  17 +++
 tasks/radosbench.py                          |  16 +--
 tasks/utility.py                             |   9 ++
 4 files changed, 141 insertions(+), 6 deletions(-)
 create mode 100644 suites/rados/singleton-nomsgr/all/11429.yaml
 create mode 100644 tasks/utility.py

diff --git a/suites/rados/singleton-nomsgr/all/11429.yaml b/suites/rados/singleton-nomsgr/all/11429.yaml
new file mode 100644
index 0000000000000..ffae57c5a6243
--- /dev/null
+++ b/suites/rados/singleton-nomsgr/all/11429.yaml
@@ -0,0 +1,105 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+tasks:
+- install:
+    branch: v0.80.8
+- print: '**** done installing firefly'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- full_sequential:
+  - ceph_manager.create_pool:
+      args: ['toremove']
+      kwargs:
+        pg_num: 4096
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - radosbench:
+      clients: [client.0]
+      time: 120
+      size: 1
+      pool: toremove
+      create_pool: false
+  - ceph_manager.remove_pool:
+      args: ['toremove']
+  - utility.sleep:
+      to_sleep: 10
+  - ceph.restart:
+      daemons:
+      - osd.0
+      - osd.1
+      - osd.2
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - radosbench:
+      clients: [client.0]
+      time: 60
+      size: 1
+  - ceph_manager.create_pool:
+      args: ['newpool']
+  - loop:
+      count: 100
+      body:
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 2]
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 1]
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - loop:
+      count: 100
+      body:
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 2]
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 1]
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - utility.sleep:
+      to_sleep: 30
+  - install.upgrade:
+      mon.a: null
+  - ceph.restart:
+      daemons:
+      - osd.0
+      - osd.1
+      - osd.2
+  - utility.sleep:
+      to_sleep: 30
+  - radosbench:
+      clients: [client.0]
+      time: 30
+      size: 1
+  - ceph_manager.wait_for_clean: null
diff --git a/tasks/ceph_manager.py b/tasks/ceph_manager.py
index d80b13d04f7be..58d19ef5d65ae 100644
--- a/tasks/ceph_manager.py
+++ b/tasks/ceph_manager.py
@@ -1646,3 +1646,20 @@ class CephManager:
         Return path to osd data with {id} needing to be replaced
         """
         return "/var/lib/ceph/osd/ceph-{id}"
+
+def utility_task(name):
+    def task(ctx, config):
+        if config is None:
+            config = {}
+        args = config.get('args', [])
+        kwargs = config.get('kwargs', {})
+        fn = getattr(ctx.manager, name)
+        fn(*args, **kwargs)
+    return task
+
+revive_osd = utility_task("revive_osd")
+kill_osd = utility_task("kill_osd")
+create_pool = utility_task("create_pool")
+remove_pool = utility_task("remove_pool")
+wait_for_clean = utility_task("wait_for_clean")
+set_pool_property = utility_task("set_pool_property")
diff --git a/tasks/radosbench.py b/tasks/radosbench.py
index 56050034d6729..d260ad81500a2 100644
--- a/tasks/radosbench.py
+++ b/tasks/radosbench.py
@@ -20,8 +20,10 @@ def task(ctx, config):
         clients: [client list]
         time:
         pool:
+        size: write size to use
         unique_pool: use a unique pool, defaults to False
         ec_pool: create ec pool, defaults to False
+        create_pool: create pool, defaults to True

     example:

@@ -47,12 +49,13 @@
         (remote,) = ctx.cluster.only(role).remotes.iterkeys()

         pool = 'data'
-        if config.get('pool'):
-            pool = config.get('pool')
-            if pool is not 'data':
-                ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
-        else:
-            pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
+        if config.get('create_pool', True):
+            if config.get('pool'):
+                pool = config.get('pool')
+                if pool is not 'data':
+                    ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
+            else:
+                pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))

         proc = remote.run(
             args=[
@@ -62,6 +65,7 @@
                           '{tdir}/archive/coverage',
                           'rados',
                           '--name', role,
+                          '-b', str(config.get('size', 4<<20)),
                           '-p' , pool,
                           'bench', str(config.get('time', 360)), 'write',
                           ]).format(tdir=testdir),
diff --git a/tasks/utility.py b/tasks/utility.py
new file mode 100644
index 0000000000000..96e0f7de4db8d
--- /dev/null
+++ b/tasks/utility.py
@@ -0,0 +1,9 @@
+import logging
+import time
+
+log = logging.getLogger(__name__)
+
+def sleep(ctx, config):
+    to_sleep = config.get("to_sleep", 5)
+    log.info("Sleeping for {to_sleep}".format(to_sleep=to_sleep))
+    time.sleep(to_sleep)
--
2.39.5
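A minimal sketch of how the utility_task wrappers added to tasks/ceph_manager.py above are expected to be driven: teuthology invokes each generated task with the run context and the parsed yaml mapping as config, and the wrapper forwards the yaml-provided args/kwargs to the matching CephManager method. FakeManager and FakeCtx below are hypothetical stand-ins for the real CephManager and teuthology context; only utility_task itself mirrors code from the patch.

# Sketch only: FakeManager/FakeCtx are made-up stand-ins; utility_task is
# copied from the tasks/ceph_manager.py hunk above.

def utility_task(name):
    def task(ctx, config):
        if config is None:
            config = {}
        args = config.get('args', [])
        kwargs = config.get('kwargs', {})
        fn = getattr(ctx.manager, name)   # look up the manager method by name
        fn(*args, **kwargs)               # forward yaml-provided args/kwargs
    return task


class FakeManager(object):
    """Stand-in for CephManager with only the method exercised below."""
    def create_pool(self, pool_name, pg_num=16):
        print("create_pool(%r, pg_num=%d)" % (pool_name, pg_num))


class FakeCtx(object):
    """Stand-in for the teuthology run context; only .manager is needed."""
    manager = FakeManager()


create_pool = utility_task("create_pool")

# Rough equivalent of the yaml step:
#   - ceph_manager.create_pool:
#       args: ['toremove']
#       kwargs:
#         pg_num: 4096
create_pool(FakeCtx(), {'args': ['toremove'], 'kwargs': {'pg_num': 4096}})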