This patch also adds convenience facilities that expose some of the
ceph_manager methods as tasks usable directly from a yaml file.
Signed-off-by: Samuel Just <sjust@redhat.com>
(cherry picked from commit 015ed70f8a46a44e35433d1f701e37f68af31cf9)
Conflicts:
tasks/radosbench.py
the pool creation call changed; the conflict arises because those lines
were intended to implement the option to not create a pool
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+tasks:
+- install:
+    branch: v0.80.8
+- print: '**** done installing firefly'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- full_sequential:
+  - ceph_manager.create_pool:
+      args: ['toremove']
+      kwargs:
+        pg_num: 4096
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - radosbench:
+      clients: [client.0]
+      time: 120
+      size: 1
+      pool: toremove
+      create_pool: false
+  - ceph_manager.remove_pool:
+      args: ['toremove']
+  - utility.sleep:
+      to_sleep: 10
+  - ceph.restart:
+      daemons:
+      - osd.0
+      - osd.1
+      - osd.2
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - radosbench:
+      clients: [client.0]
+      time: 60
+      size: 1
+  - ceph_manager.create_pool:
+      args: ['newpool']
+  - loop:
+      count: 100
+      body:
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 2]
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 1]
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - loop:
+      count: 100
+      body:
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 2]
+      - ceph_manager.set_pool_property:
+          args: ['newpool', 'min_size', 1]
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - utility.sleep:
+      to_sleep: 30
+  - install.upgrade:
+      mon.a: null
+  - ceph.restart:
+      daemons:
+      - osd.0
+      - osd.1
+      - osd.2
+  - utility.sleep:
+      to_sleep: 30
+  - radosbench:
+      clients: [client.0]
+      time: 30
+      size: 1
+  - ceph_manager.wait_for_clean: null
Return path to osd data with {id} needing to be replaced
"""
return "/var/lib/ceph/osd/ceph-{id}"
+
+def utility_task(name):
+    """
+    Generate a task wrapping the ceph_manager method of the given name.
+    The yaml config may carry 'args' and 'kwargs' to pass through.
+    """
+    def task(ctx, config):
+        if config is None:
+            config = {}
+        args = config.get('args', [])
+        kwargs = config.get('kwargs', {})
+        fn = getattr(ctx.manager, name)
+        fn(*args, **kwargs)
+    return task
+
+revive_osd = utility_task("revive_osd")
+kill_osd = utility_task("kill_osd")
+create_pool = utility_task("create_pool")
+remove_pool = utility_task("remove_pool")
+wait_for_clean = utility_task("wait_for_clean")
+set_pool_property = utility_task("set_pool_property")
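
As a quick illustration of how these wrappers behave (a standalone sketch, not
part of the patch; FakeCtx/FakeManager stand in for the teuthology context and
ctx.manager, and the config dict mirrors the create_pool stanza in the suite
file above):

    # Standalone sketch: how a yaml stanza reaches a ceph_manager method.
    class FakeManager(object):
        def create_pool(self, name, pg_num=16):
            print("create_pool(%r, pg_num=%d)" % (name, pg_num))

    class FakeCtx(object):
        manager = FakeManager()

    def utility_task(name):
        # same wrapper as in the patch above
        def task(ctx, config):
            if config is None:
                config = {}
            args = config.get('args', [])
            kwargs = config.get('kwargs', {})
            fn = getattr(ctx.manager, name)
            fn(*args, **kwargs)
        return task

    create_pool = utility_task("create_pool")

    # Equivalent of the suite stanza
    #   - ceph_manager.create_pool:
    #       args: ['toremove']
    #       kwargs:
    #         pg_num: 4096
    create_pool(FakeCtx(), {'args': ['toremove'], 'kwargs': {'pg_num': 4096}})
    # -> prints: create_pool('toremove', pg_num=4096)
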
clients: [client list]
time: <seconds to run>
pool: <pool to use>
+ size: write size to use, in bytes (defaults to 4 MB, i.e. 4 << 20)
unique_pool: use a unique pool, defaults to False
ec_pool: create ec pool, defaults to False
+ create_pool: create pool, defaults to True
example:
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
pool = 'data'
-    if config.get('pool'):
-        pool = config.get('pool')
-        if pool is not 'data':
-            ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
-    else:
-        pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
+    if config.get('create_pool', True):
+        if config.get('pool'):
+            pool = config.get('pool')
+            if pool != 'data':
+                ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
+        else:
+            pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
proc = remote.run(
args=[
'{tdir}/archive/coverage',
'rados',
'--name', role,
+ '-b', str(config.get('size', 4<<20)),
'-p' , pool,
'bench', str(config.get('time', 360)), 'write',
]).format(tdir=testdir),
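
For reference, a small sketch (not part of the patch) of how the two new knobs
play out for the radosbench stanzas in the suite above: with create_pool: false
the pool is expected to already exist, and size is passed straight through as
the rados bench block size, with 4 << 20 (4 MB) as the fallback when size is
omitted:

    # Sketch only: how the new config keys shape the rados invocation.
    config = {'time': 120, 'size': 1, 'pool': 'toremove', 'create_pool': False}

    args = ['rados',
            '--name', 'client.0',
            '-b', str(config.get('size', 4 << 20)),   # 1 here; 4194304 if size is unset
            '-p', config.get('pool', 'data'),
            'bench', str(config.get('time', 360)), 'write']
    print(' '.join(args))
    # -> rados --name client.0 -b 1 -p toremove bench 120 write
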
--- /dev/null
+import logging
+import time
+
+log = logging.getLogger(__name__)
+
+def sleep(ctx, config):
+    """Sleep for config['to_sleep'] seconds (default 5)."""
+    if config is None:
+        config = {}
+    to_sleep = config.get("to_sleep", 5)
+    log.info("Sleeping for {to_sleep}".format(to_sleep=to_sleep))
+    time.sleep(to_sleep)
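
Called the way teuthology would call it after parsing the utility.sleep stanzas
in the suite above (a sketch; ctx is unused by this task, so None stands in
here):

    sleep(None, {"to_sleep": 30})   # matches '- utility.sleep: {to_sleep: 30}' in the yaml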