git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
suites/rados: add test for 11429
author    Samuel Just <sjust@redhat.com>
          Fri, 1 May 2015 16:13:27 +0000 (09:13 -0700)
committer Loic Dachary <ldachary@redhat.com>
          Tue, 12 May 2015 15:56:45 +0000 (17:56 +0200)
This patch also adds convenience facilities for exposing some of the
ceph_manager methods as tasks usable from a yaml file.

Signed-off-by: Samuel Just <sjust@redhat.com>
(cherry picked from commit 015ed70f8a46a44e35433d1f701e37f68af31cf9)

Conflicts:
	tasks/radosbench.py
	    the pool creation call changed, and the conflict arises because
	    those lines were indented to implement the option to not create
	    a pool

suites/rados/singleton-nomsgr/all/11429.yaml [new file with mode: 0644]
tasks/ceph_manager.py
tasks/radosbench.py
tasks/utility.py [new file with mode: 0644]
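
The "convenience facilities" mentioned in the commit message are thin
wrappers that expose CephManager methods as teuthology tasks. Below is a
minimal, self-contained sketch of that pattern, not the committed code
(the real implementation is in the tasks/ceph_manager.py hunk further
down); FakeManager and FakeCtx are hypothetical stand-ins used only to
make the sketch runnable outside teuthology.

def utility_task(name):
    """Return a task that forwards yaml-supplied args/kwargs to ctx.manager.<name>."""
    def task(ctx, config):
        if config is None:
            config = {}
        args = config.get('args', [])
        kwargs = config.get('kwargs', {})
        getattr(ctx.manager, name)(*args, **kwargs)
    return task

class FakeManager(object):
    """Stand-in for CephManager, just enough to run the sketch."""
    def create_pool(self, pool_name, pg_num=16):
        print("would create pool %r with pg_num %d" % (pool_name, pg_num))

class FakeCtx(object):
    manager = FakeManager()

# Equivalent of the yaml stanza used in the new 11429.yaml:
#   - ceph_manager.create_pool:
#       args: ['toremove']
#       kwargs:
#         pg_num: 4096
create_pool = utility_task('create_pool')
create_pool(FakeCtx(), {'args': ['toremove'], 'kwargs': {'pg_num': 4096}})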

diff --git a/suites/rados/singleton-nomsgr/all/11429.yaml b/suites/rados/singleton-nomsgr/all/11429.yaml
new file mode 100644 (file)
index 0000000..ffae57c
--- /dev/null
@@ -0,0 +1,105 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+tasks:
+- install:
+    branch: v0.80.8
+- print: '**** done installing firefly'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- full_sequential:
+  - ceph_manager.create_pool:
+      args: ['toremove']
+      kwargs:
+        pg_num: 4096
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - radosbench:
+      clients: [client.0]
+      time: 120
+      size: 1
+      pool: toremove
+      create_pool: false
+  - ceph_manager.remove_pool:
+      args: ['toremove']
+  - utility.sleep:
+      to_sleep: 10
+  - ceph.restart:
+      daemons:
+        - osd.0
+        - osd.1
+        - osd.2
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - radosbench:
+      clients: [client.0]
+      time: 60
+      size: 1
+  - ceph_manager.create_pool:
+      args: ['newpool']
+  - loop:
+      count: 100
+      body:
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 2]
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 1]
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - loop:
+      count: 100
+      body:
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 2]
+        - ceph_manager.set_pool_property:
+            args: ['newpool', 'min_size', 1]
+  - utility.sleep:
+      to_sleep: 30
+  - ceph_manager.wait_for_clean: null
+  - utility.sleep:
+      to_sleep: 30
+  - install.upgrade:
+      mon.a: null
+  - ceph.restart:
+      daemons:
+        - osd.0
+        - osd.1
+        - osd.2
+  - utility.sleep:
+      to_sleep: 30
+  - radosbench:
+      clients: [client.0]
+      time: 30
+      size: 1
+  - ceph_manager.wait_for_clean: null
diff --git a/tasks/ceph_manager.py b/tasks/ceph_manager.py
index d80b13d04f7bea89263c1a06e7ebc53011fe7f95..58d19ef5d65ae83bd63e083dd0e2f6bb578e8db8 100644 (file)
@@ -1646,3 +1646,20 @@ class CephManager:
         Return path to osd data with {id} needing to be replaced
         """
         return "/var/lib/ceph/osd/ceph-{id}"
+
+def utility_task(name):
+    def task(ctx, config):
+        if config is None:
+            config = {}
+        args = config.get('args', [])
+        kwargs = config.get('kwargs', {})
+        fn = getattr(ctx.manager, name)
+        fn(*args, **kwargs)
+    return task
+
+revive_osd = utility_task("revive_osd")
+kill_osd = utility_task("kill_osd")
+create_pool = utility_task("create_pool")
+remove_pool = utility_task("remove_pool")
+wait_for_clean = utility_task("wait_for_clean")
+set_pool_property = utility_task("set_pool_property")
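
Since these wrappers are plain module-level callables in tasks/ceph_manager.py,
yaml entries such as ceph_manager.create_pool, ceph_manager.wait_for_clean and
ceph_manager.set_pool_property in the new 11429.yaml resolve directly to them,
so no dedicated task file is needed per operation.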
diff --git a/tasks/radosbench.py b/tasks/radosbench.py
index 56050034d6729dfb6050d3d8d74151eccb9027f8..d260ad81500a2b508fcef8acce6d3061b0508437 100644 (file)
@@ -20,8 +20,10 @@ def task(ctx, config):
         clients: [client list]
         time: <seconds to run>
         pool: <pool to use>
+        size: write size to use
         unique_pool: use a unique pool, defaults to False
         ec_pool: create ec pool, defaults to False
+        create_pool: create pool, defaults to True
 
     example:
 
@@ -47,12 +49,13 @@ def task(ctx, config):
         (remote,) = ctx.cluster.only(role).remotes.iterkeys()
 
         pool = 'data'
-        if config.get('pool'):
-            pool = config.get('pool')
-            if pool is not 'data':
-                ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
-        else:
-            pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
+        if config.get('create_pool', True):
+            if config.get('pool'):
+                pool = config.get('pool')
+                if pool is not 'data':
+                    ctx.manager.create_pool(pool, ec_pool=config.get('ec_pool', False))
+            else:
+                pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
 
         proc = remote.run(
             args=[
@@ -62,6 +65,7 @@ def task(ctx, config):
                           '{tdir}/archive/coverage',
                           'rados',
                           '--name', role,
+                          '-b', str(config.get('size', 4<<20)),
                           '-p' , pool,
                           'bench', str(config.get('time', 360)), 'write',
                           ]).format(tdir=testdir),
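
With these radosbench.py changes, the size option is passed to rados bench as
the -b write size (defaulting to 4 MiB, i.e. 4<<20 bytes), and create_pool:
false lets a run reuse an existing pool instead of creating one, as the
11429.yaml above does with the pre-created 'toremove' pool.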
diff --git a/tasks/utility.py b/tasks/utility.py
new file mode 100644 (file)
index 0000000..96e0f7d
--- /dev/null
@@ -0,0 +1,9 @@
+import logging
+import time
+
+log = logging.getLogger(__name__)
+
+def sleep(ctx, config):
+    to_sleep = config.get("to_sleep", 5)
+    log.info("Sleeping for {to_sleep}".format(to_sleep=to_sleep))
+    time.sleep(to_sleep)