git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
suites/rados: add test for 16113
author    Samuel Just <sjust@redhat.com>
          Thu, 2 Jun 2016 21:05:38 +0000 (14:05 -0700)
committer Samuel Just <sjust@redhat.com>
          Mon, 6 Jun 2016 17:59:01 +0000 (10:59 -0700)
Signed-off-by: Samuel Just <sjust@redhat.com>
suites/rados/singleton-nomsgr/all/16113.yaml [new file with mode: 0644]
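
The suite file below brings up three mons and three OSDs with a deliberately tiny OSD map cache (osd map cache size: 2, osd map max advance: 1), installs hammer, writes into a 1024-PG pool with radosbench, upgrades in place, sets sortbitwise, then kills and revives osd.0 under continued load so it has to catch up across the flag change before the cluster goes clean (see the note after the diff). A hedged sketch of how a one-off suite like this might be scheduled; teuthology-suite and its --suite/--filter/--ceph/--machine-type options are standard, but the branch, machine type, and email values here are placeholders:

    # Placeholder values; check the options against your teuthology version.
    teuthology-suite --suite rados/singleton-nomsgr --filter 16113 \
        --ceph master --machine-type smithi --email you@example.com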

diff --git a/suites/rados/singleton-nomsgr/all/16113.yaml b/suites/rados/singleton-nomsgr/all/16113.yaml
new file mode 100644 (file)
index 0000000..6977aaf
--- /dev/null
@@ -0,0 +1,94 @@
+os_type: ubuntu
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: 20
+        debug ms: 1
+        debug paxos: 20
+        mon warn on legacy crush tunables: false
+        mon min osdmap epochs: 3
+        osd pool default size: 2
+        osd pool default min size: 1
+      osd:
+        osd map cache size: 2
+        osd map max advance: 1
+        debug filestore: 20
+        debug journal: 20
+        debug ms: 1
+        debug osd: 20
+    log-whitelist:
+    - osd_map_cache_size
+    - slow request
+    - scrub mismatch
+    - ScrubResult
+    - failed to encode
+    - wrongly marked me down
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+  - mon.b
+  - mon.c
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+    branch: hammer
+- print: '**** done installing hammer'
+- ceph:
+    fs: xfs
+- print: '**** done ceph'
+- ceph_manager.create_pool:
+    args: ['test']
+    kwargs:
+      pg_num: 1024
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- sequential:
+  - radosbench:
+      pool: test
+      size: 1
+      time: 100
+      cleanup: false
+      create_pool: false
+- install.upgrade:
+    mon.a: null
+- ceph.restart: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2]
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- exec:
+    mon.a:
+      - ceph osd set sortbitwise
+- sleep:
+    duration: 10
+- ceph_manager.wait_for_clean: null
+- sequential:
+  - radosbench:
+      pool: test
+      size: 1
+      time: 400
+      cleanup: false
+      create_pool: false
+  - sleep:
+      duration: 30
+  - ceph_manager.kill_osd:
+      kwargs:
+        osd: 0
+  - sleep:
+      duration: 30
+  - ceph_manager.revive_osd:
+      kwargs:
+        osd: 0
+  - sleep:
+      duration: 30
+  - ceph_manager.wait_for_clean: null
+- sleep:
+    duration: 30
+- ceph_manager.wait_for_clean: null
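
The deciding step is the final sequential block: with a two-entry map cache and a max advance of 1, the revived osd.0 must page nearly every osdmap in from disk while catching up past the sortbitwise change under radosbench load. A hedged sketch of watching that recovery by hand on a live cluster, using standard ceph CLI and admin-socket calls that are not part of the test itself (assumes shell access to the osd.0 host):

    # Confirm the flag is set cluster-wide; ceph osd dump prints a "flags" line.
    ceph osd dump | grep flags
    # Watch osd.0 catch up after the revive; the OSD admin socket reports
    # oldest_map/newest_map epochs.
    ceph daemon osd.0 status
    # Then wait for active+clean, as ceph_manager.wait_for_clean does.
    ceph -s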