+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 10 # GB
-
-overrides:
- ceph:
- fs: xfs
- conf:
- osd:
- filestore xfs extsize: true
-
-tasks:
-- install:
-- ceph:
-- workunit:
- clients:
- all:
- - rados/test_alloc_hint.sh
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ filestore xfs extsize: true
+
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_alloc_hint.sh
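Note: the alloc-hint fragment above injects `filestore xfs extsize` through `overrides:`, which teuthology deep-merges into the `ceph:` task's config; the fragments that follow pass `conf:` inline on the `ceph:` task instead. The two spellings are equivalent for a single job. A minimal sketch of the inline form, assuming nothing else in the suite needs to inherit the override:

```yaml
tasks:
- install:
- ceph:
    fs: xfs
    conf:
      osd:
        filestore xfs extsize: true  # same option, scoped to this one task
- workunit:
    clients:
      all:
        - rados/test_alloc_hint.sh
```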
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 6
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(TOO_FEW_PGS\)
+ - \(POOL_APP_NOT_ENABLED\)
+- ceph_objectstore_tool:
+ objects: 20
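Note: `log-whitelist` entries are regular expressions matched against the cluster log, so `\(OSD_` and `\(PG_` each cover a whole family of health codes. If several fragments need the same allowances, the list can be lifted into `overrides:` so every `ceph:` task inherits it. A sketch under that assumption:

```yaml
overrides:
  ceph:
    log-whitelist:
      - overall HEALTH_
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(PG_
      - \(TOO_FEW_PGS\)
      - \(POOL_APP_NOT_ENABLED\)
```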
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- exec:
+ client.0:
+ - ceph_test_filejournal
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ global:
+ journal aio: true
+- filestore_idempotent:
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- filestore_idempotent:
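Note: this fragment and the aio-journal variant above are otherwise identical; the variant's only delta is the conf block below, which turns on asynchronous (libaio) writes for the FileStore journal.

```yaml
conf:
  global:
    journal aio: true  # the aio variant's only addition
```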
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - objectstore/test_fuse.sh
+
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+ client.0:
+ - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb
+ - rm -rf $TESTDIR/kvtest
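Note: `exec` runs each list entry as its own shell command on the named client, and teuthology expands `$TESTDIR` per remote; the `mkdir`/`cd` prefix keeps the binary's scratch files in a disposable directory. The same pattern works for any self-contained test binary. A sketch, where `ceph_test_example` is a placeholder rather than a real binary:

```yaml
- exec:
    client.0:
      # hypothetical binary; pattern: make a scratch dir, run inside it, clean up
      - mkdir $TESTDIR/scratch && cd $TESTDIR/scratch && ceph_test_example
      - rm -rf $TESTDIR/scratch
```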
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ all:
+ - osdc/stress_objectcacher.sh
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- exec:
+ client.0:
+ - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ulimit -c 0 && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-filestore 20 --debug-bluestore 20" ceph_test_objectstore --gtest_filter=-*/3
+ - rm -rf $TESTDIR/ostest
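Note on the long command above: `ulimit -c 0` suppresses core dumps, `ulimit -Sn 16384` raises the soft open-file limit, `CEPH_ARGS` steers debug logging into the archive directory, and `--gtest_filter=-*/3` excludes every test at gtest parameter index 3. When chasing a failure one could instead select a single instantiation positively; a sketch, with the index chosen purely for illustration:

```yaml
- exec:
    client.0:
      # hypothetical: rerun only the tests at parameter index 0
      - mkdir -p $TESTDIR/ostest && cd $TESTDIR/ostest && ceph_test_objectstore --gtest_filter=*/0
```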
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0]
-openstack:
-- volumes: # attached to each instance
- count: 6
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- fs: xfs
- conf:
- global:
- osd max object name len: 460
- osd max object namespace len: 64
- log-whitelist:
- - overall HEALTH_
- - \(OSDMAP_FLAGS\)
- - \(OSD_
- - \(PG_
- - \(TOO_FEW_PGS\)
- - \(POOL_APP_NOT_ENABLED\)
-- ceph_objectstore_tool:
- objects: 20
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-openstack:
-- volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- fs: xfs
-- exec:
- client.0:
- - ceph_test_filejournal
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-openstack:
-- volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- fs: xfs
- conf:
- global:
- journal aio: true
-- filestore_idempotent:
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-openstack:
-- volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- fs: xfs
-- filestore_idempotent:
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-tasks:
-- install:
-- workunit:
- clients:
- all:
- - objectstore/test_fuse.sh
-
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-tasks:
-- install:
-- exec:
- client.0:
- - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb
- - rm -rf $TESTDIR/kvtest
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-openstack:
-- volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- ceph:
- fs: xfs
-- workunit:
- clients:
- all:
- - osdc/stress_objectcacher.sh
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, client.0]
-openstack:
-- volumes: # attached to each instance
- count: 2
- size: 10 # GB
-tasks:
-- install:
-- exec:
- client.0:
- - mkdir $TESTDIR/ostest && cd $TESTDIR/ostest && ulimit -c 0 && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-filestore 20 --debug-bluestore 20" ceph_test_objectstore --gtest_filter=-*/3
- - rm -rf $TESTDIR/ostest