--- /dev/null
+# Teuthology suite fragment: package/conf overrides and cluster layout.
+# fio is installed on every node so the rbd workload fragments in this
+# suite can run it.
+overrides:
+  install:
+    extra_system_packages:
+      rpm:
+      - fio
+      deb:
+      - fio
+  ceph-deploy:
+    conf:
+      global:
+        # chooseleaf type 0 = replicate across OSDs rather than hosts;
+        # required here because all daemons share one host (see roles).
+        osd crush chooseleaf type: 0
+        osd pool default pg num: 128
+        osd pool default pgp num: 128
+  ceph:
+    conf:
+      osd:
+        # Assert on leaked PG references at OSD shutdown instead of
+        # silently ignoring them.
+        osd shutdown pgref assert: true
+      global:
+        # Use crc mode on every messenger connection class.
+        ms cluster mode: crc
+        ms service mode: crc
+        ms client mode: crc
+        ms mon service mode: crc
+        ms mon cluster mode: crc
+        ms mon client mode: crc
+# Single host carrying the whole cluster plus the test client.
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
--- /dev/null
+# Bring up a cluster with aggressive monitor-side osdmap trimming so
+# that full-osdmap pruning is exercised during the run.
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        # Verbose mon-client (monc) debugging on the OSDs.
+        debug monc: 20
+      mon:
+        # Keep only a small window of osdmap epochs and trim paxos
+        # state frequently.
+        mon min osdmap epochs: 50
+        paxos service trim min: 10
+        # prune full osdmaps regularly
+        mon osdmap full prune min: 15
+        mon osdmap full prune interval: 2
+        mon osdmap full prune txsize: 2
--- /dev/null
+overrides:
+  ceph:
+    # These health warnings are expected while the OSDs are restarted
+    # below; do not fail the run on them.
+    log-ignorelist:
+    - PG_DEGRADED
+    - OSD_DOWN
+    - OSD_HOST_DOWN
+    - OSD_ROOT_DOWN
+    - PG_AVAILABILITY
+tasks:
+# Sanity check: cluster reachable from the monitor node.
+- exec:
+    mon.a:
+    - ceph -s
+
+# Idempotent setup: create the test pool and a 10G rbd image if absent.
+- exec:
+    client.0:
+    - |
+      set -ex
+      if ! ceph osd pool ls | grep -qx 'testpool'; then
+        ceph osd pool create testpool 64 64
+      fi
+      ceph osd pool application enable testpool rbd || true
+      ceph -s
+      if ! rbd info testpool/testimage; then
+        rbd create testpool/testimage --size 10G --image-format=2
+      fi
+# Baseline fio random-write pass, then reconfigure the OSDs to use
+# 2 CPUs each. NOTE(review): presumably crimson_cpu_num sets the
+# seastar reactor/CPU count per crimson OSD, and the seastore option
+# below permits a reactor count that no longer matches the on-disk
+# partition count -- confirm both option names against the crimson docs.
+- exec:
+    client.0:
+    - |
+      set -ex
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      # Admin socket is reachable because client.0 and osd.0 share a
+      # host (see the suite's roles fragment).
+      ceph daemon osd.0 dump_store_shards
+      ceph config set osd seastore_require_partition_count_match_reactor_count false
+      ceph config set osd.0 crimson_cpu_num 2
+      ceph config set osd.1 crimson_cpu_num 2
+      ceph config set osd.2 crimson_cpu_num 2
+
+# Restart all OSDs so the new CPU count takes effect. The cluster will
+# not be healthy right away (hence the ignorelist above), so only wait
+# for the OSDs to come back up.
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+
+# Post-restart inspection from the OSD node.
+- exec:
+    osd.0:
+    - |
+      set -ex
+      ceph health detail
+      ceph daemon osd.0 config show
+# Re-run the same workload at 2 CPUs, then scale up to 5.
+- exec:
+    client.0:
+    - |
+      set -ex
+      rbd info testpool/testimage
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      ceph daemon osd.0 dump_store_shards
+      ceph config set osd.0 crimson_cpu_num 5
+      ceph config set osd.1 crimson_cpu_num 5
+      ceph config set osd.2 crimson_cpu_num 5
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+
+- exec:
+    osd.0:
+    - |
+      set -ex
+      ceph health detail
+      ceph daemon osd.0 config show
+
+# Final verification pass at 5 CPUs per OSD.
+- exec:
+    client.0:
+    - |
+      set -ex
+      rbd info testpool/testimage
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      ceph daemon osd.0 dump_store_shards
\ No newline at end of file