--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_install.yaml
\ No newline at end of file
--- /dev/null
+# Teuthology overrides for a single-host crimson-osd job.
+overrides:
+  install:
+    # fio is required by the rbd benchmark workunits in this suite.
+    extra_system_packages:
+      rpm:
+        - fio
+      deb:
+        - fio
+  ceph-deploy:
+    conf:
+      global:
+        # Single-host cluster: place replicas across OSDs (type 0 = osd),
+        # not across hosts.
+        osd crush chooseleaf type: 0
+        osd pool default pg num: 128
+        osd pool default pgp num: 128
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
+        # NOTE(review): CPU count given to each crimson-osd — confirm the
+        # semantics match crimson_cpu_num used by the task fragments.
+        crimson cpu num: 3
+      global:
+        # Pin every messenger connection mode to crc (non-secure).
+        ms cluster mode: crc
+        ms service mode: crc
+        ms client mode: crc
+        ms mon service mode: crc
+        ms mon cluster mode: crc
+        ms mon client mode: crc
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
--- /dev/null
+.qa/distros/crimson-supported-all-distro/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_qa_overrides.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+# Install, then start a cluster with aggressive mon osdmap trimming so
+# full-map pruning is exercised during the run.
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        debug monc: 20
+      mon:
+        mon min osdmap epochs: 50
+        paxos service trim min: 10
+        # prune full osdmaps regularly
+        mon osdmap full prune min: 15
+        mon osdmap full prune interval: 2
+        mon osdmap full prune txsize: 2
--- /dev/null
+# no need to verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- cephadm:
+    conf:
+      mgr:
+        debug ms: 1
+        debug mgr: 20
+        debug osd: 10
+# Smoke-check the orchestrator CLI from the mon host.
+- cephadm.shell:
+    mon.a:
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/objectstore/crimson/seastore/segmented$/crimson_seastore_segmented.yaml
\ No newline at end of file
--- /dev/null
+# Run fio randwrite load against an rbd image while resizing
+# crimson_cpu_num online (3 -> 2 -> 5) and restarting the OSDs.
+tasks:
+- exec:
+    mon.a:
+      - ceph -s
+
+# Create a pool and a 10G format-2 image for the fio runs below.
+- workunit:
+    clients:
+      client.0:
+        - |
+          set -ex
+          ceph osd pool create testpool 64 64
+          ceph osd pool application enable testpool rbd
+          rbd create testpool/testimage --size 10G --image-format=2
+
+# Baseline 2-minute randwrite load.
+- workunit:
+    clients:
+      client.0:
+        - |
+          set -ex
+          fio --name=rbdtest \
+              --ioengine=rbd \
+              --pool=testpool \
+              --rbdname=testimage \
+              --clientname=admin \
+              --rw=randwrite \
+              --bs=4k \
+              --iodepth=32 \
+              --numjobs=4 \
+              --runtime=120 \
+              --time_based \
+              --group_reporting
+
+# Shrink each crimson-osd to 2 CPUs. osd.* is quoted so the shell passes
+# the pattern through to ceph verbatim instead of glob-expanding it.
+- exec:
+    mon.a:
+      - ceph config set 'osd.*' crimson_cpu_num 2
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+
+# NOTE(review): confirm ceph.wait_for_health is available in this
+# teuthology branch; ceph.healthy is the more common health gate.
+- ceph.wait_for_health:
+    timeout: 300
+    health: ["HEALTH_OK", "HEALTH_WARN"]
+
+# Repeat the randwrite load with the reduced CPU count.
+- workunit:
+    clients:
+      client.0:
+        - |
+          set -ex
+          rbd info testpool/testimage
+          fio --name=rbdtest \
+              --ioengine=rbd \
+              --pool=testpool \
+              --rbdname=testimage \
+              --clientname=admin \
+              --rw=randwrite \
+              --bs=4k \
+              --iodepth=32 \
+              --numjobs=4 \
+              --runtime=120 \
+              --time_based \
+              --group_reporting
+
+# Grow each crimson-osd to 5 CPUs and repeat once more.
+- exec:
+    mon.a:
+      - ceph config set 'osd.*' crimson_cpu_num 5
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+
+- ceph.wait_for_health:
+    timeout: 300
+    health: ["HEALTH_OK", "HEALTH_WARN"]
+
+- workunit:
+    clients:
+      client.0:
+        - |
+          set -ex
+          rbd info testpool/testimage
+          fio --name=rbdtest \
+              --ioengine=rbd \
+              --pool=testpool \
+              --rbdname=testimage \
+              --clientname=admin \
+              --rw=randwrite \
+              --bs=4k \
+              --iodepth=32 \
+              --numjobs=4 \
+              --runtime=120 \
+              --time_based \
+              --group_reporting
+
+# Clean up the test image.
+- workunit:
+    clients:
+      client.0:
+        - |
+          set -ex
+          rbd rm testpool/testimage