--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_install.yaml
\ No newline at end of file
--- /dev/null
+# One-node cluster layout plus ceph.conf overrides merged into every job
+# that includes this fragment.
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
+        # NOTE(review): presumably the seastar reactor core count for
+        # crimson-osd — confirm against crimson documentation.
+        crimson cpu num: 3
+      global:
+        # Pin every messenger path (cluster/service/client and the mon
+        # variants) to crc mode.
+        ms cluster mode: crc
+        ms service mode: crc
+        ms client mode: crc
+        ms mon service mode: crc
+        ms mon cluster mode: crc
+        ms mon client mode: crc
+
--- /dev/null
+.qa/distros/crimson-supported-all-distro/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_qa_overrides.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+# Install and start a ceph cluster with verbose monc logging on the OSDs
+# and aggressive osdmap trimming/pruning on the mons.
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        debug monc: 20
+      mon:
+        mon min osdmap epochs: 50
+        paxos service trim min: 10
+        # prune full osdmaps regularly
+        mon osdmap full prune min: 15
+        mon osdmap full prune interval: 2
+        mon osdmap full prune txsize: 2
--- /dev/null
+# no need to verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- cephadm:
+    conf:
+      mgr:
+        debug ms: 1
+        debug mgr: 20
+        debug osd: 10
+# Sanity-check the orchestrator once the cluster is up.
+- cephadm.shell:
+    mon.a:
+    - ceph orch status
+    - ceph orch ps
+    - ceph orch ls
+    - ceph orch host ls
+    - ceph orch device ls
--- /dev/null
+.qa/objectstore/crimson
\ No newline at end of file
--- /dev/null
+../perf/settings
\ No newline at end of file
--- /dev/null
+# Run the same librbdfio randwrite benchmark three times, restarting the
+# OSDs with a different crimson_cpu_num between runs (initial value, 2, 5).
+# The cbt stanza is identical for every run, so it is defined once with an
+# anchor and aliased afterwards.
+tasks:
+- exec:
+    mon.a:
+    - ceph -s
+- cbt: &cbt_randwrite
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: true
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
+    monitoring_profiles:
+      perf:
+        nodes:
+        - osds
+        perf_cmd: 'perf'
+        args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+        pid_glob: 'crimson-osd.*.pid'
+
+# Stop all OSDs, lower the reactor core count to 2, and restart each OSD.
+- exec:
+    mon.a:
+    - cephadm shell -- ceph orch stop osd --all
+- exec:
+    mon.a:
+    - |
+      # Quote 'osd.*' so the shell cannot glob-expand it against local files.
+      # NOTE(review): every sibling command goes through `cephadm shell --`;
+      # confirm a bare `ceph` CLI exists on the host, or prefix this one too.
+      ceph config set 'osd.*' crimson_cpu_num 2
+      for osd in $(cephadm shell -- ceph osd ls | awk '{print $1}'); do
+        cephadm shell -- ceph orch daemon restart osd.$osd
+      done
+
+- exec:
+    mon.a:
+    - ceph -s
+- cbt: *cbt_randwrite
+
+# Stop all OSDs, raise the reactor core count to 5, and restart each OSD.
+- exec:
+    mon.a:
+    - cephadm shell -- ceph orch stop osd --all
+- exec:
+    mon.a:
+    - |
+      # See NOTE above re: quoting and the missing `cephadm shell --` prefix.
+      ceph config set 'osd.*' crimson_cpu_num 5
+      for osd in $(cephadm shell -- ceph osd ls | awk '{print $1}'); do
+        cephadm shell -- ceph orch daemon restart osd.$osd
+      done
+
+- exec:
+    mon.a:
+    - ceph -s
+- cbt: *cbt_randwrite