--- /dev/null
+roles:
+# single-node cluster: one mon, one mgr, three crimson OSDs, one client
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+
+# no need to verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        debug monc: 20
+        # baseline reactor count; the exec stanzas further down sweep
+        # this to 2 and then 5 between benchmark runs
+        crimson cpu num: 3
+        osd shutdown pgref assert: true
+        # crimson's osd objectstore option
+        osd objectstore: seastore
+        # verbose seastore subsystem logging for the perf runs
+        debug seastore: 20
+        debug seastore onode: 20
+        debug seastore odata: 20
+        debug seastore omap: 20
+        debug seastore tm: 20
+        debug seastore t: 20
+        debug seastore cleaner: 20
+        debug seastore epm: 20
+        debug seastore lba: 20
+        debug seastore fixedkv tree: 20
+        debug seastore cache: 20
+        debug seastore journal: 20
+        debug seastore device: 20
+        debug seastore backref: 20
+      global:
+        # crc messenger mode on every cluster/client/mon path
+        ms cluster mode: crc
+        ms service mode: crc
+        ms client mode: crc
+        ms mon service mode: crc
+        ms mon cluster mode: crc
+        ms mon client mode: crc
+      mon:
+        mon min osdmap epochs: 50
+# sanity check before the first benchmark run
+- exec:
+    mon.a:
+    - ceph -s
+# run 1: librbdfio randwrite with the baseline 3 crimson reactor cpus
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: true
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
+    monitoring_profiles:
+      perf:
+        nodes:
+        - osds
+        perf_cmd: 'perf'
+        args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+        # quoted so the glob never reads as YAML alias/anchor syntax
+        pid_glob: 'crimson-osd.*.pid'
+
+- exec:
+    mon.a:
+    - cephadm shell -- ceph orch stop osd --all
+- exec:
+    mon.a:
+    - |
+      # drop the crimson reactor count to 2 for the next benchmark run.
+      # 'osd' (not 'osd.*') is the config section that covers all OSDs
+      # for 'ceph config set'.
+      # NOTE(review): sibling commands go through 'cephadm shell --';
+      # confirm a bare 'ceph' CLI is usable on mon.a here.
+      ceph config set osd crimson_cpu_num 2
+      for osd in $(cephadm shell -- ceph osd ls | awk '{print $1}'); do
+        cephadm shell -- ceph orch daemon restart osd.$osd
+      done
+
+# sanity check, then run 2: same librbdfio workload with 2 reactor cpus
+- exec:
+    mon.a:
+    - ceph -s
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: true
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
+    monitoring_profiles:
+      perf:
+        nodes:
+        - osds
+        perf_cmd: 'perf'
+        args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+        # quoted so the glob never reads as YAML alias/anchor syntax
+        pid_glob: 'crimson-osd.*.pid'
+
+- exec:
+    mon.a:
+    - cephadm shell -- ceph orch stop osd --all
+- exec:
+    mon.a:
+    - |
+      # raise the crimson reactor count to 5 for the final run.
+      # 'osd' (not 'osd.*') is the config section that covers all OSDs
+      # for 'ceph config set'.
+      # NOTE(review): sibling commands go through 'cephadm shell --';
+      # confirm a bare 'ceph' CLI is usable on mon.a here.
+      ceph config set osd crimson_cpu_num 5
+      for osd in $(cephadm shell -- ceph osd ls | awk '{print $1}'); do
+        cephadm shell -- ceph orch daemon restart osd.$osd
+      done
+
+# sanity check, then run 3: same librbdfio workload with 5 reactor cpus
+- exec:
+    mon.a:
+    - ceph -s
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: true
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
+    monitoring_profiles:
+      perf:
+        nodes:
+        - osds
+        perf_cmd: 'perf'
+        args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+        # quoted so the glob never reads as YAML alias/anchor syntax
+        pid_glob: 'crimson-osd.*.pid'