From 46aeaf37a7c5aa44af36b2a7ec3f3f9be1c3e57f Mon Sep 17 00:00:00 2001
From: Chunmei Liu
Date: Thu, 27 Nov 2025 07:47:37 +0000
Subject: [PATCH] qa/suites/crimson-rados: add fio test case for osd shard
 number changes upon restart

Signed-off-by: Chunmei Liu
---
 qa/suites/crimson-rados/osd_shards/.qa        |   1 +
 .../osd_shards/crimson-supported-all-distro   |   1 +
 .../osd_shards/crimson_fio_restart.yaml       | 166 ++++++++++++++++++
 .../osd_shards/crimson_qa_overrides.yaml      |   1 +
 4 files changed, 169 insertions(+)
 create mode 120000 qa/suites/crimson-rados/osd_shards/.qa
 create mode 120000 qa/suites/crimson-rados/osd_shards/crimson-supported-all-distro
 create mode 100644 qa/suites/crimson-rados/osd_shards/crimson_fio_restart.yaml
 create mode 120000 qa/suites/crimson-rados/osd_shards/crimson_qa_overrides.yaml

diff --git a/qa/suites/crimson-rados/osd_shards/.qa b/qa/suites/crimson-rados/osd_shards/.qa
new file mode 120000
index 00000000000..a602a0353e7
--- /dev/null
+++ b/qa/suites/crimson-rados/osd_shards/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/crimson-rados/osd_shards/crimson-supported-all-distro b/qa/suites/crimson-rados/osd_shards/crimson-supported-all-distro
new file mode 120000
index 00000000000..a5b729b9efa
--- /dev/null
+++ b/qa/suites/crimson-rados/osd_shards/crimson-supported-all-distro
@@ -0,0 +1 @@
+.qa/distros/crimson-supported-all-distro/
\ No newline at end of file
diff --git a/qa/suites/crimson-rados/osd_shards/crimson_fio_restart.yaml b/qa/suites/crimson-rados/osd_shards/crimson_fio_restart.yaml
new file mode 100644
index 00000000000..7a9b65a1885
--- /dev/null
+++ b/qa/suites/crimson-rados/osd_shards/crimson_fio_restart.yaml
@@ -0,0 +1,166 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+
+# no need to verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        debug monc: 20
+        crimson cpu num: 3
+        osd shutdown pgref assert: true
+        # crimson's osd objectstore option
+        osd objectstore: seastore
+        debug seastore: 20
+        debug seastore onode: 20
+        debug seastore odata: 20
+        debug seastore omap: 20
+        debug seastore tm: 20
+        debug seastore t: 20
+        debug seastore cleaner: 20
+        debug seastore epm: 20
+        debug seastore lba: 20
+        debug seastore fixedkv tree: 20
+        debug seastore cache: 20
+        debug seastore journal: 20
+        debug seastore device: 20
+        debug seastore backref: 20
+      global:
+        ms cluster mode: crc
+        ms service mode: crc
+        ms client mode: crc
+        ms mon service mode: crc
+        ms mon cluster mode: crc
+        ms mon client mode: crc
+      mon:
+        mon min osdmap epochs: 50
+- exec:
+    mon.a:
+      - ceph -s
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: true
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
+    monitoring_profiles:
+      perf:
+        nodes:
+          - osds
+        perf_cmd: 'perf'
+        args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+        pid_glob: crimson-osd.*.pid
+
+# Stop all osds, lower the shard count (crimson_cpu_num 3 -> 2), restart
+# them, and rerun the same fio benchmark.
+# NOTE(review): 'ceph orch' via 'cephadm shell' assumes a cephadm-managed
+# cluster; confirm this suite's ceph task deploys with cephadm.
+- exec:
+    mon.a:
+      - cephadm shell -- ceph orch stop osd --all
+- exec:
+    mon.a:
+      - |
+        ceph config set osd.* crimson_cpu_num 2
+        for osd in $(cephadm shell -- ceph osd ls | awk '{print $1}'); do
+          cephadm shell -- ceph orch daemon restart osd.$osd
+        done
+
+- exec:
+    mon.a:
+      - ceph -s
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: true
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
+    monitoring_profiles:
+      perf:
+        nodes:
+          - osds
+        perf_cmd: 'perf'
+        args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+        pid_glob: crimson-osd.*.pid
+
+# Raise the shard count (crimson_cpu_num 2 -> 5), restart, and rerun fio.
+- exec:
+    mon.a:
+      - cephadm shell -- ceph orch stop osd --all
+- exec:
+    mon.a:
+      - |
+        ceph config set osd.* crimson_cpu_num 5
+        for osd in $(cephadm shell -- ceph osd ls | awk '{print $1}'); do
+          cephadm shell -- ceph orch daemon restart osd.$osd
+        done
+
+- exec:
+    mon.a:
+      - ceph -s
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: true
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
+    monitoring_profiles:
+      perf:
+        nodes:
+          - osds
+        perf_cmd: 'perf'
+        args: 'stat -p {pid} -o {perf_dir}/perf_stat.{pid}'
+        pid_glob: crimson-osd.*.pid
diff --git a/qa/suites/crimson-rados/osd_shards/crimson_qa_overrides.yaml b/qa/suites/crimson-rados/osd_shards/crimson_qa_overrides.yaml
new file mode 120000
index 00000000000..2bf67af1b18
--- /dev/null
+++ b/qa/suites/crimson-rados/osd_shards/crimson_qa_overrides.yaml
@@ -0,0 +1 @@
+.qa/config/crimson_qa_overrides.yaml
\ No newline at end of file
-- 
2.47.3