--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_install.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+  install:
+    extra_system_packages:
+      rpm:
+      - fio
+      deb:
+      - fio
+  ceph:
+    conf:
+      global:
+        # single-host cluster: CRUSH type 0 (osd) lets replicas land on
+        # different OSDs of the same host
+        osd crush chooseleaf type: 0
+        osd pool default pg num: 128
+        osd pool default pgp num: 128
+        # run every messenger path in crc mode
+        ms cluster mode: crc
+        ms service mode: crc
+        ms client mode: crc
+        ms mon service mode: crc
+        ms mon cluster mode: crc
+        ms mon client mode: crc
+      osd:
+        osd shutdown pgref assert: true
+        debug monc: 20
+      mon:
+        mon min osdmap epochs: 50
+        paxos service trim min: 10
+        # prune full osdmaps regularly
+        mon osdmap full prune min: 15
+        mon osdmap full prune interval: 2
+        mon osdmap full prune txsize: 2
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
--- /dev/null
+.qa/distros/crimson-supported-all-distro/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_qa_overrides.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
\ No newline at end of file
--- /dev/null
+# no need to verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- cephadm:
+    conf:
+      mgr:
+        debug ms: 1
+        debug mgr: 20
+        debug osd: 10
+- cephadm.shell:
+    # orchestrator smoke tests: cephadm can list daemons, services,
+    # hosts and devices
+    mon.a:
+    - ceph orch status
+    - ceph orch ps
+    - ceph orch ls
+    - ceph orch host ls
+    - ceph orch device ls
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/objectstore_crimson/seastore/segmented$/crimson_seastore_segmented.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+  ceph:
+    log-ignorelist:
+    - PG_DEGRADED
+    - OSD_DOWN
+    - OSD_HOST_DOWN
+    - OSD_ROOT_DOWN
+    - PG_AVAILABILITY
+    osd-mkfs-args:
+    - --crimson_cpu_num
+    - "3"
+tasks:
+- ceph:
+    pre-mgr-commands:
+    - echo PRE_MGR_COMMAND_RUNNING
+    - sudo ceph -s
+    # set the runtime value to match the mkfs-time --crimson_cpu_num of 3
+    - sudo ceph config set osd crimson_cpu_num 3
+- exec:
+    mon.a:
+    - ceph -s
+
+- exec:
+    client.0:
+    - |
+      set -ex
+      # create the test pool and a 10G rbd image, idempotently
+      if ! ceph osd pool ls | grep -qx 'testpool'; then
+        ceph osd pool create testpool 64 64
+      fi
+      ceph osd pool application enable testpool rbd || true
+      ceph -s
+      if ! rbd info testpool/testimage; then
+        rbd create testpool/testimage --size 10G --image-format=2
+      fi
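+# baseline workload: random 4k writes through fio's rbd ioengine while the
+# OSDs still run with the mkfs-time count of 3 crimson CPUs (fio itself is
+# pulled in by the extra_system_packages install override in this PR)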
+- exec:
+    client.0:
+    - |
+      set -ex
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      # capture the current shard layout, then allow a reactor/partition
+      # mismatch and scale every OSD down to 2 crimson CPUs
+      ceph daemon osd.0 dump_store_shards
+      ceph config set osd seastore_require_partition_count_match_reactor_count false
+      ceph config set osd.0 crimson_cpu_num 2
+      ceph config set osd.1 crimson_cpu_num 2
+      ceph config set osd.2 crimson_cpu_num 2
+
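+# the restart below picks up the new crimson_cpu_num; booting with fewer
+# reactors than seastore partitions is permitted by the
+# seastore_require_partition_count_match_reactor_count=false set above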
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+    # health may be degraded while the OSDs reshard; the ignorelist above
+    # covers the expected warnings
+    wait-for-healthy: false
+    wait-for-osds-up: true
+
+- exec:
+    osd.0:
+    - |
+      set -ex
+      ceph health detail
+      ceph daemon osd.0 config show
+- exec:
+    client.0:
+    - |
+      set -ex
+      # re-run the same workload with 2 crimson CPUs per OSD
+      rbd info testpool/testimage
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      ceph daemon osd.0 dump_store_shards
+      # scale up from 2 to 5 crimson CPUs per OSD
+      ceph config set osd.0 crimson_cpu_num 5
+      ceph config set osd.1 crimson_cpu_num 5
+      ceph config set osd.2 crimson_cpu_num 5
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+
+- exec:
+    osd.0:
+    - |
+      set -ex
+      ceph health detail
+      ceph daemon osd.0 config show
+
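+# final pass: same workload with 5 crimson CPUs per OSD, i.e. scaling above
+# the mkfs-time count, then dump the store shards once more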
+- exec:
+    client.0:
+    - |
+      set -ex
+      rbd info testpool/testimage
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      ceph daemon osd.0 dump_store_shards
\ No newline at end of file
                    ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role] = fs
                    devs_to_clean[remote].append(mnt_point)
+        # optional extra arguments for ceph-osd --mkfs, supplied by suite
+        # fragments through overrides['ceph']['osd-mkfs-args'] (the crimson
+        # reshard fragment above passes --crimson_cpu_num this way)
+        overrides = ctx.config.get('overrides', {})
+        ceph_overrides = overrides.get('ceph', {})
+        mkfs_args = ceph_overrides.get('osd-mkfs-args', [])
+        log.info("OSD mkfs args = %s", mkfs_args)
        for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
            _, _, id_ = teuthology.split_role(role)
            try:
                        '--mkkey',
                        '-i', id_,
                        '--monmap', monmap_path]
+                if mkfs_args:
+                    args.extend(mkfs_args)
                log_path = f'/var/log/ceph/{cluster_name}-osd.{id_}.log'
                create_log_cmd, args = \
                    maybe_redirect_stderr(config, 'osd', args, log_path)