--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_install.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+# Install-time overrides shared by the crimson qa suite: make fio
+# available for the benchmark tasks and pin cluster-wide defaults.
+overrides:
+  install:
+    extra_system_packages:
+      rpm:
+      - fio
+      deb:
+      - fio
+  ceph-deploy:
+    conf:
+      global:
+        # Failure domain "osd" (type 0): replicate across OSDs, not
+        # hosts — required for the single-host role layout below.
+        osd crush chooseleaf type: 0
+        osd pool default pg num: 128
+        osd pool default pgp num: 128
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
+        # Initial per-OSD crimson CPU (reactor) count; the restart
+        # workload in this patch varies it across OSD restarts.
+        crimson cpu num: 3
+      global:
+        # Force crc mode on every messenger link (cluster, service,
+        # client, and all mon-facing links).
+        ms cluster mode: crc
+        ms service mode: crc
+        ms client mode: crc
+        ms mon service mode: crc
+        ms mon cluster mode: crc
+        ms mon client mode: crc
+# Single-host cluster: one mon, one mgr, three OSDs, one client.
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
--- /dev/null
+.qa/distros/crimson-supported-all-distro/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_qa_overrides.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+# Install and bring up ceph with mon settings that trim/prune osdmaps
+# aggressively, so map-pruning paths are exercised during the test.
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        debug monc: 20
+      mon:
+        mon min osdmap epochs: 50
+        paxos service trim min: 10
+        # prune full osdmaps regularly
+        mon osdmap full prune min: 15
+        mon osdmap full prune interval: 2
+        mon osdmap full prune txsize: 2
--- /dev/null
+# no need to verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- cephadm:
+    conf:
+      mgr:
+        debug ms: 1
+        debug mgr: 20
+        debug osd: 10
+# Orchestrator smoke checks, run from the mon host: each command must
+# succeed for the task to pass.
+- cephadm.shell:
+    mon.a:
+    - ceph orch status
+    - ceph orch ps
+    - ceph orch ls
+    - ceph orch host ls
+    - ceph orch device ls
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/objectstore_crimson/seastore/segmented$/crimson_seastore_segmented.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+# Crimson/seastore reactor-resize workload: create an rbd image, drive
+# fio traffic, then restart the OSDs with different "crimson cpu num"
+# values (2, then 5) and verify the cluster still serves I/O.
+overrides:
+  ceph:
+    # Transient warnings expected while OSDs restart mid-test.
+    log-ignorelist:
+    - PG_DEGRADED
+    - OSD_DOWN
+    - OSD_HOST_DOWN
+    - OSD_ROOT_DOWN
+    - PG_AVAILABILITY
+tasks:
+- exec:
+    mon.a:
+    - ceph -s
+
+# Create the test pool and image; every step is idempotent so the
+# block is safe to re-run.
+- exec:
+    client.0:
+    - |
+      set -ex
+      if ! ceph osd pool ls | grep -qx 'testpool'; then
+        ceph osd pool create testpool 64 64
+      fi
+      ceph osd pool application enable testpool rbd || true
+      ceph -s
+      if ! rbd info testpool/testimage; then
+        rbd create testpool/testimage --size 10G --image-format=2
+      fi
+# Baseline fio workload at the initial reactor count.
+# NOTE(review): 'ceph daemon osd.0 ...' talks to osd.0's host-local
+# admin socket — confirm client.0 is co-located with osd.0.
+- exec:
+    client.0:
+    - |
+      set -ex
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      ceph daemon osd.0 dump_store_shards
+
+# Restart all OSDs with a smaller reactor count (2); the seastore
+# partition/reactor match check is relaxed so the store still opens.
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+    conf:
+      osd:
+        crimson cpu num: 2
+        seastore require partition count match reactor count: false
+    wait-for-healthy: false
+    wait-for-osds-up: true
+
+- exec:
+    osd.0:
+    - |
+      set -ex
+      ceph health detail
+      ceph daemon osd.0 config show
+# Same workload again to prove I/O works after the shrink.
+- exec:
+    client.0:
+    - |
+      set -ex
+      rbd info testpool/testimage
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      ceph daemon osd.0 dump_store_shards
+
+# Now grow the reactor count (5) and repeat the same checks.
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2]
+    conf:
+      osd:
+        crimson cpu num: 5
+        seastore require partition count match reactor count: false
+    wait-for-healthy: false
+    wait-for-osds-up: true
+
+- exec:
+    osd.0:
+    - |
+      set -ex
+      ceph health detail
+      ceph daemon osd.0 config show
+
+- exec:
+    client.0:
+    - |
+      set -ex
+      rbd info testpool/testimage
+      fio --name=rbdtest \
+          --ioengine=rbd \
+          --pool=testpool \
+          --rbdname=testimage \
+          --clientname=admin \
+          --rw=randwrite \
+          --bs=4k \
+          --iodepth=32 \
+          --numjobs=4 \
+          --runtime=120 \
+          --time_based \
+          --group_reporting \
+          --direct=1 \
+          --invalidate=0
+      ceph daemon osd.0 dump_store_shards
\ No newline at end of file
else:
yield
+@contextlib.contextmanager
+def write_conf_with_override(ctx, cluster, override_conf):
+
+ tmp_conf = deepcopy(ctx.ceph[cluster].conf)
+
+ teuthology.deep_merge(tmp_conf, override_conf)
+
+ conf_fp = BytesIO()
+ tmp_conf.write(conf_fp)
+ conf_fp.seek(0)
+
+ writes = ctx.cluster.run(
+ args=[
+ 'sudo','tee','/etc/ceph/ceph.conf',
+ run.Raw('>'),'/dev/null'
+ ],
+ stdin=run.PIPE,
+ wait=False
+ )
+
+ teuthology.feed_many_stdins_and_close(conf_fp, writes)
+ run.wait(writes)
+
@contextlib.contextmanager
def restart(ctx, config):
"""
daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True)
clusters = set()
+ if 'conf' in config:
+ log.info("Applying temporary conf override before restart")
+ override_conf = config['conf']
+
+ clusters_to_write = set()
+ for role in daemons:
+ cluster, type_, id_ = teuthology.split_role(role)
+ clusters_to_write.add(cluster)
+
+ for cluster in clusters_to_write:
+ write_conf_with_override(ctx, cluster, override_conf)
+ clusters.add(cluster)
+
with suppress_mon_health_to_clog(ctx, config):
for role in daemons:
cluster, type_, id_ = teuthology.split_role(role)