Currently we have 2 types of thrash tests: default and simple.
Seastore passes only the simple one.
The 2 types are separated into different dirs (see the sketch
below): the `thrash` dir will be tested with bluestore only,
while `thrash_simple` will be tested with both backends.
Once Seastore passes the default tests as well, we can merge
the two dirs back.
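
Rough sketch of the resulting layout (the suite root is assumed
from the crimson overrides in this diff, e.g. `crimson seastar cpu
cores` and `crimson_qa_overrides.yaml`; it is not part of the
change itself):

    qa/suites/crimson-rados/        # assumed suite root
        thrash/                     # default thrash tests -- bluestore only
        thrash_simple/              # simple thrash tests  -- bluestore + seastore

Each dir can then be scheduled independently in the usual way,
e.g. (branch/machine flags omitted, exact invocation may vary):

    teuthology-suite -s crimson-rados:thrash_simple --flavor crimson ...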
Signed-off-by: Matan Breizman <mbreizma@redhat.com>
(cherry picked from commit 69b241a4629e458ad7ed7b643e42c7b44e18e9fd)
+++ /dev/null
-.qa/config/seastore.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
- conf:
- osd:
- osd debug reject backfill probability: .3
- osd scrub min interval: 60
- osd scrub max interval: 120
- osd max backfills: 3
- osd snap trim sleep: 2
- osd delete sleep: 1
- mon:
- mon min osdmap epochs: 50
- paxos service trim min: 10
- # prune full osdmaps regularly
- mon osdmap full prune min: 15
- mon osdmap full prune interval: 2
- mon osdmap full prune txsize: 2
-tasks:
-- thrashosds:
- timeout: 2400
- dump_ops_enable: false
- sighup_delay: 0
- min_in: 3
- noscrub_toggle_delay: 0
- chance_down: 0
- chance_thrash_pg_upmap: 0
- reweight_osd: 0
- thrash_primary_affinity: false
- ceph_objectstore_tool: false
- chance_inject_pause_short: 0
- chance_thrash_cluster_full: 0
- chance_reset_purged_snaps_last: 0
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/overrides/2-size-2-min-size.yaml
\ No newline at end of file
--- /dev/null
+.qa/overrides/3-size-2-min-size.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/overrides/short_pg_log.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/overrides/more-active-recovery.yaml
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ osd_async_recovery_min_cost: 1
+ osd_object_clean_region_max_num_intervals: 1000
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ osd_async_recovery_min_cost: 1
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ osd_object_clean_region_max_num_intervals: 1000
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+roles:
+- [mon.a, osd.0, osd.1, client.0, node-exporter.a]
+- [mgr.x, osd.2, osd.3, client.1, prometheus.a, node-exporter.b]
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
+ crimson alien thread cpu cores: 6-7
+ osd.0:
+ crimson seastar cpu cores: 0-2
+ osd.1:
+ crimson seastar cpu cores: 3-5
+ osd.2:
+ crimson seastar cpu cores: 0-2
+ osd.3:
+ crimson seastar cpu cores: 3-5
+ global:
+ ms cluster mode: crc
+ ms service mode: crc
+ ms client mode: crc
+ ms mon service mode: crc
+ ms mon cluster mode: crc
+ ms mon client mode: crc
--- /dev/null
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
--- /dev/null
+.qa/distros/crimson-supported-all-distro/
\ No newline at end of file
--- /dev/null
+.qa/config/crimson_qa_overrides.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ install:
+ ceph:
+ flavor: crimson
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ debug monc: 20
+ flavor: crimson
--- /dev/null
+# no need to verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- cephadm:
+ conf:
+ mgr:
+ debug ms: 1
+ debug mgr: 20
+ debug osd: 10
+- cephadm.shell:
+ mon.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/config/bluestore.yaml
\ No newline at end of file
--- /dev/null
+.qa/config/seastore.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ osd delete sleep: 1
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 2400
+ dump_ops_enable: false
+ sighup_delay: 0
+ min_in: 3
+ noscrub_toggle_delay: 0
+ chance_down: 0
+ chance_thrash_pg_upmap: 0
+ reweight_osd: 0
+ thrash_primary_affinity: false
+ ceph_objectstore_tool: false
+ chance_inject_pause_short: 0
+ chance_thrash_cluster_full: 0
+ chance_reset_purged_snaps_last: 0
--- /dev/null
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 150
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
--- /dev/null
+overrides:
+ conf:
+ osd:
+ osd deep scrub update digest min age: 0
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
--- /dev/null
+overrides:
+ ceph:
+ crush_tunables: jewel
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ balance_reads: true
+ max_attr_len: 8192
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ setattr: 25
+ rmattr: 25
--- /dev/null
+overrides:
+ ceph:
+ crush_tunables: jewel
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ localize_reads: true
+ max_attr_len: 8192
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ setattr: 25
+ rmattr: 25
--- /dev/null
+overrides:
+ ceph:
+ crush_tunables: jewel
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ max_attr_len: 8192
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
+ setattr: 25
+ rmattr: 25
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ balance_reads: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ localize_reads: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 0
+ # TODO: CEPH_OSD_OP_COPY_FROM
+ copy_from: 0
--- /dev/null
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_fadvise_dontneed: true
+ op_weights:
+ write: 100