 - exec:
     client.0:
       - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
-      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+      - sudo ceph osd pool create datapool 128 128 erasure teuthologyprofile
       - sudo ceph osd pool set datapool allow_ec_overwrites true
       - rbd pool init datapool
         debug bdev: 20
         debug bluefs: 20
         debug rocksdb: 10
-        enable experimental unrecoverable data corrupting features: "*"
         osd debug randomize hobject sort order: false
 # this doesn't work with failures because the log writes are not atomic across the two backends
 #        bluestore bluefs env mirror: true
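
Not part of the patch: a minimal sanity check for the resulting EC data-pool setup, assuming a client node with an admin keyring, an existing replicated "rbd" pool, and a throwaway image name ("ectest") used purely for illustration:

    # inspect the profile and confirm overwrites are enabled on the EC pool
    sudo ceph osd erasure-code-profile get teuthologyprofile
    sudo ceph osd pool get datapool allow_ec_overwrites
    # create an image whose data objects land in the EC pool;
    # "rbd info" should then report "data_pool: datapool"
    rbd create --size 1024 --data-pool datapool rbd/ectest
    rbd info rbd/ectest
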
--- /dev/null
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=2 k=3
+      - sudo ceph osd pool create datapool 128 128 erasure teuthologyprofile
+      - sudo ceph osd pool set datapool allow_ec_overwrites true
+      - rbd pool init datapool
+
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd default data pool: datapool
+      osd: # force bluestore since it's required for ec overwrites
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 30
+        debug bdev: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        enable experimental unrecoverable data corrupting features: "*"
+        osd debug randomize hobject sort order: false
+# this doesn't work with failures because the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
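
For context (not part of the patch): with k=3 data chunks plus m=2 coding chunks and crush-failure-domain=osd, each object is striped across 5 different OSDs and stays readable and writable after losing any 2 of them, at a space overhead of (k+m)/k = 5/3, roughly 1.67x, versus 3x for 3-way replication. The "rbd default data pool: datapool" client override means every image the test creates keeps its header and metadata in the replicated base pool while its data objects go to the EC pool, and 128 is a power-of-two pg_num that spreads the pool across far more placement groups than the minimal value of 4 used elsewhere.
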
 tasks:
 - exec:
     client.0:
-      - sudo ceph osd pool create cache 4
+      - sudo ceph osd pool create cache 128 128
       - sudo ceph osd tier add rbd cache
       - sudo ceph osd tier cache-mode cache writeback
       - sudo ceph osd tier set-overlay rbd cache
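
Not part of the patch: a sketch of the inverse teardown, following the documented cache-tier removal sequence, useful if the suite is re-run against a cluster that already has this tier configured. Pool names match the hunk above; the cache is assumed to be drainable.

    # switch to proxy mode so new I/O goes to the base pool, then flush/evict cached objects
    sudo ceph osd tier cache-mode cache proxy
    sudo rados -p cache cache-flush-evict-all
    # detach the cache tier from the rbd pool
    sudo ceph osd tier remove-overlay rbd
    sudo ceph osd tier remove rbd cache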