--- /dev/null
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5]
+- [client.0]
+- [client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      osd:
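+        # assert at OSD shutdown if any PG references are still held (catches ref leaks)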
+        osd shutdown pgref assert: true
--- /dev/null
+overrides:
+  ceph-deploy:
+    conf:
+      global:
+        osd pool default size: 2
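+        # chooseleaf type 0 = osd failure domain, so a size-2 pool fits on this single host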
+        osd crush chooseleaf type: 0
+        osd pool default pg num: 128
+        osd pool default pgp num: 128
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
--- /dev/null
+roles:
+- [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
+- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
--- /dev/null
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1]
+- [mon.b, mds.a-s, mon.c, mgr.y, osd.2, osd.3]
+- [client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 2
+    size: 10 # GB
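+# rotate the mds and osd logs once they reach the sizes below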
+log-rotate:
+  ceph-mds: 10G
+  ceph-osd: 10G
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
--- /dev/null
+roles:
+- [mon.a, mon.c, mgr.x, osd.0, osd.1, osd.2]
+- [mon.b, mgr.y, osd.3, osd.4, osd.5]
+- [osd.6, osd.7, osd.8, client.0]
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
--- /dev/null
+roles:
+- [mon.a, mgr.y, osd.0, osd.4, osd.8, osd.12]
+- [mon.b, osd.1, osd.5, osd.9, osd.13]
+- [mon.c, osd.2, osd.6, osd.10, osd.14]
+- [mgr.x, osd.3, osd.7, osd.11, osd.15, client.0]
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
--- /dev/null
+roles:
+- [mon.a, mon.b, mgr.x, osd.0, osd.1, osd.2, osd.3, client.0]
+- [mon.c, mgr.y, client.1]
+- [osd.4, osd.5, client.2]
+- [osd.6, osd.7]
+
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      osd:
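+        # debug_random: each OSD picks a random op queue implementation and cut-off at startup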
+        osd op queue: debug_random
+        osd op queue cut off: debug_random
+        osd debug verify missing on start: true
+        osd debug verify cached snaps: true
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon keyvaluedb: leveldb
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      global:
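+        # '*' enables all experimental features, which the (then-experimental) rocksdb mon backend needs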
+        enable experimental unrecoverable data corrupting features: '*'
+      mon:
+        mon keyvaluedb: rocksdb
--- /dev/null
+overrides:
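+  # have thrashosds occasionally simulate a BlueStore block-device crash instead of a clean OSD kill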
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
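+        # 96636764160 bytes = 90 GiB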
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        bluestore allocator: bitmap
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures because the log writes are not atomic across the two backends
+#   bluestore bluefs env mirror: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
--- /dev/null
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
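+        # aggressive: compress all writes unless the client hints that the data is incompressible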
+        bluestore compression mode: aggressive
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+
+# this doesn't work with failures because the log writes are not atomic across the two backends
+#   bluestore bluefs env mirror: true
--- /dev/null
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures because the log writes are not atomic across the two backends
+#   bluestore bluefs env mirror: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
--- /dev/null
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: filestore
+        osd sloppy crc: true
+        osd max object name len: 400
+        osd max object namespace len: 64
+  ceph-deploy:
+    fs: xfs
+    filestore: True
+    conf:
+      osd:
+        osd objectstore: filestore
+        osd sloppy crc: true
+        osd max object name len: 400
+        osd max object namespace len: 64