bdev_inject_crash_probability: .5
ceph:
fs: xfs
- cephfs_ec_profile:
- - m=2
- - k=2
- - crush-failure-domain=osd
+ cephfs:
+ ec_profile:
+ - m=2
+ - k=2
+ - crush-failure-domain=osd
conf:
osd:
osd objectstore: bluestore
bdev_inject_crash_probability: .5
ceph:
fs: xfs
- cephfs_ec_profile:
- - m=2
- - k=2
- - crush-failure-domain=osd
+ cephfs:
+ ec_profile:
+ - m=2
+ - k=2
+ - crush-failure-domain=osd
conf:
osd:
osd objectstore: bluestore
overrides:
ceph:
- cephfs_ec_profile:
- - disabled
+ cephfs:
+ ec_profile:
+ - disabled
log-ignorelist:
- OSD full dropping all updates
- OSD near full
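The YAML hunks above all make the same move: the flat `cephfs_ec_profile` key is replaced by a nested `cephfs` mapping whose `ec_profile` entry carries either the profile settings or the `disabled` sentinel. Once parsed, an override block is just a nested dict, so consumers now receive the `cephfs` sub-dict and look the profile up inside it, which is exactly what the Python hunks below do. A minimal sketch of that lookup, with hypothetical variable names and the example profile taken from the hunks above:

    # Hypothetical stand-in for the parsed 'ceph' override block shown above.
    ceph_overrides = {
        'fs': 'xfs',
        'cephfs': {
            'ec_profile': ['m=2', 'k=2', 'crush-failure-domain=osd'],
        },
    }

    fs_config = ceph_overrides.get('cephfs', {})
    ec_profile = fs_config.get('ec_profile')  # old layout: ceph_overrides.get('cephfs_ec_profile')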
self.data_pool_name = None
self.data_pools = None
self.fs_config = fs_config
- self.ec_profile = fs_config.get('cephfs_ec_profile')
+ self.ec_profile = fs_config.get('ec_profile')
client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
self.client_id = client_list[0]
self.data_pool_name = None
self.data_pools = None
self.fs_config = fs_config
- self.ec_profile = fs_config.get('cephfs_ec_profile')
+ self.ec_profile = fs_config.get('ec_profile')
# Hack: cheeky inspection of ceph.conf to see what MDSs exist
self.mds_ids = set()
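Downstream, the `ec_profile` list read here is handed to the monitors when the erasure-coded data pool is created. The sketch below is a rough illustration rather than the code in this change: the helper name, its arguments, and the pool naming are assumptions, while `osd erasure-code-profile set`, `osd pool create ... erasure`, and the `allow_ec_overwrites` pool flag are real Ceph monitor commands, issued here through the teuthology `CephManager.raw_cluster_cmd()` interface these tasks already use.

    # Hypothetical helper: turn a parsed ec_profile list into an EC data pool.
    def create_ec_data_pool(mon_manager, base_pool_name, pg_num, ec_profile):
        if not ec_profile or 'disabled' in ec_profile:
            # The 'disabled' sentinel from the override above turns EC off entirely.
            return None
        ec_pool_name = base_pool_name + "_ec"
        # Register the profile (e.g. m=2, k=2, crush-failure-domain=osd) with the monitors.
        mon_manager.raw_cluster_cmd('osd', 'erasure-code-profile', 'set',
                                    ec_pool_name, *ec_profile)
        # Create an erasure-coded pool backed by that profile; CephFS data pools
        # additionally need overwrites enabled on EC pools.
        mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
                                    ec_pool_name, str(pg_num), str(pg_num),
                                    'erasure', ec_pool_name)
        mon_manager.raw_cluster_cmd('osd', 'pool', 'set',
                                    ec_pool_name, 'allow_ec_overwrites', 'true')
        return ec_pool_name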