roles:
-- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2]
-- [mon.b, mgr.x, mds.b, mds.c, osd.3, osd.4, osd.5]
+- [mon.a, mon.c, mgr.y, mds.a, osd.0, osd.1, osd.2, osd.3]
+- [mon.b, mgr.x, mds.b, mds.c, osd.4, osd.5, osd.6, osd.7]
- [client.0]
roles:
-- [mon.a, mon.c, mgr.y, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2]
-- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5]
+- [mon.a, mon.c, mgr.y, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2, osd.3]
+- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.4, osd.5, osd.6, osd.7]
- [client.0]
--- /dev/null
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    cephfs_ec_profile:
+      - m=2
+      - k=2
+      - crush-failure-domain=osd
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore compression mode: aggressive
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100 GB OSD so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
+
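A note on the profile above: with k=2 data chunks and m=2 coding chunks, every object in the erasure-coded data pool is stored as k + m = 4 shards, and crush-failure-domain=osd lets shards share a host, which is the only way a 2+2 profile can be placed on a two-node test cluster. The enlarged role lists (4 OSDs per node, 8 in total) presumably leave enough OSDs up to keep the 4-shard pool active while thrashosds injects crashes.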
--- /dev/null
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore compression mode: aggressive
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100 GB OSD so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
--- /dev/null
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    cephfs_ec_profile:
+      - m=2
+      - k=2
+      - crush-failure-domain=osd
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100 GB OSD so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100 GB OSD so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+
--- /dev/null
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100 GB OSD so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100 GB OSD so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+
--- /dev/null
+overrides:
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: filestore
+        osd sloppy crc: true
+  ceph-deploy:
+    fs: xfs
+    filestore: True
+    conf:
+      osd:
+        osd objectstore: filestore
+        osd sloppy crc: true
+
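The filestore fragment carries no cephfs_ec_profile, consistent with the fact that overwrites on erasure-coded pools (allow_ec_overwrites) are only supported on BlueStore OSDs; only the bluestore fragments under objectstore-ec back the CephFS data pool with erasure coding.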
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
--- /dev/null
+../../../../cephfs/objectstore-ec/bluestore-ec-root.yaml
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
roles:
-- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2]
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
- [client.2]
- [client.1]
- [client.0]
roles:
-- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2]
+- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2, osd.3]
- [client.1]
- [client.0]
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
roles:
-- [mon.a, mgr.x, osd.0, mon.b, mds.a, mds.b, client.1]
-- [mds.c, mds.d, mon.c, client.0, osd.1, osd.2]
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, mon.b, mds.a, mds.b, client.1]
+- [mds.c, mds.d, mon.c, client.0, osd.4, osd.5, osd.6, osd.7]
openstack:
- volumes: # attached to each instance
  count: 2
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec/
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
roles:
-- [mon.a, osd.0, mds.a, mds.c, client.2]
-- [mgr.x, osd.1, osd.2, mds.b, mds.d, client.3]
+- [mon.a, osd.0, osd.1, osd.2, osd.3, mds.a, mds.c, client.2]
+- [mgr.x, osd.4, osd.5, osd.6, osd.7, mds.b, mds.d, client.3]
- [client.0]
- [client.1]
openstack:
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
+++ /dev/null
-../../../objectstore_cephfs
\ No newline at end of file
--- /dev/null
+../../../cephfs/objectstore-ec
\ No newline at end of file
    if mdss.remotes:
        log.info('Setting up CephFS filesystem...')
-        fs = Filesystem(ctx, name='cephfs', create=True)
+        fs = Filesystem(ctx, name='cephfs', create=True,
+                        ec_profile=config.get('cephfs_ec_profile', None))
        is_active_mds = lambda role: 'mds.' in role and not role.endswith('-s') and '-s-' not in role
        all_roles = [item for remote_roles in mdss.remotes.values() for item in remote_roles]
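For reference, the cephfs_ec_profile key read here comes from the ceph task configuration, so an override fragment like the bluestore EC one above is what enables the erasure-coded data pool. A minimal sketch, with the profile values copied from that fragment:

    overrides:
      ceph:
        cephfs_ec_profile:
          - m=2
          - k=2
          - crush-failure-domain=osd

When the key is absent, ec_profile stays None and the data pool is created as a plain replicated pool, exactly as before.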
    This object is for driving a CephFS filesystem. The MDS daemons driven by
    MDSCluster may be shared with other Filesystems.
    """
-    def __init__(self, ctx, fscid=None, name=None, create=False):
+    def __init__(self, ctx, fscid=None, name=None, create=False,
+                 ec_profile=None):
        super(Filesystem, self).__init__(ctx)
        self.name = name
+        self.ec_profile = ec_profile
        self.id = None
        self.metadata_pool_name = None
        self.metadata_overlay = False
                                             self.name, self.metadata_pool_name, data_pool_name,
                                             '--allow-dangerous-metadata-overlay')
        else:
-            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
-                                             data_pool_name, pgs_per_fs_pool.__str__())
+            if self.ec_profile:
+                log.info("EC profile is %s", self.ec_profile)
+                cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
+                cmd.extend(self.ec_profile)
+                self.mon_manager.raw_cluster_cmd(*cmd)
+                self.mon_manager.raw_cluster_cmd(
+                    'osd', 'pool', 'create',
+                    data_pool_name, pgs_per_fs_pool.__str__(), 'erasure',
+                    data_pool_name)
+                self.mon_manager.raw_cluster_cmd(
+                    'osd', 'pool', 'set',
+                    data_pool_name, 'allow_ec_overwrites', 'true')
+            else:
+                self.mon_manager.raw_cluster_cmd(
+                    'osd', 'pool', 'create',
+                    data_pool_name, pgs_per_fs_pool.__str__())
            self.mon_manager.raw_cluster_cmd('fs', 'new',
                                             self.name, self.metadata_pool_name, data_pool_name)
        self.check_pool_application(self.metadata_pool_name)
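In CLI terms, the erasure-coded branch above amounts roughly to: ceph osd erasure-code-profile set <data_pool> <profile values>, then ceph osd pool create <data_pool> <pgs> erasure <data_pool> (the profile is named after the data pool), then ceph osd pool set <data_pool> allow_ec_overwrites true, followed by the unchanged ceph fs new. Enabling allow_ec_overwrites is what lets CephFS use the erasure-coded pool directly as its data pool; the metadata pool is still replicated.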