From af9d0b1fe5aed68ba705466a98c5f91d926b9e4c Mon Sep 17 00:00:00 2001
From: Jan Fajerski
Date: Thu, 17 Jan 2019 15:13:05 +0100
Subject: [PATCH] ceph-volume: strategies allow for external WAL LVs

Signed-off-by: Jan Fajerski
---
 .../ceph_volume/devices/lvm/batch.py          |  10 +-
 .../devices/lvm/strategies/bluestore.py       | 301 +++++++++++++-----
 .../devices/lvm/strategies/filestore.py       |  28 +-
 .../devices/lvm/strategies/strategies.py      |   6 +-
 .../devices/lvm/strategies/test_bluestore.py  |  18 +-
 src/ceph-volume/ceph_volume/util/prepare.py   |  34 ++
 6 files changed, 300 insertions(+), 97 deletions(-)

diff --git a/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
index 93cc5aa0bf0..3fb6be677fa 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/batch.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
@@ -230,6 +230,11 @@ class Batch(object):
             type=int,
             help='Set (or override) the "bluestore_block_db_size" value, in bytes'
         )
+        parser.add_argument(
+            '--block-wal-size',
+            type=int,
+            help='Set (or override) the "bluestore_block_wal_size" value, in bytes'
+        )
         parser.add_argument(
             '--journal-size',
             type=int,
@@ -315,11 +320,12 @@ class Batch(object):
         # TODO assert that none of the device lists overlap?
         self._filter_devices()
         if self.args.bluestore:
-            if self.db_usable:
+            if self.db_usable or self.wal_usable:
                 self.strategy = strategies.bluestore.MixedType(
                     self.args,
                     self.usable,
-                    self.db_usable)
+                    self.db_usable,
+                    self.wal_usable)
             else:
                 self.strategy = strategies.bluestore.SingleType(
                     self.args,
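For orientation: like --block-db-size, the new --block-wal-size flag takes a raw
byte count (e.g. 2147483648 for 2 GB) and overrides the corresponding ceph.conf
value. The strategy selection above then boils down to the following hedged
paraphrase (names from the diff; the surrounding plumbing is elided):

    # Sketch only: any usable fast device, db or wal, selects the mixed layout.
    if args.bluestore:
        if db_usable or wal_usable:
            # fast devices present: put block.db/block.wal on dedicated LVs
            strategy = strategies.bluestore.MixedType(
                args, usable, db_usable, wal_usable)
        else:
            # data devices only: everything collocated on one LV per OSD
            strategy = strategies.bluestore.SingleType(args, usable)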
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
index e5f63272bb7..e1af45ecf8f 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
@@ -119,10 +119,12 @@ class MixedType(MixedStrategy):
 
     def __init__(self, args, data_devs, db_devs, wal_devs=[]):
         super(MixedType, self).__init__(args, data_devs, db_devs, wal_devs)
-        self.block_db_size = self.get_block_size()
+        self.block_db_size = self.get_block_db_size()
+        self.block_wal_size = self.get_block_wal_size()
         self.system_vgs = lvm.VolumeGroups()
         self.dbs_needed = len(self.data_devs) * self.osds_per_device
-        self.use_large_block_db = False
+        self.wals_needed = self.dbs_needed
+        self.use_large_block_db = self.use_large_block_wal = False
         self.validate_compute()
 
     @classmethod
@@ -134,16 +136,19 @@ class MixedType(MixedStrategy):
     def type():
         return "bluestore.MixedType"
 
-    def get_block_size(self):
+    def get_block_db_size(self):
         if self.args.block_db_size:
             return disk.Size(b=self.args.block_db_size)
         else:
             return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)
 
-    def report_pretty(self, filtered_devices):
-        vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
-        db_size = str(disk.Size(b=(vg_extents['sizes'])))
+    def get_block_wal_size(self):
+        if self.args.block_wal_size:
+            return disk.Size(b=self.args.block_wal_size)
+        else:
+            return prepare.get_block_wal_size(lv_format=False) or disk.Size(b=0)
 
+    def report_pretty(self, filtered_devices):
         string = ""
         if filtered_devices:
             string += templates.filtered_devices(filtered_devices)
@@ -151,16 +156,35 @@ class MixedType(MixedStrategy):
             total_osds=len(self.data_devs) * self.osds_per_device
         )
 
-        string += templates.ssd_volume_group.format(
-            target='block.db',
-            total_lv_size=str(self.total_available_db_space),
-            total_lvs=vg_extents['parts'] * self.osds_per_device,
-            block_lv_size=db_size,
-            block_db_devices=', '.join([ssd.abspath for ssd in
-                                        self.db_or_journal_devs]),
-            lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
-            total_osds=len(self.data_devs)
-        )
+        if self.db_or_journal_devs:
+            vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
+            db_size = str(disk.Size(b=(vg_extents['sizes'])))
+
+            string += templates.ssd_volume_group.format(
+                target='block.db',
+                total_lv_size=str(self.total_available_db_space),
+                total_lvs=vg_extents['parts'] * self.osds_per_device,
+                block_lv_size=db_size,
+                block_db_devices=', '.join([ssd.abspath for ssd in
+                                            self.db_or_journal_devs]),
+                lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
+                total_osds=len(self.data_devs)
+            )
+
+        if self.wal_devs:
+            wal_vg_extents = lvm.sizing(self.total_available_wal_space.b,
+                                        parts=self.wals_needed)
+            wal_size = str(disk.Size(b=(wal_vg_extents['sizes'])))
+            string += templates.ssd_volume_group.format(
+                target='block.wal',
+                total_lv_size=str(self.total_available_wal_space),
+                total_lvs=wal_vg_extents['parts'] * self.osds_per_device,
+                block_lv_size=wal_size,
+                block_db_devices=', '.join([dev.abspath for dev in
+                                            self.wal_devs]),
+                lv_size=self.block_wal_size or str(disk.Size(b=(wal_vg_extents['sizes']))),
+                total_osds=len(self.data_devs)
+            )
 
         string += templates.osd_component_titles
         for osd in self.computed['osds']:
@@ -171,32 +195,58 @@ class MixedType(MixedStrategy):
                 size=osd['data']['human_readable_size'],
                 percent=osd['data']['percentage'])
 
-            string += templates.osd_component.format(
-                _type='[block.db]',
-                path=osd['block.db']['path'],
-                size=osd['block.db']['human_readable_size'],
-                percent=osd['block.db']['percentage'])
+            if 'block.db' in osd:
+                string += templates.osd_component.format(
+                    _type='[block.db]',
+                    path=osd['block.db']['path'],
+                    size=osd['block.db']['human_readable_size'],
+                    percent=osd['block.db']['percentage'])
+
+            if 'block.wal' in osd:
+                string += templates.osd_component.format(
+                    _type='[block.wal]',
+                    path=osd['block.wal']['path'],
+                    size=osd['block.wal']['human_readable_size'],
+                    percent=osd['block.wal']['percentage'])
 
         print(string)
 
     def compute(self):
         osds = self.computed['osds']
 
-        if not self.common_vg:
-            # there isn't a common vg, so a new one must be created with all
-            # the blank SSDs
-            self.computed['vg'] = {
-                'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]),
-                'parts': self.dbs_needed,
-                'percentages': self.vg_extents['percentages'],
-                'sizes': self.block_db_size.b.as_int(),
-                'size': self.total_blank_ssd_size.b.as_int(),
-                'human_readable_sizes': str(self.block_db_size),
-                'human_readable_size': str(self.total_available_db_space),
-            }
-            vg_name = 'vg/lv'
-        else:
-            vg_name = self.common_vg.name
+        if self.db_or_journal_devs:
+            if not self.common_vg:
+                # there isn't a common vg, so a new one must be created with all
+                # the blank db devs
+                self.computed['vg'] = {
+                    'devices': ", ".join([ssd.abspath for ssd in self.blank_db_devs]),
+                    'parts': self.dbs_needed,
+                    'percentages': self.vg_extents['percentages'],
+                    'sizes': self.block_db_size.b.as_int(),
+                    'size': self.total_blank_db_dev_size.b.as_int(),
+                    'human_readable_sizes': str(self.block_db_size),
+                    'human_readable_size': str(self.total_available_db_space),
+                }
+                vg_name = 'vg/lv'
+            else:
+                vg_name = self.common_vg.name
+
+        if self.wal_devs:
+            if not self.common_wal_vg:
+                # there isn't a common vg, so a new one must be created with all
+                # the blank wal devs
+                self.computed['wal_vg'] = {
+                    'devices': ", ".join([dev.abspath for dev in self.blank_wal_devs]),
+                    'parts': self.wals_needed,
+                    'percentages': self.wal_vg_extents['percentages'],
+                    'sizes': self.block_wal_size.b.as_int(),
+                    'size': self.total_blank_wal_dev_size.b.as_int(),
+                    'human_readable_sizes': str(self.block_wal_size),
+                    'human_readable_size': str(self.total_available_wal_space),
+                }
+                wal_vg_name = 'vg/lv'
+            else:
+                wal_vg_name = self.common_wal_vg.name
 
         for device in self.data_devs:
             for hdd in range(self.osds_per_device):
@@ -207,10 +257,20 @@ class MixedType(MixedStrategy):
                 osd['data']['human_readable_size'] = str(
                     disk.Size(b=device.lvm_size.b) / self.osds_per_device
                 )
-                osd['block.db']['path'] = 'vg: %s' % vg_name
-                osd['block.db']['size'] = int(self.block_db_size.b)
-                osd['block.db']['human_readable_size'] = str(self.block_db_size)
-                osd['block.db']['percentage'] = self.vg_extents['percentages']
+
+                if self.db_or_journal_devs:
+                    osd['block.db']['path'] = 'vg: %s' % vg_name
+                    osd['block.db']['size'] = int(self.block_db_size.b)
+                    osd['block.db']['human_readable_size'] = str(self.block_db_size)
+                    osd['block.db']['percentage'] = self.vg_extents['percentages']
+
+                if self.wal_devs:
+                    osd['block.wal'] = {}
+                    osd['block.wal']['path'] = 'vg: %s' % wal_vg_name
+                    osd['block.wal']['size'] = int(self.block_wal_size.b)
+                    osd['block.wal']['human_readable_size'] = str(self.block_wal_size)
+                    osd['block.wal']['percentage'] = self.wal_vg_extents['percentages']
+
                 osds.append(osd)
 
         self.computed['changed'] = len(osds) > 0
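To make the computed layout concrete: after compute(), each entry in
self.computed['osds'] now carries an optional block.wal section alongside
block.db. A hedged sketch of one entry — only the keys are taken from the code
above, the values are invented:

    osd = {
        'data': {
            'path': '/dev/sda',
            'percentage': 100,
            'human_readable_size': '5.46 GB',
        },
        'block.db': {                    # only present when db devices exist
            'path': 'vg: vg/lv',
            'size': 3221225472,          # bytes
            'human_readable_size': '3.00 GB',
            'percentage': 10,
        },
        'block.wal': {                   # new: only present when wal devices exist
            'path': 'vg: vg/lv',
            'size': 2147483648,
            'human_readable_size': '2.00 GB',
            'percentage': 5,
        },
    }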
@@ -221,23 +281,8 @@ class MixedType(MixedStrategy):
         (block, block.db, block.wal, etc..) and offload the OSD creation to
         ``lvm create``
         """
-        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
         data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
 
-        # no common vg is found, create one with all the blank SSDs
-        if not self.common_vg:
-            db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
-
-        # if a common vg exists then extend it with any blank ssds
-        elif self.common_vg and blank_ssd_paths:
-            db_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
-
-        # one common vg with nothing else to extend can be used directly,
-        # either this is one device with one vg, or multiple devices with the
-        # same vg
-        else:
-            db_vg = self.common_vg
-
         # create 1 vg per data device first, mapping them to the device path,
         # when the lv gets created later, it can create as many as needed (or
         # even just 1)
@@ -247,12 +292,45 @@ class MixedType(MixedStrategy):
             vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
             data_vgs[osd['data']['path']] = vg
 
-        if self.use_large_block_db:
-            # make the block.db lvs as large as possible
-            vg_free_count = str_to_int(db_vg.vg_free_count)
-            db_lv_extents = int(vg_free_count / self.dbs_needed)
-        else:
-            db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']
+        if self.db_or_journal_devs:
+            blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]
+
+            # no common vg is found, create one with all the blank db devices
+            if not self.common_vg:
+                db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
+            elif self.common_vg and blank_db_dev_paths:
+                # if a common vg exists then extend it with any blank db devices
+                db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
+            else:
+                # one common vg with nothing else to extend can be used directly,
+                # either this is one device with one vg, or multiple devices with the
+                # same vg
+                db_vg = self.common_vg
+
+            if self.use_large_block_db:
+                # make the block.db lvs as large as possible
+                vg_free_count = str_to_int(db_vg.vg_free_count)
+                db_lv_extents = int(vg_free_count / self.dbs_needed)
+            else:
+                db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']
+
+        if self.wal_devs:
+            blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]
+
+            if not self.common_wal_vg:
+                wal_vg = lvm.create_vg(blank_wal_dev_paths,
+                                       name_prefix='ceph-block-wals')
+            elif self.common_wal_vg and blank_wal_dev_paths:
+                wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
+            else:
+                wal_vg = self.common_wal_vg
+
+            if self.use_large_block_wal:
+                # make the block.wal lvs as large as possible
+                vg_free_count = str_to_int(wal_vg.vg_free_count)
+                wal_lv_extents = int(vg_free_count / self.wals_needed)
+            else:
+                wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']
 
         # create the data lvs, and create the OSD with an lv from the common
         # block.db vg from before
@@ -263,14 +341,24 @@ class MixedType(MixedStrategy):
             data_lv = lvm.create_lv(
                 'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
             )
-            db_lv = lvm.create_lv(
-                'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True
-            )
 
             command = [
                 '--bluestore',
                 '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
-                '--block.db', '%s/%s' % (db_lv.vg_name, db_lv.name)
             ]
+            if 'block.db' in osd:
+                db_lv = lvm.create_lv(
+                    'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True
+                )
+                command.extend(['--block.db',
+                                '{}/{}'.format(db_lv.vg_name, db_lv.name)])
+            if 'block.wal' in osd:
+                wal_lv = lvm.create_lv(
+                    'osd-block-wal', wal_vg.name, extents=wal_lv_extents, uuid_name=True
+                )
+                command.extend(
+                    ['--block.wal',
+                     '{}/{}'.format(wal_lv.vg_name, wal_lv.name)
+                    ])
             if self.args.dmcrypt:
                 command.append('--dmcrypt')
             if self.args.no_systemd:
@@ -295,11 +383,18 @@ class MixedType(MixedStrategy):
         # make sure that data devices do not have any LVs
         validators.no_lvm_membership(self.data_devs)
 
+        if self.db_or_journal_devs:
+            self._validate_db_devs()
+
+        if self.wal_devs:
+            self._validate_wal_devs()
+
+    def _validate_db_devs(self):
         # do not allow non-common VG to continue
         validators.has_common_vg(self.db_or_journal_devs)
 
         # find the common VG to calculate how much is available
-        self.common_vg = self.get_common_vg()
+        self.common_vg = self.get_common_vg(self.db_or_journal_devs)
 
         # find how many block.db LVs are possible from the common VG
         if self.common_vg:
@@ -308,13 +403,13 @@ class MixedType(MixedStrategy):
             common_vg_size = disk.Size(gb=0)
 
         # non-VG SSDs
-        self.vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
-        self.blank_ssds = set(self.db_or_journal_devs).difference(self.vg_ssds)
-        self.total_blank_ssd_size = disk.Size(b=0)
-        for blank_ssd in self.blank_ssds:
-            self.total_blank_ssd_size += disk.Size(b=blank_ssd.lvm_size.b)
+        vg_members = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
+        self.blank_db_devs = set(self.db_or_journal_devs).difference(vg_members)
+        self.total_blank_db_dev_size = disk.Size(b=0)
+        for blank_db_dev in self.blank_db_devs:
+            self.total_blank_db_dev_size += disk.Size(b=blank_db_dev.lvm_size.b)
 
-        self.total_available_db_space = self.total_blank_ssd_size + common_vg_size
+        self.total_available_db_space = self.total_blank_db_dev_size + common_vg_size
 
         # If not configured, we default to 0, which is really "use as much as
         # possible" captured by the `else` condition
@@ -353,3 +448,65 @@ class MixedType(MixedStrategy):
             self.total_available_db_space, self.dbs_needed, self.block_db_size,
         )
         raise RuntimeError(msg)
+
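_validate_wal_devs() below mirrors this block.db sizing, and its "as large as
possible" fallback is easiest to see with numbers. A hedged walk-through with
invented values:

    # 4 data devices, 2 OSDs per device, bluestore_block_wal_size unset:
    wals_needed = 4 * 2                  # one block.wal LV per OSD -> 8
    total_available_wal_space = 80       # GB: blank wal devices + common VG free
    # fallback: divide all available space evenly across the needed LVs
    block_wal_size = total_available_wal_space / wals_needed   # -> 10 GB each
    use_large_block_wal = True           # execute() then sizes LVs by free extents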
+    def _validate_wal_devs(self):
+        # do not allow non-common VG to continue
+        validators.has_common_vg(self.wal_devs)
+
+        # find the common VG to calculate how much is available
+        self.common_wal_vg = self.get_common_vg(self.wal_devs)
+
+        # find how many block.wal LVs are possible from the common VG
+        if self.common_wal_vg:
+            common_vg_size = disk.Size(gb=self.common_wal_vg.free)
+        else:
+            common_vg_size = disk.Size(gb=0)
+
+        # non-VG devices
+        vg_members = set([d for d in self.wal_devs if d.is_lvm_member])
+        self.blank_wal_devs = set(self.wal_devs).difference(vg_members)
+        self.total_blank_wal_dev_size = disk.Size(b=0)
+        for blank_wal_dev in self.blank_wal_devs:
+            self.total_blank_wal_dev_size += disk.Size(b=blank_wal_dev.lvm_size.b)
+
+        self.total_available_wal_space = self.total_blank_wal_dev_size + common_vg_size
+
+        # If not configured, we default to 0, which is really "use as much as
+        # possible" captured by the `else` condition
+        if self.block_wal_size.gb > 0:
+            try:
+                self.wal_vg_extents = lvm.sizing(
+                    self.total_available_wal_space.b, size=self.block_wal_size.b * self.osds_per_device
+                )
+            except SizeAllocationError:
+                msg = "Not enough space in fast devices (%s) to create %s x %s block.wal LV"
+                raise RuntimeError(
+                    msg % (self.total_available_wal_space,
+                           self.osds_per_device, self.block_wal_size)
+                )
+        else:
+            self.wal_vg_extents = lvm.sizing(
+                self.total_available_wal_space.b, parts=self.wals_needed
+            )
+
+        # validate that the number of block.wal LVs possible is enough for the
+        # number of OSDs proposed
+        if self.total_available_wal_space.b == 0:
+            msg = "No space left in fast devices to create block.wal LVs"
+            raise RuntimeError(msg)
+
+        # bluestore_block_wal_size was unset, so we must set this to whatever
+        # size we get by dividing the total available space for block.wal LVs
+        # into the number of block.wal LVs needed (i.e. "as large as possible")
"as large as possible") + if self.block_wal_size.b == 0: + self.block_wal_size = self.total_available_wal_space / self.wals_needed + self.use_large_block_wal = True + + total_wals_possible = self.total_available_wal_space / self.block_wal_size + + if self.wals_needed > total_wals_possible: + msg = "Not enough space (%s) to create %s x %s block.wal LVs" % ( + self.total_available_wal_space, self.wals_needed, + self.block_wal_size, + ) + raise RuntimeError(msg) diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py index a20192a8821..3e21f766f3e 100644 --- a/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py +++ b/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py @@ -169,7 +169,7 @@ class MixedType(MixedStrategy): def __init__(self, args, data_devs, journal_devs): super(MixedType, self).__init__(args, data_devs, journal_devs) - self.blank_ssds = [] + self.blank_journal_devs = [] self.journals_needed = len(self.data_devs) * self.osds_per_device self.journal_size = get_journal_size(args) self.system_vgs = lvm.VolumeGroups() @@ -235,7 +235,7 @@ class MixedType(MixedStrategy): validators.has_common_vg(self.db_or_journal_devs) # find the common VG to calculate how much is available - self.common_vg = self.get_common_vg() + self.common_vg = self.get_common_vg(self.db_or_journal_devs) # find how many journals are possible from the common VG if self.common_vg: @@ -244,13 +244,13 @@ class MixedType(MixedStrategy): common_vg_size = disk.Size(gb=0) # non-VG SSDs - self.vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member]) - self.blank_ssds = set(self.db_or_journal_devs).difference(self.vg_ssds) - self.total_blank_ssd_size = disk.Size(b=0) - for blank_ssd in self.blank_ssds: - self.total_blank_ssd_size += disk.Size(b=blank_ssd.lvm_size.b) + vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member]) + self.blank_journal_devs = set(self.db_or_journal_devs).difference(vg_ssds) + self.total_blank_journal_dev_size = disk.Size(b=0) + for blank_journal_dev in self.blank_journal_devs: + self.total_blank_journal_dev_size += disk.Size(b=blank_journal_dev.lvm_size.b) - self.total_available_journal_space = self.total_blank_ssd_size + common_vg_size + self.total_available_journal_space = self.total_blank_journal_dev_size + common_vg_size try: self.vg_extents = lvm.sizing( @@ -283,11 +283,11 @@ class MixedType(MixedStrategy): # there isn't a common vg, so a new one must be created with all # the blank SSDs self.computed['vg'] = { - 'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]), + 'devices': ", ".join([ssd.abspath for ssd in self.blank_journal_devs]), 'parts': self.journals_needed, 'percentages': self.vg_extents['percentages'], 'sizes': self.journal_size.b.as_int(), - 'size': self.total_blank_ssd_size.b.as_int(), + 'size': self.total_blank_journal_dev_size.b.as_int(), 'human_readable_sizes': str(self.journal_size), 'human_readable_size': str(self.total_available_journal_space), } @@ -317,15 +317,15 @@ class MixedType(MixedStrategy): Create vgs/lvs from the incoming set of devices, assign their roles (data, journal) and offload the OSD creation to ``lvm create`` """ - blank_ssd_paths = [d.abspath for d in self.blank_ssds] + blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs] data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']]) # no common vg is found, create one with all the blank SSDs if not self.common_vg: - 
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
index a20192a8821..3e21f766f3e 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
@@ -169,7 +169,7 @@ class MixedType(MixedStrategy):
 
     def __init__(self, args, data_devs, journal_devs):
         super(MixedType, self).__init__(args, data_devs, journal_devs)
-        self.blank_ssds = []
+        self.blank_journal_devs = []
         self.journals_needed = len(self.data_devs) * self.osds_per_device
         self.journal_size = get_journal_size(args)
         self.system_vgs = lvm.VolumeGroups()
@@ -235,7 +235,7 @@ class MixedType(MixedStrategy):
         validators.has_common_vg(self.db_or_journal_devs)
 
         # find the common VG to calculate how much is available
-        self.common_vg = self.get_common_vg()
+        self.common_vg = self.get_common_vg(self.db_or_journal_devs)
 
         # find how many journals are possible from the common VG
         if self.common_vg:
@@ -244,13 +244,13 @@ class MixedType(MixedStrategy):
             common_vg_size = disk.Size(gb=0)
 
         # non-VG SSDs
-        self.vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
-        self.blank_ssds = set(self.db_or_journal_devs).difference(self.vg_ssds)
-        self.total_blank_ssd_size = disk.Size(b=0)
-        for blank_ssd in self.blank_ssds:
-            self.total_blank_ssd_size += disk.Size(b=blank_ssd.lvm_size.b)
+        vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
+        self.blank_journal_devs = set(self.db_or_journal_devs).difference(vg_ssds)
+        self.total_blank_journal_dev_size = disk.Size(b=0)
+        for blank_journal_dev in self.blank_journal_devs:
+            self.total_blank_journal_dev_size += disk.Size(b=blank_journal_dev.lvm_size.b)
 
-        self.total_available_journal_space = self.total_blank_ssd_size + common_vg_size
+        self.total_available_journal_space = self.total_blank_journal_dev_size + common_vg_size
 
         try:
             self.vg_extents = lvm.sizing(
@@ -283,11 +283,11 @@ class MixedType(MixedStrategy):
             # there isn't a common vg, so a new one must be created with all
             # the blank SSDs
             self.computed['vg'] = {
-                'devices': ", ".join([ssd.abspath for ssd in self.blank_ssds]),
+                'devices': ", ".join([ssd.abspath for ssd in self.blank_journal_devs]),
                 'parts': self.journals_needed,
                 'percentages': self.vg_extents['percentages'],
                 'sizes': self.journal_size.b.as_int(),
-                'size': self.total_blank_ssd_size.b.as_int(),
+                'size': self.total_blank_journal_dev_size.b.as_int(),
                 'human_readable_sizes': str(self.journal_size),
                 'human_readable_size': str(self.total_available_journal_space),
             }
@@ -317,15 +317,15 @@ class MixedType(MixedStrategy):
         Create vgs/lvs from the incoming set of devices, assign their roles
         (data, journal) and offload the OSD creation to ``lvm create``
         """
-        blank_ssd_paths = [d.abspath for d in self.blank_ssds]
+        blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
         data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
 
         # no common vg is found, create one with all the blank SSDs
         if not self.common_vg:
-            journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
+            journal_vg = lvm.create_vg(blank_journal_dev_paths, name_prefix='ceph-journals')
 
         # a vg exists that can be extended
-        elif self.common_vg and blank_ssd_paths:
-            journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
+        elif self.common_vg and blank_journal_dev_paths:
+            journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
 
         # one common vg with nothing else to extend can be used directly
         else:
             journal_vg = self.common_vg
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py
index 5a9d52cf159..be13d30b091 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py
@@ -51,10 +51,10 @@ class Strategy(object):
 
 class MixedStrategy(Strategy):
 
-    def get_common_vg(self):
+    def get_common_vg(self, devs):
         # find all the vgs associated with the current device
-        for ssd in self.db_or_journal_devs:
-            for pv in ssd.pvs_api:
+        for dev in devs:
+            for pv in dev.pvs_api:
                 vg = self.system_vgs.get(vg_name=pv.vg_name)
                 if not vg:
                     continue
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py
index 324a5a5a7d3..b92b498a307 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py
@@ -52,7 +52,8 @@ class TestMixedTypeConfiguredSize(object):
     def test_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
         # 3GB block.db in ceph.conf
         conf_ceph(get_safe=lambda *a: 3147483640)
-        args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None)
+        args = factory(filtered_devices=[], osds_per_device=1,
+                       block_db_size=None, block_wal_size=None)
         ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
         devices = [ssd, hdd]
@@ -68,7 +69,8 @@ class TestMixedTypeConfiguredSize(object):
     def test_ssd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
         # 7GB block.db in ceph.conf
         conf_ceph(get_safe=lambda *a: 7747483640)
-        args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None)
+        args = factory(filtered_devices=[], osds_per_device=1,
+                       block_db_size=None, block_wal_size=None)
         ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
         devices = [ssd, hdd]
@@ -81,7 +83,8 @@ class TestMixedTypeConfiguredSize(object):
     def test_multi_hdd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
         # 3GB block.db in ceph.conf
         conf_ceph(get_safe=lambda *a: 3147483640)
-        args = factory(filtered_devices=[], osds_per_device=2, block_db_size=None)
+        args = factory(filtered_devices=[], osds_per_device=2,
+                       block_db_size=None, block_wal_size=None)
         ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60737400000))
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
         devices = [ssd, hdd]
@@ -96,7 +99,8 @@ class TestMixedTypeLargeAsPossible(object):
 
     def test_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None)
+        args = factory(filtered_devices=[], osds_per_device=1,
+                       block_db_size=None, block_wal_size=None)
         ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
         devices = [ssd, hdd]
@@ -112,7 +116,8 @@ class TestMixedTypeLargeAsPossible(object):
 
     def test_multi_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
         conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=2, block_db_size=None)
+        args = factory(filtered_devices=[], osds_per_device=2,
+                       block_db_size=None, block_wal_size=None)
         ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60073740000))
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=60073740000))
         devices = [ssd, hdd]
@@ -128,7 +133,8 @@ class TestMixedTypeLargeAsPossible(object):
 
     def test_multi_hdd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
         conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=2, block_db_size=None)
+        args = factory(filtered_devices=[], osds_per_device=2,
+                       block_db_size=None, block_wal_size=None)
         ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60737400000))
         hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
         devices = [ssd, hdd]
diff --git a/src/ceph-volume/ceph_volume/util/prepare.py b/src/ceph-volume/ceph_volume/util/prepare.py
index 59c1032563d..c258dc4a359 100644
--- a/src/ceph-volume/ceph_volume/util/prepare.py
+++ b/src/ceph-volume/ceph_volume/util/prepare.py
@@ -104,6 +104,40 @@ def get_block_db_size(lv_format=True):
         return '%sG' % db_size.gb.as_int()
     return db_size
 
+def get_block_wal_size(lv_format=True):
+    """
+    Helper to retrieve the size (defined in bytes in ceph.conf) to create
+    the block.wal logical volume; it "translates" the string into an integer
+    byte value and (optionally) formats it back as a string in gigabytes so
+    that it can be used for creating the LV.
+
+    :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
+    would result in '5G', otherwise it will return a ``Size`` object.
+
+    .. note: Configuration values are in bytes, unlike journals which
+             are defined in gigabytes
+    """
+    conf_wal_size = None
+    try:
+        conf_wal_size = conf.ceph.get_safe('osd', 'bluestore_block_wal_size', None)
+    except RuntimeError:
+        logger.exception("failed to load ceph configuration, will use defaults")
+
+    if not conf_wal_size:
+        logger.debug(
+            'block.wal has no size configuration, will fallback to using as much as possible'
+        )
+        return None
+    logger.debug('bluestore_block_wal_size set to %s' % conf_wal_size)
+    wal_size = disk.Size(b=str_to_int(conf_wal_size))
+
+    if wal_size < disk.Size(gb=2):
+        mlogger.error('Refusing to continue with configured size for block.wal')
+        raise RuntimeError('block.wal sizes must be larger than 2GB, detected: %s' % wal_size)
+    if lv_format:
+        return '%sG' % wal_size.gb.as_int()
+    return wal_size
+
 def create_id(fsid, json_secrets, osd_id=None):
     """
-- 
2.39.5
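Finally, the behavior of the new prepare.get_block_wal_size() helper, summarized
as a hedged sketch (the ceph.conf snippet and values are illustrative only):

    # [osd]
    # bluestore_block_wal_size = 2147483648    # bytes (2 GB)
    from ceph_volume.util import prepare

    prepare.get_block_wal_size()                 # -> '2G', a string for LV creation
    prepare.get_block_wal_size(lv_format=False)  # -> disk.Size of 2 GB
    # unset in ceph.conf -> None, i.e. "use as much space as possible"
    # configured below 2 GB -> RuntimeError, refusing undersized block.wal LVs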