}
}
+
+// Conservatively estimate the final encoded size of transaction *t,
+// rounded up to the largest allocation unit among the WAL/DB/SLOW devices.
+// The extra 2 * super.block_size of slop mirrors the inline code this
+// helper was extracted from and covers encoding/padding overhead.
+// NOTE(review): assumes alloc_size[] entries are powers of two suitable
+// for round_up_to — confirm against device initialization.
+uint64_t BlueFS::_estimate_transaction_size(bluefs_transaction_t* t)
+{
+ uint64_t max_alloc_size = std::max(alloc_size[BDEV_WAL],
+ std::max(alloc_size[BDEV_DB],
+ alloc_size[BDEV_SLOW]));
+
+ // conservative estimate for final encoded size
+ return round_up_to(t->op_bl.length() + super.block_size * 2, max_alloc_size);
+}
+
uint64_t BlueFS::_estimate_log_size_N()
{
std::lock_guard nl(nodes.lock);
// log can be used to write to, ops in log will be continuation of captured state
log.lock.unlock();
- uint64_t max_alloc_size = std::max(alloc_size[BDEV_WAL],
- std::max(alloc_size[BDEV_DB],
- alloc_size[BDEV_SLOW]));
-
- // conservative estimate for final encoded size
- new_log_jump_to = round_up_to(t.op_bl.length() + super.block_size * 2,
- max_alloc_size);
+ new_log_jump_to = _estimate_transaction_size(&t);
//newly constructed log head will jump to what we had before
t.op_jump(seq_now, new_log_jump_to);
ceph_assert(old_is_comp);
}
-void BlueFS::_pad_bl(bufferlist& bl)
+// Zero-pad bl so its length is a multiple of max(pad_size, block_size).
+// pad_size == 0 (the default) preserves the old behavior of padding to
+// super.block_size.
+// NOTE(review): if pad_size > block_size but is not a multiple of it, the
+// padded length is no longer block-aligned — confirm callers only pass
+// allocation-unit sizes that are multiples of block_size.
+void BlueFS::_pad_bl(bufferlist& bl, uint64_t pad_size)
{
- uint64_t partial = bl.length() % super.block_size;
+ pad_size = std::max(pad_size, uint64_t(super.block_size));
+ uint64_t partial = bl.length() % pad_size;
if (partial) {
dout(10) << __func__ << " padding with 0x" << std::hex
- << super.block_size - partial << " zeros" << std::dec << dendl;
- bl.append_zero(super.block_size - partial);
+ << pad_size - partial << " zeros" << std::dec << dendl;
+ bl.append_zero(pad_size - partial);
}
}
void _init_alloc();
void _stop_alloc();
- void _pad_bl(ceph::buffer::list& bl); ///< pad ceph::buffer::list to block size w/ zeros
+ /// pad ceph::buffer::list to max(block size, pad_size) w/ zeros
+ void _pad_bl(ceph::buffer::list& bl, uint64_t pad_size = 0);
uint64_t _get_used(unsigned id) const;
uint64_t _get_total(unsigned id) const;
int64_t available_runway);
int _flush_and_sync_log_LD(uint64_t want_seq = 0);
+ uint64_t _estimate_transaction_size(bluefs_transaction_t* t);
uint64_t _estimate_log_size_N();
bool _should_start_compact_log_L_N();