// An alternative approach would be to place the entire tail and
// padding in a dedicated, 4 KB long memory chunk. That would avoid
// triggering the rebuild while still being less expensive than
// clearing the buffer.
+ if (file->envelope_mode() &&
+ buffer.get_append_buffer_unused_tail_length() <= super.block_size &&
+ p2phase(tail, super.block_size) >
+ p2phase<unsigned>(tail + File::envelope_t::head_size(), super.block_size)
+ ) {
+ // Envelope mode header must completely fit in single buffer::ptr,
+ // otherwise append_hole() will allocate new unaligned buffer.
+ // Clearing the buffer is a way to force buffer_appender to allocate fresh
+ // pages. The size is min 2 * super.block_size so header will fit.
+ buffer.clear();
+ }
buffer_appender.substr_of(bl, bl.length() - padding_len - tail, tail);
buffer.splice(buffer.length() - tail, tail, &tail_block);
} else {
{
std::unique_lock hl(h->lock);
if (h->file->envelope_mode() && h->get_buffer_length() == 0) {
- uint32_t pos1 = h->get_effective_write_pos();
h->envelope_head_filler = h->append_hole(File::envelope_t::head_size());
+ uint32_t pos1 = h->get_effective_write_pos() - File::envelope_t::head_size();
uint32_t pos2 = reinterpret_cast<uintptr_t>(h->envelope_head_filler.c_str());
ceph_assert(p2aligned(pos1 ^ pos2, CEPH_PAGE_SIZE));
}
BlueFS::FileWriter *BlueFS::_create_writer(FileRef f)
{
- FileWriter *w = new FileWriter(f);
+ FileWriter *w = new FileWriter(f, super.block_size);
for (unsigned i = 0; i < MAX_BDEV; ++i) {
if (bdev[i]) {
w->iocv[i] = new IOContext(cct, NULL);
std::array<IOContext*,MAX_BDEV> iocv; ///< for each bdev
std::array<bool, MAX_BDEV> dirty_devs;
- FileWriter(FileRef f)
- : file(std::move(f)),
- buffer_appender(buffer.get_page_aligned_appender(
- g_conf()->bluefs_alloc_size / CEPH_PAGE_SIZE)), envelope_head_filler() {
+ // Construct a writer for file `f`.
+ // `super_block_size` is the BlueFS superblock's block size; the appender's
+ // refill size is taken as max(bluefs_alloc_size, 2 * super_block_size) so
+ // that a fresh page-aligned buffer is always large enough to hold an
+ // envelope-mode header contiguously (see append_hole()).
+ FileWriter(FileRef f, unsigned super_block_size)
+ : file(std::move(f))
+ , buffer_appender(buffer.get_page_aligned_appender(
+ std::max<uint64_t>(g_conf()->bluefs_alloc_size, 2 * super_block_size) / CEPH_PAGE_SIZE))
+ , envelope_head_filler() {
++file->num_writers;
iocv.fill(nullptr);
dirty_devs.fill(false);
}
bufferlist::contiguous_filler append_hole(uint64_t len) {
+ // If the current append buffer cannot hold `len` bytes contiguously,
+ // force the appender to allocate a fresh page-aligned buffer first;
+ // otherwise bufferlist::append_hole() would allocate a new, unaligned
+ // buffer on its own (see the envelope-mode note in the flush path).
+ // This refill is only legal while nothing is buffered yet — presumably
+ // callers guarantee that; the assert enforces it.
+ if (buffer.get_append_buffer_unused_tail_length() < len) {
+ ceph_assert(buffer.length() == 0);
+ buffer_appender.refill();
+ }
return buffer.append_hole(len);
}