This is enforced for compressed and non-compressed blobs alike.
We keep the separate compression min and max tunables since these
may be adjusted on a per-pool basis as well.
Signed-off-by: Sage Weil <sage@redhat.com>
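
A minimal standalone sketch of the resulting selection logic, condensed from
the hunks below (the constants stand in for the corresponding config values,
and the alloc-hint test is simplified to a boolean; this is illustrative, not
the patch itself):

    #include <cstdint>
    #include <iostream>

    // Stand-ins for the config values (defaults from config_opts.h below).
    constexpr uint64_t kMaxBlobSize     = 512 * 1024;  // bluestore_max_blob_size
    constexpr uint64_t kCompMinBlobSize = 128 * 1024;  // bluestore_compression_min_blob_size
    constexpr uint64_t kCompMaxBlobSize = 512 * 1024;  // bluestore_compression_max_blob_size

    // Pick the target (max) blob size for a write: compressed writes start
    // from the compression tunables, then everything is clamped to
    // bluestore_max_blob_size, compressed and non-compressed alike.
    uint64_t target_blob_size(bool compress, bool sequential_hint) {
      uint64_t target = 0;
      if (compress)
        target = sequential_hint ? kCompMaxBlobSize : kCompMinBlobSize;
      if (target == 0 || target > kMaxBlobSize)
        target = kMaxBlobSize;
      return target;
    }

    int main() {
      // Uncompressed writes previously had no cap; now they target kMaxBlobSize.
      std::cout << target_blob_size(false, false) << "\n";  // 524288
      // Compressed sequential writes use the larger compression tunable ...
      std::cout << target_blob_size(true, true) << "\n";    // 524288
      // ... and compressed random writes the smaller one.
      std::cout << target_blob_size(true, false) << "\n";   // 131072
    }

With the defaults above, a 1 MiB uncompressed write is carved into two 512 KiB
blobs by the loop in _do_write_big() below.
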
OPTION(bluestore_compression_algorithm, OPT_STR, "snappy")
OPTION(bluestore_compression_min_blob_size, OPT_U32, 128*1024)
OPTION(bluestore_compression_max_blob_size, OPT_U32, 512*1024)
+OPTION(bluestore_max_blob_size, OPT_U32, 512*1024)
OPTION(bluestore_gc_max_blob_depth, OPT_U32, 3)
OPTION(bluestore_gc_merge_data, OPT_BOOL, true)
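
As a usage sketch, the new cap can be overridden like any other BlueStore
option in ceph.conf (the 64 KiB value here is illustrative, matching the test
table further down):

    [osd]
    bluestore_max_blob_size = 65536
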
bufferlist::iterator& blp,
WriteContext *wctx)
{
- uint64_t max_blob_len = length;
- if (wctx->compress) {
- max_blob_len = MIN(length, wctx->comp_blob_size);
- }
dout(10) << __func__ << " 0x" << std::hex << offset << "~" << length
- << " max_blob_len 0x" << max_blob_len
+ << " target_blob_size 0x" << wctx->target_blob_size
<< " compress " << (int)wctx->compress
<< std::dec << dendl;
while (length > 0) {
BlobRef b = c->new_blob();
- auto l = MIN(max_blob_len, length);
+ auto l = MIN(wctx->target_blob_size, length);
bufferlist t;
blp.copy(l, t);
_buffer_cache_write(txc, b, 0, t, wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY)) &&
(alloc_hints & CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE) == 0) {
dout(20) << __func__ << " will prefer large blob and csum sizes" << dendl;
- wctx.comp_blob_size = comp_max_blob_size.load();
wctx.csum_order = min_alloc_size_order;
+ if (wctx.compress) {
+ wctx.target_blob_size = comp_max_blob_size.load();
+ }
} else {
- wctx.comp_blob_size = comp_min_blob_size.load();
+ if (wctx.compress) {
+ wctx.target_blob_size = comp_min_blob_size.load();
+ }
}
+ if (wctx.target_blob_size == 0 ||
+ wctx.target_blob_size > g_conf->bluestore_max_blob_size) {
+ wctx.target_blob_size = g_conf->bluestore_max_blob_size;
+ }
+
dout(20) << __func__ << " prefer csum_order " << wctx.csum_order
- << " comp_blob_size 0x" << std::hex << wctx.comp_blob_size
+ << " target_blob_size 0x" << std::hex << wctx.target_blob_size
<< std::dec << dendl;
uint64_t gc_start_offset = offset, gc_end_offset = end;
// write ops
struct WriteContext {
- bool buffered = false; ///< buffered write
- bool compress = false; ///< compressed write
- uint64_t comp_blob_size = 0; ///< target compressed blob size
- uint8_t blob_depth = 0; ///< depth of the logical extent
- unsigned csum_order = 0; ///< target checksum chunk order
+ bool buffered = false; ///< buffered write
+ bool compress = false; ///< compressed write
+ uint64_t target_blob_size = 0; ///< target (max) blob size
+ uint8_t blob_depth = 0; ///< depth of the logical extent
+ unsigned csum_order = 0; ///< target checksum chunk order
extent_map_t old_extents; ///< must deref these blobs
{ "max_size", "262144", 0 },
{ "alignment", "4096", 0 },
{ "bluestore_min_alloc_size", "4096", 0 },
+ { "bluestore_max_blob_size", "65536", 0 },
{ "bluestore_extent_map_shard_min_size", "60", 0 },
{ "bluestore_extent_map_shard_max_size", "300", 0 },
{ "bluestore_extent_map_shard_target_size", "150", 0 },
{ "max_size", "1048576", 0 },
{ "alignment", "512", 0 },
{ "bluestore_min_alloc_size", "65536", "4096", 0 },
+ { "bluestore_max_blob_size", "262144", 0 },
{ "bluestore_compression", "force", "none", 0},
{ "bluestore_csum_type", "none", 0},
{ "bluestore_default_buffered_read", "true", "false", 0 },