} else {
on->extent_map.init_shards(false, false);
}
+ } else {
+ // initialize the onode's segment_size from the store-wide default
+ uint32_t segment_size = c->store->segment_size.load();
+ if (c->comp_max_blob_size.has_value() && segment_size < c->comp_max_blob_size.value()) {
+ // compression can produce blobs larger than the global segment_size;
+ // use the larger value so a compressed blob never crosses a segment boundary
+ segment_size = c->comp_max_blob_size.value();
+ }
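+ // persist the chosen value on the onode so every later write and
+ // reshard of this object agrees on the same segment boundaries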
+ on->onode.segment_size = segment_size;
}
return on;
}
if (head_length) {
_do_write_small(txc, c, o, head_offset, head_length, p, wctx);
}
- uint32_t segment_size = this->segment_size.load();
+ uint32_t segment_size = o->onode.segment_size;
if (segment_size) {
// split data to chunks
uint64_t write_offset = middle_offset;
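+ // the middle region is written in chunks cut at segment_size-aligned
+ // boundaries, so no blob ever spans a segment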
// if compression is enabled, fall back to the v1 write path
return _do_write(txc, c, o, offset, length, bl, fadvise_flags);
}
- if (segment_size != 0 && wctx.target_blob_size > segment_size) {
- wctx.target_blob_size = segment_size;
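+ // cap the target blob size at the onode's segment size so blobs
+ // never straddle a segment boundary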
+ if (o->onode.segment_size != 0 && wctx.target_blob_size > o->onode.segment_size) {
+ wctx.target_blob_size = o->onode.segment_size;
}
if (bl.length() != length) {
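+ // drop any trailing bytes beyond the requested length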
bl.splice(length, bl.length() - length);
// finalize extent_map shards
o->extent_map.update(txn, false);
if (o->extent_map.needs_reshard()) {
- o->extent_map.reshard(db, txn, segment_size);
+ o->extent_map.reshard(db, txn, o->onode.segment_size);
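+ // the onode's segment size bounds where reshard may place shard cut points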
o->extent_map.update(txn, true);
if (o->extent_map.needs_reshard()) {
dout(20) << __func__ << " warning: still wants reshard, check options?"
<< dendl;
uint32_t expected_object_size = 0;
uint32_t expected_write_size = 0;
uint32_t alloc_hint_flags = 0;
+ uint32_t segment_size = 0; ///< mandatory segment boundaries that writes never cross; helps with sharding
uint8_t flags = 0;
}
DENC(bluestore_onode_t, v, p) {
- DENC_START(2, 1, p);
+ DENC_START(3, 1, p);
denc_varint(v.nid, p);
denc_varint(v.size, p);
denc(v.attrs, p);
if (struct_v >= 2) {
denc(v.zone_offset_refs, p);
}
+ if (struct_v >= 3) {
+ denc(v.segment_size, p);
+ }
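+ // onodes encoded before v3 decode with segment_size left at its
+ // default of 0, so segment-aligned writes stay disabled for them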
DENC_FINISH(p);
}
void dump(ceph::Formatter *f) const;