}
}
+std::pair<uint32_t, uint32_t> BlueStore::ExtentMap::fault_range_ex(
+ KeyValueDB *db,
+ uint32_t offset,
+ uint32_t length)
+{
+ dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length
+ << std::dec << dendl;
+ if (shards.size() == 0) {
+ // no sharding yet; everything is loaded
+ return {0, OBJECT_MAX_SIZE};
+ }
+ auto start = seek_shard(offset);
+ auto last = seek_shard(offset + length);
+ maybe_load_shard(db, start, last);
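+ // the loaded region spans from the first affected shard's offset
+ // to the offset of the shard after the last one (or the object end)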
+ uint32_t left_bound = shards[start].shard_info->offset;
+ uint32_t right_bound = (size_t)last + 1 < shards.size() ?
+ shards[last + 1].shard_info->offset : OBJECT_MAX_SIZE;
+ dout(20) << __func__ << " start=" << start << " last=" << last
+ << " -> 0x" << std::hex << left_bound << "~" << right_bound
+ << std::dec << dendl;
+ return {left_bound, right_bound};
+}
+
void BlueStore::ExtentMap::fault_range(
KeyValueDB *db,
uint32_t offset,
}
auto start = seek_shard(offset);
auto last = seek_shard(offset + length);
+ maybe_load_shard(db, start, last);
+}
+
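+// load any not-yet-loaded shards in [start, last] from the KV store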
+void BlueStore::ExtentMap::maybe_load_shard(
+ KeyValueDB *db,
+ int start,
+ int last)
+{
ceph_assert(last >= start);
ceph_assert(start >= 0);
);
p->extents = decode_some(v);
p->loaded = true;
- dout(20) << __func__ << " open shard 0x" << std::hex
- << p->shard_info->offset
- << " for range 0x" << offset << "~" << length << std::dec
+ uint32_t shard_end =
+ (size_t)start + 1 < shards.size() ? (p + 1)->shard_info->offset : OBJECT_MAX_SIZE;
+ dout(20) << __func__ << " open shard for range 0x"
+ << std::hex << p->shard_info->offset << "..0x" << shard_end << std::dec
<< " (" << v.length() << " bytes)" << dendl;
ceph_assert(p->dirty == false);
ceph_assert(v.length() == p->shard_info->bytes);
if (bl.length() != length) {
bl.splice(length, bl.length() - length);
}
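+ // construct the writer first so its shard bounds can be filled in below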
+ BlueStore::Writer wr(this, txc, &wctx, o);
uint64_t start = p2align(offset, min_alloc_size);
uint64_t end = p2roundup(offset + length, min_alloc_size);
- o->extent_map.fault_range(db, start, end - start);
- BlueStore::Writer wr(this, txc, &wctx, o);
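+ // fault in the affected range and record the shard bounds so the
+ // writer will not reuse or merge blobs across shard boundaries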
+ std::tie(wr.left_shard_bound, wr.right_shard_bound) =
+ o->extent_map.fault_range_ex(db, start, end - start);
wr.do_write(offset, bl);
return r;
}
/// ensure that a range of the map is loaded
void fault_range(KeyValueDB *db,
uint32_t offset, uint32_t length);
+ /// ensure that a range of the map is loaded
+ /// returns the range encompassed by the affected shards
+ std::pair<uint32_t, uint32_t> fault_range_ex(
+ KeyValueDB *db,
+ uint32_t offset,
+ uint32_t length);
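+ /// load any not-yet-loaded shards in [start, last]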
+ void maybe_load_shard(
+ KeyValueDB *db,
+ int start,
+ int last);
/// ensure a range of the map is marked dirty
void dirty_range(uint32_t offset, uint32_t length);
blob_data_t& bd) // modified when consumed
{
uint32_t search_stop = p2align(logical_offset, (uint32_t)wctx->target_blob_size);
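+ // when sharded, never search for reusable blobs left of the shard boundary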
+ search_stop = std::max(left_shard_bound, search_stop);
uint32_t au_size = bstore->min_alloc_size;
uint32_t block_size = bstore->block_size;
ceph_assert(!bd.is_compressed());
uint32_t block_size = bstore->block_size;
uint32_t blob_size = wctx->target_blob_size;
uint32_t search_end = p2roundup(end_offset, blob_size);
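+ // when sharded, never extend the blob search past the right shard boundary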
+ search_end = std::min(right_shard_bound, search_end);
ceph_assert(!bd.is_compressed());
ceph_assert(p2phase<uint32_t>(end_offset, au_size) != 0);
BlueStore::ExtentMap& emap = onode->extent_map;
virtual bufferlist read(uint32_t object_offset, uint32_t object_length) = 0;
};
Writer(BlueStore* bstore, TransContext* txc, WriteContext* wctx, OnodeRef o)
- :bstore(bstore), txc(txc), wctx(wctx), onode(o) {
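+ // bounds default to the whole object; fault_range_ex may narrow them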
+ :left_shard_bound(0), right_shard_bound(OBJECT_MAX_SIZE)
+ , bstore(bstore), txc(txc), wctx(wctx), onode(o) {
pp_mode = debug_level_to_pp_mode(bstore->cct);
}
public:
read_divertor* test_read_divertor = nullptr;
std::vector<BlobRef> pruned_blobs;
volatile_statfs statfs_delta;
+ // when the extent map is sharded, blob search and reuse
+ // must stay within [left_shard_bound, right_shard_bound)
+ uint32_t left_shard_bound;
+ uint32_t right_shard_bound;
private:
BlueStore* bstore;