}
}
-int BlueStore::BufferSpace::_discard(uint64_t offset, uint64_t length)
+int BlueStore::BufferSpace::_discard(uint32_t offset, uint32_t length)
{
// note: we already hold cache->lock
  ldout(cache->cct, 20) << __func__ << std::hex << " 0x" << offset << "~" << length
                        << std::dec << dendl;
int cache_private = 0;
cache->_audit("discard start");
auto i = _data_lower_bound(offset);
- uint64_t end = offset + length;
+ uint32_t end = offset + length;
while (i != buffer_map.end()) {
Buffer *b = i->second.get();
    if (b->offset >= end) {
      break;
    }
    if (b->cache_private > cache_private) {
      cache_private = b->cache_private;
    }
    if (b->offset < offset) {
      int64_t front = offset - b->offset;
if (b->end() > end) {
// drop middle (split)
- uint64_t tail = b->end() - end;
+ uint32_t tail = b->end() - end;
if (b->data.length()) {
bufferlist bl;
bl.substr_of(b->data, b->length - tail, tail);
          _add_buffer(new Buffer(this, b->state, b->seq, end, bl), 0, b);  // re-add surviving tail at 'end'
}
// drop front
- uint64_t keep = b->end() - end;
+ uint32_t keep = b->end() - end;
if (b->data.length()) {
bufferlist bl;
        bl.substr_of(b->data, b->length - keep, keep);
        _add_buffer(new Buffer(this, b->state, b->seq, end, bl), 0, b);  // keep the tail past 'end'
}
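
For intuition, a worked example of the split case above, with hypothetical numbers
(not part of the patch). It also suggests why uint32_t is wide enough here: a
BufferSpace is per-blob (see the shared_blob->bc call below), so its offsets stay
far below 4 GiB.

// A buffer cached at 0x1000~0x1000 (b->offset = 0x1000, b->end() = 0x2000)
// hit by _discard(0x1800, 0x400):
//   end   = offset + length    = 0x1c00
//   front = offset - b->offset = 0x800  // bytes that survive at the front
//   tail  = b->end() - end     = 0x400  // bytes re-added as a new Buffer at 'end'
// The middle 0x400 bytes vanish: the original buffer keeps only its first
// 'front' bytes, and a new tail buffer is inserted at offset 0x1c00.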
void BlueStore::BufferSpace::read(
- uint64_t offset, uint64_t length,
+ uint32_t offset, uint32_t length,
BlueStore::ready_regions_t& res,
- interval_set<uint64_t>& res_intervals)
+ interval_set<uint32_t>& res_intervals)
{
std::lock_guard<std::recursive_mutex> l(cache->lock);
res.clear();
res_intervals.clear();
- uint64_t want_bytes = length;
- uint64_t end = offset + length;
+ uint32_t want_bytes = length;
+ uint32_t end = offset + length;
for (auto i = _data_lower_bound(offset);
i != buffer_map.end() && offset < end && i->first < end;
++i) {
    Buffer *b = i->second.get();
    assert(b->end() > offset);
if (b->is_writing() || b->is_clean()) {
if (b->offset < offset) {
- uint64_t skip = offset - b->offset;
- uint64_t l = MIN(length, b->length - skip);
+ uint32_t skip = offset - b->offset;
+ uint32_t l = MIN(length, b->length - skip);
res[offset].substr_of(b->data, skip, l);
res_intervals.insert(offset, l);
        offset += l;
        length -= l;
        continue;
}
if (b->offset > offset) {
- uint64_t gap = b->offset - offset;
+ uint32_t gap = b->offset - offset;
if (length <= gap) {
break;
        }
        offset += gap;
        length -= gap;
unsigned b_len = std::min(left, lp->length - l_off);
ready_regions_t cache_res;
- interval_set<uint64_t> cache_interval;
+ interval_set<uint32_t> cache_interval;
bptr->shared_blob->bc.read(b_off, b_len, cache_res, cache_interval);
dout(20) << __func__ << " blob " << *bptr << std::hex
<< " need 0x" << b_off << "~" << b_len
}
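
The interval_set<uint32_t> filled by bc.read() is what lets the caller work out
which sub-ranges the cache did not satisfy. A minimal sketch of that subtraction,
using real interval_set operations but illustrative variable names:

// holes = wanted range minus cache hits
interval_set<uint32_t> holes;
holes.insert(b_off, b_len);
holes.subtract(cache_interval);  // valid: all hits lie inside the wanted range
for (auto p = holes.begin(); p != holes.end(); ++p) {
  // issue a device read for p.get_start()~p.get_len()
}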
map<uint32_t,std::unique_ptr<Buffer>>::iterator _data_lower_bound(
- uint64_t offset) {
+ uint32_t offset) {
auto i = buffer_map.lower_bound(offset);
if (i != buffer_map.begin()) {
      --i;
      if (i->first + i->second->length <= offset) {
        // the predecessor ends at or before 'offset'; it cannot overlap
        ++i;
      }
    }
    return i;
  }
void _clear();
// return value is the highest cache_private of a trimmed buffer, or 0.
- int discard(uint64_t offset, uint64_t length) {
+ int discard(uint32_t offset, uint32_t length) {
std::lock_guard<std::recursive_mutex> l(cache->lock);
return _discard(offset, length);
}
- int _discard(uint64_t offset, uint64_t length);
+ int _discard(uint32_t offset, uint32_t length);
- void write(uint64_t seq, uint64_t offset, bufferlist& bl, unsigned flags) {
+ void write(uint64_t seq, uint32_t offset, bufferlist& bl, unsigned flags) {
std::lock_guard<std::recursive_mutex> l(cache->lock);
Buffer *b = new Buffer(this, Buffer::STATE_WRITING, seq, offset, bl,
flags);
_add_buffer(b, (flags & Buffer::FLAG_NOCACHE) ? 0 : 1, nullptr);
}
void finish_write(uint64_t seq);
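
A buffer written this way stays in STATE_WRITING, tagged with 'seq', and is still
readable (read() above serves both writing and clean buffers); finish_write(seq)
later flips it to clean. A hedged sketch of the expected call pattern, with
illustrative names (txc_seq, b_off, bc are not from the patch):

uint64_t txc_seq = 42;            // hypothetical: the transaction's commit seq
bc.write(txc_seq, b_off, bl, 0);  // buffer enters STATE_WRITING
// ... once the transaction commits:
bc.finish_write(txc_seq);         // buffers tagged txc_seq become STATE_CLEAN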
- void did_read(uint64_t offset, bufferlist& bl) {
+ void did_read(uint32_t offset, bufferlist& bl) {
std::lock_guard<std::recursive_mutex> l(cache->lock);
Buffer *b = new Buffer(this, Buffer::STATE_CLEAN, 0, offset, bl);
b->cache_private = _discard(offset, bl.length());
_add_buffer(b, 1, nullptr);
}
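
Note how did_read() uses the _discard() return value: the new clean buffer inherits
the highest cache_private of whatever it displaced, presumably so the replacement
policy (which keeps its per-buffer state in cache_private) does not treat re-read
data as cold. Illustrative flow:

// after a device read filled 'bl' for in-blob offset b_off (names illustrative):
bc.did_read(b_off, bl);  // evicts stale overlaps, keeps their cache level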
- void read(uint64_t offset, uint64_t length,
+ void read(uint32_t offset, uint32_t length,
BlueStore::ready_regions_t& res,
- interval_set<uint64_t>& res_intervals);
+ interval_set<uint32_t>& res_intervals);
- void truncate(uint64_t offset) {
- discard(offset, (uint64_t)-1 - offset);
+ void truncate(uint32_t offset) {
+ discard(offset, (uint32_t)-1 - offset);
}
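
The truncate() length is chosen so the addition in _discard() lands exactly on the
top of the 32-bit range rather than wrapping. Worked example with an illustrative
offset:

// truncate(0x1000) -> discard(0x1000, 0xffffefff)
// in _discard(): end = 0x1000 + 0xffffefff = 0xffffffff (UINT32_MAX),
// so 'end' cannot overflow and every buffer at or beyond 0x1000 is trimmed.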
void split(size_t pos, BufferSpace &r);
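
split() (implemented in the .cc file) presumably carves everything at or above
'pos' out of this space into 'r', rebasing offsets so that 'pos' becomes 0 in the
new space; a sketch of the intended effect, with made-up contents:

// before: this = { 0x0~0x1000, 0x1800~0x800 }, split at pos = 0x1000
// after:  this = { 0x0~0x1000 },  r = { 0x800~0x800 }  (0x1800 - 0x1000 = 0x800)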