<< dendl;
}
std::lock_guard l(e.blob->shared_blob->get_cache()->lock);
- for (auto& i : e.blob->shared_blob->bc.buffer_map) {
+ for (auto& i : e.blob->bc.buffer_map) {
dout(LogLevelV) << __func__ << " 0x" << std::hex << i.first
<< "~" << i.second->length << std::dec
<< " " << *i.second << dendl;
// race with lookup
return;
}
- bc._clear(coll_snap->cache);
- coll_snap->cache->rm_blob();
}
delete this;
}
#undef dout_prefix
#define dout_prefix *_dout << "bluestore.blob(" << this << ") "
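+// Blob now owns its BufferSpace, so on destruction any cached buffers
+// must be evicted from the collection's cache shard before the blob
+// goes away.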
+BlueStore::Blob::~Blob()
+{
+ SharedBlob* sb = shared_blob.get();
+ if (!sb) {
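+ // with no SharedBlob this blob was never attached to a cache shard,
+ // so it cannot be holding cached buffers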
+ ceph_assert(bc.buffer_map.empty());
+ return;
+ }
+ again:
+ auto coll_cache = sb->get_cache();
+ if (coll_cache) {
+ std::lock_guard l(coll_cache->lock);
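+ // recheck under the lock: a concurrent split_cache() may have moved
+ // the collection to a different cache shard while we were waiting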
+ if (coll_cache != sb->get_cache()) {
+ goto again;
+ }
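+ // drop this blob's buffers and the cache's per-blob accounting
+ // (previously done in SharedBlob::put())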
+ bc._clear(coll_cache);
+ coll_cache->rm_blob();
+ }
+}
+
void BlueStore::Blob::dump(Formatter* f) const
{
if (is_spanning()) {
ceph_assert(discard == all_invalid); // for a compressed blob either all
                                     // pextents are invalid or none are.
if (discard) {
- shared_blob->bc.discard(shared_blob->get_cache(), 0,
+ bc.discard(shared_blob->get_cache(), 0,
get_blob().get_logical_length());
}
} else {
dout(20) << __func__ << " 0x" << std::hex << pos
<< "~" << e.length
<< std::dec << dendl;
- shared_blob->bc.discard(shared_blob->get_cache(), pos, e.length);
+ bc.discard(shared_blob->get_cache(), pos, e.length);
}
pos += e.length;
}
<< dendl;
continue;
}
- shared_blob->bc._finish_write(cache, seq);
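+ // the writing list is tracked on the Blob itself now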
+ bc._finish_write(cache, seq);
break;
}
}
&(r->used_in_blob));
lb.split(blob_offset, rb);
- shared_blob->bc.split(shared_blob->get_cache(), blob_offset, r->shared_blob->bc);
+ bc.split(shared_blob->get_cache(), blob_offset, r->bc);
dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec
<< " finish " << *this << dendl;
// move over shared blobs and buffers. cover shared blobs from
// both extent map and spanning blob map (the full extent map
// may not be faulted in)
- vector<SharedBlob*> sbvec;
+ vector<Blob*> bvec;
for (auto& e : o->extent_map.extent_map) {
- sbvec.push_back(e.blob->shared_blob.get());
+ bvec.push_back(e.blob.get());
}
for (auto& b : o->extent_map.spanning_blob_map) {
- sbvec.push_back(b.second->shared_blob.get());
+ bvec.push_back(b.second.get());
}
- for (auto sb : sbvec) {
+ for (auto b : bvec) {
+ SharedBlob* sb = b->shared_blob.get();
if (sb->coll == dest) {
ldout(store->cct, 20) << __func__ << " already moved " << *sb
<< dendl;
}
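+ // note: no early-out when the SharedBlob was already moved; this
+ // blob's own cached buffers below must still migrate to dest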
sb->coll = dest;
if (dest->cache != cache) {
- for (auto& i : sb->bc.buffer_map) {
+ for (auto& i : b->bc.buffer_map) {
if (!i.second->is_writing()) {
ldout(store->cct, 20) << __func__ << " moving " << *i.second
<< dendl;
ready_regions_t cache_res;
interval_set<uint32_t> cache_interval;
- bptr->shared_blob->bc.read(
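+ // buffers are cached per-Blob now; the cache shard is still reached
+ // through the SharedBlob's collection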
+ bptr->bc.read(
bptr->shared_blob->get_cache(), b_off, b_len, cache_res, cache_interval,
read_cache_policy);
dout(20) << __func__ << " blob " << *bptr << std::hex
if (r < 0)
return r;
if (buffered) {
- bptr->shared_blob->bc.did_read(bptr->shared_blob->get_cache(), 0,
+ bptr->bc.did_read(bptr->shared_blob->get_cache(), 0,
raw_bl);
}
for (auto& req : r2r) {
return -EIO;
}
if (buffered) {
- bptr->shared_blob->bc.did_read(bptr->shared_blob->get_cache(),
+ bptr->bc.did_read(bptr->shared_blob->get_cache(),
req.r_off, req.bl);
}
uint64_t sbid_unloaded; ///< sbid if persistent isn't loaded
bluestore_shared_blob_t *persistent; ///< persistent part of the shared blob if any
};
- BufferSpace bc; ///< buffer cache
SharedBlob(Collection *_coll) : coll(_coll), sbid_unloaded(0) {
if (get_cache()) {
int16_t id = -1; ///< id, for spanning blobs only, >= 0
int16_t last_encoded_id = -1; ///< (ephemeral) used during encoding only
SharedBlobRef shared_blob; ///< shared blob state (if any)
-
+ BufferSpace bc; ///< buffer cache
private:
mutable bluestore_blob_t blob; ///< decoded blob metadata
#ifdef CACHE_BLOB_BL
bool can_split() const {
std::lock_guard l(shared_blob->get_cache()->lock);
// splitting a BufferSpace writing list is too hard; don't try.
- return shared_blob->bc.writing.empty() &&
+ return bc.writing.empty() &&
used_in_blob.can_split() &&
get_blob().can_split();
}
if (--nref == 0)
delete this;
}
-
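+ // releases this blob's cached buffers from the cache shard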
+ ~Blob();
#ifdef CACHE_BLOB_BL
void _encode() const {
uint64_t offset,
ceph::buffer::list& bl,
unsigned flags) {
- b->shared_blob->bc.write(b->shared_blob->get_cache(), txc->seq, offset, bl,
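+ // stage the new data in the Blob's buffer space; it stays on the
+ // writing list until _finish_write() runs for txc->seq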
+ b->bc.write(b->shared_blob->get_cache(), txc->seq, offset, bl,
flags);
txc->blobs_written.insert(b);
}