--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ ... @@ BlueStore::BufferSpace::read
  cache->logger->inc(l_bluestore_buffer_miss_bytes, miss_bytes);
}
-void BlueStore::BufferSpace::finish_write(Cache* cache, uint64_t seq)
+void BlueStore::BufferSpace::_finish_write(Cache* cache, uint64_t seq)
{
-  std::lock_guard<std::recursive_mutex> l(cache->lock);
-
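+  // caller must hold cache->lock; SharedBlob::finish_write takes it
+  // after validating the collection's cache pointer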
  auto i = writing.begin();
  while (i != writing.end()) {
    if (i->seq > seq) {
@@ ... @@ BlueStore::SharedBlob::put_ref
                          unshare && !*unshare ? unshare : nullptr);
}
+void BlueStore::SharedBlob::finish_write(uint64_t seq)
+{
+  while (true) {
+    Cache *cache = coll->cache;
+    std::lock_guard<std::recursive_mutex> l(cache->lock);
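+    // another thread may swap out coll->cache between the read above
+    // and acquiring the lock; if so, release the stale lock (end of
+    // this iteration's scope) and retry against the new cache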
+    if (coll->cache != cache) {
+      ldout(coll->store->cct, 20) << __func__
+                                  << " raced with sb cache update, was " << cache
+                                  << ", now " << coll->cache << ", retrying"
+                                  << dendl;
+      continue;
+    }
+    bc._finish_write(cache, seq);
+    break;
+  }
+}
+
// SharedBlobSet
#undef dout_prefix
@@ ... @@ void BlueStore::_txc_finish(TransContext *txc)
  assert(txc->state == TransContext::STATE_FINISHING);
  for (auto& sb : txc->shared_blobs_written) {
-    sb->bc.finish_write(sb->get_cache(), txc->seq);
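+    // finish_write now takes the cache lock itself and revalidates the
+    // collection's cache pointer under it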
+    sb->finish_write(txc->seq);
  }
  txc->shared_blobs_written.clear();
--- a/src/os/bluestore/BlueStore.h
+++ b/src/os/bluestore/BlueStore.h
@@ ... @@ struct BufferSpace
      b->cache_private = _discard(cache, offset, bl.length());
      _add_buffer(cache, b, (flags & Buffer::FLAG_NOCACHE) ? 0 : 1, nullptr);
    }
-    void finish_write(Cache* cache, uint64_t seq);
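+    // expects cache->lock to be held by the caller (hence the _ prefix)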
+    void _finish_write(Cache* cache, uint64_t seq);
    void did_read(Cache* cache, uint32_t offset, bufferlist& bl) {
      std::lock_guard<std::recursive_mutex> l(cache->lock);
      Buffer *b = new Buffer(this, Buffer::STATE_CLEAN, 0, offset, bl);
@@ ... @@ struct SharedBlob
    void put_ref(uint64_t offset, uint32_t length,
                 PExtentVector *r, bool *unshare);
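+    // locking wrapper: takes the collection's cache lock (revalidating
+    // coll->cache) and then calls bc._finish_write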
+    void finish_write(uint64_t seq);
+
    friend bool operator==(const SharedBlob &l, const SharedBlob &r) {
      return l.get_sbid() == r.get_sbid();
    }