From df0d9c530e3d73183ed3ab503fc99d70d07e6c1d Mon Sep 17 00:00:00 2001
From: xie xingguo
Date: Thu, 22 Jun 2017 15:47:25 +0800
Subject: [PATCH] os/bluestore: slightly refactor Blob::try_reuse_blob

(1) rename try_reuse_blob -> can_reuse_blob, to stay consistent with
    can_split/can_split_at.
(2) replace direct accesses to the 'blob' member with the get_blob()
    accessor.

Signed-off-by: xie xingguo
---
 src/os/bluestore/BlueStore.cc | 71 ++++++++++++++++++++---------------
 src/os/bluestore/BlueStore.h  |  6 +--
 2 files changed, 43 insertions(+), 34 deletions(-)

diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 38d1e272ebc..91ca9c043ec 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -1664,13 +1664,13 @@ ostream& operator<<(ostream& out, const BlueStore::Blob& b)
 
 void BlueStore::Blob::discard_unallocated(Collection *coll)
 {
-  if (blob.is_shared()) {
+  if (get_blob().is_shared()) {
     return;
   }
-  if (blob.is_compressed()) {
+  if (get_blob().is_compressed()) {
     bool discard = false;
     bool all_invalid = true;
-    for (auto e : blob.get_extents()) {
+    for (auto e : get_blob().get_extents()) {
       if (!e.is_valid()) {
         discard = true;
       } else {
@@ -1680,11 +1680,12 @@ void BlueStore::Blob::discard_unallocated(Collection *coll)
     assert(discard == all_invalid); // in case of compressed blob all
                                     // or none pextents are invalid.
     if (discard) {
-      shared_blob->bc.discard(shared_blob->get_cache(), 0, blob.get_logical_length());
+      shared_blob->bc.discard(shared_blob->get_cache(), 0,
+                              get_blob().get_logical_length());
     }
   } else {
     size_t pos = 0;
-    for (auto e : blob.get_extents()) {
+    for (auto e : get_blob().get_extents()) {
       if (!e.is_valid()) {
         ldout(coll->store->cct, 20) << __func__ << " 0x" << std::hex << pos
                                     << "~" << e.length
@@ -1693,12 +1694,11 @@ void BlueStore::Blob::discard_unallocated(Collection *coll)
       }
       pos += e.length;
     }
-    if (blob.can_prune_tail()) {
-      dirty_blob();
-      blob.prune_tail();
-      used_in_blob.prune_tail(blob.get_ondisk_length());
+    if (get_blob().can_prune_tail()) {
+      dirty_blob().prune_tail();
+      used_in_blob.prune_tail(get_blob().get_ondisk_length());
       auto cct = coll->store->cct; //used by dout
-      dout(20) << __func__ << " pruned tail, now " << blob << dendl;
+      dout(20) << __func__ << " pruned tail, now " << get_blob() << dendl;
     }
   }
 }
@@ -1719,10 +1719,10 @@ void BlueStore::Blob::get_ref(
 
   if (used_in_blob.is_empty()) {
     uint32_t min_release_size =
-      blob.get_release_size(coll->store->min_alloc_size);
-    uint64_t l = blob.get_logical_length();
-    dout(20) << __func__ << " init 0x" << std::hex << l << ", " << min_release_size
-             << std::dec << dendl;
+      get_blob().get_release_size(coll->store->min_alloc_size);
+    uint64_t l = get_blob().get_logical_length();
+    dout(20) << __func__ << " init 0x" << std::hex << l << ", "
+             << min_release_size << std::dec << dendl;
     used_in_blob.init(l, min_release_size);
   }
   used_in_blob.get(
@@ -1756,7 +1756,7 @@ bool BlueStore::Blob::put_ref(
   return b.release_extents(empty, logical, r);
 }
 
-bool BlueStore::Blob::try_reuse_blob(uint32_t min_alloc_size,
+bool BlueStore::Blob::can_reuse_blob(uint32_t min_alloc_size,
                                      uint32_t target_blob_size,
                                      uint32_t b_offset,
                                      uint32_t *length0) {
@@ -1784,17 +1784,25 @@ bool BlueStore::Blob::try_reuse_blob(uint32_t min_alloc_size,
   target_blob_size = MAX(blen, target_blob_size);
 
   if (b_offset >= blen) {
-    //new data totally stands out of the existing blob
-    new_blen = b_offset + length;
+    // new data totally stands out of the existing blob
+    new_blen = end;
   } else {
-    //new data overlaps with the existing blob
-    new_blen = MAX(blen, length + b_offset);
-    if (!get_blob().is_unallocated(
-      b_offset,
-      new_blen > blen ? blen - b_offset : length)) {
-      return false;
+    // new data overlaps with the existing blob
+    new_blen = MAX(blen, end);
+
+    uint32_t overlap = 0;
+    if (new_blen > blen) {
+      overlap = blen - b_offset;
+    } else {
+      overlap = length;
+    }
+
+    if (!get_blob().is_unallocated(b_offset, overlap)) {
+      // abort if any piece of the overlap has already been allocated
+      return false;
     }
   }
+
   if (new_blen > blen) {
     int64_t overflow = int64_t(new_blen) - target_blob_size;
     // Unable to decrease the provided length to fit into max_blob_size
@@ -1812,10 +1820,11 @@ bool BlueStore::Blob::try_reuse_blob(uint32_t min_alloc_size,
       length -= overflow;
       *length0 = length;
     }
+
     if (new_blen > blen) {
       dirty_blob().add_tail(new_blen);
       used_in_blob.add_tail(new_blen,
-                            blob.get_release_size(min_alloc_size));
+                            get_blob().get_release_size(min_alloc_size));
     }
   }
   return true;
@@ -9273,7 +9282,7 @@ void BlueStore::_do_write_small(
 
   // search suitable extent in both forward and reverse direction in
   // [offset - target_max_blob_size, offset + target_max_blob_size] range
-  // then check if blob can be reused via try_reuse_blob func or apply
+  // then check if blob can be reused via can_reuse_blob func or apply
   // direct/deferred write (the latter for extents including or higher
   // than 'offset' only).
   do {
@@ -9440,8 +9449,8 @@ void BlueStore::_do_write_small(
         logger->inc(l_bluestore_write_small_deferred);
         return;
       }
-      //try to reuse blob
-      if (b->try_reuse_blob(min_alloc_size,
+      // try to reuse blob if we can
+      if (b->can_reuse_blob(min_alloc_size,
                             max_bsize,
                             offset0 - bstart,
                             &alloc_len)) {
@@ -9487,7 +9496,7 @@ void BlueStore::_do_write_small(
       auto bstart = prev_ep->blob_start();
       dout(20) << __func__ << " considering " << *b
                << " bstart 0x" << std::hex << bstart << std::dec << dendl;
-      if (b->try_reuse_blob(min_alloc_size,
+      if (b->can_reuse_blob(min_alloc_size,
                             max_bsize,
                             offset0 - bstart,
                             &alloc_len)) {
@@ -9580,13 +9589,13 @@ void BlueStore::_do_write_big(
     auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
     // search suitable extent in both forward and reverse direction in
     // [offset - target_max_blob_size, offset + target_max_blob_size] range
-    // then check if blob can be reused via try_reuse_blob func.
+    // then check if blob can be reused via can_reuse_blob func.
     bool any_change;
     do {
       any_change = false;
       if (ep != end && ep->logical_offset < offset + max_bsize) {
         if (offset >= ep->blob_start() &&
-            ep->blob->try_reuse_blob(min_alloc_size, max_bsize,
+            ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
                                      offset - ep->blob_start(),
                                      &l)) {
           b = ep->blob;
@@ -9601,7 +9610,7 @@ void BlueStore::_do_write_big(
       }
 
       if (prev_ep != end && prev_ep->logical_offset >= min_off) {
-        if (prev_ep->blob->try_reuse_blob(min_alloc_size, max_bsize,
+        if (prev_ep->blob->can_reuse_blob(min_alloc_size, max_bsize,
                                           offset - prev_ep->blob_start(),
                                           &l)) {
           b = prev_ep->blob;
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index 1114e7b4f47..b1775cd872d 100644
--- a/src/os/bluestore/BlueStore.h
+++ b/src/os/bluestore/BlueStore.h
@@ -499,7 +499,7 @@ public:
       get_blob().can_split_at(blob_offset);
     }
 
-    bool try_reuse_blob(uint32_t min_alloc_size,
+    bool can_reuse_blob(uint32_t min_alloc_size,
                         uint32_t target_blob_size,
                         uint32_t b_offset,
                         uint32_t *length0);
@@ -512,10 +512,10 @@ public:
 #endif
     }
 
-    const bluestore_blob_t& get_blob() const {
+    inline const bluestore_blob_t& get_blob() const {
      return blob;
     }
-    bluestore_blob_t& dirty_blob() {
+    inline bluestore_blob_t& dirty_blob() {
 #ifdef CACHE_BLOB_BL
       blob_bl.clear();
 #endif
-- 
2.39.5
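For readers following the can_reuse_blob() hunk above: the rewritten else-branch first computes how many bytes of the incoming write overlap the existing blob, then only allows reuse if that overlapping range is still unallocated. Below is a minimal standalone C++ sketch of just the overlap computation, with illustrative names (write_overlap is not a BlueStore function); it assumes b_offset < blen, as in the else branch of the patch.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

// How many bytes of the write [b_offset, b_offset + length) fall inside an
// existing blob of logical length blen (requires b_offset < blen).
static uint32_t write_overlap(uint32_t blen, uint32_t b_offset, uint32_t length)
{
  uint32_t end = b_offset + length;
  uint32_t new_blen = std::max(blen, end); // blob length after a possible tail extension
  uint32_t overlap = 0;
  if (new_blen > blen) {
    overlap = blen - b_offset;             // write runs past the tail: only its head overlaps
  } else {
    overlap = length;                      // write is fully contained in the blob
  }
  return overlap;
}

int main()
{
  assert(write_overlap(0x1000, 0x800, 0x1000) == 0x800); // overlaps the blob tail
  assert(write_overlap(0x1000, 0x200, 0x400) == 0x400);  // fully contained in the blob
  std::cout << "overlap checks passed" << std::endl;
  return 0;
}

Feeding that overlap into get_blob().is_unallocated(b_offset, overlap), as the patch does, replaces the old inline ternary argument and is what makes the rewritten branch easier to follow.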