From: Igor Fedotov
Date: Thu, 17 Nov 2016 13:25:00 +0000 (+0000)
Subject: os/bluestore: remove garbage collector stuff
X-Git-Tag: v11.1.0~249^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=adddaf49a7919fe68fbc2a9060473acfe63fd38e;p=ceph.git

os/bluestore: remove garbage collector stuff

Signed-off-by: Igor Fedotov
---

diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index 99237c787244..2e3b8ce4f7bb 100644
--- a/src/common/config_opts.h
+++ b/src/common/config_opts.h
@@ -1013,8 +1013,6 @@ OPTION(bluestore_compression_algorithm, OPT_STR, "snappy")
 OPTION(bluestore_compression_min_blob_size, OPT_U32, 128*1024)
 OPTION(bluestore_compression_max_blob_size, OPT_U32, 512*1024)
 OPTION(bluestore_max_blob_size, OPT_U32, 512*1024)
-OPTION(bluestore_gc_max_blob_depth, OPT_U32, 3)
-OPTION(bluestore_gc_merge_data, OPT_BOOL, true)
 /*
  * Require the net gain of compression at least to be at this ratio,
  * otherwise we don't compress.
diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 74460aec2683..2c32a88640b1 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -78,8 +78,7 @@ const string PREFIX_SHARED_BLOB = "X"; // u64 offset -> shared_blob_t
 #define BLOBID_FLAG_ZEROOFFSET 0x2  // blob_offset is 0
 #define BLOBID_FLAG_SAMELENGTH 0x4  // length matches previous extent
 #define BLOBID_FLAG_SPANNING 0x8    // has spanning blob id
-#define BLOBID_FLAG_DEPTH 0x10      // has depth != 1
-#define BLOBID_SHIFT_BITS 5
+#define BLOBID_SHIFT_BITS 4

 /*
  * object name key structure
@@ -1562,7 +1561,6 @@ ostream& operator<<(ostream& out, const BlueStore::Extent& e)
 {
   return out << std::hex << "0x" << e.logical_offset << "~" << e.length
 	     << ": 0x" << e.blob_offset << "~" << e.length << std::dec
-	     << " depth " << (int)e.blob_depth
 	     << " " << *e.blob;
 }

@@ -1841,9 +1839,6 @@ bool BlueStore::ExtentMap::encode_some(uint32_t offset, uint32_t length,
     } else {
       prev_len = p->length;
     }
-    if (p->blob_depth != 1) {
-      blobid |= BLOBID_FLAG_DEPTH;
-    }
     denc_varint(blobid, app);
     if ((blobid & BLOBID_FLAG_CONTIGUOUS) == 0) {
       denc_varint_lowz(p->logical_offset - pos, app);
@@ -1854,9 +1849,6 @@ bool BlueStore::ExtentMap::encode_some(uint32_t offset, uint32_t length,
     if ((blobid & BLOBID_FLAG_SAMELENGTH) == 0) {
       denc_varint_lowz(p->length, app);
     }
-    if (blobid & BLOBID_FLAG_DEPTH) {
-      denc(p->blob_depth, app);
-    }
     pos = p->logical_offset + p->length;
     if (include_blob) {
       p->blob->encode(app, false);
@@ -1907,11 +1899,6 @@ void BlueStore::ExtentMap::decode_some(bufferlist& bl)
     }
     le->length = prev_len;

-    if (blobid & BLOBID_FLAG_DEPTH) {
-      denc(le->blob_depth, p);
-    } else {
-      le->blob_depth = 1;
-    }
     if (blobid & BLOBID_FLAG_SPANNING) {
       le->assign_blob(get_spanning_blob(blobid >> BLOBID_SHIFT_BITS));
     } else {
@@ -2178,10 +2165,10 @@ void BlueStore::ExtentMap::punch_hole(
 	// split and deref middle
 	uint64_t front = offset - p->logical_offset;
 	old_extents->insert(
-	  *new Extent(offset, p->blob_offset + front, length, p->blob_depth, p->blob));
+	  *new Extent(offset, p->blob_offset + front, length, p->blob));
 	add(end,
 	    p->blob_offset + front + length,
-	    p->length - front - length, p->blob_depth,
+	    p->length - front - length,
 	    p->blob);
 	p->length = front;
 	break;
@@ -2190,7 +2177,7 @@
 	assert(p->logical_offset + p->length > offset); // else seek_lextent bug
 	uint64_t keep = offset - p->logical_offset;
 	old_extents->insert(*new Extent(offset, p->blob_offset + keep,
-					p->length - keep, p->blob_depth, p->blob));
+					p->length - keep, p->blob));
 	p->length = keep;
 	++p;
 	continue;
@@ -2199,15 +2186,15 @@
     if (p->logical_offset + p->length <= end) {
       // deref whole lextent
       old_extents->insert(*new Extent(p->logical_offset, p->blob_offset,
-				      p->length, p->blob_depth, p->blob));
+				      p->length, p->blob));
       rm(p++);
       continue;
     }
     // deref head
     uint64_t keep = (p->logical_offset + p->length) - end;
     old_extents->insert(*new Extent(p->logical_offset, p->blob_offset,
-				    p->length - keep, p->blob_depth, p->blob));
-    add(end, p->blob_offset + p->length - keep, keep, p->blob_depth, p->blob);
+				    p->length - keep, p->blob));
+    add(end, p->blob_offset + p->length - keep, keep, p->blob);
     rm(p);
     break;
   }
 }
@@ -2215,12 +2202,12 @@
 BlueStore::Extent *BlueStore::ExtentMap::set_lextent(
   uint64_t logical_offset,
-  uint64_t blob_offset, uint64_t length, uint8_t blob_depth, BlobRef b,
+  uint64_t blob_offset, uint64_t length, BlobRef b,
   extent_map_t *old_extents)
 {
   punch_hole(logical_offset, length, old_extents);
   b->get_ref(blob_offset, length);
-  Extent *le = new Extent(logical_offset, blob_offset, length, blob_depth, b);
+  Extent *le = new Extent(logical_offset, blob_offset, length, b);
   extent_map.insert(*le);
   if (!needs_reshard && spans_shard(logical_offset, length)) {
     needs_reshard = true;
@@ -2249,7 +2236,7 @@ BlueStore::BlobRef BlueStore::ExtentMap::split_blob(
   if (ep->logical_offset < pos) {
     // split extent
     size_t left = pos - ep->logical_offset;
-    Extent *ne = new Extent(pos, 0, ep->length - left, ep->blob_depth, rb);
+    Extent *ne = new Extent(pos, 0, ep->length - left, rb);
     extent_map.insert(*ne);
     lb->pass_ref(rb.get(), ep->blob_offset + left, ne->length, ne->blob_offset);
     ep->length = left;
@@ -2267,86 +2254,6 @@
   return rb;
 }

-bool BlueStore::ExtentMap::do_write_check_depth(
-  uint64_t onode_size,
-  uint64_t start_offset,
-  uint64_t end_offset,
-  uint8_t *blob_depth,
-  uint64_t *gc_start_offset,
-  uint64_t *gc_end_offset)
-{
-  uint8_t depth = 0;
-  bool head_overlap = false;
-  bool tail_overlap = false;
-
-  *gc_start_offset = start_offset;
-  *gc_end_offset = end_offset;
-  *blob_depth = 1;
-
-  auto hp = seek_lextent(start_offset);
-  if (hp != extent_map.end() &&
-      hp->logical_offset < start_offset &&
-      start_offset < (hp->logical_offset + hp->length) &&
-      hp->blob->get_blob().is_compressed()) {
-    depth = hp->blob_depth;
-    head_overlap = true;
-  }
-
-  auto tp = seek_lextent(end_offset);
-  if (tp != extent_map.end() &&
-      tp->logical_offset < end_offset &&
-      end_offset < (tp->logical_offset + tp->length) &&
-      tp->blob->get_blob().is_compressed()) {
-    tail_overlap = true;
-    if (depth < tp->blob_depth) {
-      depth = tp->blob_depth;
-    }
-  }
-
-  if (depth >= g_conf->bluestore_gc_max_blob_depth) {
-    if (head_overlap) {
-      auto hp_next = hp;
-      while (hp != extent_map.begin() && hp->blob_depth > 1) {
-        hp_next = hp;
-        --hp;
-        if (hp->logical_offset + hp->length != hp_next->logical_offset) {
-          hp = hp_next;
-          break;
-        }
-      }
-      *gc_start_offset = hp->logical_offset;
-    }
-    if (tail_overlap) {
-      auto tp_prev = tp;
-
-      while (tp->blob_depth > 1) {
-        tp_prev = tp;
-        tp++;
-        if (tp == extent_map.end() ||
-            (tp_prev->logical_offset + tp_prev->length) != tp->logical_offset) {
-          break;
-        }
-      }
-      *gc_end_offset = tp_prev->logical_offset + tp_prev->length;
-    }
-  }
-  if (*gc_end_offset > onode_size) {
-    *gc_end_offset = onode_size;
-  }
-
-  bool do_collect = true;
-  if (depth < g_conf->bluestore_gc_max_blob_depth) {
-    *blob_depth = 1 + depth;
-    do_collect = false;
-  }
-  dout(20) << __func__ << " GC depth " << (int)*blob_depth
-           << ", gc 0x" << std::hex << *gc_start_offset << "~"
-           << (*gc_end_offset - *gc_start_offset)
-           << (do_collect ? " collect" : "")
-           << std::dec << dendl;
-  return do_collect;
-}
-
 // Onode

 #undef dout_prefix
@@ -7607,7 +7514,7 @@ void BlueStore::_do_write_small(
       b->dirty_blob().calc_csum(b_off, padded);
       dout(20) << __func__ << " lex old " << *ep << dendl;
       Extent *le = o->extent_map.set_lextent(offset, b_off + head_pad, length,
-					     wctx->blob_depth, b,
+					     b,
 					     &wctx->old_extents);
       b->dirty_blob().mark_used(le->blob_offset, le->length);
       txc->statfs_delta.stored() += le->length;
@@ -7682,7 +7589,7 @@
	       << b_len << std::dec << " of mutable " << *b
	       << " at " << op->extents << dendl;
       Extent *le = o->extent_map.set_lextent(offset, offset - bstart, length,
-					     wctx->blob_depth, b, &wctx->old_extents);
+					     b, &wctx->old_extents);
       b->dirty_blob().mark_used(le->blob_offset, le->length);
       txc->statfs_delta.stored() += le->length;
       dout(20) << __func__ << " lex " << *le << dendl;
@@ -7701,7 +7608,7 @@
			 wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
     _pad_zeros(&bl, &b_off, block_size);
     Extent *le = o->extent_map.set_lextent(offset, P2PHASE(offset, alloc_len),
-					   length, wctx->blob_depth, b, &wctx->old_extents);
+					   length, b, &wctx->old_extents);
     txc->statfs_delta.stored() += le->length;
     dout(20) << __func__ << " lex " << *le << dendl;
     wctx->write(b, alloc_len, b_off, bl, true);
@@ -7730,7 +7637,7 @@ void BlueStore::_do_write_big(
     blp.copy(l, t);
     _buffer_cache_write(txc, b, 0, t, wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
     wctx->write(b, l, 0, t, false);
-    Extent *le = o->extent_map.set_lextent(offset, 0, l, wctx->blob_depth,
+    Extent *le = o->extent_map.set_lextent(offset, 0, l,
 					   b, &wctx->old_extents);
     txc->statfs_delta.stored() += l;
     dout(20) << __func__ << " lex " << *le << dendl;
@@ -8044,60 +7951,6 @@ void BlueStore::_do_write_data(
   }
 }

-void BlueStore::_do_garbage_collection(
-  TransContext *txc,
-  CollectionRef& c,
-  OnodeRef o,
-  uint64_t& offset,
-  uint64_t& length,
-  bufferlist& bl,
-  WriteContext *wctx)
-{
-  uint64_t gc_start_offset, gc_end_offset;
-  uint64_t end = offset + length;
-  bool do_collect =
-    o->extent_map.do_write_check_depth(o->onode.size,
-                                       offset, end, &wctx->blob_depth,
-                                       &gc_start_offset,
-                                       &gc_end_offset);
-  if (do_collect) {
-    // we need garbage collection of blobs.
-    if (offset > gc_start_offset) {
-      bufferlist head_bl;
-      size_t read_len = offset - gc_start_offset;
-      int r = _do_read(c.get(), o, gc_start_offset, read_len, head_bl, 0);
-      assert(r == (int)read_len);
-      if (g_conf->bluestore_gc_merge_data) {
-        head_bl.claim_append(bl);
-        bl.swap(head_bl);
-        offset = gc_start_offset;
-        length = end - offset;
-      } else {
-        o->extent_map.fault_range(db, gc_start_offset, read_len);
-        _do_write_data(txc, c, o, gc_start_offset, read_len, head_bl, wctx);
-      }
-      logger->inc(l_bluestore_gc);
-      logger->inc(l_bluestore_gc_bytes, read_len);
-    }
-
-    if (end < gc_end_offset) {
-      bufferlist tail_bl;
-      size_t read_len = gc_end_offset - end;
-      int r = _do_read(c.get(), o, end, read_len, tail_bl, 0);
-      assert(r == (int)read_len);
-      if (g_conf->bluestore_gc_merge_data) {
-        bl.claim_append(tail_bl);
-        length += read_len;
-      } else {
-        o->extent_map.fault_range(db, end, read_len);
-        _do_write_data(txc, c, o, end, read_len, tail_bl, wctx);
-      }
-      logger->inc(l_bluestore_gc);
-      logger->inc(l_bluestore_gc_bytes, read_len);
-    }
-  }
-}
-
 int BlueStore::_do_write(
   TransContext *txc,
   CollectionRef& c,
@@ -8211,7 +8064,6 @@
	   << " target_blob_size 0x" << std::hex << wctx.target_blob_size
	   << std::dec << dendl;

-  _do_garbage_collection(txc, c, o, offset, length, bl, &wctx);
   o->extent_map.fault_range(db, offset, length);
   _do_write_data(txc, c, o, offset, length, bl, &wctx);

@@ -8800,7 +8652,7 @@ int BlueStore::_do_clone_range(
     }
     Extent *ne = new Extent(e.logical_offset + skip_front + dstoff - srcoff,
			    e.blob_offset + skip_front,
-			    e.length - skip_front - skip_back, e.blob_depth, cb);
+			    e.length - skip_front - skip_back, cb);
     newo->extent_map.extent_map.insert(*ne);
     ne->blob->get_ref(ne->blob_offset, ne->length);
     // fixme: we may leave parts of new blob unreferenced that could
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index c99da688bff1..7069dc3e770c 100644
--- a/src/os/bluestore/BlueStore.h
+++ b/src/os/bluestore/BlueStore.h
@@ -564,7 +564,6 @@ public:
     uint32_t logical_offset = 0;      ///< logical offset
     uint32_t blob_offset = 0;         ///< blob offset
     uint32_t length = 0;              ///< length
-    uint8_t blob_depth = 0;           ///< blob overlapping count
     BlobRef blob;                     ///< the blob with our data

     /// ctor for lookup only
@@ -573,9 +572,9 @@
     explicit Extent() : ExtentBase() { }
     /// ctor for general usage
-    Extent(uint32_t lo, uint32_t o, uint32_t l, uint8_t bd, BlobRef& b)
+    Extent(uint32_t lo, uint32_t o, uint32_t l, BlobRef& b)
       : ExtentBase(),
-        logical_offset(lo), blob_offset(o), length(l), blob_depth(bd) {
+        logical_offset(lo), blob_offset(o), length(l) {
       assign_blob(b);
     }
     ~Extent() {
@@ -738,8 +737,8 @@
     extent_map_t::iterator seek_lextent(uint64_t offset);

     /// add a new Extent
-    void add(uint32_t lo, uint32_t o, uint32_t l, uint8_t bd, BlobRef& b) {
-      extent_map.insert(*new Extent(lo, o, l, bd, b));
+    void add(uint32_t lo, uint32_t o, uint32_t l, BlobRef& b) {
+      extent_map.insert(*new Extent(lo, o, l, b));
     }

     /// remove (and delete) an Extent
@@ -759,7 +758,7 @@
     /// put new lextent into lextent_map overwriting existing ones if
     /// any and update references accordingly
     Extent *set_lextent(uint64_t logical_offset,
-			uint64_t offset, uint64_t length, uint8_t blob_depth,
+			uint64_t offset, uint64_t length,
			BlobRef b, extent_map_t *old_extents);

     /// split a blob (and referring extents)
@@ -1967,7 +1966,6 @@
     bool buffered = false;          ///< buffered write
     bool compress = false;          ///< compressed write
     uint64_t target_blob_size = 0;  ///< target (max) blob size
-    uint8_t blob_depth = 0;         ///< depth of the logical extent
     unsigned csum_order = 0;        ///< target checksum chunk order
     extent_map_t old_extents;       ///< must deref these blobs

@@ -2026,14 +2024,6 @@
   void _pad_zeros(bufferlist *bl, uint64_t *offset, uint64_t chunk_size);

-  void _do_garbage_collection(TransContext *txc,
-			      CollectionRef& c,
-			      OnodeRef o,
-			      uint64_t& offset,
-			      uint64_t& length,
-			      bufferlist& bl,
-			      WriteContext *wctx);
-
   int _do_write(TransContext *txc,
		CollectionRef &c,
		OnodeRef o,
diff --git a/src/test/objectstore/store_test.cc b/src/test/objectstore/store_test.cc
index 19c096222fec..b2ce0dec50ac 100644
--- a/src/test/objectstore/store_test.cc
+++ b/src/test/objectstore/store_test.cc
@@ -934,179 +934,6 @@ TEST_P(StoreTest, CompressionTest) {
   g_ceph_context->_conf->apply_changes(NULL);
 }

-TEST_P(StoreTest, garbageCollection) {
-  ObjectStore::Sequencer osr("test");
-  int r;
-  int64_t waste1, waste2;
-  coll_t cid;
-  int buf_len = 256 * 1024;
-  int overlap_offset = 64 * 1024;
-  int write_offset = buf_len;
-  if (string(GetParam()) != "bluestore")
-    return;
-
-#define WRITE_AT(offset, length) {\
-      ObjectStore::Transaction t;\
-      t.write(cid, hoid, offset, length, bl);\
-      r = apply_transaction(store, &osr, std::move(t));\
-      ASSERT_EQ(r, 0);\
-  }
-  g_conf->set_val("bluestore_compression_mode", "none");
-  //g_conf->set_val("bluestore_compression_mode", "force");
-  g_conf->set_val("bluestore_merge_gc_data", "true");
-  g_ceph_context->_conf->apply_changes(NULL);
-
-  ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
-  {
-    bufferlist in;
-    r = store->read(cid, hoid, 0, 5, in);
-    ASSERT_EQ(-ENOENT, r);
-  }
-  {
-    ObjectStore::Transaction t;
-    t.create_collection(cid, 0);
-    cerr << "Creating collection " << cid << std::endl;
-    r = apply_transaction(store, &osr, std::move(t));
-    ASSERT_EQ(r, 0);
-  }
-
-  std::string data;
-  data.resize(buf_len);
-
-  {
-    {
-      bool exists = store->exists(cid, hoid);
-      ASSERT_TRUE(!exists);
-
-      ObjectStore::Transaction t;
-      t.touch(cid, hoid);
-      cerr << "Creating object " << hoid << std::endl;
-      r = apply_transaction(store, &osr, std::move(t));
-      ASSERT_EQ(r, 0);
-
-      exists = store->exists(cid, hoid);
-      ASSERT_EQ(true, exists);
-    }
-    bufferlist bl;
-
-    for(size_t i = 0; i < data.size(); i++)
-      data[i] = 'R';
-
-    bl.append(data);
-
-    WRITE_AT(0, buf_len);
-    WRITE_AT(write_offset - 3 * overlap_offset, buf_len);
-    WRITE_AT(write_offset - 2 * overlap_offset, buf_len);
-    {
-      struct store_statfs_t statfs;
-      int r = store->statfs(&statfs);
-      ASSERT_EQ(r, 0);
-      waste1 = statfs.allocated - statfs.stored;
-    }
-    WRITE_AT(write_offset - overlap_offset, buf_len);
-    {
-      struct store_statfs_t statfs;
-      int r = store->statfs(&statfs);
-      ASSERT_EQ(r, 0);
-      waste2 = statfs.allocated - statfs.stored;
-      ASSERT_GE(waste1, waste2);
-    }
-    {
-      ObjectStore::Transaction t;
-      t.remove(cid, hoid);
-      cerr << "Cleaning" << std::endl;
-      r = apply_transaction(store, &osr, std::move(t));
-      ASSERT_EQ(r, 0);
-    }
-  }
-  {
-    {
-      bool exists = store->exists(cid, hoid);
-      ASSERT_TRUE(!exists);
-
-      ObjectStore::Transaction t;
-      t.touch(cid, hoid);
-      cerr << "Creating object " << hoid << std::endl;
-      r = apply_transaction(store, &osr, std::move(t));
-      ASSERT_EQ(r, 0);
-
-      exists = store->exists(cid, hoid);
-      ASSERT_EQ(true, exists);
-    }
-    bufferlist bl;
-
-    for(size_t i = 0; i < data.size(); i++)
-      data[i] = i % 256;
-    bl.append(data);
-
-    WRITE_AT(write_offset - overlap_offset, buf_len);
-    WRITE_AT(write_offset - 2 * overlap_offset, buf_len);
-    WRITE_AT(write_offset - 3 * overlap_offset, buf_len);
-    {
-      struct store_statfs_t statfs;
-      int r = store->statfs(&statfs);
-      ASSERT_EQ(r, 0);
-      waste1 = statfs.allocated - statfs.stored;
-    }
-    WRITE_AT(0, buf_len);
-    {
-      struct store_statfs_t statfs;
-      int r = store->statfs(&statfs);
-      ASSERT_EQ(r, 0);
-      waste2 = statfs.allocated - statfs.stored;
-      ASSERT_GE(waste1, waste2);
-    }
-    {
-      ObjectStore::Transaction t;
-      t.remove(cid, hoid);
-      cerr << "Cleaning" << std::endl;
-      r = apply_transaction(store, &osr, std::move(t));
-      ASSERT_EQ(r, 0);
-    }
-  }
-  {
-    {
-      bool exists = store->exists(cid, hoid);
-      ASSERT_TRUE(!exists);
-
-      ObjectStore::Transaction t;
-      t.touch(cid, hoid);
-      cerr << "Creating object " << hoid << std::endl;
-      r = apply_transaction(store, &osr, std::move(t));
-      ASSERT_EQ(r, 0);
-
-      exists = store->exists(cid, hoid);
-      ASSERT_EQ(true, exists);
-    }
-    bufferlist bl;
-    for(size_t i = 0; i < data.size(); i++)
-      data[i] = i % 256;
-    bl.append(data);
-
-    WRITE_AT(2 * write_offset - 5 * overlap_offset, buf_len);
-    WRITE_AT(2 * write_offset - 4 * overlap_offset, buf_len);
-    WRITE_AT(2 * write_offset - 3 * overlap_offset, buf_len);
-    WRITE_AT(2 * overlap_offset, buf_len);
-    {
-      struct store_statfs_t statfs;
-      int r = store->statfs(&statfs);
-      ASSERT_EQ(r, 0);
-      waste2 = statfs.allocated - statfs.stored;
-      ASSERT_GE(waste1, waste2);
-    }
-    {
-      ObjectStore::Transaction t;
-      t.remove(cid, hoid);
-      t.remove_collection(cid);
-      cerr << "Cleaning" << std::endl;
-      r = apply_transaction(store, &osr, std::move(t));
-      ASSERT_EQ(r, 0);
-    }
-  }
-  g_conf->set_val("bluestore_compression_mode", "none");
-  g_ceph_context->_conf->apply_changes(NULL);
-}
-
 TEST_P(StoreTest, SimpleObjectTest) {
   ObjectStore::Sequencer osr("test");
   int r;
diff --git a/src/test/objectstore/test_bluestore_types.cc b/src/test/objectstore/test_bluestore_types.cc
index 20eac4e1adf0..56e9c1f25358 100644
--- a/src/test/objectstore/test_bluestore_types.cc
+++ b/src/test/objectstore/test_bluestore_types.cc
@@ -831,7 +831,7 @@ TEST(ExtentMap, find_lextent)
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(100));

-  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, br));
+  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
   auto a = em.find(100);
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(99));
@@ -840,7 +840,7 @@
   ASSERT_EQ(a, em.find_lextent(199));
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(200));

-  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, 1, br));
+  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
   auto b = em.find(200);
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(99));
@@ -851,7 +851,7 @@
   ASSERT_EQ(b, em.find_lextent(299));
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(300));

-  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, 1, br));
+  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
   auto d = em.find(400);
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(99));
@@ -877,7 +877,7 @@
   ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
   ASSERT_EQ(em.extent_map.end(), em.seek_lextent(100));

-  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, br));
+  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
   auto a = em.find(100);
   ASSERT_EQ(a, em.seek_lextent(0));
   ASSERT_EQ(a, em.seek_lextent(99));
@@ -886,7 +886,7 @@
   ASSERT_EQ(a, em.seek_lextent(199));
   ASSERT_EQ(em.extent_map.end(), em.seek_lextent(200));

-  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, 1, br));
+  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
   auto b = em.find(200);
   ASSERT_EQ(a, em.seek_lextent(0));
   ASSERT_EQ(a, em.seek_lextent(99));
@@ -897,7 +897,7 @@
   ASSERT_EQ(b, em.seek_lextent(299));
   ASSERT_EQ(em.extent_map.end(), em.seek_lextent(300));

-  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, 1, br));
+  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
   auto d = em.find(400);
   ASSERT_EQ(a, em.seek_lextent(0));
   ASSERT_EQ(a, em.seek_lextent(99));
@@ -924,7 +924,7 @@
   ASSERT_FALSE(em.has_any_lextents(0, 1000));
   ASSERT_FALSE(em.has_any_lextents(1000, 1000));

-  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, b));
+  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b));
   ASSERT_FALSE(em.has_any_lextents(0, 50));
   ASSERT_FALSE(em.has_any_lextents(0, 100));
   ASSERT_FALSE(em.has_any_lextents(50, 50));
@@ -936,7 +936,7 @@
   ASSERT_TRUE(em.has_any_lextents(199, 2));
   ASSERT_FALSE(em.has_any_lextents(200, 2));

-  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, 1, b));
+  em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, b));
   ASSERT_TRUE(em.has_any_lextents(199, 1));
   ASSERT_TRUE(em.has_any_lextents(199, 2));
   ASSERT_TRUE(em.has_any_lextents(200, 2));
@@ -944,7 +944,7 @@
   ASSERT_TRUE(em.has_any_lextents(299, 1));
   ASSERT_FALSE(em.has_any_lextents(300, 1));

-  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, 1, b));
+  em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, b));
   ASSERT_TRUE(em.has_any_lextents(0, 10000));
   ASSERT_TRUE(em.has_any_lextents(199, 1));
   ASSERT_FALSE(em.has_any_lextents(300, 1));
@@ -968,31 +968,31 @@
   b2->shared_blob = new BlueStore::SharedBlob(-1, string(), &cache);
   b3->shared_blob = new BlueStore::SharedBlob(-1, string(), &cache);

-  em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, 1, b1));
-  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, b2));
+  em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, b1));
+  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
   ASSERT_EQ(0, em.compress_extent_map(0, 10000));
   ASSERT_EQ(2u, em.extent_map.size());

-  em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, 1, b2));
-  em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, 1, b2));
+  em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b2));
+  em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
   ASSERT_EQ(0, em.compress_extent_map(0, 0));
   ASSERT_EQ(0, em.compress_extent_map(100000, 1000));
   ASSERT_EQ(2, em.compress_extent_map(0, 100000));
   ASSERT_EQ(2u, em.extent_map.size());

   em.extent_map.erase(em.find(100));
-  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, 1, b2));
-  em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, 1, b3));
-  em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, 1, b2));
+  em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
+  em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b3));
+  em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
   ASSERT_EQ(0, em.compress_extent_map(0, 1));
   ASSERT_EQ(0, em.compress_extent_map(0, 100000));
   ASSERT_EQ(4u, em.extent_map.size());

-  em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, 1, b2));
-  em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, 1, b2));
-  em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, 1, b2));
-  em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, 1, b1));
-  em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, 1, b3));
+  em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
+  em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, b2));
+  em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, b2));
+  em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, b1));
+  em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, b3));
   ASSERT_EQ(0, em.compress_extent_map(0, 99));
   ASSERT_EQ(0, em.compress_extent_map(800, 1000));
   ASSERT_EQ(2, em.compress_extent_map(100, 500));
@@ -1000,109 +1000,13 @@
   em.extent_map.erase(em.find(300));
   em.extent_map.erase(em.find(500));
   em.extent_map.erase(em.find(700));
-  em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, 1, b2));
-  em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, 1, b2));
-  em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, 1, b2));
+  em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
+  em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, b2));
+  em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, b2));
   ASSERT_EQ(1, em.compress_extent_map(0, 1000));
   ASSERT_EQ(6u, em.extent_map.size());
 }

-TEST(ExtentMap, GarbageCollectorTest)
-{
-  BlueStore::LRUCache cache;
-  BlueStore::ExtentMap em(nullptr);
-  uint8_t blob_depth = 0;
-  uint64_t gc_start_offset = 0;
-  uint64_t gc_end_offset = 0;
-
-  bool b;
-  b = em.do_write_check_depth(0, //onode_size
-                              0, //start_offset
-                              100, //end_offset
-                              &blob_depth,
-                              &gc_start_offset,
-                              &gc_end_offset);
-  ASSERT_TRUE(!b);
-  ASSERT_EQ(blob_depth, 1ul);
-  ASSERT_EQ(gc_start_offset, 0ul);
-  ASSERT_EQ(gc_end_offset, 0ul);
-
-  BlueStore::BlobRef b1(new BlueStore::Blob);
-  BlueStore::BlobRef b2(new BlueStore::Blob);
-  BlueStore::BlobRef b3(new BlueStore::Blob);
-  b1->shared_blob = new BlueStore::SharedBlob(-1, string(), &cache);
-  b2->shared_blob = new BlueStore::SharedBlob(-1, string(), &cache);
-  b3->shared_blob = new BlueStore::SharedBlob(1, string(), &cache);
-  b1->dirty_blob().set_flag(bluestore_blob_t::FLAG_COMPRESSED);
-  b2->dirty_blob().set_flag(bluestore_blob_t::FLAG_COMPRESSED);
-  b3->dirty_blob().set_flag(bluestore_blob_t::FLAG_COMPRESSED);
-
-  em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, 3, b1));
-  em.extent_map.insert(*new BlueStore::Extent(100, 0, 50, 3, b2));
-  em.extent_map.insert(*new BlueStore::Extent(150, 0, 150, 1, b3));
-
-  b = em.do_write_check_depth(300, //onode_size
-                              10, //start_offset
-                              80, //end_offset
-                              &blob_depth,
-                              &gc_start_offset,
-                              &gc_end_offset);
-  ASSERT_TRUE(b);
-  ASSERT_EQ(blob_depth, 1ul);
-  ASSERT_EQ(gc_start_offset, 0ul);
-  ASSERT_EQ(gc_end_offset, 150ul);
-
-  b = em.do_write_check_depth(300, //onode_size
-                              70, //start_offset
-                              310, //end_offset
-                              &blob_depth,
-                              &gc_start_offset,
-                              &gc_end_offset);
-  ASSERT_TRUE(b);
-  ASSERT_EQ(blob_depth, 1ul);
-  ASSERT_EQ(gc_start_offset, 0ul);
-  ASSERT_EQ(gc_end_offset, 300ul);
-
-  b = em.do_write_check_depth(300, //onode_size
-                              70, //start_offset
-                              290, //end_offset
-                              &blob_depth,
-                              &gc_start_offset,
-                              &gc_end_offset);
-  ASSERT_TRUE(b);
-  ASSERT_EQ(blob_depth, 1ul);
-  ASSERT_EQ(gc_start_offset, 0ul);
-  ASSERT_EQ(gc_end_offset, 300ul);
-
-  b = em.do_write_check_depth(300, //onode_size
-                              180, //start_offset
-                              290, //end_offset
-                              &blob_depth,
-                              &gc_start_offset,
-                              &gc_end_offset);
-  ASSERT_TRUE(!b);
-  ASSERT_EQ(blob_depth, 2ul);
-
-  em.extent_map.clear();
-  em.extent_map.insert(*new BlueStore::Extent(0x17c00, 0xc000, 0x400, 3, b1));
-  em.extent_map.insert(*new BlueStore::Extent(0x18000, 0, 0xf000, 3, b1));
-  em.extent_map.insert(*new BlueStore::Extent(0x27000, 0, 0x400, 3, b1));
-  em.extent_map.insert(*new BlueStore::Extent(0x27400, 0x400, 0x7c00, 2, b2));
-  em.extent_map.insert(*new BlueStore::Extent(0x2f000, 0, 0xe00, 1, b3));
-
-  b = em.do_write_check_depth(0x3f000, //onode_size
-                              0x1ac00, //start_offset
-                              0x1ac00 + 0x6600, //end_offset
-                              &blob_depth,
-                              &gc_start_offset,
-                              &gc_end_offset);
-  ASSERT_TRUE(b);
-  ASSERT_EQ(blob_depth, 1ul);
-  ASSERT_EQ(gc_start_offset, 0x17c00ul);
-  ASSERT_EQ(gc_end_offset, 0x2f000ul);
-}
-
-
 int main(int argc, char **argv) {
   vector args;
   argv_to_vec(argc, (const char **)argv, args);