From: Igor Fedotov
Date: Tue, 27 Dec 2016 17:11:10 +0000 (+0300)
Subject: os/bluestore: pass Collection* to access BlueStore instance and other needed stuff...
X-Git-Tag: v12.0.0~183^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=96581a75427f23f7ae66ee4351f22f6ad6293155;p=ceph.git

os/bluestore: pass Collection* to access BlueStore instance and other needed stuff
rather than have additional referencing members in BlueStore child classes.

Signed-off-by: Igor Fedotov
---

diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 4f9e0f76d76..3cecd5a3fa6 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -1384,7 +1384,7 @@ ostream& operator<<(ostream& out, const BlueStore::Blob& b)
   return out;
 }
-void BlueStore::Blob::discard_unallocated()
+void BlueStore::Blob::discard_unallocated(Collection *coll)
 {
   if (blob.is_compressed()) {
     bool discard = false;
@@ -1412,6 +1412,7 @@ void BlueStore::Blob::discard_unallocated()
     if (blob.can_prune_tail()) {
       dirty_blob();
       blob.prune_tail();
+      auto cct = coll->store->cct; //used by dout
       dout(20) << __func__ << " pruned tail, now " << blob << dendl;
     }
   }
@@ -1425,9 +1426,9 @@ void BlueStore::Blob::get_ref(
 }
 bool BlueStore::Blob::put_ref(
+  Collection *coll,
   uint64_t offset,
   uint64_t length,
-  uint64_t min_release_size,
   vector<bluestore_pextent_t> *r)
 {
   vector<bluestore_pextent_t> logical;
@@ -1456,6 +1457,7 @@ bool BlueStore::Blob::put_ref(
     return false;
   }
+  auto min_release_size = coll->store->min_alloc_size;
   // we cannot release something smaller than our csum chunk size
   if (b.has_csum() && b.get_csum_chunk_size() > min_release_size) {
     min_release_size = b.get_csum_chunk_size();
   }
@@ -1552,8 +1554,9 @@ void BlueStore::Blob::pass_ref(Blob* other, uint64_t src_offset, uint64_t length
   other->ref_map.get(dest_offset, length);
 }
-void BlueStore::Blob::split(size_t blob_offset, Blob *r)
+void BlueStore::Blob::split(Collection *coll, size_t blob_offset, Blob *r)
 {
+  auto cct = coll->store->cct; //used by dout
   dout(10) << __func__ << " 0x" << std::hex << blob_offset << std::dec << " start " << *this << dendl;
   assert(blob.can_split());
@@ -1623,14 +1626,16 @@ ostream& operator<<(ostream& out, const BlueStore::Extent& e)
 #undef dout_prefix
 #define dout_prefix *_dout << "bluestore.extentmap(" << this << ") "
-BlueStore::ExtentMap::ExtentMap(CephContext* cct, Onode *o)
-  : cct(cct), onode(o),
-    inline_bl(cct->_conf->bluestore_extent_map_inline_shard_prealloc_size) {
+BlueStore::ExtentMap::ExtentMap(Onode *o)
+  : onode(o),
+    inline_bl(
+      o->c->store->cct->_conf->bluestore_extent_map_inline_shard_prealloc_size) {
 }
 bool BlueStore::ExtentMap::update(KeyValueDB::Transaction t,
                                   bool force)
 {
+  auto cct = onode->c->store->cct; //used by dout
   assert(!needs_reshard);
   if (onode->onode.extent_map_shards.empty()) {
     if (inline_bl.length() == 0) {
@@ -1721,8 +1726,11 @@ bool BlueStore::ExtentMap::update(KeyValueDB::Transaction t,
   return false;
 }
-void BlueStore::ExtentMap::reshard(uint64_t min_alloc_size)
+void BlueStore::ExtentMap::reshard()
 {
+  auto min_alloc_size = onode->c->store->min_alloc_size;
+  auto cct = onode->c->store->cct; //used by dout
+
   needs_reshard = false;
   // un-span all blobs
@@ -1865,9 +1873,13 @@ void BlueStore::ExtentMap::reshard(uint64_t min_alloc_size)
   }
 }
-bool BlueStore::ExtentMap::encode_some(uint32_t offset, uint32_t length,
-                                       bufferlist& bl, unsigned *pn)
+bool BlueStore::ExtentMap::encode_some(
+  uint32_t offset,
+  uint32_t length,
+  bufferlist& bl,
+  unsigned *pn)
 {
+  auto cct = onode->c->store->cct; //used by dout
   Extent dummy(offset);
   auto start = extent_map.lower_bound(dummy);
   uint32_t end = offset + length;
@@ -2003,7 +2015,7 @@ void BlueStore::ExtentMap::decode_some(bufferlist& bl)
       le->assign_blob(blobs[blobid - 1]);
       assert(le->blob);
     } else {
-      Blob *b = new Blob(cct);
+      Blob *b = new Blob();
       uint64_t sbid = 0;
       b->decode(p, struct_v, &sbid, false);
       blobs[n] = b;
@@ -2047,7 +2059,6 @@ void BlueStore::ExtentMap::encode_spanning_blobs(
 }
 void BlueStore::ExtentMap::decode_spanning_blobs(
-  Collection *c,
   bufferptr::iterator& p)
 {
   __u8 struct_v;
@@ -2056,12 +2067,12 @@ void BlueStore::ExtentMap::decode_spanning_blobs(
   unsigned n;
   denc_varint(n, p);
   while (n--) {
-    BlobRef b(new Blob(c->store->cct));
+    BlobRef b(new Blob());
     denc_varint(b->id, p);
     spanning_blob_map[b->id] = b;
     uint64_t sbid = 0;
     b->decode(p, struct_v, &sbid, true);
-    c->open_shared_blob(sbid, b);
+    onode->c->open_shared_blob(sbid, b);
   }
 }
@@ -2083,6 +2094,7 @@ void BlueStore::ExtentMap::fault_range(
   uint32_t offset,
   uint32_t length)
 {
+  auto cct = onode->c->store->cct; //used by dout
   dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length << std::dec << dendl;
   auto start = seek_shard(offset);
@@ -2126,6 +2138,7 @@ void BlueStore::ExtentMap::dirty_range(
   uint32_t offset,
   uint32_t length)
 {
+  auto cct = onode->c->store->cct; //used by dout
   dout(30) << __func__ << " 0x" << std::hex << offset << "~" << length << std::dec << dendl;
   if (shards.empty()) {
@@ -2195,8 +2208,11 @@ bool BlueStore::ExtentMap::has_any_lextents(uint64_t offset, uint64_t length)
   return true;
 }
-int BlueStore::ExtentMap::compress_extent_map(uint64_t offset, uint64_t length)
+int BlueStore::ExtentMap::compress_extent_map(
+  uint64_t offset,
+  uint64_t length)
 {
+  auto cct = onode->c->store->cct; //used by dout
   if (extent_map.empty())
     return 0;
   int removed = 0;
@@ -2327,12 +2343,14 @@ BlueStore::BlobRef BlueStore::ExtentMap::split_blob(
   uint32_t blob_offset,
   uint32_t pos)
 {
+  auto cct = onode->c->store->cct; //used by dout
+
   uint32_t end_pos = pos + lb->get_blob().get_logical_length() - blob_offset;
   dout(20) << __func__ << " 0x" << std::hex << pos << " end 0x" << end_pos << " blob_offset 0x" << blob_offset << std::dec << " " << *lb << dendl;
   BlobRef rb = onode->c->new_blob();
-  lb->split(blob_offset, rb.get());
+  lb->split(onode->c, blob_offset, rb.get());
   for (auto ep = seek_lextent(pos);
        ep != extent_map.end() && ep->logical_offset < end_pos;
@@ -2504,7 +2522,7 @@ BlueStore::OnodeRef BlueStore::Collection::get_onode(
     on->onode.decode(p);
     // initialize extent_map
-    on->extent_map.decode_spanning_blobs(this, p);
+    on->extent_map.decode_spanning_blobs(p);
     if (on->onode.extent_map_shards.empty()) {
       denc(on->extent_map.inline_bl, p);
       on->extent_map.decode_some(on->extent_map.inline_bl);
@@ -2632,6 +2650,64 @@ BlueStore::BlueStore(CephContext *cct, const string& path)
   }
 }
+BlueStore::BlueStore(CephContext *cct,
+  const string& path,
+  uint64_t _min_alloc_size)
+  : ObjectStore(cct, path),
+    bluefs(NULL),
+    bluefs_shared_bdev(0),
+    db(NULL),
+    bdev(NULL),
+    fm(NULL),
+    alloc(NULL),
+    path_fd(-1),
+    fsid_fd(-1),
+    mounted(false),
+    coll_lock("BlueStore::coll_lock"),
+    throttle_ops(cct, "bluestore_max_ops", cct->_conf->bluestore_max_ops),
+    throttle_bytes(cct, "bluestore_max_bytes", cct->_conf->bluestore_max_bytes),
+    throttle_wal_ops(cct, "bluestore_wal_max_ops",
+                     cct->_conf->bluestore_max_ops +
+                     cct->_conf->bluestore_wal_max_ops),
+    throttle_wal_bytes(cct, "bluestore_wal_max_bytes",
+                       cct->_conf->bluestore_max_bytes +
+                       cct->_conf->bluestore_wal_max_bytes),
+    wal_tp(cct,
+           "BlueStore::wal_tp",
+           "tp_wal",
+           cct->_conf->bluestore_sync_wal_apply ? 0 : cct->_conf->bluestore_wal_threads,
+           "bluestore_wal_threads"),
+    wal_wq(this,
+           cct->_conf->bluestore_wal_thread_timeout,
+           cct->_conf->bluestore_wal_thread_suicide_timeout,
+           &wal_tp),
+    m_finisher_num(1),
+    kv_sync_thread(this),
+    kv_stop(false),
+    logger(NULL),
+    debug_read_error_lock("BlueStore::debug_read_error_lock"),
+    csum_type(Checksummer::CSUM_CRC32C),
+    min_alloc_size(_min_alloc_size),
+    min_alloc_size_order(ctz(_min_alloc_size)),
+    sync_wal_apply(cct->_conf->bluestore_sync_wal_apply),
+    mempool_thread(this)
+{
+  _init_logger();
+  cct->_conf->add_observer(this);
+  set_cache_shards(1);
+
+  if (cct->_conf->bluestore_shard_finishers) {
+    m_finisher_num = cct->_conf->osd_op_num_shards;
+  }
+
+  for (int i = 0; i < m_finisher_num; ++i) {
+    ostringstream oss;
+    oss << "finisher-" << i;
+    Finisher *f = new Finisher(cct, oss.str(), "finisher");
+    finishers.push_back(f);
+  }
+}
+
 BlueStore::~BlueStore()
 {
   for (auto f : finishers) {
@@ -4903,7 +4979,7 @@ bool BlueStore::exists(const coll_t& cid, const ghobject_t& oid)
 bool BlueStore::exists(CollectionHandle &c_, const ghobject_t& oid)
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(10) << __func__ << " " << c->cid << " " << oid << dendl;
   if (!c->exists)
     return false;
@@ -4939,7 +5015,7 @@ int BlueStore::stat(
   struct stat *st,
   bool allow_eio)
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   if (!c->exists)
     return -ENOENT;
   dout(10) << __func__ << " " << c->get_cid() << " " << oid << dendl;
@@ -4970,7 +5046,7 @@ int BlueStore::set_collection_opts(
   CollectionHandle ch = _get_collection(cid);
   if (!ch)
     return -ENOENT;
-  Collection *c = static_cast<Collection*>(ch.get());
+  Collection *c = static_cast<Collection *>(ch.get());
   dout(15) << __func__ << " " << cid << " options " << opts << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -5003,7 +5079,7 @@ int BlueStore::read(
   uint32_t op_flags,
   bool allow_eio)
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   const coll_t &cid = c->get_cid();
   dout(15) << __func__ << " " << cid << " " << oid << " 0x" << std::hex << offset << "~" << length << std::dec
@@ -5368,7 +5444,7 @@ int BlueStore::fiemap(
   size_t length,
   bufferlist& bl)
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   if (!c->exists)
     return -ENOENT;
   interval_set<uint64_t> m;
@@ -5451,7 +5527,7 @@ int BlueStore::getattr(
   const char *name,
   bufferptr& value)
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->cid << " " << oid << " " << name << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -5502,7 +5578,7 @@ int BlueStore::getattrs(
   const ghobject_t& oid,
   map<string,bufferptr>& aset)
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->cid << " " << oid << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -5591,7 +5667,7 @@ int BlueStore::collection_list(
   bool sort_bitwise, int max, vector<ghobject_t> *ls, ghobject_t *pnext)
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->cid << " start " << start << " end " << end << " max " << max << dendl;
   int r;
@@ -5609,7 +5685,7 @@ int BlueStore::collection_list(
 }
 int BlueStore::_collection_list(
-  Collection* c, const ghobject_t& start, const ghobject_t& end,
+  Collection *c, const ghobject_t& start, const ghobject_t& end,
   bool sort_bitwise, int max, vector<ghobject_t> *ls, ghobject_t *pnext)
 {
@@ -5828,7 +5904,7 @@ int BlueStore::omap_get(
   map<string, bufferlist> *out ///< [out] Key to value map
   )
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -5891,7 +5967,7 @@ int BlueStore::omap_get_header(
   bool allow_eio ///< [in] don't assert on eio
   )
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -5938,7 +6014,7 @@ int BlueStore::omap_get_keys(
   set<string> *keys ///< [out] Keys defined on oid
   )
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -5997,7 +6073,7 @@ int BlueStore::omap_get_values(
   map<string, bufferlist> *out ///< [out] Returned keys and values
   )
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -6050,7 +6126,7 @@ int BlueStore::omap_check_keys(
   set<string> *out ///< [out] Subset of keys defined on oid
   )
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(15) << __func__ << " " << c->get_cid() << " oid " << oid << dendl;
   if (!c->exists)
     return -ENOENT;
@@ -6104,7 +6180,7 @@ ObjectMap::ObjectMapIterator BlueStore::get_omap_iterator(
   const ghobject_t &oid ///< [in] object
   )
 {
-  Collection *c = static_cast<Collection*>(c_.get());
+  Collection *c = static_cast<Collection *>(c_.get());
   dout(10) << __func__ << " " << c->get_cid() << " " << oid << dendl;
   if (!c->exists) {
     return ObjectMap::ObjectMapIterator();
   }
@@ -6463,7 +6539,7 @@ void BlueStore::_txc_write_nodes(TransContext *txc, KeyValueDB::Transaction t)
       t->rmkey(PREFIX_OBJ, key);
     }
     o->extent_map.fault_range(db, 0, o->onode.size);
-    o->extent_map.reshard(min_alloc_size);
+    o->extent_map.reshard();
     reshard = o->extent_map.update(t, true);
     if (reshard) {
       dout(20) << __func__ << " warning: still wants reshard, check options?"
@@ -8045,7 +8121,7 @@ void BlueStore::_wctx_finish(
     BlobRef b = lo.blob;
     const bluestore_blob_t& blob = b->get_blob();
     vector<bluestore_pextent_t> r;
-    if (b->put_ref(lo.blob_offset, lo.length, min_alloc_size, &r)) {
+    if (b->put_ref(c.get(), lo.blob_offset, lo.length, &r)) {
       if (blob.is_compressed()) {
        txc->statfs_delta.compressed() -= blob.get_compressed_payload_length();
       }
@@ -8079,7 +8155,7 @@ void BlueStore::_wctx_finish(
       // longer allocated. Note that this will leave behind edge bits
       // that are no longer referenced but not deallocated (until they
      // age out of the cache naturally).
-      b->discard_unallocated();
+      b->discard_unallocated(c.get());
       for (auto e : r) {
        dout(20) << __func__ << " release " << e << dendl;
        txc->released.insert(e.offset, e.length);
@@ -8366,6 +8442,7 @@ int BlueStore::_do_truncate(
 {
   dout(15) << __func__ << " " << c->cid << " " << o->oid << " 0x" << std::hex << offset << std::dec << dendl;
+  _dump_onode(o, 30);
   if (offset == o->onode.size)
@@ -8836,7 +8913,7 @@ int BlueStore::_do_clone_range(
     } else if (!e.blob->shared_blob->loaded) {
       c->load_shared_blob(e.blob->shared_blob);
     }
-    cb = new Blob(cct);
+    cb = new Blob();
     e.blob->last_encoded_id = n;
     id_to_blob[n] = cb;
     e.blob->dup(*cb);
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index 83483fefa7f..78bac20435b 100644
--- a/src/os/bluestore/BlueStore.h
+++ b/src/os/bluestore/BlueStore.h
@@ -123,6 +123,7 @@ public:
   typedef map<uint64_t, bufferlist> ready_regions_t;
   struct BufferSpace;
+  struct Collection;
   /// cached buffer
   struct Buffer {
@@ -327,7 +328,7 @@ public:
     // these are defined/set if the blob is marked 'shared'
     uint64_t sbid = 0;          ///< shared blob id
-    Collection* coll = nullptr;
+    Collection *coll = nullptr;
     BufferSpace bc;             ///< buffer cache
     SharedBlob(Collection *_coll) : coll(_coll) {
@@ -408,7 +409,6 @@ public:
   /// in-memory blob metadata and associated cached buffers (if any)
   struct Blob {
     MEMPOOL_CLASS_HELPERS();
-    CephContext* cct;
     std::atomic_int nref = {0};  ///< reference count
     int16_t id = -1;             ///< id, for spanning blobs only, >= 0
@@ -424,9 +424,6 @@ public:
     bluestore_extent_ref_map_t ref_map;
   public:
-    Blob(CephContext* cct) : cct(cct) {}
-    ~Blob() {
-    }
     friend void intrusive_ptr_add_ref(Blob *b) { b->get(); }
     friend void intrusive_ptr_release(Blob *b) { b->put(); }
@@ -468,18 +465,18 @@ public:
     }
     /// discard buffers for unallocated regions
-    void discard_unallocated();
+    void discard_unallocated(Collection *coll);
     /// get logical references
     void get_ref(uint64_t offset, uint64_t length);
     /// put logical references, and get back any released extents
-    bool put_ref(uint64_t offset, uint64_t length, uint64_t min_alloc_size,
+    bool put_ref(Collection *coll, uint64_t offset, uint64_t length,
                  vector<bluestore_pextent_t> *r);
     /// pass references for specific range to other blob
     void pass_ref(Blob* other, uint64_t src_offset, uint64_t length, uint64_t dest_offset);
     /// split the blob
-    void split(size_t blob_offset, Blob *o);
+    void split(Collection *coll, size_t blob_offset, Blob *o);
     void get() {
       ++nref;
     }
@@ -623,12 +620,10 @@ public:
   friend ostream& operator<<(ostream& out, const Extent& e);
-  struct Collection;
   struct Onode;
   /// a sharded extent map, mapping offsets to lextents to blobs
   struct ExtentMap {
-    CephContext* cct;
     Onode *onode;
     extent_map_t extent_map;        ///< map of Extents to Blobs
     blob_map_t spanning_blob_map;   ///< blobs that span shards
@@ -649,7 +644,7 @@ public:
       void operator()(Extent *e) { delete e; }
     };
-    ExtentMap(CephContext* cct, Onode *o);
+    ExtentMap(Onode *o);
     ~ExtentMap() {
       extent_map.clear_and_dispose(DeleteDisposer());
     }
@@ -667,7 +662,7 @@ public:
     void bound_encode_spanning_blobs(size_t& p);
     void encode_spanning_blobs(bufferlist::contiguous_appender& p);
-    void decode_spanning_blobs(Collection *c, bufferptr::iterator& p);
+    void decode_spanning_blobs(bufferptr::iterator& p);
     BlobRef get_spanning_blob(int id) {
       auto p = spanning_blob_map.find(id);
@@ -676,7 +671,7 @@ public:
     }
     bool update(KeyValueDB::Transaction t, bool force);
-    void reshard(uint64_t min_alloc_size);
+    void reshard();
     /// initialize Shards from the onode
     void init_shards(bool loaded, bool dirty);
@@ -796,7 +791,7 @@ public:
       oid(o),
       key(k),
       exists(false),
-      extent_map(c->store->cct, this) {
+      extent_map(this) {
     }
     void flush();
@@ -1112,7 +1107,7 @@ public:
     void make_blob_shared(uint64_t sbid, BlobRef b);
     BlobRef new_blob() {
-      BlobRef b = new Blob(store->cct);
+      BlobRef b = new Blob();
       b->shared_blob = new SharedBlob(this);
       return b;
     }
@@ -1728,6 +1723,7 @@ private:
   }
 public:
   BlueStore(CephContext *cct, const string& path);
+  BlueStore(CephContext *cct, const string& path, uint64_t min_alloc_size); // Ctor for UT only
   ~BlueStore();
   string get_type() override {
diff --git a/src/test/objectstore/test_bluestore_types.cc b/src/test/objectstore/test_bluestore_types.cc
index f920ab532a4..38b0e8dacc8 100644
--- a/src/test/objectstore/test_bluestore_types.cc
+++ b/src/test/objectstore/test_bluestore_types.cc
@@ -317,7 +317,11 @@ TEST(bluestore_blob_t, csum_bench)
 TEST(Blob, put_ref)
 {
   {
-    BlueStore::Blob b(g_ceph_context);
+    BlueStore store(g_ceph_context, "", 4096);
+    BlueStore::Cache *cache = BlueStore::Cache::create(
+      g_ceph_context, "lru", NULL);
+    BlueStore::Collection coll(&store, cache, coll_t());
+    BlueStore::Blob b;
     b.shared_blob = new BlueStore::SharedBlob(nullptr);
     b.shared_blob->get();  // hack to avoid dtor from running
     b.dirty_blob().extents.push_back(bluestore_pextent_t(0x40715000, 0x2000));
@@ -329,21 +333,24 @@ TEST(Blob, put_ref)
     cout << b << std::endl;
     vector<bluestore_pextent_t> r;
-    b.put_ref(0, 0x1200, 0x1000, &r);
+    b.put_ref(&coll, 0, 0x1200, &r);
     cout << " r " << r << std::endl;
     cout << b << std::endl;
     r.clear();
-    b.put_ref(0xae00, 0x4200, 0x1000, &r);
+    b.put_ref(&coll, 0xae00, 0x4200, &r);
     cout << " r " << r << std::endl;
     cout << b << std::endl;
   }
   unsigned mas = 4096;
-  unsigned mrs = 8192;
+  BlueStore store(g_ceph_context, "", 8192);
+  BlueStore::Cache *cache = BlueStore::Cache::create(
+    g_ceph_context, "lru", NULL);
+  BlueStore::Collection coll(&store, cache, coll_t());
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -351,7 +358,7 @@ TEST(Blob, put_ref)
     b.extents.push_back(bluestore_pextent_t(0, mas*2));
     B.get_ref(0, mas*2);
     ASSERT_TRUE(b.is_allocated(0, mas*2));
-    B.put_ref(0, mas*2, mrs, &r);
+    B.put_ref(&coll, 0, mas*2, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0u, r[0].offset);
@@ -363,18 +370,18 @@ TEST(Blob, put_ref)
     ASSERT_EQ(mas*2, b.extents[0].length);
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
     vector<bluestore_pextent_t> r;
     b.extents.push_back(bluestore_pextent_t(123, mas*2));
     B.get_ref(0, mas*2);
-    B.put_ref(0, mas, mrs, &r);
+    B.put_ref(&coll, 0, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*2));
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(123u, r[0].offset);
@@ -384,7 +391,7 @@ TEST(Blob, put_ref)
     ASSERT_EQ(mas*2, b.extents[0].length);
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -394,17 +401,17 @@ TEST(Blob, put_ref)
     b.extents.push_back(bluestore_pextent_t(3, mas));
     b.extents.push_back(bluestore_pextent_t(4, mas));
     B.get_ref(0, mas*4);
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*4));
     ASSERT_TRUE(b.is_allocated(mas, mas));
-    B.put_ref(mas*2, mas, mrs, &r);
+    B.put_ref(&coll, mas*2, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(mas*2, mas));
     ASSERT_TRUE(b.is_allocated(0, mas*4));
-    B.put_ref(mas*3, mas, mrs, &r);
+    B.put_ref(&coll, mas*3, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(2u, r.size());
     ASSERT_EQ(3u, r[0].offset);
@@ -419,7 +426,7 @@ TEST(Blob, put_ref)
     ASSERT_EQ(3u, b.extents.size());
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -431,15 +438,15 @@ TEST(Blob, put_ref)
     b.extents.push_back(bluestore_pextent_t(5, mas));
     b.extents.push_back(bluestore_pextent_t(6, mas));
     B.get_ref(0, mas*6);
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*6));
-    B.put_ref(mas*2, mas, mrs, &r);
+    B.put_ref(&coll, mas*2, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*6));
-    B.put_ref(mas*3, mas, mrs, &r);
+    B.put_ref(&coll, mas*3, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(2u, r.size());
     ASSERT_EQ(3u, r[0].offset);
@@ -457,22 +464,22 @@ TEST(Blob, put_ref)
     ASSERT_TRUE(b.extents[4].is_valid());
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
     vector<bluestore_pextent_t> r;
     b.extents.push_back(bluestore_pextent_t(1, mas * 6));
     B.get_ref(0, mas*6);
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*6));
-    B.put_ref(mas*2, mas, mrs, &r);
+    B.put_ref(&coll, mas*2, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*6));
-    B.put_ref(mas*3, mas, mrs, &r);
+    B.put_ref(&coll, mas*3, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0x2001u, r[0].offset);
@@ -486,7 +493,7 @@ TEST(Blob, put_ref)
     ASSERT_TRUE(b.extents[2].is_valid());
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -495,15 +502,15 @@ TEST(Blob, put_ref)
     b.extents.push_back(bluestore_pextent_t(2, mas * 4));
     b.extents.push_back(bluestore_pextent_t(3, mas * 4));
     B.get_ref(0, mas*12);
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*12));
-    B.put_ref(mas*9, mas, mrs, &r);
+    B.put_ref(&coll, mas*9, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*12));
-    B.put_ref(mas*2, mas*7, mrs, &r);
+    B.put_ref(&coll, mas*2, mas*7, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(3u, r.size());
     ASSERT_EQ(0x2001u, r[0].offset);
@@ -521,7 +528,7 @@ TEST(Blob, put_ref)
     ASSERT_TRUE(b.extents[2].is_valid());
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -530,15 +537,15 @@ TEST(Blob, put_ref)
     b.extents.push_back(bluestore_pextent_t(2, mas * 4));
     b.extents.push_back(bluestore_pextent_t(3, mas * 4));
     B.get_ref(0, mas*12);
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*12));
-    B.put_ref(mas*9, mas, mrs, &r);
+    B.put_ref(&coll, mas*9, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*12));
-    B.put_ref(mas*2, mas*7, mrs, &r);
+    B.put_ref(&coll, mas*2, mas*7, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(3u, r.size());
     ASSERT_EQ(0x2001u, r[0].offset);
@@ -554,7 +561,7 @@ TEST(Blob, put_ref)
     ASSERT_TRUE(b.extents[0].is_valid());
     ASSERT_FALSE(b.extents[1].is_valid());
     ASSERT_TRUE(b.extents[2].is_valid());
-    B.put_ref(0, mas, mrs, &r);
+    B.put_ref(&coll, 0, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0x1u, r[0].offset);
@@ -562,7 +569,7 @@ TEST(Blob, put_ref)
     ASSERT_EQ(2u, b.extents.size());
     ASSERT_FALSE(b.extents[0].is_valid());
     ASSERT_TRUE(b.extents[1].is_valid());
-    B.put_ref(mas*10, mas*2, mrs, &r);
+    B.put_ref(&coll, mas*10, mas*2, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0x2003u, r[0].offset);
@@ -571,7 +578,7 @@ TEST(Blob, put_ref)
     ASSERT_FALSE(b.extents[0].is_valid());
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -580,15 +587,15 @@ TEST(Blob, put_ref)
     b.extents.push_back(bluestore_pextent_t(2, mas * 4));
     b.extents.push_back(bluestore_pextent_t(3, mas * 4));
     B.get_ref(0, mas*12);
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*12));
-    B.put_ref(mas*9, mas, mrs, &r);
+    B.put_ref(&coll, mas*9, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*12));
-    B.put_ref(mas*2, mas*7, mrs, &r);
+    B.put_ref(&coll, mas*2, mas*7, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(3u, r.size());
     ASSERT_EQ(0x2001u, r[0].offset);
@@ -604,7 +611,7 @@ TEST(Blob, put_ref)
     ASSERT_TRUE(b.extents[0].is_valid());
     ASSERT_FALSE(b.extents[1].is_valid());
     ASSERT_TRUE(b.extents[2].is_valid());
-    B.put_ref(mas*10, mas*2, mrs, &r);
+    B.put_ref(&coll, mas*10, mas*2, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0x2003u, r[0].offset);
@@ -612,7 +619,7 @@ TEST(Blob, put_ref)
     ASSERT_EQ(2u, b.extents.size());
     ASSERT_TRUE(b.extents[0].is_valid());
     ASSERT_FALSE(b.extents[1].is_valid());
-    B.put_ref(0, mas, mrs, &r);
+    B.put_ref(&coll, 0, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0x1u, r[0].offset);
@@ -621,26 +628,26 @@ TEST(Blob, put_ref)
     ASSERT_FALSE(b.extents[0].is_valid());
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
     vector<bluestore_pextent_t> r;
     b.extents.push_back(bluestore_pextent_t(1, mas * 8));
     B.get_ref(0, mas*8);
-    B.put_ref(0, mas, mrs, &r);
+    B.put_ref(&coll, 0, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*8));
-    B.put_ref(mas*7, mas, mrs, &r);
+    B.put_ref(&coll, mas*7, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*8));
-    B.put_ref(mas*2, mas, mrs, &r);
+    B.put_ref(&coll, mas*2, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, 8));
-    B.put_ref(mas*3, mas*4, mrs, &r);
+    B.put_ref(&coll, mas*3, mas*4, &r);
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0x2001u, r[0].offset);
     ASSERT_EQ(mas*6, r[0].length);
@@ -649,7 +656,7 @@ TEST(Blob, put_ref)
     ASSERT_EQ(2u, b.extents.size());
     ASSERT_TRUE(b.extents[0].is_valid());
     ASSERT_FALSE(b.extents[1].is_valid());
-    B.put_ref(mas, mas, mrs, &r);
+    B.put_ref(&coll, mas, mas, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(1u, r.size());
     ASSERT_EQ(0x1u, r[0].offset);
@@ -659,7 +666,7 @@ TEST(Blob, put_ref)
   }
   // verify csum chunk size if factored in properly
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -668,7 +675,7 @@ TEST(Blob, put_ref)
     b.init_csum(Checksummer::CSUM_CRC32C, 14, mas * 4);
     B.get_ref(0, mas*4);
     ASSERT_TRUE(b.is_allocated(0, mas*4));
-    B.put_ref(0, mas*3, mrs, &r);
+    B.put_ref(&coll, 0, mas*3, &r);
     cout << "r " << r << " " << b << std::endl;
     ASSERT_EQ(0u, r.size());
     ASSERT_TRUE(b.is_allocated(0, mas*4));
@@ -676,7 +683,7 @@ TEST(Blob, put_ref)
     ASSERT_EQ(mas*4, b.extents[0].length);
   }
   {
-    BlueStore::Blob B(g_ceph_context);
+    BlueStore::Blob B;
     B.shared_blob = new BlueStore::SharedBlob(nullptr);
     B.shared_blob->get();  // hack to avoid dtor from running
     bluestore_blob_t& b = B.dirty_blob();
@@ -691,7 +698,7 @@ TEST(Blob, put_ref)
     cout << "before: " << B << std::endl;
     vector<bluestore_pextent_t> r;
-    B.put_ref(0x1800, 0x2000, 0x1000, &r);
+    B.put_ref(&coll, 0x1800, 0x2000, &r);
     cout << "after: " << B << std::endl;
     cout << "r " << r << std::endl;
   }
@@ -759,19 +766,19 @@ TEST(bluestore_blob_t, prune_tail)
 TEST(Blob, split)
 {
-  BlueStore store(g_ceph_context, "");
+  BlueStore store(g_ceph_context, "", 4096);
   BlueStore::Cache *cache = BlueStore::Cache::create(
     g_ceph_context, "lru", NULL);
   BlueStore::Collection coll(&store, cache, coll_t());
   {
-    BlueStore::Blob L(g_ceph_context), R(g_ceph_context);
+    BlueStore::Blob L, R;
     L.shared_blob = new BlueStore::SharedBlob(&coll);
     L.shared_blob->get();  // hack to avoid dtor from running
     R.shared_blob = new BlueStore::SharedBlob(&coll);
     R.shared_blob->get();  // hack to avoid dtor from running
     L.dirty_blob().extents.emplace_back(bluestore_pextent_t(0x2000, 0x2000));
     L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
-    L.split(0x1000, &R);
+    L.split(&coll, 0x1000, &R);
     ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
     ASSERT_EQ(4u, L.get_blob().csum_data.length());
     ASSERT_EQ(1u, L.get_blob().extents.size());
@@ -784,7 +791,7 @@ TEST(Blob, split)
     ASSERT_EQ(0x1000u, R.get_blob().extents.front().length);
   }
   {
-    BlueStore::Blob L(g_ceph_context), R(g_ceph_context);
+    BlueStore::Blob L, R;
     L.shared_blob = new BlueStore::SharedBlob(&coll);
     L.shared_blob->get();  // hack to avoid dtor from running
     R.shared_blob = new BlueStore::SharedBlob(&coll);
     R.shared_blob->get();  // hack to avoid dtor from running
     L.dirty_blob().extents.emplace_back(bluestore_pextent_t(0x2000, 0x1000));
     L.dirty_blob().extents.emplace_back(bluestore_pextent_t(0x12000, 0x1000));
     L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
-    L.split(0x1000, &R);
+    L.split(&coll, 0x1000, &R);
     ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
     ASSERT_EQ(4u, L.get_blob().csum_data.length());
     ASSERT_EQ(1u, L.get_blob().extents.size());
@@ -808,11 +815,12 @@ TEST(Blob, split)
 TEST(ExtentMap, find_lextent)
 {
-  BlueStore store(g_ceph_context, "");
+  BlueStore store(g_ceph_context, "", 4096);
   BlueStore::LRUCache cache(g_ceph_context);
-  BlueStore::ExtentMap em(g_ceph_context, nullptr);
-  BlueStore::BlobRef br(new BlueStore::Blob(g_ceph_context));
   BlueStore::Collection coll(&store, &cache, coll_t());
+  BlueStore::Onode onode(&coll, ghobject_t(), "");
+  BlueStore::ExtentMap em(&onode);
+  BlueStore::BlobRef br(new BlueStore::Blob);
   br->shared_blob = new BlueStore::SharedBlob(&coll);
   ASSERT_EQ(em.extent_map.end(), em.find_lextent(0));
@@ -856,11 +864,12 @@ TEST(ExtentMap, find_lextent)
 TEST(ExtentMap, seek_lextent)
 {
-  BlueStore store(g_ceph_context, "");
+  BlueStore store(g_ceph_context, "", 4096);
   BlueStore::LRUCache cache(g_ceph_context);
-  BlueStore::ExtentMap em(g_ceph_context, nullptr);
-  BlueStore::BlobRef br(new BlueStore::Blob(g_ceph_context));
   BlueStore::Collection coll(&store, &cache, coll_t());
+  BlueStore::Onode onode(&coll, ghobject_t(), "");
+  BlueStore::ExtentMap em(&onode);
+  BlueStore::BlobRef br(new BlueStore::Blob);
   br->shared_blob = new BlueStore::SharedBlob(&coll);
   ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
@@ -904,11 +913,12 @@ TEST(ExtentMap, seek_lextent)
 TEST(ExtentMap, has_any_lextents)
 {
-  BlueStore store(g_ceph_context, "");
+  BlueStore store(g_ceph_context, "", 4096);
   BlueStore::LRUCache cache(g_ceph_context);
-  BlueStore::ExtentMap em(g_ceph_context, nullptr);
-  BlueStore::BlobRef b(new BlueStore::Blob(g_ceph_context));
   BlueStore::Collection coll(&store, &cache, coll_t());
+  BlueStore::Onode onode(&coll, ghobject_t(), "");
+  BlueStore::ExtentMap em(&onode);
+  BlueStore::BlobRef b(new BlueStore::Blob);
   b->shared_blob = new BlueStore::SharedBlob(&coll);
   ASSERT_FALSE(em.has_any_lextents(0, 0));
@@ -950,13 +960,14 @@ TEST(ExtentMap, has_any_lextents)
 TEST(ExtentMap, compress_extent_map)
 {
-  BlueStore store(g_ceph_context, "");
+  BlueStore store(g_ceph_context, "", 4096);
   BlueStore::LRUCache cache(g_ceph_context);
-  BlueStore::ExtentMap em(g_ceph_context, nullptr);
-  BlueStore::BlobRef b1(new BlueStore::Blob(g_ceph_context));
-  BlueStore::BlobRef b2(new BlueStore::Blob(g_ceph_context));
-  BlueStore::BlobRef b3(new BlueStore::Blob(g_ceph_context));
   BlueStore::Collection coll(&store, &cache, coll_t());
+  BlueStore::Onode onode(&coll, ghobject_t(), "");
+  BlueStore::ExtentMap em(&onode);
+  BlueStore::BlobRef b1(new BlueStore::Blob);
+  BlueStore::BlobRef b2(new BlueStore::Blob);
+  BlueStore::BlobRef b3(new BlueStore::Blob);
   b1->shared_blob = new BlueStore::SharedBlob(&coll);
   b2->shared_blob = new BlueStore::SharedBlob(&coll);
   b3->shared_blob = new BlueStore::SharedBlob(&coll);
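
Editor's note, a minimal sketch of the access pattern this commit standardizes (not part of the commit itself): nested helper classes stop caching their own CephContext* / min_alloc_size copies and instead reach store-wide state through the Collection pointer that callers now pass in (coll->store->cct, coll->store->min_alloc_size). The types below (DemoContext, DemoStore, DemoCollection, DemoBlob) are hypothetical stand-ins, not BlueStore classes.

    // Hypothetical stand-ins illustrating the refactoring direction; not BlueStore code.
    #include <cstdint>
    #include <iostream>

    struct DemoContext { int debug_level = 20; };

    struct DemoStore {
      DemoContext *cct;        // store-wide context
      uint64_t min_alloc_size; // store-wide allocation granularity
    };

    struct DemoCollection {
      DemoStore *store;        // children reach store-wide state through this pointer
    };

    struct DemoBlob {
      // Before: the blob carried its own cct/min_alloc_size members.
      // After: callers pass the owning DemoCollection*, and the blob derives
      // whatever it needs from coll->store on demand.
      void discard_unallocated(DemoCollection *coll) {
        auto cct = coll->store->cct;              // used only for debug output here
        std::cout << "debug level " << cct->debug_level << "\n";
      }
      bool put_ref(DemoCollection *coll, uint64_t offset, uint64_t length) {
        auto min_release = coll->store->min_alloc_size;  // no extra parameter needed
        return (offset % min_release == 0) && (length >= min_release);
      }
    };

    int main() {
      DemoContext ctx;
      DemoStore store{&ctx, 4096};
      DemoCollection coll{&store};
      DemoBlob blob;
      blob.discard_unallocated(&coll);
      std::cout << blob.put_ref(&coll, 0, 8192) << "\n";
      return 0;
    }

The trade-off visible in the diff above is the same: one extra Collection* argument threaded through put_ref()/split()/discard_unallocated() in exchange for dropping the per-object cct members and the min_release_size parameter.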