#define dout_subsys ceph_subsys_bluestore
+// Mempool allocation factories for BlueStore cache metadata.  Each factory
+// binds a type (or map/unordered_map instantiation) to a named mempool so
+// allocations are routed through it — presumably for per-pool memory
+// accounting; see include/mempool.h for the macro definitions (TODO confirm).
+
+// bluestore_meta_onode: Onode objects get their own pool, separate from
+// all other cached metadata below.
+MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Onode, bluestore_onode,
+ bluestore_meta_onode);
+
+// bluestore_meta_other: everything else cached in memory — buffers,
+// extents, blobs, shared blobs, and the containers that index them.
+MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Buffer, bluestore_buffer,
+ bluestore_meta_other);
+// backs Buffer::buffer_map (uint64_t -> unique_ptr<Buffer>)
+MEMPOOL_DEFINE_MAP_FACTORY(uint64_t, std::unique_ptr<BlueStore::Buffer>,
+ bluestore_uint64_Buffer, bluestore_meta_other);
+MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Extent, bluestore_extent,
+ bluestore_meta_other);
+MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Blob, bluestore_blob,
+ bluestore_meta_other);
+// backs blob_map_t (int -> BlobRef)
+MEMPOOL_DEFINE_MAP_FACTORY(int, BlueStore::BlobRef,
+ bluestore_int_BlobRef, bluestore_meta_other);
+MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::SharedBlob, bluestore_shared_blob,
+ bluestore_meta_other);
+// backs ExtentMap's vector<Shard> (see `shards` member)
+MEMPOOL_DEFINE_FACTORY(BlueStore::ExtentMap::Shard, bluestore_ExtentMap_Shard,
+ bluestore_meta_other);
+
+// unordered_map factories: base factory is emitted once per pool, then one
+// factory per key/value instantiation used by the pool.
+MEMPOOL_DEFINE_UNORDERED_MAP_BASE_FACTORY(bluestore_meta_other);
+// OnodeSpace::onode_map (ghobject_t -> OnodeRef forward lookups)
+MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(ghobject_t, BlueStore::OnodeRef, true,
+ bluestore_ghobject_OnodeRef,
+ bluestore_meta_other);
+// coll_map (coll_t -> CollectionRef)
+MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(coll_t, BlueStore::CollectionRef, true,
+ bluestore_coll_CollectionRef,
+ bluestore_meta_other);
+// sb_map (uint64_t -> bare SharedBlob*; bare pointer so the map does not
+// contribute to the SharedBlob refcount — see comment at the member)
+MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(uint64_t, BlueStore::SharedBlob*, false,
+ bluestore_u64_sharedblob,
+ bluestore_meta_other);
+
+// kv store prefixes: each single-letter string namespaces one family of
+// keys in the backing key/value database.
const string PREFIX_SUPER = "S"; // field -> value
const string PREFIX_STAT = "T"; // field -> value(int64 array)
const string PREFIX_COLL = "C"; // collection name -> cnode_t
#include "include/assert.h"
#include "include/unordered_map.h"
#include "include/memory.h"
+#include "include/mempool.h"
#include "common/Finisher.h"
#include "compressor/Compressor.h"
#include "os/ObjectStore.h"
/// cached buffer
struct Buffer {
+ MEMPOOL_CLASS_HELPERS();
+
enum {
STATE_EMPTY, ///< empty buffer -- used for cache history
STATE_CLEAN, ///< clean data that is up to date
boost::intrusive::list_member_hook<>,
&Buffer::state_item> > state_list_t;
- map<uint64_t,std::unique_ptr<Buffer>> buffer_map;
+ bluestore_meta_other::map<uint64_t, std::unique_ptr<Buffer>> buffer_map;
Cache *cache;
// we use a bare intrusive list here instead of std::map because
/// in-memory shared blob state (incl cached buffers)
struct SharedBlob {
+ MEMPOOL_CLASS_HELPERS();
+
std::atomic_int nref = {0}; ///< reference count
// these are defined/set if the shared_blob is 'loaded'
// we use a bare pointer because we don't want to affect the ref
// count
- std::unordered_map<uint64_t,SharedBlob*> sb_map;
+ bluestore_meta_other::unordered_map<uint64_t,SharedBlob*> sb_map;
SharedBlobRef lookup(uint64_t sbid) {
std::lock_guard<std::mutex> l(lock);
/// in-memory blob metadata and associated cached buffers (if any)
struct Blob {
+ MEMPOOL_CLASS_HELPERS();
+
std::atomic_int nref = {0}; ///< reference count
int16_t id = -1; ///< id, for spanning blobs only, >= 0
int16_t last_encoded_id = -1; ///< (ephemeral) used during encoding only
#endif
};
typedef boost::intrusive_ptr<Blob> BlobRef;
- typedef std::map<int,BlobRef> blob_map_t;
+ typedef bluestore_meta_other::map<int,BlobRef> blob_map_t;
/// a logical extent, pointing to (some portion of) a blob
struct Extent : public boost::intrusive::set_base_hook<boost::intrusive::optimize_size<true>> {
+ MEMPOOL_CLASS_HELPERS();
+
uint32_t logical_offset = 0; ///< logical offset
uint32_t blob_offset = 0; ///< blob offset
uint32_t length = 0; ///< length
bool loaded = false; ///< true if shard is loaded
bool dirty = false; ///< true if shard is dirty and needs reencoding
};
- vector<Shard> shards; ///< shards
+ bluestore_meta_other::vector<Shard> shards; ///< shards
bufferlist inline_bl; ///< cached encoded map, if unsharded; empty=>dirty
/// an in-memory object
struct Onode {
+ MEMPOOL_CLASS_HELPERS();
+
std::atomic_int nref; ///< reference count
Collection *c;
struct OnodeSpace {
Cache *cache;
- ceph::unordered_map<ghobject_t,OnodeRef> onode_map; ///< forward lookups
+
+ /// forward lookups
+ bluestore_meta_other::unordered_map<ghobject_t,OnodeRef> onode_map;
OnodeSpace(Cache *c) : cache(c) {}
~OnodeSpace() {
bool mounted;
RWLock coll_lock; ///< rwlock to protect coll_map
- ceph::unordered_map<coll_t, CollectionRef> coll_map;
+ bluestore_meta_other::unordered_map<coll_t, CollectionRef> coll_map;
vector<Cache*> cache_shards;