return r;
}
-bool Infiniband::MemoryManager::pool_context::can_alloc(unsigned nbufs)
+bool Infiniband::MemoryManager::MemPoolContext::can_alloc(unsigned nbufs)
{
- /* unimited */
+ /* unlimited */
if (manager->cct->_conf->ms_async_rdma_receive_buffers <= 0)
return true;
return true;
}
-void Infiniband::MemoryManager::pool_context::set_stat_logger(PerfCounters *logger) {
+void Infiniband::MemoryManager::MemPoolContext::set_stat_logger(PerfCounters *logger) {
perf_logger = logger;
if (perf_logger != nullptr)
perf_logger->set(l_msgr_rdma_rx_bufs_total, n_bufs_allocated);
}
-void Infiniband::MemoryManager::pool_context::update_stats(int nbufs)
+void Infiniband::MemoryManager::MemPoolContext::update_stats(int nbufs)
{
n_bufs_allocated += nbufs;
void *Infiniband::MemoryManager::mem_pool::slow_malloc()
{
- void *p;
+ void *p;
- Mutex::Locker l(PoolAllocator::lock);
- PoolAllocator::g_ctx = ctx;
- // this will trigger pool expansion via PoolAllocator::malloc()
- p = boost::pool<PoolAllocator>::malloc();
- PoolAllocator::g_ctx = nullptr;
- return p;
+ Mutex::Locker l(PoolAllocator::lock);
+ PoolAllocator::g_ctx = ctx;
+ // this will trigger pool expansion via PoolAllocator::malloc()
+ p = boost::pool<PoolAllocator>::malloc();
+ PoolAllocator::g_ctx = nullptr;
+ return p;
}
-Infiniband::MemoryManager::pool_context *Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
+Infiniband::MemoryManager::MemPoolContext *Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
Mutex Infiniband::MemoryManager::PoolAllocator::lock("pool-alloc-lock");
// lock is taken by mem_pool::slow_malloc()
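For reference, the g_ctx handoff performed by slow_malloc() above is the usual way to pass per-call state into boost::pool's static user-allocator hooks, which have no context argument of their own. The standalone sketch below shows the same pattern with invented names (AllocContext, CtxAllocator) and a plain std::mutex in place of Ceph's Mutex; it is an illustration of the technique, not the Ceph implementation.

// Sketch of the "static context under a lock" handoff used by slow_malloc().
// AllocContext/CtxAllocator are hypothetical names for illustration only.
#include <boost/pool/pool.hpp>
#include <cstdlib>
#include <iostream>
#include <mutex>

struct AllocContext {
  unsigned n_blocks_allocated = 0;   // simple bookkeeping, like n_bufs_allocated
};

struct CtxAllocator {                // boost::pool UserAllocator concept
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  static AllocContext *g_ctx;        // only valid while 'lock' is held
  static std::mutex lock;

  static char *malloc(const size_type bytes) {
    // g_ctx tells the hook who is asking, so per-context stats can be kept
    if (g_ctx)
      g_ctx->n_blocks_allocated++;
    return static_cast<char *>(std::malloc(bytes));
  }
  static void free(char *const block) {
    std::free(block);
  }
};

AllocContext *CtxAllocator::g_ctx = nullptr;
std::mutex CtxAllocator::lock;

int main() {
  AllocContext rx_ctx;
  boost::pool<CtxAllocator> pool(64);  // 64-byte chunks

  // Slow-path style allocation: publish the context, let the pool expand,
  // then clear the context again before releasing the lock.
  void *p;
  {
    std::lock_guard<std::mutex> l(CtxAllocator::lock);
    CtxAllocator::g_ctx = &rx_ctx;
    p = pool.malloc();               // may call CtxAllocator::malloc()
    CtxAllocator::g_ctx = nullptr;
  }
  std::cout << "pool expansions seen by rx_ctx: "
            << rx_ctx.n_blocks_allocated << "\n";
  pool.free(p);
  return 0;
}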
Chunk* chunk_base = nullptr;
};
- class pool_context {
- PerfCounters *perf_logger;
-
- public:
- MemoryManager *manager;
- unsigned n_bufs_allocated;
- // true if it is possible to alloc
- // more memory for the pool
- pool_context(MemoryManager *m) :
- perf_logger(nullptr),
- manager(m),
- n_bufs_allocated(0) {}
- bool can_alloc(unsigned nbufs);
- void update_stats(int val);
- void set_stat_logger(PerfCounters *logger);
+ class MemPoolContext {
+ PerfCounters *perf_logger;
+
+ public:
+ MemoryManager *manager;
+ unsigned n_bufs_allocated;
+ // true if it is possible to alloc
+ // more memory for the pool
+ MemPoolContext(MemoryManager *m) :
+ perf_logger(nullptr),
+ manager(m),
+ n_bufs_allocated(0) {}
+ bool can_alloc(unsigned nbufs);
+ void update_stats(int val);
+ void set_stat_logger(PerfCounters *logger);
};
class PoolAllocator {
- struct mem_info {
- ibv_mr *mr;
- pool_context *ctx;
- unsigned nbufs;
- Chunk chunks[0];
- };
- public:
- typedef std::size_t size_type;
- typedef std::ptrdiff_t difference_type;
-
- static char * malloc(const size_type bytes);
- static void free(char * const block);
-
- static pool_context *g_ctx;
- static Mutex lock;
+ struct mem_info {
+ ibv_mr *mr;
+ MemPoolContext *ctx;
+ unsigned nbufs;
+ Chunk chunks[0];
+ };
+ public:
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+
+ static char * malloc(const size_type bytes);
+ static void free(char * const block);
+
+ static MemPoolContext *g_ctx;
+ static Mutex lock;
};
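The mem_info struct above packs a header (the registered ibv_mr, the owning context, and the buffer count) and the chunk array into one allocation, using the zero-length-array trick so the chunks sit directly behind the header. The sketch below (BlockHeader and FakeChunk are illustrative stand-ins, not the Ceph types) shows how such a block would be sized and how the header could be recovered from the chunk-array pointer, which is the kind of walk-back a matching free() would need to do.

// Sketch of a header-plus-flexible-array block layout; names and byte math
// are illustrative only.
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <new>

struct FakeChunk { char data[32]; };

struct BlockHeader {                 // stands in for mem_info
  void     *mr;                      // would be the ibv_mr*
  void     *ctx;                     // would be the MemPoolContext*
  unsigned  nbufs;
  FakeChunk chunks[0];               // zero-length-array tail, as in mem_info
};

int main() {
  const unsigned nbufs = 4;
  // One allocation holds the header and all of its chunks.
  size_t total = sizeof(BlockHeader) + nbufs * sizeof(FakeChunk);
  BlockHeader *hdr = new (std::malloc(total)) BlockHeader;
  hdr->mr = nullptr;
  hdr->ctx = nullptr;
  hdr->nbufs = nbufs;

  // The pool only ever sees the chunk array...
  char *block = reinterpret_cast<char *>(hdr->chunks);

  // ...and the header can be found again by stepping back over it.
  BlockHeader *recovered = reinterpret_cast<BlockHeader *>(
      block - offsetof(BlockHeader, chunks));
  std::cout << "recovered nbufs: " << recovered->nbufs << "\n";
  std::free(recovered);
  return 0;
}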
- // modify boost pool so that it is possible to
- // have a thread safe 'context' when allocating/freeing
- // the memory. It is needed to allow a different pool
- // configurations and bookkeeping per CephContext and
- // also to be able // to use same allocator to deal with
- // RX and TX pool.
- // TODO: use boost pool to allocate TX chunks too
+ /**
+ * modify the boost pool so that a thread-safe 'context' is available
+ * when allocating/freeing memory. This is needed to allow different
+ * pool configurations and bookkeeping per CephContext, and to let the
+ * same allocator handle both the RX and TX pools.
+ * TODO: use the boost pool to allocate TX chunks too
+ */
class mem_pool : public boost::pool<PoolAllocator> {
- private:
- pool_context *ctx;
- void *slow_malloc();
-
- public:
- explicit mem_pool(pool_context *ctx, const size_type nrequested_size,
- const size_type nnext_size = 32,
- const size_type nmax_size = 0) :
- pool(nrequested_size, nnext_size, nmax_size),
- ctx(ctx) { }
-
- void *malloc() {
- if (!store().empty())
- return (store().malloc)();
- // need to alloc more memory...
- // slow path code
- return slow_malloc();
- }
+ private:
+ MemPoolContext *ctx;
+ void *slow_malloc();
+
+ public:
+ explicit mem_pool(MemPoolContext *ctx, const size_type nrequested_size,
+ const size_type nnext_size = 32,
+ const size_type nmax_size = 0) :
+ pool(nrequested_size, nnext_size, nmax_size),
+ ctx(ctx) { }
+
+ void *malloc() {
+ if (!store().empty())
+ return (store().malloc)();
+ // need to alloc more memory...
+ // slow path code
+ return slow_malloc();
+ }
};
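The mem_pool::malloc() override above splits allocation into a fast path (pop an existing chunk from the pool's store) and a slow path (slow_malloc(), which takes the lock, publishes g_ctx, and grows the pool). As a rough standalone illustration of that split, here is a minimal free-list pool with invented names; it is not the boost::pool machinery, only the same fast/slow shape.

// Sketch of a fast-path/slow-path allocator split; FreeListPool and its
// members are illustrative names only.
#include <cstdlib>
#include <iostream>
#include <vector>

class FreeListPool {
  std::vector<void *> free_chunks;
  size_t chunk_size;

  // slow path: only reached when the free list is exhausted; in the real
  // code this is where the lock is taken and the pool is expanded
  void *slow_alloc() {
    for (int i = 0; i < 32; i++)
      free_chunks.push_back(std::malloc(chunk_size));
    void *p = free_chunks.back();
    free_chunks.pop_back();
    return p;
  }

public:
  explicit FreeListPool(size_t chunk_size) : chunk_size(chunk_size) {}
  ~FreeListPool() {
    for (void *p : free_chunks)
      std::free(p);
  }

  // fast path: hand out an existing chunk with no expansion work
  void *alloc() {
    if (!free_chunks.empty()) {
      void *p = free_chunks.back();
      free_chunks.pop_back();
      return p;
    }
    return slow_alloc();
  }

  void free(void *p) { free_chunks.push_back(p); }
};

int main() {
  FreeListPool pool(128);
  void *a = pool.alloc();   // first call goes through the slow path
  void *b = pool.alloc();   // subsequent calls hit the fast path
  pool.free(a);
  pool.free(b);
  std::cout << "allocated and returned two chunks\n";
  return 0;
}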
MemoryManager(CephContext *c, Device *d, ProtectionDomain *p);
Cluster* send;// SEND
Device *device;
ProtectionDomain *pd;
- pool_context rxbuf_pool_ctx;
+ MemPoolContext rxbuf_pool_ctx;
mem_pool rxbuf_pool;
void* huge_pages_malloc(size_t size);