From: Alex Mikheev
Date: Sun, 20 Aug 2017 08:52:15 +0000 (+0000)
Subject: msg/async/rdma: refactor rx buffer pool allocator: cr fixes
X-Git-Tag: v13.0.1~1015^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=2e44fde693c945afbd399838272b62bd8b3bc614;p=ceph-ci.git

msg/async/rdma: refactor rx buffer pool allocator: cr fixes

Signed-off-by: Alex Mikheev
---

diff --git a/src/msg/async/rdma/Infiniband.cc b/src/msg/async/rdma/Infiniband.cc
index e155a44888e..db2245dd1e1 100644
--- a/src/msg/async/rdma/Infiniband.cc
+++ b/src/msg/async/rdma/Infiniband.cc
@@ -633,9 +633,9 @@ int Infiniband::MemoryManager::Cluster::get_buffers(std::vector<Chunk*> &chunks,
   return r;
 }
 
-bool Infiniband::MemoryManager::pool_context::can_alloc(unsigned nbufs)
+bool Infiniband::MemoryManager::MemPoolContext::can_alloc(unsigned nbufs)
 {
-  /* unimited */
+  /* unlimited */
   if (manager->cct->_conf->ms_async_rdma_receive_buffers <= 0)
     return true;
 
@@ -649,13 +649,13 @@ bool Infiniband::MemoryManager::pool_context::can_alloc(unsigned nbufs)
   return true;
 }
 
-void Infiniband::MemoryManager::pool_context::set_stat_logger(PerfCounters *logger) {
+void Infiniband::MemoryManager::MemPoolContext::set_stat_logger(PerfCounters *logger) {
   perf_logger = logger;
   if (perf_logger != nullptr)
     perf_logger->set(l_msgr_rdma_rx_bufs_total, n_bufs_allocated);
 }
 
-void Infiniband::MemoryManager::pool_context::update_stats(int nbufs)
+void Infiniband::MemoryManager::MemPoolContext::update_stats(int nbufs)
 {
   n_bufs_allocated += nbufs;
 
@@ -671,17 +671,17 @@ void Infiniband::MemoryManager::pool_context::update_stats(int nbufs)
 
 void *Infiniband::MemoryManager::mem_pool::slow_malloc()
 {
-    void *p;
+  void *p;
 
-    Mutex::Locker l(PoolAllocator::lock);
-    PoolAllocator::g_ctx = ctx;
-    // this will trigger pool expansion via PoolAllocator::malloc()
-    p = boost::pool<PoolAllocator>::malloc();
-    PoolAllocator::g_ctx = nullptr;
-    return p;
+  Mutex::Locker l(PoolAllocator::lock);
+  PoolAllocator::g_ctx = ctx;
+  // this will trigger pool expansion via PoolAllocator::malloc()
+  p = boost::pool<PoolAllocator>::malloc();
+  PoolAllocator::g_ctx = nullptr;
+  return p;
 }
 
-Infiniband::MemoryManager::pool_context *Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
+Infiniband::MemoryManager::MemPoolContext *Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
 Mutex Infiniband::MemoryManager::PoolAllocator::lock("pool-alloc-lock");
 
 // lock is taken by mem_pool::slow_malloc()
diff --git a/src/msg/async/rdma/Infiniband.h b/src/msg/async/rdma/Infiniband.h
index 8a7d1f70ca8..00ee99b8c8a 100644
--- a/src/msg/async/rdma/Infiniband.h
+++ b/src/msg/async/rdma/Infiniband.h
@@ -241,67 +241,69 @@ class Infiniband {
       Chunk* chunk_base = nullptr;
     };
 
-    class pool_context {
-        PerfCounters *perf_logger;
-
-    public:
-        MemoryManager *manager;
-        unsigned n_bufs_allocated;
-        // true if it is possible to alloc
-        // more memory for the pool
-        pool_context(MemoryManager *m) :
-            perf_logger(nullptr),
-            manager(m),
-            n_bufs_allocated(0) {}
-        bool can_alloc(unsigned nbufs);
-        void update_stats(int val);
-        void set_stat_logger(PerfCounters *logger);
+    class MemPoolContext {
+      PerfCounters *perf_logger;
+
+     public:
+      MemoryManager *manager;
+      unsigned n_bufs_allocated;
+      // true if it is possible to alloc
+      // more memory for the pool
+      MemPoolContext(MemoryManager *m) :
+        perf_logger(nullptr),
+        manager(m),
+        n_bufs_allocated(0) {}
+      bool can_alloc(unsigned nbufs);
+      void update_stats(int val);
+      void set_stat_logger(PerfCounters *logger);
     };
 
     class PoolAllocator {
-        struct mem_info {
-            ibv_mr *mr;
-            pool_context *ctx;
-            unsigned nbufs;
-            Chunk chunks[0];
-        };
-    public:
-        typedef std::size_t size_type;
-        typedef std::ptrdiff_t difference_type;
-
-        static char * malloc(const size_type bytes);
-        static void free(char * const block);
-
-        static pool_context *g_ctx;
-        static Mutex lock;
+      struct mem_info {
+        ibv_mr *mr;
+        MemPoolContext *ctx;
+        unsigned nbufs;
+        Chunk chunks[0];
+      };
+     public:
+      typedef std::size_t size_type;
+      typedef std::ptrdiff_t difference_type;
+
+      static char * malloc(const size_type bytes);
+      static void free(char * const block);
+
+      static MemPoolContext *g_ctx;
+      static Mutex lock;
     };
 
-    // modify boost pool so that it is possible to
-    // have a thread safe 'context' when allocating/freeing
-    // the memory. It is needed to allow a different pool
-    // configurations and bookkeeping per CephContext and
-    // also to be able // to use same allocator to deal with
-    // RX and TX pool.
-    // TODO: use boost pool to allocate TX chunks too
+    /**
+     * modify boost pool so that it is possible to
+     * have a thread safe 'context' when allocating/freeing
+     * the memory. It is needed to allow a different pool
+     * configurations and bookkeeping per CephContext and
+     * also to be able to use same allocator to deal with
+     * RX and TX pool.
+     * TODO: use boost pool to allocate TX chunks too
+     */
     class mem_pool : public boost::pool<PoolAllocator> {
-    private:
-        pool_context *ctx;
-        void *slow_malloc();
-
-    public:
-        explicit mem_pool(pool_context *ctx, const size_type nrequested_size,
-            const size_type nnext_size = 32,
-            const size_type nmax_size = 0) :
-            pool(nrequested_size, nnext_size, nmax_size),
-            ctx(ctx) { }
-
-        void *malloc() {
-            if (!store().empty())
-                return (store().malloc)();
-            // need to alloc more memory...
-            // slow path code
-            return slow_malloc();
-        }
+     private:
+      MemPoolContext *ctx;
+      void *slow_malloc();
+
+     public:
+      explicit mem_pool(MemPoolContext *ctx, const size_type nrequested_size,
+                        const size_type nnext_size = 32,
+                        const size_type nmax_size = 0) :
+        pool(nrequested_size, nnext_size, nmax_size),
+        ctx(ctx) { }
+
+      void *malloc() {
+        if (!store().empty())
+          return (store().malloc)();
+        // need to alloc more memory...
+        // slow path code
+        return slow_malloc();
+      }
     };
 
     MemoryManager(CephContext *c, Device *d, ProtectionDomain *p);
@@ -341,7 +343,7 @@ class Infiniband {
     Cluster* send;// SEND
     Device *device;
     ProtectionDomain *pd;
-    pool_context rxbuf_pool_ctx;
+    MemPoolContext rxbuf_pool_ctx;
     mem_pool rxbuf_pool;
 
     void* huge_pages_malloc(size_t size);
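
Background on the allocator pattern in this patch: boost::pool takes its UserAllocator as a template parameter and only ever calls static malloc()/free() on it, so the allocator type itself cannot carry per-pool state. The patch therefore routes the per-pool MemPoolContext through a lock-protected static (PoolAllocator::g_ctx) that mem_pool::slow_malloc() publishes for the duration of a pool expansion. Below is a minimal, self-contained sketch of that same pattern, not code from the Ceph tree: AllocCtx, CtxAllocator and rx_pool_t are illustrative names, std::mutex stands in for Ceph's Mutex, and the real PoolAllocator::malloc() additionally tracks an ibv_mr and a Chunk array per allocation (see struct mem_info above), which the sketch omits.

#include <boost/pool/pool.hpp>
#include <cstdlib>
#include <iostream>
#include <mutex>

// Per-pool bookkeeping; the analogue of MemPoolContext (stats only here).
struct AllocCtx {
  unsigned n_expansions = 0;   // how many times the pool asked the system for memory
};

// boost::pool UserAllocator: must be stateless, so per-pool state arrives
// via the lock-protected global g_ctx (the PoolAllocator::g_ctx trick).
struct CtxAllocator {
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  static AllocCtx *g_ctx;      // valid only while lock is held
  static std::mutex lock;

  static char *malloc(const size_type bytes) {
    if (g_ctx)                 // here the real code would also check can_alloc()
      g_ctx->n_expansions++;   // ...and call update_stats()
    return static_cast<char *>(std::malloc(bytes));
  }
  static void free(char *const block) {
    std::free(block);
  }
};

AllocCtx *CtxAllocator::g_ctx = nullptr;
std::mutex CtxAllocator::lock;

// The analogue of mem_pool: the fast path hits the free list, the slow path
// publishes the context and lets the base pool grow.
class rx_pool_t : public boost::pool<CtxAllocator> {
  AllocCtx *ctx;

  void *slow_malloc() {
    std::lock_guard<std::mutex> l(CtxAllocator::lock);
    CtxAllocator::g_ctx = ctx;
    // this may call CtxAllocator::malloc() to grow the pool
    void *p = boost::pool<CtxAllocator>::malloc();
    CtxAllocator::g_ctx = nullptr;
    return p;
  }

 public:
  rx_pool_t(AllocCtx *c, size_type chunk_size)
    : boost::pool<CtxAllocator>(chunk_size), ctx(c) {}

  void *malloc() {
    if (!store().empty())
      return (store().malloc)();   // free chunk available, no lock needed
    return slow_malloc();          // pool must grow
  }
};

int main() {
  AllocCtx ctx;
  rx_pool_t rx_pool(&ctx, 1024);

  void *chunk = rx_pool.malloc(); // first call takes the slow path and grows the pool
  std::cout << "pool expansions: " << ctx.n_expansions << std::endl;
  rx_pool.free(chunk);
  return 0;
}

The design point carried over from the patch is that only the slow path takes the lock; once chunks are on the free list, malloc() never touches g_ctx, so the common case stays cheap while each pool still gets its own bookkeeping, limit check and PerfCounters hook through its context object.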