From a6108dc4b50c3a0a85cedad3fcb824d3b0122014 Mon Sep 17 00:00:00 2001 From: Haomai Wang Date: Mon, 27 Feb 2017 16:33:39 +0800 Subject: [PATCH] msg/async/rdma: accelerate tx/rx buffer ownership lookup Signed-off-by: Haomai Wang --- src/msg/async/rdma/Infiniband.cc | 11 ++++++----- src/msg/async/rdma/Infiniband.h | 14 ++++++++------ 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/msg/async/rdma/Infiniband.cc b/src/msg/async/rdma/Infiniband.cc index eeb6ffe2f11eb..0160c04cf31a5 100644 --- a/src/msg/async/rdma/Infiniband.cc +++ b/src/msg/async/rdma/Infiniband.cc @@ -574,18 +574,20 @@ Infiniband::MemoryManager::Cluster::~Cluster() delete base; } -int Infiniband::MemoryManager::Cluster::add(uint32_t num) +int Infiniband::MemoryManager::Cluster::fill(uint32_t num) { + assert(!base); uint32_t bytes = chunk_size * num; - //cihar* base = (char*)malloc(bytes); if (manager.enabled_huge_page) { base = (char*)manager.malloc_huge_pages(bytes); } else { base = (char*)memalign(CEPH_PAGE_SIZE, bytes); } + end = base + bytes; assert(base); chunk_base = (char*)::malloc(sizeof(Chunk) * num); memset(chunk_base, 0, sizeof(Chunk) * num); + free_chunks.reserve(num); char *ptr = chunk_base; for (uint32_t offset = 0; offset < bytes; offset += chunk_size){ Chunk *chunk = reinterpret_cast<Chunk*>(ptr); @@ -593,7 +595,6 @@ int Infiniband::MemoryManager::Cluster::add(uint32_t num) assert(m); new(chunk) Chunk(m, chunk_size, base+offset); free_chunks.push_back(chunk); - all_buffers.insert(chunk->buffer); ptr += sizeof(Chunk); } return 0; @@ -680,10 +681,10 @@ void Infiniband::MemoryManager::register_rx_tx(uint32_t size, uint32_t rx_num, u assert(device); assert(pd); channel = new Cluster(*this, size); - channel->add(rx_num); + channel->fill(rx_num); send = new Cluster(*this, size); - send->add(tx_num); + send->fill(tx_num); } void Infiniband::MemoryManager::return_tx(std::vector<Chunk*> &chunks) diff --git a/src/msg/async/rdma/Infiniband.h b/src/msg/async/rdma/Infiniband.h index 
d09bf80ce377c..3e3c72d462d5b 100644 --- a/src/msg/async/rdma/Infiniband.h +++ b/src/msg/async/rdma/Infiniband.h @@ -166,7 +166,7 @@ class Infiniband { Cluster(MemoryManager& m, uint32_t s); ~Cluster(); - int add(uint32_t num); + int fill(uint32_t num); void take_back(std::vector<Chunk*> &ck); int get_buffers(std::vector<Chunk*> &chunks, size_t bytes); Chunk *get_chunk_by_buffer(const char *c) { @@ -174,13 +174,16 @@ class Infiniband { Chunk *chunk = reinterpret_cast<Chunk*>(chunk_base + sizeof(Chunk) * idx); return chunk; } + bool is_my_buffer(const char *c) const { + return c >= base && c < end; + } MemoryManager& manager; uint32_t chunk_size; Mutex lock; std::vector<Chunk*> free_chunks; - std::set<const char*> all_buffers; - char* base; + char *base = nullptr; + char *end = nullptr; char* chunk_base; }; @@ -193,9 +196,8 @@ class Infiniband { void return_tx(std::vector<Chunk*> &chunks); int get_send_buffers(std::vector<Chunk*> &c, size_t bytes); int get_channel_buffers(std::vector<Chunk*> &chunks, size_t bytes); - // TODO: optimize via address judgement - bool is_tx_buffer(const char* c) { return send->all_buffers.count(c); } - bool is_rx_buffer(const char* c) { return channel->all_buffers.count(c); } + bool is_tx_buffer(const char* c) { return send->is_my_buffer(c); } + bool is_rx_buffer(const char* c) { return channel->is_my_buffer(c); } Chunk *get_tx_chunk_by_buffer(const char *c) { return send->get_chunk_by_buffer(c); } -- 2.47.3