}
-Infiniband::MemoryManager::Chunk::Chunk(char* b, uint32_t len, ibv_mr* m)
- : buffer(b), bytes(len), offset(0), mr(m)
+Infiniband::MemoryManager::Chunk::Chunk(ibv_mr* m, uint32_t len, char* b)
+ : mr(m), bytes(len), offset(0), buffer(b)
{
}
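The new parameter and initializer-list order follows the reordered member declarations in the header below (mr, bytes, offset, buffer). Non-static members are always constructed in declaration order, so keeping the initializer list in that same order avoids -Wreorder warnings and keeps the written order honest about what actually runs. A minimal standalone illustration, using a made-up ChunkLike type rather than the Ceph class:

    #include <cstdint>

    // Illustration only, not the Ceph type. Members are listed in the same
    // order the constructor initializes them; the compiler constructs them
    // in declaration order regardless of how the list is written.
    struct ChunkLike {
      void     *mr;
      uint32_t  bytes;
      uint32_t  offset;
      char     *buffer;
      ChunkLike(void *m, uint32_t len, char *b)
        : mr(m), bytes(len), offset(0), buffer(b) {}
    };

    int main() {
      char storage[16];
      ChunkLike c(nullptr, sizeof(storage), storage);
      return static_cast<int>(c.offset);  // 0
    }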
ib->post_chunk(this);
}
-void Infiniband::MemoryManager::Chunk::set_owner(uint64_t o)
-{
- owner = o;
-}
-
-uint64_t Infiniband::MemoryManager::Chunk::get_owner()
-{
- return owner;
-}
-
-
Infiniband::MemoryManager::Cluster::Cluster(MemoryManager& m, uint32_t s)
: manager(m), chunk_size(s), lock("cluster_lock")
{
}
-Infiniband::MemoryManager::Cluster::Cluster(MemoryManager& m, uint32_t s, uint32_t n)
- : manager(m), chunk_size(s), lock("cluster_lock")
-{
- add(n);
-}
-
Infiniband::MemoryManager::Cluster::~Cluster()
{
- set<Chunk*>::iterator c = all_chunks.begin();
- while(c != all_chunks.end()) {
- delete *c;
- ++c;
- }
+ ::free(chunk_base);
if (manager.enabled_huge_page)
manager.free_huge_pages(base);
else
::free(base);
}
base = (char*)memalign(CEPH_PAGE_SIZE, bytes);
}
assert(base);
+ chunk_base = (char*)::malloc(sizeof(Chunk) * num);
+ memset(chunk_base, 0, sizeof(Chunk) * num);
+ char *ptr = chunk_base;
for (uint32_t offset = 0; offset < bytes; offset += chunk_size){
+ Chunk *chunk = reinterpret_cast<Chunk*>(ptr);
ibv_mr* m = ibv_reg_mr(manager.pd->pd, base+offset, chunk_size, IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE);
assert(m);
- Chunk* c = new Chunk(base+offset,chunk_size,m);
- free_chunks.push_back(c);
- all_chunks.insert(c);
+ new(chunk) Chunk(m, chunk_size, base+offset);
+ free_chunks.push_back(chunk);
+ all_chunks.insert(chunk);
+ ptr += sizeof(Chunk);
}
return 0;
}
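The allocation scheme above is a placement-new pool: one ::malloc provides storage for every Chunk header (chunk_base), and each Chunk is constructed in place over a slice of that buffer instead of being heap-allocated individually, which is what lets the destructor release them all with a single ::free(chunk_base). A self-contained sketch of the same pattern, with an illustrative DemoChunk standing in for the real class:

    #include <cstdlib>
    #include <new>
    #include <vector>

    // Illustrative stand-in for the real Chunk, not the Ceph type.
    struct DemoChunk {
      void *region;
      unsigned size;
      DemoChunk(void *r, unsigned s) : region(r), size(s) {}
    };

    int main() {
      const unsigned num = 4;
      // One allocation backs all chunk headers, mirroring chunk_base above.
      char *pool = static_cast<char*>(::malloc(sizeof(DemoChunk) * num));
      std::vector<DemoChunk*> chunks;
      char *ptr = pool;
      for (unsigned i = 0; i < num; ++i) {
        // Construct each header in place instead of a per-chunk new.
        chunks.push_back(new (ptr) DemoChunk(nullptr, 4096));
        ptr += sizeof(DemoChunk);
      }
      // DemoChunk's destructor is trivial, so releasing the pool with one
      // free() is enough; a non-trivial destructor would have to be invoked
      // explicitly for each object before the free.
      ::free(pool);
      return 0;
    }

The payoff is one allocation and contiguous per-chunk bookkeeping; the cost is that object lifetime is now managed by hand rather than by delete.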
-void Infiniband::MemoryManager::Cluster::take_back(Chunk* ck)
+void Infiniband::MemoryManager::Cluster::take_back(std::vector<Chunk*> &ck)
{
Mutex::Locker l(lock);
- free_chunks.push_back(ck);
+ for (auto c : ck) {
+ c->clear();
+ free_chunks.push_back(c);
+ }
}
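With take_back() accepting the whole batch, a caller returning several buffers takes the cluster lock once rather than once per chunk, and each chunk is reset inside that single critical section (return_tx below simply forwards its vector). A rough standalone sketch of the pattern, using stand-in FakeChunk and FakePool types rather than the Ceph ones:

    #include <mutex>
    #include <vector>

    // Minimal stand-ins, not the Ceph types.
    struct FakeChunk {
      unsigned offset = 0;
      void clear() { offset = 0; }
    };

    struct FakePool {
      std::mutex lock;
      std::vector<FakeChunk*> free_chunks;
      // Mirrors the new take_back(): one lock acquisition for the whole
      // batch, resetting each chunk before it rejoins the free list.
      void take_back(std::vector<FakeChunk*> &batch) {
        std::lock_guard<std::mutex> l(lock);
        for (auto c : batch) {
          c->clear();
          free_chunks.push_back(c);
        }
      }
    };

    int main() {
      FakePool pool;
      FakeChunk a, b;
      std::vector<FakeChunk*> done{&a, &b};
      pool.take_back(done);  // previously one lock/unlock per returned chunk
      return 0;
    }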
int Infiniband::MemoryManager::Cluster::get_buffers(std::vector<Chunk*> &chunks, size_t bytes)
if (free_chunks.empty())
return 0;
if (!bytes) {
- free_chunks.swap(chunks);
- r = chunks.size();
+ r = free_chunks.size();
+ for (auto c : free_chunks)
+ chunks.push_back(c);
+ free_chunks.clear();
return r;
}
if (free_chunks.size() < num) {
void Infiniband::MemoryManager::return_tx(std::vector<Chunk*> &chunks)
{
- for (auto c : chunks) {
- c->clear();
- send->take_back(c);
- }
+ send->take_back(chunks);
}
int Infiniband::MemoryManager::get_send_buffers(std::vector<Chunk*> &c, size_t bytes)
ibv_recv_wr *badWorkRequest;
int ret = ibv_post_srq_recv(srq, &rx_work_request, &badWorkRequest);
if (ret)
- return -1;
+ return -errno;
return 0;
}
public:
class Chunk {
public:
- Chunk(char* b, uint32_t len, ibv_mr* m);
+ Chunk(ibv_mr* m, uint32_t len, char* b);
~Chunk();
void set_offset(uint32_t o);
bool over();
void clear();
void post_srq(Infiniband *ib);
- void set_owner(uint64_t o);
- uint64_t get_owner();
public:
- char* buffer;
+ ibv_mr* mr;
uint32_t bytes;
uint32_t bound;
uint32_t offset;
- ibv_mr* mr;
- uint64_t owner;
+ char* buffer;
};
class Cluster {
public:
Cluster(MemoryManager& m, uint32_t s);
- Cluster(MemoryManager& m, uint32_t s, uint32_t n);
~Cluster();
int add(uint32_t num);
- void take_back(Chunk* ck);
+ void take_back(std::vector<Chunk*> &ck);
int get_buffers(std::vector<Chunk*> &chunks, size_t bytes);
MemoryManager& manager;
std::vector<Chunk*> free_chunks;
std::set<Chunk*> all_chunks;
char* base;
+ char* chunk_base;
};
MemoryManager(Device *d, ProtectionDomain *p, bool hugepage);