assert(qp_conns.empty());
assert(dead_queue_pairs.empty());
+ assert(num_dead_queue_pair == 0);
tx_cc->ack_events();
rx_cc->ack_events();
// Additionally, don't delete the qp while outstanding_buffers isn't empty,
// because we need to check the qp's state before sending
perf_logger->set(l_msgr_rdma_inflight_tx_chunks, inflight);
- if (!inflight.load()) {
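+ // check the lock-free counter first so we skip taking `lock` when there are no dead qps to reap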
+ if (num_dead_queue_pair) {
Mutex::Locker l(lock); // FIXME reuse dead qp because creating one qp costs 1 ms
while (!dead_queue_pairs.empty()) {
ldout(cct, 10) << __func__ << " finally delete qp=" << dead_queue_pairs.back() << dendl;
delete dead_queue_pairs.back();
perf_logger->dec(l_msgr_rdma_active_queue_pair);
dead_queue_pairs.pop_back();
+ --num_dead_queue_pair;
}
}
if (done)
auto it = qp_conns.find(qpn);
if (it == qp_conns.end())
return;
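+ // account for the qp that is moved into dead_queue_pairs below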
+ ++num_dead_queue_pair;
dead_queue_pairs.push_back(it->second.first);
qp_conns.erase(it);
}
Infiniband::CompletionChannel *tx_cc, *rx_cc;
EventCallbackRef async_handler;
bool done = false;
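+ // count of entries in `dead_queue_pairs`, readable without holding `lock`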
+ std::atomic<uint64_t> num_dead_queue_pair = {0};
Mutex lock; // protect `qp_conns`, `dead_queue_pairs`
// qp_num -> InfRcConnection
// The main usage of `qp_conns` is looking up connection by qp_num,