class buffer::xio_mempool : public buffer::raw {
public:
- struct xio_mempool_obj *mp;
- xio_mempool(struct xio_mempool_obj *_mp, unsigned l) :
+ struct xio_reg_mem *mp;
+ xio_mempool(struct xio_reg_mem *_mp, unsigned l) :
raw((char*)mp->addr, l), mp(_mp)
{ }
~xio_mempool() {}
}
};
- struct xio_mempool_obj* get_xio_mp(const buffer::ptr& bp)
+ struct xio_reg_mem* get_xio_mp(const buffer::ptr& bp)
{
buffer::xio_mempool *mb = dynamic_cast<buffer::xio_mempool*>(bp.get_raw());
if (mb) {
#endif
#if defined(HAVE_XIO)
-struct xio_mempool_obj;
+struct xio_reg_mem;
class XioDispatchHook;
#endif
};
#if defined(HAVE_XIO)
-xio_mempool_obj* get_xio_mp(const buffer::ptr& bp);
+xio_reg_mem* get_xio_mp(const buffer::ptr& bp);
#endif
typedef buffer::ptr bufferptr;
#include "libxio.h"
}
#else
-struct xio_mempool_obj {};
+struct xio_reg_mem {};
#endif /* HAVE_XIO */
-typedef void (*mdata_hook_func)(struct xio_mempool_obj *mp);
+typedef void (*mdata_hook_func)(struct xio_reg_mem *mp);
class MDataPing : public Message {
std::string tag;
uint32_t counter;
mdata_hook_func mdata_hook;
- struct xio_mempool_obj mp;
+ struct xio_reg_mem mp;
bool free_data;
MDataPing()
free_data(false)
{}
- struct xio_mempool_obj *get_mp()
+ struct xio_reg_mem *get_mp()
{
      return &mp;
}
static inline XioDispatchHook* pool_alloc_xio_dispatch_hook(
XioConnection *xcon, Message *m, XioInSeq& msg_seq)
{
- struct xio_mempool_obj mp_mem;
+ struct xio_reg_mem mp_mem;
int e = xpool_alloc(xio_msgr_noreg_mpool,
sizeof(XioDispatchHook), &mp_mem);
if (!!e)
/* and set threshold for buffer callouts */
xopt = 16384;
- xio_set_opt(NULL, XIO_OPTLEVEL_ACCELIO, XIO_OPTNAME_MAX_INLINE_DATA,
+ xio_set_opt(NULL, XIO_OPTLEVEL_ACCELIO, XIO_OPTNAME_MAX_INLINE_XIO_DATA,
&xopt, sizeof(xopt));
xopt = 216;
- xio_set_opt(NULL, XIO_OPTLEVEL_ACCELIO, XIO_OPTNAME_MAX_INLINE_HEADER,
+ xio_set_opt(NULL, XIO_OPTLEVEL_ACCELIO, XIO_OPTNAME_MAX_INLINE_XIO_HEADER,
&xopt, sizeof(xopt));
struct xio_mempool_config mempool_config = {
xio_mempool_create(-1 /* nodeid */,
XIO_MEMPOOL_FLAG_REGULAR_PAGES_ALLOC);
- (void) xio_mempool_add_allocator(xio_msgr_noreg_mpool, 64,
+ (void) xio_mempool_add_slab(xio_msgr_noreg_mpool, 64,
cct->_conf->xio_mp_min,
cct->_conf->xio_mp_max_64,
- XMSG_MEMPOOL_QUANTUM);
- (void) xio_mempool_add_allocator(xio_msgr_noreg_mpool, 256,
+ XMSG_MEMPOOL_QUANTUM, 0);
+ (void) xio_mempool_add_slab(xio_msgr_noreg_mpool, 256,
cct->_conf->xio_mp_min,
cct->_conf->xio_mp_max_256,
- XMSG_MEMPOOL_QUANTUM);
- (void) xio_mempool_add_allocator(xio_msgr_noreg_mpool, 1024,
+ XMSG_MEMPOOL_QUANTUM, 0);
+ (void) xio_mempool_add_slab(xio_msgr_noreg_mpool, 1024,
cct->_conf->xio_mp_min,
cct->_conf->xio_mp_max_1k,
- XMSG_MEMPOOL_QUANTUM);
- (void) xio_mempool_add_allocator(xio_msgr_noreg_mpool, getpagesize(),
+ XMSG_MEMPOOL_QUANTUM, 0);
+ (void) xio_mempool_add_slab(xio_msgr_noreg_mpool, getpagesize(),
cct->_conf->xio_mp_min,
cct->_conf->xio_mp_max_page,
- XMSG_MEMPOOL_QUANTUM);
+ XMSG_MEMPOOL_QUANTUM, 0);
/* initialize ops singleton */
xio_msgr_ops.on_session_event = on_session_event;
return 0;
/* if dsize is already present, returns -EEXIST */
- return xio_mempool_add_allocator(xio_msgr_noreg_mpool, dsize, 0,
+ return xio_mempool_add_slab(xio_msgr_noreg_mpool, dsize, 0,
cct->_conf->xio_mp_max_hint,
- XMSG_MEMPOOL_QUANTUM);
+ XMSG_MEMPOOL_QUANTUM, 0);
}
void XioMessenger::learned_addr(const entity_addr_t &peer_addr_for_me)
//break;
default:
{
- struct xio_mempool_obj *mp = get_xio_mp(*pb);
+ struct xio_reg_mem *mp = get_xio_mp(*pb);
iov->mr = (mp) ? mp->mr : NULL;
}
break;
static inline XioMsg* pool_alloc_xio_msg(Message *m, XioConnection *xcon,
int ex_cnt)
{
- struct xio_mempool_obj mp_mem;
+ struct xio_reg_mem mp_mem;
int e = xpool_alloc(xio_msgr_noreg_mpool, sizeof(XioMsg), &mp_mem);
if (!!e)
return NULL;
static inline XioMarkDownHook* pool_alloc_markdown_hook(
XioConnection *xcon, Message *m)
{
- struct xio_mempool_obj mp_mem;
+ struct xio_reg_mem mp_mem;
int e = xio_mempool_alloc(xio_msgr_noreg_mpool,
sizeof(XioMarkDownHook), &mp_mem);
if (!!e)
XioMsgHdr hdr;
xio_msg_ex req_0;
xio_msg_ex* req_arr;
- struct xio_mempool_obj mp_this;
+ struct xio_reg_mem mp_this;
atomic_t nrefs;
public:
- XioMsg(Message *_m, XioConnection *_xcon, struct xio_mempool_obj& _mp,
+ XioMsg(Message *_m, XioConnection *_xcon, struct xio_reg_mem& _mp,
int _ex_cnt) :
XioSubmit(XioSubmit::OUTGOING_MSG, _xcon),
m(_m), hdr(m->get_header(), m->get_footer()),
void put(int n) {
int refs = nrefs.sub(n);
if (refs == 0) {
- struct xio_mempool_obj *mp = &this->mp_this;
+ struct xio_reg_mem *mp = &this->mp_this;
this->~XioMsg();
xpool_free(sizeof(XioMsg), mp);
}
friend class XioConnection;
friend class XioMessenger;
public:
- struct xio_mempool_obj mp_this;
+ struct xio_reg_mem mp_this;
XioDispatchHook(XioConnection *_xcon, Message *_m, XioInSeq& _msg_seq,
- struct xio_mempool_obj& _mp) :
+ struct xio_reg_mem& _mp) :
CompletionHook(_m),
xcon(_xcon->get()),
msg_seq(_msg_seq),
*/
if (!cl_flag && release_msgs())
return;
- struct xio_mempool_obj *mp = &this->mp_this;
+ struct xio_reg_mem *mp = &this->mp_this;
this->~XioDispatchHook();
xpool_free(sizeof(XioDispatchHook), mp);
}
XioConnection* xcon;
public:
- struct xio_mempool_obj mp_this;
+ struct xio_reg_mem mp_this;
XioMarkDownHook(
- XioConnection* _xcon, Message *_m, struct xio_mempool_obj& _mp) :
+ XioConnection* _xcon, Message *_m, struct xio_reg_mem& _mp) :
CompletionHook(_m), xcon(_xcon->get()), mp_this(_mp)
{ }
virtual void finish(int r) {
xcon->put();
- struct xio_mempool_obj *mp = &this->mp_this;
+ struct xio_reg_mem *mp = &this->mp_this;
this->~XioMarkDownHook();
xio_mempool_free(mp);
}
static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
- struct xio_mempool_obj* mp);
-static inline void xpool_free(uint64_t size, struct xio_mempool_obj* mp);
+ struct xio_reg_mem* mp);
+static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp);
using ceph::atomic_t;
static const int MB = 8;
struct xio_piece {
- struct xio_mempool_obj mp[1];
+ struct xio_reg_mem mp[1];
struct xio_piece *next;
int s;
char payload[MB];
void *alloc(size_t _s)
{
void *r;
- struct xio_mempool_obj mp[1];
+ struct xio_reg_mem mp[1];
struct xio_piece *x;
int e = xpool_alloc(handle, (sizeof(struct xio_piece)-MB) + _s, mp);
if (e) {
extern XioPoolStats xp_stats;
static inline int xpool_alloc(struct xio_mempool *pool, uint64_t size,
- struct xio_mempool_obj* mp)
+ struct xio_reg_mem* mp)
{
// try to allocate from the xio pool
int r = xio_mempool_alloc(pool, size, mp);
return 0;
}
-static inline void xpool_free(uint64_t size, struct xio_mempool_obj* mp)
+static inline void xpool_free(uint64_t size, struct xio_reg_mem* mp)
{
if (mp->length) {
if (unlikely(XioPool::trace_mempool))
#if defined(HAVE_XIO)
extern struct xio_mempool *xio_msgr_mpool;
-void xio_hook_func(struct xio_mempool_obj *mp)
+void xio_hook_func(struct xio_reg_mem *mp)
{
xio_mempool_free(mp);
}
bufferlist bl;
void *p;
- struct xio_mempool_obj *mp = m->get_mp();
+ struct xio_reg_mem *mp = m->get_mp();
int e = xio_mempool_alloc(xio_msgr_mpool, size, mp);
assert(e == 0);
p = mp->addr;