BUG_ON(le32_to_cpu(req->r_request->hdr.type) !=
CEPH_MSG_CLIENT_REQUEST);
- radix_tree_preload(GFP_NOFS);
+ err = radix_tree_preload(GFP_NOFS);
+ if (err < 0) {
+ derr(10, "ENOMEM in ceph_mdsc_do_request\n");
+ return err;
+ }
spin_lock(&mdsc->lock);
__register_request(mdsc, req);
+ spin_unlock(&mdsc->lock);
+ radix_tree_preload_end();
+ spin_lock(&mdsc->lock);
retry:
mds = choose_mds(mdsc, req);
if (mds < 0) {
/* send and wait */
spin_unlock(&mdsc->lock);
+
dout(10, "do_request %p r_expects_cap=%d\n", req, req->r_expects_cap);
req->r_request = ceph_msg_maybe_dup(req->r_request);
ceph_msg_get(req->r_request);
__u32 peer_cseq = le32_to_cpu(con->in_connect_seq);
/* do we have an existing connection for this peer? */
- radix_tree_preload(GFP_NOFS);
+ if (radix_tree_preload(GFP_NOFS) < 0) {
+ derr(10, "ENOMEM in process_accept\n");
+ return;
+ }
spin_lock(&msgr->con_lock);
existing = __get_connection(msgr, &con->peer_addr);
if (existing) {
prepare_write_accept_reply(con, &tag_ready);
}
spin_unlock(&msgr->con_lock);
+ radix_tree_preload_end();
ceph_queue_write(con);
put_connection(con);
spin_lock_init(&msgr->con_lock);
INIT_LIST_HEAD(&msgr->con_all);
INIT_LIST_HEAD(&msgr->con_accepting);
- INIT_RADIX_TREE(&msgr->con_tree, GFP_KERNEL);
+ INIT_RADIX_TREE(&msgr->con_tree, GFP_ATOMIC);
msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!msgr->zero_page) {
msg->hdr.src = msgr->inst;
/* do we have the connection? */
- radix_tree_preload(GFP_NOFS);
spin_lock(&msgr->con_lock);
con = __get_connection(msgr, &msg->hdr.dst.addr);
if (!con) {
newcon = new_connection(msgr);
if (IS_ERR(newcon))
			return PTR_ERR(newcon);
+
+ ret = radix_tree_preload(GFP_NOFS);
+ if (ret < 0) {
+ derr(10, "ENOMEM in ceph_msg_send\n");
+ return ret;
+ }
+
spin_lock(&msgr->con_lock);
con = __get_connection(msgr, &msg->hdr.dst.addr);
if (con) {
"%u.%u.%u.%u:%u\n", con,
IPQUADPORT(msg->hdr.dst.addr.ipaddr));
}
+ spin_unlock(&msgr->con_lock);
+ radix_tree_preload_end();
} else {
dout(10, "ceph_msg_send had connection %p to peer "
"%u.%u.%u.%u:%u\n", con,
IPQUADPORT(msg->hdr.dst.addr.ipaddr));
+ spin_unlock(&msgr->con_lock);
}
- spin_unlock(&msgr->con_lock);
+
con->delay = timeout;
dout(10, "ceph_msg_send delay = %lu\n", con->delay);
init_completion(&req.completion);
/* register request */
+ err = radix_tree_preload(GFP_NOFS);
+ if (err < 0) {
+ derr(10, "ENOMEM in do_statfs\n");
+ return err;
+ }
+
spin_lock(&monc->lock);
req.tid = ++monc->last_tid;
req.last_attempt = jiffies;
radix_tree_insert(&monc->statfs_request_tree, req.tid, &req);
spin_unlock(&monc->lock);
+ radix_tree_preload_end();
/* send request */
err = send_statfs(monc, req.tid);
return -ENOMEM;
spin_lock_init(&monc->lock);
mutex_init(&monc->req_mutex);
- INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_NOFS);
+ INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_ATOMIC);
INIT_DELAYED_WORK(&monc->mds_delayed_work, do_request_mdsmap);
INIT_DELAYED_WORK(&monc->osd_delayed_work, do_request_osdmap);
INIT_DELAYED_WORK(&monc->umount_delayed_work, do_request_umount);
struct ceph_osd_request_head *head = req->r_request->front.iov_base;
int rc;
- radix_tree_preload(GFP_NOFS);
+ rc = radix_tree_preload(GFP_NOFS);
+ if (rc < 0) {
+ derr(10, "ENOMEM in register_request\n");
+ return rc;
+ }
spin_lock(&osdc->request_lock);
req->r_tid = head->tid = ++osdc->last_tid;
osdc->nr_requests++;
spin_unlock(&osdc->request_lock);
+ radix_tree_preload_end();
+
return rc;
}
spin_lock_init(&osdc->request_lock);
osdc->last_tid = 0;
osdc->nr_requests = 0;
- INIT_RADIX_TREE(&osdc->request_tree, GFP_NOFS);
+ INIT_RADIX_TREE(&osdc->request_tree, GFP_ATOMIC);
INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
}