int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
struct ceph_mds_session *session)
{
- struct ceph_mds_reply_info *rinfo = &req->r_reply_info;
+ struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
int err = 0, mask;
struct qstr dname;
struct dentry *dn = sb->s_root;
/* do we have a dn lease? */
have_lease = have_icontent ||
- (rinfo->trace_dlease[d]->mask & CEPH_LOCK_DN);
+ (le16_to_cpu(rinfo->trace_dlease[d]->mask) & CEPH_LOCK_DN);
if (!have_lease)
dout(10, "fill_trace no icontent|dentry lease\n");
int ceph_readdir_prepopulate(struct ceph_mds_request *req)
{
struct dentry *parent = req->r_last_dentry;
- struct ceph_mds_reply_info *rinfo = &req->r_reply_info;
+ struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct qstr dname;
struct dentry *dn;
struct inode *in;
struct ceph_vino vino;
dname.name = rinfo->dir_dname[i];
- dname.len = le32_to_cpu(rinfo->dir_dname_len[i]);
+ dname.len = rinfo->dir_dname_len[i];
dname.hash = full_name_hash(dname.name, dname.len);
vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
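The asymmetry in this hunk is intentional: values the reply parser has already copied out (dir_dname_len) are host-order, while dir_in[i].in still points at the raw little-endian inode record inside the message buffer, so ino keeps its le64_to_cpu. A rough sketch of that convention, with illustrative member names only:

/* sketch only -- member names illustrative, not the real struct layout */
struct readdir_reply_sketch {
	/* decoded by the reply parser: already host byte order */
	u32 *dir_dname_len;
	char **dir_dname;
	/* pointers into the raw reply buffer: still little-endian */
	struct ceph_mds_reply_inode **dir_in_wire;
};
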
reqh->args.utime.mask = 0;
if (ia_valid & ATTR_ATIME)
- reqh->args.utime.mask |= CEPH_UTIME_ATIME;
+ reqh->args.utime.mask |= cpu_to_le32(CEPH_UTIME_ATIME);
if (ia_valid & ATTR_MTIME)
- reqh->args.utime.mask |= CEPH_UTIME_MTIME;
+ reqh->args.utime.mask |= cpu_to_le32(CEPH_UTIME_MTIME);
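
OR-ing converted constants into the __le32 mask field works because byte swapping distributes over bitwise OR. An equivalent, arguably tidier formulation (a sketch of an alternative, not what this patch does) accumulates in host order and converts once:

u32 mask = 0;

if (ia_valid & ATTR_ATIME)
	mask |= CEPH_UTIME_ATIME;
if (ia_valid & ATTR_MTIME)
	mask |= CEPH_UTIME_MTIME;
reqh->args.utime.mask = cpu_to_le32(mask);
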
ceph_mdsc_lease_release(mdsc, inode, NULL, CEPH_LOCK_ICONTENT);
err = ceph_mdsc_do_request(mdsc, req);
{
unsigned long key;
key = *(__u32 *)&addr->ipaddr.sin_addr.s_addr;
- key ^= addr->ipaddr.sin_port;
+ key ^= *(__u16 *)&addr->ipaddr.sin_port;
return key;
}
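No byte-order conversion is needed in this hash: the key only has to be consistent on the local host, so the raw network-order bytes of the address and port are fine, and the pointer casts just keep sparse quiet about the __be32/__be16 types. An equivalent sketch using __force casts instead (function name and parameter type are assumptions, not from this patch):

static unsigned long hash_addr_sketch(const struct ceph_entity_addr *addr)
{
	unsigned long key;

	/* __force casts: same bytes, no sparse warning, no ntohl()/ntohs() */
	key = (__force __u32)addr->ipaddr.sin_addr.s_addr;
	key ^= (__force __u16)addr->ipaddr.sin_port;
	return key;
}
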
void *base = kaddr + con->out_msg_pos.page_pos;
con->out_msg->footer.data_crc =
- crc32c_le(con->out_msg->footer.data_crc,
- base, len);
+ cpu_to_le32(crc32c_le(le32_to_cpu(con->out_msg->footer.data_crc),
+ base, len));
con->out_msg_pos.did_page_crc = 1;
}
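Because footer.data_crc is stored little-endian, every per-page update has to unwrap it to CPU order, extend the CRC, and wrap it again. A hypothetical helper that packages this convert-update-convert step (not in the patch):

/* hypothetical helper, mirroring the call above */
static inline __le32 ceph_crc32c_update_le(__le32 cur, void *data, size_t len)
{
	return cpu_to_le32(crc32c_le(le32_to_cpu(cur), data, len));
}

/* usage (sketch):
 *	con->out_msg->footer.data_crc =
 *		ceph_crc32c_update_le(con->out_msg->footer.data_crc, base, len);
 */
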
/* queue up footer, too */
if (!crc)
- con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
+ con->out_msg->footer.flags |= cpu_to_le32(CEPH_MSG_FOOTER_NOCRC);
con->out_kvec[0].iov_base = &con->out_msg->footer;
con->out_kvec_bytes = con->out_kvec[0].iov_len =
sizeof(con->out_msg->footer);
/* encode header */
dout(20, "prepare_write_message %p seq %lld type %d len %d+%d %d pgs\n",
- m, le64_to_cpu(m->hdr.seq), le32_to_cpu(m->hdr.type),
+ m, le64_to_cpu(m->hdr.seq), le16_to_cpu(m->hdr.type),
le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.data_len),
m->nr_pages);
BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
con->out_msg_pos.did_page_crc = 0;
/* fill in crc (except data pages), footer */
- con->out_msg->hdr.crc = crc32c_le(0, (void *)&m->hdr,
- sizeof(m->hdr) - sizeof(m->hdr.crc));
+ con->out_msg->hdr.crc = cpu_to_le32(crc32c_le(0, (void *)&m->hdr,
+ sizeof(m->hdr) - sizeof(m->hdr.crc)));
con->out_msg->footer.flags = 0;
- con->out_msg->footer.front_crc = crc32c_le(0, m->front.iov_base,
- m->front.iov_len);
+ con->out_msg->footer.front_crc = cpu_to_le32(crc32c_le(0, m->front.iov_base,
+ m->front.iov_len));
con->out_msg->footer.data_crc = 0;
set_bit(WRITE_PENDING, &con->state);
}
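hdr.crc is computed over the header bytes exactly as they appear on the wire (all __le fields, minus the trailing crc member itself), so sender and receiver arrive at the same value regardless of host endianness, and the stored crc itself stays little-endian. The receive-side check in read_message_partial below is the mirror image; as a sketch, it could be factored into a helper like this (hypothetical, assuming crc is the last header member as the sizeof arithmetic above already does):

/* hypothetical helper: verify a received header against its wire-order crc */
static int ceph_hdr_crc_ok(struct ceph_msg_header *hdr)
{
	u32 crc = crc32c_le(0, (void *)hdr, sizeof(*hdr) - sizeof(hdr->crc));

	return crc == le32_to_cpu(hdr->crc);
}
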
static void prepare_write_accept_retry(struct ceph_connection *con, char *ptag,
- u32 *pseq)
+ __le32 *pseq)
{
con->out_kvec[0].iov_base = ptag;
con->out_kvec[0].iov_len = 1;
if (con->in_base_pos == sizeof(m->hdr)) {
u32 crc = crc32c_le(0, (void *)&m->hdr,
sizeof(m->hdr) - sizeof(m->hdr.crc));
- if (crc != m->hdr.crc) {
+ if (crc != le32_to_cpu(m->hdr.crc)) {
derr(0, "read_message_partial %p bad hdr crc"
" %u != expected %u\n",
- m, crc, m->hdr.crc);
+ m, crc, le32_to_cpu(m->hdr.crc));
dout(20, "read_message_partial got msg %p\n", m);
/* crc ok? */
- if (con->in_front_crc != m->footer.front_crc) {
+ if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
derr(0, "read_message_partial %p front crc %u != expected %u\n",
con->in_msg,
- con->in_front_crc, m->footer.front_crc);
+ con->in_front_crc, le32_to_cpu(m->footer.front_crc));
return -EIO;
}
- if (con->in_data_crc != m->footer.data_crc) {
+ if (con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
derr(0, "read_message_partial %p data crc %u != expected %u\n",
con->in_msg,
- con->in_data_crc, m->footer.data_crc);
+ con->in_data_crc, le32_to_cpu(m->footer.data_crc));
spin_unlock(&con->out_queue_lock);
dout(1, "===== %p %u from %s%d %d=%s len %d+%d (%u %u) =====\n",
- con->in_msg, le32_to_cpu(con->in_msg->hdr.seq),
+ con->in_msg, le64_to_cpu(con->in_msg->hdr.seq),
ENTITY_NAME(con->in_msg->hdr.src.name),
- le32_to_cpu(con->in_msg->hdr.type),
- ceph_msg_type_name(le32_to_cpu(con->in_msg->hdr.type)),
+ le16_to_cpu(con->in_msg->hdr.type),
+ ceph_msg_type_name(le16_to_cpu(con->in_msg->hdr.type)),
le32_to_cpu(con->in_msg->hdr.front_len),
le32_to_cpu(con->in_msg->hdr.data_len),
con->in_front_crc, con->in_data_crc);
if (seq > ack)
break;
dout(5, "got ack for seq %llu type %d at %p\n", seq,
- le32_to_cpu(m->hdr.type), m);
+ le16_to_cpu(m->hdr.type), m);
list_del_init(&m->list_head);
ceph_msg_put(m);
}
if (atomic_read(&old->nref) == 1)
return old; /* we have only ref, all is well */
- dup = ceph_msg_new(le32_to_cpu(old->hdr.type),
+ dup = ceph_msg_new(le16_to_cpu(old->hdr.type),
le32_to_cpu(old->hdr.front_len),
le32_to_cpu(old->hdr.data_len),
le32_to_cpu(old->hdr.data_off),
/* revoke old message's pages */
mutex_lock(&old->page_mutex);
old->pages = NULL;
- old->footer.flags |= CEPH_MSG_FOOTER_ABORTED;
+ old->footer.flags |= cpu_to_le32(CEPH_MSG_FOOTER_ABORTED);
mutex_unlock(&old->page_mutex);
ceph_msg_put(old);
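The revoke path sets CEPH_MSG_FOOTER_ABORTED in wire order, so any consumer of the footer must test it the same way, either by converting the field or by converting the constant once. A sketch of the latter (helper name hypothetical):

/* hypothetical consumer-side check; cpu_to_le32() on a constant folds at
 * compile time, so this costs nothing on either endianness */
static inline int ceph_msg_aborted(const struct ceph_msg *msg)
{
	return (msg->footer.flags & cpu_to_le32(CEPH_MSG_FOOTER_ABORTED)) != 0;
}
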
/* queue */
spin_lock(&con->out_queue_lock);
- if (unlikely(msg->hdr.type == CEPH_MSG_PING &&
+ if (unlikely(le16_to_cpu(msg->hdr.type) == CEPH_MSG_PING &&
!list_empty(&con->out_queue) &&
- list_entry(con->out_queue.prev, struct ceph_msg,
- list_head)->hdr.type == CEPH_MSG_PING)) {
+ le16_to_cpu(list_entry(con->out_queue.prev, struct ceph_msg,
+ list_head)->hdr.type) == CEPH_MSG_PING)) {
/* don't queue multiple pings in a row */
dout(2, "ceph_msg_send dropping dup ping\n");
ceph_msg_put(msg);
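
An equivalent way to write the dup-ping test converts the constant once instead of converting the field at each comparison; both forms are sparse-clean (sketch only, not what the patch does):

if (unlikely(msg->hdr.type == cpu_to_le16(CEPH_MSG_PING) &&
	     !list_empty(&con->out_queue) &&
	     list_entry(con->out_queue.prev, struct ceph_msg,
			list_head)->hdr.type == cpu_to_le16(CEPH_MSG_PING))) {
	/* drop the duplicate ping, as above */
	dout(2, "ceph_msg_send dropping dup ping\n");
	ceph_msg_put(msg);
}
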
msg->hdr.seq = cpu_to_le64(++con->out_seq);
dout(1, "----- %p %u to %s%d %d=%s len %d+%d -----\n", msg,
(unsigned)con->out_seq,
- ENTITY_NAME(msg->hdr.dst.name), le32_to_cpu(msg->hdr.type),
- ceph_msg_type_name(le32_to_cpu(msg->hdr.type)),
+ ENTITY_NAME(msg->hdr.dst.name), le16_to_cpu(msg->hdr.type),
+ ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
le32_to_cpu(msg->hdr.front_len),
le32_to_cpu(msg->hdr.data_len));
dout(2, "ceph_msg_send %p seq %llu for %s%d on %p pgs %d\n",
mutex_init(&m->page_mutex);
INIT_LIST_HEAD(&m->list_head);
- m->hdr.type = cpu_to_le32(type);
+ m->hdr.type = cpu_to_le16(type);
m->hdr.front_len = cpu_to_le32(front_len);
m->hdr.data_len = cpu_to_le32(page_len);
m->hdr.data_off = cpu_to_le32(page_off);
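
Taken together, the conversions above pin down the widths of the header fields this patch touches. For reference, a sketch (member order and the remaining fields, such as src/dst, are omitted or assumed):

/* widths implied by the cpu_to_le*()/le*_to_cpu() calls in this patch;
 * sketch only, not the full struct ceph_msg_header definition */
struct ceph_msg_header_fields_sketch {
	__le64 seq;		/* cpu_to_le64(++con->out_seq) */
	__le16 type;		/* cpu_to_le16(type)           */
	__le32 front_len;	/* cpu_to_le32(front_len)      */
	__le32 data_len;	/* cpu_to_le32(page_len)       */
	__le32 data_off;	/* cpu_to_le32(page_off)       */
	__le32 crc;		/* cpu_to_le32(crc32c_le(...)) */
};
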
atomic_read(&m->nref)-1);
if (atomic_read(&m->nref) <= 0) {
derr(0, "bad ceph_msg_put on %p %u from %s%d %d=%s len %d+%d\n",
- m, le32_to_cpu(m->hdr.seq),
+ m, le64_to_cpu(m->hdr.seq),
ENTITY_NAME(m->hdr.src.name),
- le32_to_cpu(m->hdr.type),
- ceph_msg_type_name(le32_to_cpu(m->hdr.type)),
+ le16_to_cpu(m->hdr.type),
+ ceph_msg_type_name(le16_to_cpu(m->hdr.type)),
le32_to_cpu(m->hdr.front_len),
le32_to_cpu(m->hdr.data_len));
WARN_ON(1);