struct address_space *mapping = inode->i_mapping;
struct writeback_control *wbc = req->r_wbc;
__s32 rc = -EIO;
- __u64 bytes = 0;
+ u64 bytes = 0;
/* parse reply */
if (req->r_reply) {
*
* Caller should be holding s_mutex.
*/
-static void send_cap_msg(struct ceph_mds_client *mdsc, __u64 ino, int op,
- int caps, int wanted, __u64 seq, __u64 mseq,
- __u64 size, __u64 max_size,
+static void send_cap_msg(struct ceph_mds_client *mdsc, u64 ino, int op,
+ int caps, int wanted, u64 seq, u64 mseq,
+ u64 size, u64 max_size,
struct timespec *mtime, struct timespec *atime,
u64 time_warp_seq, u64 follows, int mds)
{
#define ceph_decode_64(p, v) \
do { \
v = le64_to_cpu(*(__le64*)*(p)); \
- *(p) += sizeof(__u64); \
+ *(p) += sizeof(u64); \
} while (0)
#define ceph_decode_32(p, v) \
do { \
v = le32_to_cpu(*(__le32*)*(p)); \
- *(p) += sizeof(__u32); \
+ *(p) += sizeof(u32); \
} while (0)
#define ceph_decode_16(p, v) \
do { \
v = le16_to_cpu(*(__le16*)*(p)); \
- *(p) += sizeof(__u16); \
+ *(p) += sizeof(u16); \
} while (0)
#define ceph_decode_8(p, v) \
do { \
- v = *(__u8*)*(p); \
+ v = *(u8*)*(p); \
(*p)++; \
} while (0)
#define ceph_decode_64_le(p, v) \
do { \
v = *(__le64*)*(p); \
- *(p) += sizeof(__u64); \
+ *(p) += sizeof(u64); \
} while (0)
#define ceph_decode_32_le(p, v) \
do { \
v = *(__le32*)*(p); \
- *(p) += sizeof(__u32); \
+ *(p) += sizeof(u32); \
} while (0)
#define ceph_decode_16_le(p, v) \
do { \
v = *(__le16*)*(p); \
- *(p) += sizeof(__u16); \
+ *(p) += sizeof(u16); \
} while (0)
#define ceph_decode_copy(p, pv, n) \
/* bounds check too */
#define ceph_decode_64_safe(p, end, v, bad) \
do { \
- ceph_decode_need(p, end, sizeof(__u64), bad); \
+ ceph_decode_need(p, end, sizeof(u64), bad); \
ceph_decode_64(p, v); \
} while (0)
#define ceph_decode_32_safe(p, end, v, bad) \
do { \
- ceph_decode_need(p, end, sizeof(__u32), bad); \
+ ceph_decode_need(p, end, sizeof(u32), bad); \
ceph_decode_32(p, v); \
} while (0)
#define ceph_decode_16_safe(p, end, v, bad) \
do { \
- ceph_decode_need(p, end, sizeof(__u16), bad); \
+ ceph_decode_need(p, end, sizeof(u16), bad); \
ceph_decode_16(p, v); \
} while (0)
#define ceph_encode_64(p, v) \
do { \
*(__le64*)*(p) = cpu_to_le64((v)); \
- *(p) += sizeof(__u64); \
+ *(p) += sizeof(u64); \
} while (0)
#define ceph_encode_32(p, v) \
do { \
*(__le32*)*(p) = cpu_to_le32((v)); \
- *(p) += sizeof(__u32); \
+ *(p) += sizeof(u32); \
} while (0)
#define ceph_encode_16(p, v) \
do { \
*(__le16*)*(p) = cpu_to_le16((v)); \
- *(p) += sizeof(__u16); \
+ *(p) += sizeof(u16); \
} while (0)
#define ceph_encode_8(p, v) \
do { \
- *(__u8*)*(p) = v; \
+ *(u8*)*(p) = v; \
(*(p))++; \
} while (0)
* filepath, string encoders
*/
static __inline__ void ceph_encode_filepath(void **p, void *end,
- __u64 ino, const char *path)
+ u64 ino, const char *path)
{
- __u32 len = path ? strlen(path):0;
+ u32 len = path ? strlen(path) : 0;
BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end);
ceph_encode_64(p, ino);
ceph_encode_32(p, len);
}
static __inline__ void ceph_encode_string(void **p, void *end,
- const char *s, __u32 len)
+ const char *s, u32 len)
{
BUG_ON(*p + sizeof(len) + len > end);
ceph_encode_32(p, len);
* encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
-char *ceph_build_path(struct dentry *dentry, int *plen, __u64 *base, int min)
+char *ceph_build_path(struct dentry *dentry, int *plen, u64 *base, int min)
{
struct dentry *temp;
char *path;
unsigned off = fpos_off(filp->f_pos);
unsigned skew;
int err;
- __u32 ftype;
+ u32 ftype;
struct ceph_mds_reply_info_parsed *rinfo;
nextfrag:
#define IPSZ (sizeof(struct ceph_inopath_item) / sizeof(u32))
-static int ceph_encode_fh(struct dentry *dentry, __u32 *rawfh, int *max_len,
+static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
int connectable)
{
int type = 1;
* specified, copy the frag delegation info to the caller if
* it is present.
*/
-__u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
+u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
struct ceph_inode_frag *pfrag,
int *found)
{
static int parse_reply_info_trace(void **p, void *end,
struct ceph_mds_reply_info_parsed *info)
{
- __u16 numi, numd, snapdirpos;
+ u16 numi, numd, snapdirpos;
int err;
- ceph_decode_need(p, end, 3*sizeof(__u16), bad);
+ ceph_decode_need(p, end, 3*sizeof(u16), bad);
ceph_decode_16(p, numi);
ceph_decode_16(p, numd);
ceph_decode_16(p, snapdirpos);
goto bad;
info->trace_dir[numd] = *p;
*p += sizeof(struct ceph_mds_reply_dirfrag) +
- sizeof(__u32)*le32_to_cpu(info->trace_dir[numd]->ndist);
+ sizeof(u32)*le32_to_cpu(info->trace_dir[numd]->ndist);
if (unlikely(*p > end))
goto bad;
goto inode;
static int parse_reply_info_dir(void **p, void *end,
struct ceph_mds_reply_info_parsed *info)
{
- __u32 num, i = 0;
+ u32 num, i = 0;
int err;
info->dir_dir = *p;
if (*p + sizeof(*info->dir_dir) > end)
goto bad;
- *p += sizeof(*info->dir_dir) + sizeof(__u32)*le32_to_cpu(info->dir_dir->ndist);
+ *p += sizeof(*info->dir_dir) + sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
if (*p > end)
goto bad;
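Both reply parsers above step past a dirfrag record with the same arithmetic: the fixed header followed by ndist 32-bit distribution entries. A hypothetical helper expressing that skip (the name skip_dirfrag is illustrative, not in the patch):

static inline void *skip_dirfrag(struct ceph_mds_reply_dirfrag *df)
{
	/* fixed header, then ndist 32-bit distribution entries */
	return (void *)df + sizeof(*df) + sizeof(u32) * le32_to_cpu(df->ndist);
}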
struct ceph_mds_reply_info_parsed *info)
{
void *p, *end;
- __u32 len;
+ u32 len;
int err;
info->head = msg->front.iov_base;
* called under mdsc->mutex.
*/
static struct ceph_mds_request *__get_request(struct ceph_mds_client *mdsc,
- __u64 tid)
+ u64 tid)
{
struct ceph_mds_request *req;
req = radix_tree_lookup(&mdsc->request_tree, tid);
/*
* session messages
*/
-static struct ceph_msg *create_session_msg(__u32 op, __u64 seq)
+static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
struct ceph_msg *msg;
struct ceph_mds_session_head *h;
static int wait_for_new_map(struct ceph_mds_client *mdsc,
unsigned long timeout)
{
- __u32 have;
+ u32 have;
int err = 0;
dout(30, "wait_for_new_map enter\n");
void ceph_mdsc_handle_session(struct ceph_mds_client *mdsc,
struct ceph_msg *msg)
{
- __u32 op;
- __u64 seq;
+ u32 op;
+ u64 seq;
struct ceph_mds_session *session = NULL;
int mds;
struct ceph_mds_session_head *h = msg->front.iov_base;
if (op == CEPH_MDS_OP_FINDINODE) {
pathlen = sizeof(u32) + ino1*sizeof(struct ceph_inopath_item);
} else {
- pathlen = 2*(sizeof(ino1) + sizeof(__u32));
+ pathlen = 2*(sizeof(ino1) + sizeof(u32));
if (path1)
pathlen += strlen(path1);
if (path2)
*
* called under mdsc->mutex.
*/
-static __u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
+static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
struct ceph_mds_request *first;
if (radix_tree_gang_lookup(&mdsc->request_tree,
struct ceph_msg *msg)
{
struct ceph_mds_request *req;
- __u64 tid;
- __u32 next_mds;
- __u32 fwd_seq;
- __u8 must_resend;
+ u64 tid;
+ u32 next_mds;
+ u32 fwd_seq;
+ u8 must_resend;
int err = -EINVAL;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
goto bad;
from_mds = le32_to_cpu(msg->hdr.src.name.num);
- ceph_decode_need(&p, end, sizeof(__u64)+2*sizeof(__u32), bad);
+ ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
ceph_decode_64(&p, tid);
ceph_decode_32(&p, next_mds);
ceph_decode_32(&p, fwd_seq);
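For orientation, a sketch of the forward-message body decoded above, sized to match the ceph_decode_need of sizeof(u64)+2*sizeof(u32); the trailing must_resend byte is an assumption based on the local variable declared earlier, since its decode falls outside this hunk.

struct ceph_mds_forward_body_sketch {	/* illustrative layout only */
	__le64 tid;
	__le32 next_mds;
	__le32 fwd_seq;
	__u8   must_resend;		/* assumed; decode not shown here */
} __attribute__((packed));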
struct ceph_dentry_info *di;
int origmask = mask;
int mds = -1;
- int len = sizeof(*lease) + sizeof(__u32);
+ int len = sizeof(*lease) + sizeof(u32);
int dnamelen = 0;
BUG_ON(inode == NULL);
*/
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
- __u32 epoch;
- __u32 maplen;
+ u32 epoch;
+ u32 maplen;
void *p = msg->front.iov_base;
void *end = p + msg->front.iov_len;
struct ceph_mdsmap *newmap, *oldmap;
else
from = -1;
- ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(__u32), bad);
+ ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
ceph_decode_64_le(&p, fsid.major);
ceph_decode_64_le(&p, fsid.minor);
if (!ceph_fsid_equal(&fsid, &mdsc->client->monc.monmap->fsid)) {
*/
struct ceph_mds_reply_info_in {
struct ceph_mds_reply_inode *in;
- __u32 symlink_len;
+ u32 symlink_len;
char *symlink;
- __u32 xattr_len;
+ u32 xattr_len;
char *xattr_data;
};
struct ceph_mds_reply_lease **trace_ilease;
struct ceph_mds_reply_dirfrag **trace_dir;
char **trace_dname;
- __u32 *trace_dname_len;
+ u32 *trace_dname_len;
struct ceph_mds_reply_lease **trace_dlease;
struct ceph_mds_reply_dirfrag *dir_dir;
int dir_nr;
struct ceph_mds_reply_lease **dir_ilease;
char **dir_dname;
- __u32 *dir_dname_len;
+ u32 *dir_dname_len;
struct ceph_mds_reply_lease **dir_dlease;
struct ceph_mds_reply_info_in *dir_in;
* an in-flight mds request
*/
struct ceph_mds_request {
- __u64 r_tid; /* transaction id */
+ u64 r_tid; /* transaction id */
struct ceph_msg *r_request; /* original request */
struct ceph_msg *r_reply;
struct ceph_mds_reply_info_parsed r_reply_info;
struct rw_semaphore snap_rwsem;
struct radix_tree_root snap_realms;
- __u64 last_tid; /* most recent mds request */
+ u64 last_tid; /* most recent mds request */
struct radix_tree_root request_tree; /* pending mds requests */
struct delayed_work delayed_work; /* delayed work */
unsigned long last_renew_caps; /* last time we renewed our caps */
{
struct ceph_mdsmap *m;
int i, n;
- __u32 mds;
+ u32 mds;
int err = -EINVAL;
m = kzalloc(sizeof(*m), GFP_NOFS);
if (m == NULL)
return ERR_PTR(-ENOMEM);
- ceph_decode_need(p, end, 10*sizeof(__u32), bad);
+ ceph_decode_need(p, end, 10*sizeof(u32), bad);
ceph_decode_32(p, m->m_epoch);
ceph_decode_32(p, m->m_client_epoch);
ceph_decode_32(p, m->m_last_failure);
*p += sizeof(struct ceph_timespec); /* ignore map timestamp */
- *p += sizeof(__u32); /* skip anchortable */
+ *p += sizeof(u32); /* skip anchortable */
ceph_decode_32(p, m->m_root);
ceph_decode_32(p, m->m_session_timeout);
ceph_decode_32(p, m->m_session_autoclose);
/* state */
ceph_decode_32(p, n);
- ceph_decode_need(p, end, n*2*sizeof(__u32), bad);
+ ceph_decode_need(p, end, n*2*sizeof(u32), bad);
for (i = 0; i < n; i++) {
ceph_decode_32(p, mds);
if (mds >= m->m_max_mds)
/* state_seq */
ceph_decode_32_safe(p, end, n, bad);
- *p += n*(sizeof(__u32)+sizeof(__u64));
+ *p += n*(sizeof(u32)+sizeof(u64));
/* mds_inst */
ceph_decode_32_safe(p, end, n, bad);
ceph_decode_need(p, end,
- n*(sizeof(__u32)+sizeof(struct ceph_entity_name)+
+ n*(sizeof(u32)+sizeof(struct ceph_entity_name)+
sizeof(struct ceph_entity_addr)),
bad);
for (i = 0; i < n; i++) {
{
unsigned long key;
- key = *(__u32 *)&addr->ipaddr.sin_addr.s_addr;
- key ^= *(__u16 *)&addr->ipaddr.sin_port;
+ key = *(u32 *)&addr->ipaddr.sin_addr.s_addr;
+ key ^= *(u16 *)&addr->ipaddr.sin_port;
return key;
}
struct ceph_entity_addr peer_addr; /* peer address */
struct ceph_entity_name peer_name; /* peer name */
- __u32 connect_seq, global_seq; /* identify the most recent connection
+ u32 connect_seq, global_seq; /* identify the most recent connection
attempt for this connection, client */
/* out queue */
spinlock_t out_queue_lock; /* protects out_queue, out_sent, out_seq */
struct list_head out_queue;
struct list_head out_sent; /* sending/sent but unacked */
- __u32 out_seq; /* last message queued for send */
+ u32 out_seq; /* last message queued for send */
- __u32 in_seq, in_seq_acked; /* last message received, acked */
+ u32 in_seq, in_seq_acked; /* last message received, acked */
/* connection negotiation temps */
char in_banner[CEPH_BANNER_MAX_LEN];
if (m == NULL)
return ERR_PTR(-ENOMEM);
- ceph_decode_need(&p, end, 2*sizeof(__u32) + 2*sizeof(__u64), bad);
+ ceph_decode_need(&p, end, 2*sizeof(u32) + 2*sizeof(u64), bad);
ceph_decode_64_le(&p, m->fsid.major);
ceph_decode_64_le(&p, m->fsid.minor);
ceph_decode_32(&p, m->epoch);
mutex_unlock(&monc->req_mutex);
}
-int ceph_monc_got_osdmap(struct ceph_mon_client *monc, __u32 got)
+int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
{
int ret = 0;
* @want, and to indicate which maps we receive. Periodically rerequest the map
* from the monitor cluster until we get what we want.
*/
-extern void ceph_monc_request_mdsmap(struct ceph_mon_client *monc, __u32 want);
-extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, __u32 have);
+extern void ceph_monc_request_mdsmap(struct ceph_mon_client *monc, u32 want);
+extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have);
-extern void ceph_monc_request_osdmap(struct ceph_mon_client *monc, __u32 want);
-extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, __u32 have);
+extern void ceph_monc_request_osdmap(struct ceph_mon_client *monc, u32 want);
+extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have);
extern void ceph_monc_request_umount(struct ceph_mon_client *monc);
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
void *p, *end, *next;
- __u32 nr_maps, maplen;
- __u32 epoch;
+ u32 nr_maps, maplen;
+ u32 epoch;
struct ceph_osdmap *newmap = NULL, *oldmap;
int err;
struct ceph_fsid fsid;
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(10, " %d inc maps\n", nr_maps);
while (nr_maps > 0) {
- ceph_decode_need(&p, end, 2*sizeof(__u32), bad);
+ ceph_decode_need(&p, end, 2*sizeof(u32), bad);
ceph_decode_32(&p, epoch);
ceph_decode_32(&p, maplen);
ceph_decode_need(&p, end, maplen, bad);
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(30, " %d full maps\n", nr_maps);
while (nr_maps) {
- ceph_decode_need(&p, end, 2*sizeof(__u32), bad);
+ ceph_decode_need(&p, end, 2*sizeof(u32), bad);
ceph_decode_32(&p, epoch);
ceph_decode_32(&p, maplen);
ceph_decode_need(&p, end, maplen, bad);
*/
int ceph_osdc_writepages_start(struct ceph_osd_client *osdc,
struct ceph_osd_request *req,
- __u64 len, int num_pages)
+ u64 len, int num_pages)
{
struct ceph_msg *reqm = req->r_request;
struct ceph_osd_request_head *reqhead = reqm->front.iov_base;
- __u64 off = le64_to_cpu(reqhead->offset);
+ u64 off = le64_to_cpu(reqhead->offset);
int rc;
dout(10, "writepages_start %llu~%llu, %d pages\n", off, len, num_pages);
/* an in-flight request */
struct ceph_osd_request {
- __u64 r_tid; /* unique for this client */
+ u64 r_tid; /* unique for this client */
struct ceph_msg *r_request;
struct ceph_msg *r_reply;
int r_result;
extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
struct ceph_vino vino,
- __u64 offset, __u64 *len, int op,
+ u64 offset, u64 *len, int op,
struct ceph_snap_context *snapc);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
struct page **pagevec, int nr_pages);
extern int ceph_osdc_writepages_start(struct ceph_osd_client *osdc,
struct ceph_osd_request *req,
- __u64 len,
+ u64 len,
int nr_pages);
extern int ceph_osdc_sync_read(struct ceph_osd_client *osdc,
{
int j;
dout(30, "crush_decode_uniform_bucket %p to %p\n", *p, end);
- b->primes = kmalloc(b->h.size * sizeof(__u32), GFP_NOFS);
+ b->primes = kmalloc(b->h.size * sizeof(u32), GFP_NOFS);
if (b->primes == NULL)
return -ENOMEM;
- ceph_decode_need(p, end, (1+b->h.size) * sizeof(__u32), bad);
+ ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++)
ceph_decode_32(p, b->primes[j]);
ceph_decode_32(p, b->item_weight);
{
int j;
dout(30, "crush_decode_list_bucket %p to %p\n", *p, end);
- b->item_weights = kmalloc(b->h.size * sizeof(__u32), GFP_NOFS);
+ b->item_weights = kmalloc(b->h.size * sizeof(u32), GFP_NOFS);
if (b->item_weights == NULL)
return -ENOMEM;
- b->sum_weights = kmalloc(b->h.size * sizeof(__u32), GFP_NOFS);
+ b->sum_weights = kmalloc(b->h.size * sizeof(u32), GFP_NOFS);
if (b->sum_weights == NULL)
return -ENOMEM;
- ceph_decode_need(p, end, 2 * b->h.size * sizeof(__u32), bad);
+ ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++) {
ceph_decode_32(p, b->item_weights[j]);
ceph_decode_32(p, b->sum_weights[j]);
{
int j;
dout(30, "crush_decode_tree_bucket %p to %p\n", *p, end);
- b->node_weights = kmalloc(b->h.size * sizeof(__u32), GFP_NOFS);
+ b->node_weights = kmalloc(b->h.size * sizeof(u32), GFP_NOFS);
if (b->node_weights == NULL)
return -ENOMEM;
- ceph_decode_need(p, end, b->h.size * sizeof(__u32), bad);
+ ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++)
ceph_decode_32(p, b->node_weights[j]);
return 0;
{
int j;
dout(30, "crush_decode_straw_bucket %p to %p\n", *p, end);
- b->straws = kmalloc(b->h.size * sizeof(__u32), GFP_NOFS);
+ b->straws = kmalloc(b->h.size * sizeof(u32), GFP_NOFS);
if (b->straws == NULL)
return -ENOMEM;
- ceph_decode_need(p, end, 2 * b->h.size * sizeof(__u32), bad);
+ ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
for (j = 0; j < b->h.size; j++) {
ceph_decode_32(p, b->item_weights[j]);
ceph_decode_32(p, b->straws[j]);
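The four bucket decoders above repeat the same allocate, bounds-check, decode-each shape. A consolidated sketch of that pattern, using a return-code convention instead of the goto-bad style used here (the helper name is hypothetical, not in the patch):

static int decode_u32_array(void **p, void *end, u32 **parr, u32 n)
{
	u32 i;
	u32 *arr = kmalloc(n * sizeof(u32), GFP_NOFS);

	if (arr == NULL)
		return -ENOMEM;
	/* same bound that ceph_decode_need() enforces before the loops above */
	if (*p + n * sizeof(u32) > end) {
		kfree(arr);
		return -EINVAL;
	}
	for (i = 0; i < n; i++)
		ceph_decode_32(p, arr[i]);
	*parr = arr;
	return 0;
}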
if (c == NULL)
return ERR_PTR(-ENOMEM);
- ceph_decode_need(p, end, 3*sizeof(__u32), bad);
+ ceph_decode_need(p, end, 3*sizeof(u32), bad);
ceph_decode_32(p, c->max_buckets);
ceph_decode_32(p, c->max_rules);
ceph_decode_32(p, c->max_devices);
- c->device_offload = kmalloc(c->max_devices * sizeof(__u32), GFP_NOFS);
+ c->device_offload = kmalloc(c->max_devices * sizeof(u32), GFP_NOFS);
if (c->device_offload == NULL)
goto badmem;
- c->device_parents = kmalloc(c->max_devices * sizeof(__u32), GFP_NOFS);
+ c->device_parents = kmalloc(c->max_devices * sizeof(u32), GFP_NOFS);
if (c->device_parents == NULL)
goto badmem;
- c->bucket_parents = kmalloc(c->max_buckets * sizeof(__u32), GFP_NOFS);
+ c->bucket_parents = kmalloc(c->max_buckets * sizeof(u32), GFP_NOFS);
if (c->bucket_parents == NULL)
goto badmem;
if (c->rules == NULL)
goto badmem;
- ceph_decode_need(p, end, c->max_devices * sizeof(__u32), bad);
+ ceph_decode_need(p, end, c->max_devices * sizeof(u32), bad);
for (i = 0; i < c->max_devices; i++)
ceph_decode_32(p, c->device_offload[i]);
/* buckets */
for (i = 0; i < c->max_buckets; i++) {
int size = 0;
- __u32 type;
+ u32 type;
struct crush_bucket *b;
dout(30, "crush_decode bucket %d off %x %p to %p\n",
if (b == NULL)
goto badmem;
- ceph_decode_need(p, end, 4*sizeof(__u32), bad);
+ ceph_decode_need(p, end, 4*sizeof(u32), bad);
ceph_decode_32(p, b->id);
ceph_decode_16(p, b->type);
ceph_decode_16(p, b->alg);
if (b->items == NULL)
goto badmem;
- ceph_decode_need(p, end, b->size*sizeof(__u32), bad);
+ ceph_decode_need(p, end, b->size*sizeof(u32), bad);
for (j = 0; j < b->size; j++)
ceph_decode_32(p, b->items[j]);
/* rules */
dout(30, "rule vec is %p\n", c->rules);
for (i = 0; i < c->max_rules; i++) {
- __u32 yes;
+ u32 yes;
struct crush_rule *r;
ceph_decode_32_safe(p, end, yes, bad);
dout(30, " rule %d is at %p\n", i, r);
r->len = yes;
ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
- ceph_decode_need(p, end, r->len*3*sizeof(__u32), bad);
+ ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
for (j = 0; j < r->len; j++) {
ceph_decode_32(p, r->steps[j].op);
ceph_decode_32(p, r->steps[j].arg1);
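The r->len*3*sizeof(u32) check above implies each rule step carries three 32-bit fields; a sketch of that per-step layout (arg2 is inferred from the stride, its decode falls outside this hunk):

struct crush_rule_step_sketch {		/* illustrative only */
	u32 op;				/* decoded first */
	u32 arg1;			/* decoded second */
	u32 arg2;			/* assumed third field completing the 3*u32 stride */
};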
*/
static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
{
- __u8 *state;
+ u8 *state;
struct ceph_entity_addr *addr;
- state = kzalloc(max * sizeof(__u32), GFP_NOFS);
+ state = kzalloc(max * sizeof(u32), GFP_NOFS);
addr = kzalloc(max * sizeof(struct ceph_entity_addr), GFP_NOFS);
if (state == NULL || addr == NULL) {
kfree(state);
struct ceph_osdmap *osdmap_decode(void **p, void *end)
{
struct ceph_osdmap *map;
- __u32 len, max;
+ u32 len, max;
int err = -EINVAL;
void *start = *p;
if (map == NULL)
return ERR_PTR(-ENOMEM);
- ceph_decode_need(p, end, 2*sizeof(__u64)+11*sizeof(__u32), bad);
+ ceph_decode_need(p, end, 2*sizeof(u64)+11*sizeof(u32), bad);
ceph_decode_64_le(p, map->fsid.major);
ceph_decode_64_le(p, map->fsid.minor);
ceph_decode_32(p, map->epoch);
/* osds */
err = -EINVAL;
- ceph_decode_need(p, end, 2*sizeof(__u32) +
+ ceph_decode_need(p, end, 2*sizeof(u32) +
map->max_osd*(1+sizeof(*map->osd_addr)), bad);
*p += 4; /* skip length field (should match max) */
ceph_decode_copy(p, map->osd_state, map->max_osd);
/* ignore pg primary swapping */
ceph_decode_32_safe(p, end, len, bad);
- p += len * (sizeof(__u64) + sizeof(__u32));
+ p += len * (sizeof(u64) + sizeof(u32));
if (len)
derr(0, "WARNING: pg primary swaps in osdmap e%d unsupported\n",
map->epoch);
struct ceph_osdmap *newmap = map;
struct crush_map *newcrush = NULL;
struct ceph_fsid fsid;
- __u32 epoch = 0;
+ u32 epoch = 0;
struct ceph_timespec ctime;
- __u32 len, x;
+ u32 len, x;
__s32 new_flags, max;
void *start = *p;
int err = -EINVAL;
- ceph_decode_need(p, end, sizeof(fsid)+sizeof(ctime)+2*sizeof(__u32),
+ ceph_decode_need(p, end, sizeof(fsid)+sizeof(ctime)+2*sizeof(u32),
bad);
ceph_decode_64_le(p, fsid.major);
ceph_decode_64_le(p, fsid.minor);
if (new_flags >= 0)
map->flags = new_flags;
- ceph_decode_need(p, end, 5*sizeof(__u32), bad);
+ ceph_decode_need(p, end, 5*sizeof(u32), bad);
/* new max? */
ceph_decode_32(p, max);
err = -EINVAL;
ceph_decode_32_safe(p, end, len, bad);
while (len--) {
- __u32 osd;
+ u32 osd;
struct ceph_entity_addr addr;
ceph_decode_32_safe(p, end, osd, bad);
ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
/* new_down */
ceph_decode_32_safe(p, end, len, bad);
while (len--) {
- __u32 osd;
+ u32 osd;
ceph_decode_32_safe(p, end, osd, bad);
(*p)++; /* clean flag */
dout(1, "osd%d down\n", osd);
/* new_offload */
ceph_decode_32_safe(p, end, len, bad);
while (len--) {
- __u32 osd, off;
- ceph_decode_need(p, end, sizeof(__u32)*2, bad);
+ u32 osd, off;
+ ceph_decode_need(p, end, sizeof(u32)*2, bad);
ceph_decode_32(p, osd);
ceph_decode_32(p, off);
dout(1, "osd%d offload 0x%x %s\n", osd, off,
/* skip old/new pg_swap stuff */
ceph_decode_32_safe(p, end, len, bad);
- *p += len * (sizeof(__u64) + sizeof(__u32));
+ *p += len * (sizeof(u64) + sizeof(u32));
if (len)
derr(0, "WARNING: pg primary swaps in osdmap e%d unsupported\n",
epoch);
ceph_decode_32_safe(p, end, len, bad);
- *p += len * sizeof(__u64);
+ *p += len * sizeof(u64);
if (len)
derr(0, "WARNING: pg primary swaps in osdmap e%d unsupported\n",
epoch);
/* skip new_max_snap, removed_snaps */
- *p += sizeof(__u64);
+ *p += sizeof(u64);
ceph_decode_32_safe(p, end, len, bad);
- *p += len * 2 * sizeof(__u64);
+ *p += len * 2 * sizeof(u64);
if (*p != end) {
derr(10, "osdmap incremental has trailing gunk?\n");
* pass a stride back to the caller.
*/
void calc_file_object_mapping(struct ceph_file_layout *layout,
- __u64 off, __u64 *plen,
+ u64 off, u64 *plen,
struct ceph_object *oid,
- __u64 *oxoff, __u64 *oxlen)
+ u64 *oxoff, u64 *oxlen)
{
u32 osize = le32_to_cpu(layout->fl_object_size);
u32 su = le32_to_cpu(layout->fl_stripe_unit);
/* *oxoff = *off / layout->fl_stripe_unit; */
t = off;
*oxoff = do_div(t, su);
- *oxlen = min_t(__u64, *plen, su - *oxoff);
+ *oxlen = min_t(u64, *plen, su - *oxoff);
*plen = *oxlen;
dout(80, " obj extent %llu~%llu\n", *oxoff, *oxlen);
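A worked example of the stripe-unit clamp above, with purely hypothetical numbers (a 64 KB fl_stripe_unit is an assumption, not taken from the patch):

/*
 * su     = 65536                      (hypothetical stripe unit)
 * off    = 70000, *plen = 100000
 * t = off; do_div(t, su)              -> t = 1, remainder = 4464
 * *oxoff = 4464
 * *oxlen = min_t(u64, 100000, 65536 - 4464) = 61072
 * *plen  = 61072                      (the caller maps the remaining
 *                                      38928 bytes in the next stripe unit)
 */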
*/
struct ceph_osdmap {
struct ceph_fsid fsid;
- __u32 epoch;
- __u32 mkfs_epoch;
+ u32 epoch;
+ u32 mkfs_epoch;
struct ceph_timespec ctime, mtime;
/* these parameters describe the number of placement groups
* in the system. foo_mask is the smallest value (2**n-1) >= foo. */
- __u32 pg_num, pg_num_mask;
- __u32 pgp_num, pgp_num_mask;
- __u32 lpg_num, lpg_num_mask;
- __u32 lpgp_num, lpgp_num_mask;
- __u32 last_pg_change; /* epoch of last pg count change */
+ u32 pg_num, pg_num_mask;
+ u32 pgp_num, pgp_num_mask;
+ u32 lpg_num, lpg_num_mask;
+ u32 lpgp_num, lpgp_num_mask;
+ u32 last_pg_change; /* epoch of last pg count change */
- __u32 flags; /* CEPH_OSDMAP_* */
+ u32 flags; /* CEPH_OSDMAP_* */
- __u32 max_osd; /* size of osd_state, _offload, _addr arrays */
- __u8 *osd_state; /* CEPH_OSD_* */
- __u32 *osd_offload; /* 0 = normal, 0x10000 = 100% offload (failed) */
+ u32 max_osd; /* size of osd_state, _offload, _addr arrays */
+ u8 *osd_state; /* CEPH_OSD_* */
+ u32 *osd_offload; /* 0 = normal, 0x10000 = 100% offload (failed) */
struct ceph_entity_addr *osd_addr;
/* the CRUSH map specifies the mapping of placement groups to
/* calculate mapping of a file extent to an object */
extern void calc_file_object_mapping(struct ceph_file_layout *layout,
- __u64 off, __u64 *plen,
+ u64 off, u64 *plen,
struct ceph_object *oid,
- __u64 *oxoff, __u64 *oxlen);
+ u64 *oxoff, u64 *oxlen);
/* calculate mapping of object to a placement group */
extern void calc_object_layout(struct ceph_object_layout *ol,
* mounting the same ceph filesystem/cluster.
*/
struct ceph_client {
- __u32 whoami; /* my client number */
+ u32 whoami; /* my client number */
struct mutex mount_mutex; /* serialize mount attempts */
struct ceph_mount_args mount_args;
* choose fragment for value @v. copy frag content to pfrag, if leaf
* exists
*/
-extern __u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
+extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
struct ceph_inode_frag *pfrag,
int *found);
extern struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
ceph_snapdir_dentry_ops;
-extern char *ceph_build_path(struct dentry *dn, int *len, __u64 *base, int min);
+extern char *ceph_build_path(struct dentry *dn, int *len, u64 *base, int min);
extern struct dentry *ceph_do_lookup(struct super_block *sb,
struct dentry *dentry,
int mask, int on_inode, int locked_dir);