-#define CEPH_BUFFERLIST_START_IOVLEN 8 /* embed some statically, for fast normal case */
+#define CEPH_BUFFERLIST_START_KVLEN 8 /* embed some statically, for fast normal case */
struct ceph_bufferlist {
- struct iovec *b_iov; /* data payload */
- struct iovec b_iov_array[CEPH_BUFFERLIST_START_IOVLEN];
- int b_iovlen; /* used/defined elements in b_iov */
- int b_iovmax; /* allocated size of b_iov array */
- struct iovec b_append; /* preallocated memory for appending data to this bufferlist */
+ struct kvec *b_kv; /* data payload */
+ struct kvec b_kv_array[CEPH_BUFFERLIST_START_KVLEN];
+ int b_kvlen; /* used/defined elements in b_kv */
+ int b_kvmax; /* allocated size of b_kv array */
+ struct kvec b_append; /* preallocated memory for appending data to this bufferlist */
};
struct ceph_bufferlist_iterator {
- int i_iov; /* which iov */
- int i_off; /* offset in that iov */
+ int i_kv; /* which kv */
+ int i_off; /* offset in that kv */
};
+
+
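+/* set up an empty bufferlist */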
+void ceph_bl_init(struct ceph_bufferlist *bl)
+{
+ memset(bl, 0, sizeof(*bl));
+}
+
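+/* free all data chunks and reset the bufferlist to its embedded kvec array */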
+void ceph_bl_clear(struct ceph_bufferlist *bl)
+{
+ int i;
+ for (i = 0; i < bl->b_kvlen; i++)
+ kfree(bl->b_kv[i].iov_base);
+ bl->b_kvlen = 0;
+ if (bl->b_kv != bl->b_kv_array) {
+ kfree(bl->b_kv);
+ bl->b_kv = bl->b_kv_array;
+ bl->b_kvmax = CEPH_BUFFERLIST_START_KVLEN;
+ }
+ if (bl->b_append.iov_base) {
+ /* b_append points into the tail chunk, which is already
+ * owned by (and freed via) b_kv above; just reset it */
+ bl->b_append.iov_base = NULL;
+ bl->b_append.iov_len = 0;
+ }
+}
+
+
/*
* add referenced memory to the bufferlist.
- * expand b_iov array if necessary.
- * extend tail iovec if the added region is contiguous.
+ * expand b_kv array if necessary.
+ * extend tail kvec if the added region is contiguous.
*/
-void ceph_bufferlist_append_ref(struct ceph_bufferlist *bl, void *p, int len)
+void ceph_bl_append_ref(struct ceph_bufferlist *bl, void *p, int len)
{
- struct iovec *tmpvec;
- if (bl->b_iovlen == bl->b_iovmax) {
- if (bl->b_iovmax) {
- bl->b_iovmax *= 2;
- tmpvec = kmalloc(bl->b_iovmax);
- memcpy(tmpvec, bl->b_iov, sizeof(iovec)*bl->b_iovlen);
- if (bl->b_iovlen > CEPH_BUFFERLIST_START_IOVLEN)
- kfree(bl->b_iov);
- bl->b_iov = tmpvec;
- memset(tmpvec + bl->b_iovlen, 0,
- sizeof(iovec)*(bl->b_iovmax - bl->b_iovlen));
+ struct kvec *tmpvec;
+ if (bl->b_kvlen == bl->b_kvmax) {
+ if (bl->b_kvmax) {
+ bl->b_kvmax *= 2;
+ tmpvec = kmalloc(sizeof(struct kvec)*bl->b_kvmax, GFP_KERNEL);
+ memcpy(tmpvec, bl->b_kv, sizeof(struct kvec)*bl->b_kvlen);
+ if (bl->b_kvlen > CEPH_BUFFERLIST_START_KVLEN)
+ kfree(bl->b_kv);
+ bl->b_kv = tmpvec;
+ memset(tmpvec + bl->b_kvlen, 0,
+ sizeof(struct kvec)*(bl->b_kvmax - bl->b_kvlen));
} else {
- bl->b_iovmax = CEPH_BUFFERLIST_START_IOVLEN;
- bl->b_iov = bl->b_iov_array;
+ bl->b_kvmax = CEPH_BUFFERLIST_START_KVLEN;
+ bl->b_kv = bl->b_kv_array;
}
}
- if (bl->b_iovlen &&
- p == bl->b_iov[bl->b_iovlen-1].iov_base + bl->b_iov[bl->b_iovlen-1].iov_base) {
- bl->b_iov[bl->b_iovlen-1].iov_len += len;
+ if (bl->b_kvlen &&
+ p == bl->b_kv[bl->b_kvlen-1].iov_base + bl->b_kv[bl->b_kvlen-1].iov_len) {
+ bl->b_kv[bl->b_kvlen-1].iov_len += len;
} else {
- bl->b_iov[bl->b_iovlen].iov_base = p;
- bl->b_iov[bl->b_iovlen].iov_len = len;
- bl->b_iovlen++;
+ bl->b_kv[bl->b_kvlen].iov_base = p;
+ bl->b_kv[bl->b_kvlen].iov_len = len;
+ bl->b_kvlen++;
}
}
-void ceph_bufferlist_append_copy(struct ceph_bufferlist *bl, void *p, int len)
+void ceph_bl_append_copy(struct ceph_bufferlist *bl, void *p, int len)
{
int s;
while (len > 0) {
/* allocate more space? */
 if (!bl->b_append.iov_len) {
 bl->b_append.iov_len = (len + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
 bl->b_append.iov_base = kmalloc(bl->b_append.iov_len, GFP_KERNEL);
}
/* copy what we can */
- s = min(bl->b_append.iov_len, len);
- memcpy(bl->b_append.iov_base, s);
- ceph_bufferlist_append_ref(bl, b_append.iov_base, b_append.iov_len);
+ s = min_t(int, bl->b_append.iov_len, len);
+ memcpy(bl->b_append.iov_base, p, s);
+ ceph_bl_append_ref(bl, bl->b_append.iov_base, s);
+ p += s;
+ bl->b_append.iov_base += s;
len -= s;
 bl->b_append.iov_len -= s;
}
}
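+
+/*
+ * Usage sketch (hypothetical, for illustration only; ceph_bl_copy_example
+ * is not part of this interface).  Two copied fragments normally land in
+ * a single kvec, since ceph_bl_append_copy packs copies into page-sized
+ * chunks and ceph_bl_append_ref merges contiguous regions.
+ */
+#if 0
+static void ceph_bl_copy_example(void)
+{
+ struct ceph_bufferlist bl;
+ char header[16], payload[64];
+
+ ceph_bl_init(&bl);
+ ceph_bl_append_copy(&bl, header, sizeof(header));
+ ceph_bl_append_copy(&bl, payload, sizeof(payload));
+ /* bl.b_kv[0 .. bl.b_kvlen-1] now describes the copied bytes */
+ ceph_bl_clear(&bl);
+}
+#endif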
+
+
+void ceph_bl_iterator_init(struct ceph_bufferlist_iterator *bli)
+{
+ memset(bli, 0, sizeof(*bli));
+}
+
+void ceph_bl_iterator_advance(struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli,
+ int off)
+{
+ /* walk forward, spilling into following kvecs as needed */
+ bli->i_off += off;
+ while (bli->i_kv < bl->b_kvlen &&
+ (size_t)bli->i_off >= bl->b_kv[bli->i_kv].iov_len) {
+ bli->i_off -= bl->b_kv[bli->i_kv].iov_len;
+ bli->i_kv++;
+ }
+}
+
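+/*
+ * Decode helpers.  As written they assume an encoded value never
+ * straddles a kvec boundary, i.e. each primitive is contiguous
+ * within one chunk.
+ */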
+__u64 ceph_bl_decode_u64(struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli)
+{
+ __u64 r;
+ r = le64_to_cpu(*(__le64 *)(bl->b_kv[bli->i_kv].iov_base + bli->i_off));
+ ceph_bl_iterator_advance(bl, bli, sizeof(__u64));
+ return r;
+}
+__s64 ceph_bl_decode_s64(struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli)
+{
+ __s64 r;
+ r = (__s64)le64_to_cpu(*(__le64 *)(bl->b_kv[bli->i_kv].iov_base + bli->i_off));
+ ceph_bl_iterator_advance(bl, bli, sizeof(__s64));
+ return r;
+}
+
+__u32 ceph_bl_decode_u32(struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli)
+{
+ __u32 r;
+ r = le32_to_cpu(*(__le32 *)(bl->b_kv[bli->i_kv].iov_base + bli->i_off));
+ ceph_bl_iterator_advance(bl, bli, sizeof(__u32));
+ return r;
+}
+__s32 ceph_bl_decode_s32(struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli)
+{
+ __s32 r;
+ r = (__s32)le32_to_cpu(*(__le32 *)(bl->b_kv[bli->i_kv].iov_base + bli->i_off));
+ ceph_bl_iterator_advance(bl, bli, sizeof(__s32));
+ return r;
+}
+
+__u8 ceph_bl_decode_u8(struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli)
+{
+ __u8 r;
+ r = *(__u8 *)(bl->b_kv[bli->i_kv].iov_base + bli->i_off);
+ ceph_bl_iterator_advance(bl, bli, sizeof(__u8));
+ return r;
+}
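+
+/*
+ * Usage sketch (hypothetical, for illustration only; this helper is not
+ * part of the interface): decoding a u32-count-prefixed list of
+ * (key, value) u32 pairs, the same wire pattern ceph_mdsmap_decode walks.
+ */
+#if 0
+static void ceph_bl_decode_u32_map(struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli)
+{
+ __u32 i, n, key, val;
+ n = ceph_bl_decode_u32(bl, bli); /* element count */
+ for (i = 0; i < n; i++) {
+ key = ceph_bl_decode_u32(bl, bli);
+ val = ceph_bl_decode_u32(bl, bli);
+ /* ... consume key/val ... */
+ }
+}
+#endif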
+
#endif
return m->m_addr[w];
}
-int ceph_mdsmap_decode(ceph_mdsmap *m, ceph_bufferlist *bl)
+int ceph_mdsmap_decode(struct ceph_mdsmap *m,
+ struct ceph_bufferlist *bl,
+ struct ceph_bufferlist_iterator *bli)
{
- /* write me */
+ int i, n;
+ __u32 mds;
+
+ m->m_epoch = ceph_bl_decode_u64(bl, bli);
+ ceph_bl_decode_u32(bl, bli); /* target_num */
+ m->m_created.tv_sec = ceph_bl_decode_u32(bl, bli);
+ m->m_created.tv_usec = ceph_bl_decode_u32(bl, bli);
+ ceph_bl_decode_u64(bl, bli); /* same_in_set_since */
+ m->m_anchortable = ceph_bl_decode_s32(bl, bli);
+ m->m_root = ceph_bl_decode_s32(bl, bli);
+ m->m_max_mds = ceph_bl_decode_u32(bl, bli);
+
+ m->m_addr = kmalloc(sizeof(struct ceph_entity_addr)*m->m_max_mds, GFP_KERNEL);
+ m->m_state = kmalloc(sizeof(__u8)*m->m_max_mds, GFP_KERNEL);
+ if (m->m_addr == NULL || m->m_state == NULL) {
+ kfree(m->m_addr); /* kfree(NULL) is a no-op */
+ kfree(m->m_state);
+ return -ENOMEM;
+ }
+ memset(m->m_state, 0, sizeof(__u8)*m->m_max_mds);
+
+ /* created */
+ n = ceph_bl_decode_u32(bl, bli);
+ ceph_bl_iterator_advance(bl, bli, n*sizeof(__u32));
+
+ /* state */
+ n = ceph_bl_decode_u32(bl, bli);
+ for (i=0; i<n; i++) {
+ mds = ceph_bl_decode_u32(bl, bli);
+ m->m_state[mds] = ceph_bl_decode_s32(bl, bli);
+ }
+
+ /* state_seq */
+ n = ceph_bl_decode_u32(bl, bli);
+ ceph_bl_iterator_advance(bl, bli, n*2*sizeof(__u32));
+
+ /* mds_inst */
+ n = ceph_bl_decode_u32(bl, bli);
+ for (i=0; i<n; i++) {
+ mds = ceph_bl_decode_u32(bl, bli);
+ ceph_bl_iterator_advance(bl, bli, sizeof(struct ceph_entity_name));
+ m->m_addr[mds].nonce = ceph_bl_decode_u64(bl, bli);
+ m->m_addr[mds].port = ceph_bl_decode_u32(bl, bli);
+ m->m_addr[mds].ipq[0] = ceph_bl_decode_u8(bl, bli);
+ m->m_addr[mds].ipq[1] = ceph_bl_decode_u8(bl, bli);
+ m->m_addr[mds].ipq[2] = ceph_bl_decode_u8(bl, bli);
+ m->m_addr[mds].ipq[3] = ceph_bl_decode_u8(bl, bli);
+ }
+
+ /* mds_inc */
+
+ return 0;
}
+
+