/* count */
for (i = 0; i < m->m_max_mds; i++)
- if (m->m_state[i] > 0)
+ if (m->m_info[i].state > 0)
n++;
if (n == 0)
return -1;
n = r % n;
i = 0;
for (i = 0; n > 0; i++, n--)
- while (m->m_state[i] <= 0)
+ while (m->m_info[i].state <= 0)
i++;
return i;
struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
{
struct ceph_mdsmap *m;
- int i, n;
+ int i, j, n;
int err = -EINVAL;
u16 version;
ceph_decode_64(p, m->m_max_file_size);
ceph_decode_32(p, m->m_max_mds);
- m->m_addr = kcalloc(m->m_max_mds, sizeof(*m->m_addr), GFP_NOFS);
- m->m_state = kcalloc(m->m_max_mds, sizeof(*m->m_state), GFP_NOFS);
- if (m->m_addr == NULL || m->m_state == NULL)
+ /* one array of per-mds info replaces the parallel addr/state arrays */
+ m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
+ if (m->m_info == NULL)
goto badmem;
/* pick out active nodes from mds_info (state > 0) */
u64 state_seq;
u8 infoversion;
struct ceph_entity_addr addr;
+ u32 num_export_targets;
+ void *pexport_targets = NULL;
ceph_decode_need(p, end, sizeof(addr) + 1 + sizeof(u32), bad);
*p += sizeof(addr); /* skip addr key */
ceph_decode_32(p, state);
ceph_decode_64(p, state_seq);
ceph_decode_copy(p, &addr, sizeof(addr));
- *p += sizeof(struct ceph_timespec) + 2*sizeof(u32);
+ *p += sizeof(struct ceph_timespec);
+ *p += sizeof(u32);
+ ceph_decode_32_safe(p, end, namelen, bad);
+ /* skip the name payload itself, not just another length field */
+ *p += namelen;
+ if (infoversion >= 2) {
+ ceph_decode_32_safe(p, end, num_export_targets, bad);
+ /* make sure the whole u32 target array is within the buffer */
+ ceph_decode_need(p, end, num_export_targets * sizeof(u32), bad);
+ pexport_targets = *p;
+ /* advance by the array's byte length (sizeof() of the product
+ * expression would yield sizeof(size_t), not the array size) */
+ *p += num_export_targets * sizeof(u32);
+ } else {
+ num_export_targets = 0;
+ }
+
dout("mdsmap_decode %d/%d mds%d.%d %u.%u.%u.%u:%u %s\n",
i+1, n, mds, inc, IPQUADPORT(addr.ipaddr),
ceph_mds_state_name(state));
if (mds >= 0 && mds < m->m_max_mds && state > 0) {
- m->m_state[mds] = state;
- m->m_addr[mds] = addr;
+ m->m_info[mds].state = state;
+ m->m_info[mds].addr = addr;
+ m->m_info[mds].num_export_targets = num_export_targets;
+ if (num_export_targets) {
+ m->m_info[mds].export_targets =
+ kcalloc(num_export_targets, sizeof(u32),
+ GFP_NOFS);
+ /* check allocation before filling the array below */
+ if (m->m_info[mds].export_targets == NULL)
+ goto badmem;
+ for (j = 0; j < num_export_targets; j++)
+ ceph_decode_32(&pexport_targets,
+ m->m_info[mds].export_targets[j]);
+ } else {
+ m->m_info[mds].export_targets = NULL;
+ }
}
}
void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
{
- kfree(m->m_addr);
- kfree(m->m_state);
+ int i;
+
+ /* free each rank's export_targets list (kfree(NULL) is a no-op for
+ * ranks that never got one) before the m_info array holding them */
+ for (i = 0; i < m->m_max_mds; i++)
+ kfree(m->m_info[i].export_targets);
+ kfree(m->m_info);
kfree(m->m_data_pg_pools);
kfree(m);
}
*
* we limit fields to those the client actually cares about
*/
+/*
+ * Per-mds entry decoded from the mdsmap: address, state, and the
+ * export-target list (allocated in mdsmap_decode, freed in
+ * mdsmap_destroy).
+ */
+struct ceph_mds_info {
+ struct ceph_entity_addr addr;
+ s32 state;
+ int num_export_targets;
+ u32 *export_targets; /* NULL when num_export_targets == 0 */
+};
+
struct ceph_mdsmap {
u32 m_epoch, m_client_epoch, m_last_failure;
u32 m_root;
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
- u32 m_max_mds; /* size of m_addr, m_state arrays */
+ u32 m_max_mds; /* size of m_info array */
- struct ceph_entity_addr *m_addr; /* mds addrs */
- s32 *m_state; /* states */
+ struct ceph_mds_info *m_info; /* per-mds state/addr/targets */
/* which object pools file data can be stored in */
int m_num_data_pg_pools;
{
if (w >= m->m_max_mds)
return NULL;
- return &m->m_addr[w];
+ return &m->m_info[w].addr;
}
static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
BUG_ON(w < 0);
if (w >= m->m_max_mds)
return CEPH_MDS_STATE_DNE;
- return m->m_state[w];
+ return m->m_info[w].state;
}
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);