for (i = 0; i < nr_pages; i++) {
page = pvec.pages[i];
- dout(30, "trying page %p\n", page);
if (first < 0)
lock_page(page);
-else if (TestSetPageLocked(page))
+else if (TestSetPageLocked(page)) {
end_page_writeback(page);
break;
}
+ /*
dout(20, "writepages locked page %p index %lu\n",
page, page->index);
+ */
kmap(page);
if (first < 0)
first = i;
mount_msg = ceph_msg_new(CEPH_MSG_CLIENT_MOUNT, 0, 0, 0, 0);
if (IS_ERR(mount_msg))
return PTR_ERR(mount_msg);
- mount_msg->hdr.dst.name.type = cpu_to_le32(CEPH_ENTITY_TYPE_MON);
+ mount_msg->hdr.dst.name.type =
+ cpu_to_le32(CEPH_ENTITY_TYPE_MON);
mount_msg->hdr.dst.name.num = cpu_to_le32(which);
mount_msg->hdr.dst.addr = args->mon_addr[which];
ceph_msg_send(client->msgr, mount_msg, 0);
- dout(10, "mount from mon%d, %d attempts left\n", which, attempts);
+ dout(10, "mount from mon%d, %d attempts left\n",
+ which, attempts);
/* wait */
dout(10, "mount sent mount request, waiting for maps\n");
- err = wait_for_completion_timeout(&client->mount_completion, 6*HZ);
- if (err == -EINTR)
+ err = wait_for_completion_interruptible_timeout(
+ &client->mount_completion, 6*HZ);
+ if (err < 0)
return err;
if (client->mounting == 7)
break; /* success */
- dout(10, "mount still waiting for mount, attempts=%d\n", attempts);
+ dout(10, "mount still waiting for mount, attempts=%d\n",
+ attempts);
if (--attempts == 0)
return -EIO;
}
if (first) {
client->whoami = le32_to_cpu(msg->hdr.dst.name.num);
client->msgr->inst.name = msg->hdr.dst.name;
- dout(1, "i am client%d\n", client->whoami);
+ dout(1, "i am client%d, fsid is %llx.%llx\n", client->whoami,
+ le64_to_cpu(client->monc.monmap->fsid.major),
+ le64_to_cpu(client->monc.monmap->fsid.minor));
}
}
}
}
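+/*
+ * Ask a monitor for an osdmap: record the epoch we currently have and
+ * send a CEPH_MSG_OSD_GETMAP message encoding the have/want epochs.
+ */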
+int ceph_monc_request_osdmap(struct ceph_mon_client *monc,
+ __u32 have, __u32 want)
+{
+ struct ceph_msg *msg;
+ int mon = pick_mon(monc, -1);
+
+ dout(5, "ceph_monc_request_osdmap from mon%d have %u want %u\n",
+ mon, have, want);
+ monc->want_osdmap = have;
+ msg = ceph_msg_new(CEPH_MSG_OSD_GETMAP, 2*sizeof(__u32), 0, 0, 0);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+ *(__le32 *)msg->front.iov_base = cpu_to_le32(have);
+ *((__le32 *)msg->front.iov_base + 1) = cpu_to_le32(want);
+ msg->hdr.dst = monc->monmap->mon_inst[mon];
+ ceph_msg_send(monc->client->msgr, msg, 0);
+ return 0;
+}
+
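+/*
+ * An osdmap arrived: clear want_osdmap and return 0 if it satisfies the
+ * outstanding request, or -EAGAIN if we still want a newer map.
+ */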
+int ceph_monc_got_osdmap(struct ceph_mon_client *monc, __u32 have)
+{
+ if (have > monc->want_osdmap) {
+ dout(5, "ceph_monc_got_osdmap have %u > wanted %u\n",
+ have, monc->want_osdmap);
+ monc->want_osdmap = 0;
+ return 0;
+ } else {
+ dout(5, "ceph_monc_got_osdmap have %u <= wanted %u *****\n",
+ have, monc->want_osdmap);
+ return -EAGAIN;
+ }
+}
+
/*
* statfs
u64 last_tid;
u32 want_mdsmap; /* protected by caller's lock */
+ u32 want_osdmap; /* protected by caller's lock */
};
extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
extern int ceph_monc_request_mdsmap(struct ceph_mon_client *monc, __u32 have);
extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, __u32 have);
-extern void ceph_monc_request_osdmap(struct ceph_mon_client *monc, __u64 have);
+extern int ceph_monc_request_osdmap(struct ceph_mon_client *monc,
+ __u32 have, __u32 want);
+extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, __u32 have);
extern void ceph_monc_request_umount(struct ceph_mon_client *monc);
extern void ceph_monc_report_failure(struct ceph_mon_client *monc, struct ceph_entity_inst *who);
struct ceph_osdmap *newmap = 0;
int err;
- dout(1, "handle_map\n");
+ dout(1, "handle_map, have %u\n", osdc->osdmap ? osdc->osdmap->epoch:0);
p = msg->front.iov_base;
end = p + msg->front.iov_len;
/* incremental maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
dout(10, " %d inc maps\n", nr_maps);
- while (nr_maps--) {
+ while (nr_maps > 0) {
ceph_decode_need(&p, end, 2*sizeof(__u32), bad);
ceph_decode_32(&p, epoch);
ceph_decode_32(&p, maplen);
osdc->osdmap = newmap;
}
} else {
- dout(10, "ignoring incremental map %u len %d\n", epoch, maplen);
+ dout(10, "ignoring incremental map %u len %d\n",
+ epoch, maplen);
}
p = next;
+ nr_maps--;
}
if (newmap)
goto out;
dout(5, "skipping non-latest full map %u len %d\n",
epoch, maplen);
p += maplen;
+ nr_maps--;
}
if (nr_maps) {
ceph_decode_need(&p, end, 2*sizeof(__u32), bad);
}
dout(1, "handle_map done\n");
+ /* tell the mon client which osdmap epoch we ended up with */
+ if (osdc->osdmap)
+ ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
+
/* kick any pending requests that need kicking */
/* WRITE ME */
ceph_msg_get(req->r_request); /* send consumes a ref */
ceph_msg_send(osdc->client->msgr, req->r_request, 0);
} else {
- dout(10, "send_request no osds in pg are up\n");
+ dout(10, "send_request no osds in this pg are up\n");
+ ceph_monc_request_osdmap(&osdc->client->monc,
+ osdc->osdmap->epoch, 0);
}
}
__u32 epoch;
struct ceph_timespec ctime;
__u32 len;
- __u32 max;
+ __s32 max;
int err = -EINVAL;
ceph_decode_need(p, end, 2*sizeof(__u64)+4*sizeof(__u32), bad);
/* full map? */
ceph_decode_32(p, len);
if (len > 0) {
+ dout(20, "apply_incremental full map len %d, %p to %p\n",
+ len, *p, end);
newmap = osdmap_decode(p, min(*p+len, end));
return newmap; /* error or not */
}
/* new crush? */
ceph_decode_32_safe(p, end, len, bad);
if (len > 0) {
+ dout(20, "apply_incremental new crush map len %d, %p to %p\n",
+ len, *p, end);
newcrush = crush_decode(p, min(*p+len, end));
if (IS_ERR(newcrush))
return ERR_PTR(PTR_ERR(newcrush));
-ceph_decode_need(p, end, 3*sizeof(__u32), bad);
+ceph_decode_need(p, end, 5*sizeof(__u32), bad);
ceph_decode_32(p, max);
*p += 4*sizeof(__u32); /* skip new_pg_num et al for now. FIXME. */
- if (max > 0) {
+ if (max >= 0) {
if ((err = osdmap_set_max_osd(map, max)) < 0)
goto bad;
}
if (osd < map->max_osd)
map->crush->device_offload[osd] = off;
}
-
+
+ /* skip old/new pg_swap stuff */
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_need(p, end, len * (sizeof(__u64) + sizeof(__u32)), bad);
+ *p += len * (sizeof(__u64) + sizeof(__u32));
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_need(p, end, len * sizeof(__u64), bad);
+ *p += len * sizeof(__u64);
+
+ if (*p != end)
+ goto bad;
return map;
bad: