/*
* renew at 1/2 the advertised timeout period.
*/
- unsigned hz = (HZ * mdsc->mdsmap->m_cap_bit_timeout) >> 1;
- schedule_delayed_work(&mdsc->delayed_work, hz);
+ int delay = mdsc->mdsmap->m_cap_bit_timeout >> 1;
+ unsigned hz = HZ * delay;
+ int r;
+ dout(10, "schedule_delayed for %d seconds (%u hz)\n", delay, hz);
+ r = schedule_delayed_work(&mdsc->delayed_work, hz);
+ dout(10, "r = %d\n", r);
}
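
For reference, a minimal standalone sketch of the pattern this hunk implements, assuming the advertised timeout is in whole seconds (as the dout format suggests): schedule_delayed_work() takes its delay in jiffies, hence the HZ multiply, and it returns 0 when the work item was already pending, which is what the logged return value would show. The names renew_fn, renew_work and schedule_renewal below are hypothetical, not part of the patch.

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

static void renew_fn(struct work_struct *work)
{
	/* renew the lease here, then call schedule_renewal() again */
}
static DECLARE_DELAYED_WORK(renew_work, renew_fn);

static void schedule_renewal(int timeout_secs)
{
	int delay_secs = timeout_secs >> 1;	/* renew at half the period */
	unsigned long jiff = delay_secs * HZ;	/* seconds -> jiffies */

	/* returns 0 if renew_work was already queued, nonzero otherwise */
	if (!schedule_delayed_work(&renew_work, jiff))
		printk(KERN_DEBUG "renewal already scheduled\n");
}
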
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
void delayed_work(struct work_struct *work)
-#else
-void delayed_work(void *arg)
-#endif
{
int i;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
struct ceph_mds_client *mdsc =
container_of(work, struct ceph_mds_client, delayed_work.work);
-#else
- struct ceph_mds_client *mdsc = arg;
-#endif
dout(10, "delayed_work on %p\n", mdsc);
u64 last_tid;
u32 want_mdsmap; /* protected by caller's lock */
+ struct delayed_work delayed_work; /* delayed work */
+ unsigned long delay;
+
+ struct ceph_msg *msg;
+ u32 want_osdmap; /* protected by caller's lock */
};
extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);