}
/*
- * Delay work with exponential backoff.
+ * Generic timeout mechanism for monitor requests: resend periodically,
+ * backing off exponentially, until the request is canceled
*/
-static void reschedule_timeout(struct delayed_work *dwork, unsigned long *delay)
+static void reschedule_timeout(struct ceph_mon_request_type *req)
{
- schedule_delayed_work(dwork, *delay);
- if (*delay < MAX_DELAY_INTERVAL)
- *delay *= 2;
+ schedule_delayed_work(&req->delayed_work, req->delay);
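+ /* back off exponentially, capped at MAX_DELAY_INTERVAL */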
+ if (req->delay < MAX_DELAY_INTERVAL)
+ req->delay *= 2;
else
- *delay = MAX_DELAY_INTERVAL;
+ req->delay = MAX_DELAY_INTERVAL;
+}
+
+static void retry_request(struct work_struct *work)
+{
+ struct ceph_mon_request_type *req =
+ container_of(work, struct ceph_mon_request_type,
+ delayed_work.work);
+
+ /*
+ * If the mutex is contended, reschedule a quick retry rather than
+ * blocking: cancel_timeout() calls cancel_delayed_work_sync() with
+ * req_mutex held, so sleeping on the mutex here could deadlock.
+ */
+ if (mutex_trylock(&req->monc->req_mutex)) {
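+ /* resend the request and re-arm the timeout */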
+ req->do_request(req->monc, 1);
+ reschedule_timeout(req);
+ mutex_unlock(&req->monc->req_mutex);
+ } else
+ schedule_delayed_work(&req->delayed_work, BASE_DELAY_INTERVAL);
+}
+
+static void cancel_timeout(struct ceph_mon_request_type *req)
+{
+ cancel_delayed_work_sync(&req->delayed_work);
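+ /* reset the backoff so the next request starts from the base delay */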
+ req->delay = BASE_DELAY_INTERVAL;
+}
+
+static void init_request_type(struct ceph_mon_client *monc,
+ struct ceph_mon_request_type *req,
+ ceph_monc_request_func_t func)
+{
+ req->monc = monc;
+ INIT_DELAYED_WORK(&req->delayed_work, retry_request);
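+ /* callers reset delay to BASE_DELAY_INTERVAL before the first reschedule_timeout() */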
+ req->delay = 0;
+ req->do_request = func;
}
ceph_msg_send(monc->client->msgr, msg, 0);
}
-static void retry_request_mdsmap(struct work_struct *work)
-{
- struct ceph_mon_client *monc =
- container_of(work, struct ceph_mon_client,
- mds_delayed_work.work);
-
- /* keep sending request until we receive mds map */
- request_mdsmap(monc, 1);
- if (monc->want_mdsmap)
- reschedule_timeout(&monc->mds_delayed_work, &monc->mds_delay);
-}
-
/*
* Register our desire for an mdsmap >= epoch @want.
*/
dout(5, "request_mdsmap want %u\n", want);
mutex_lock(&monc->req_mutex);
if (want > monc->want_mdsmap) {
- monc->mds_delay = BASE_DELAY_INTERVAL;
monc->want_mdsmap = want;
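+ /* restart the retry backoff from its base interval */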
+ monc->mdsreq.delay = BASE_DELAY_INTERVAL;
request_mdsmap(monc, 0);
- reschedule_timeout(&monc->mds_delayed_work, &monc->mds_delay);
+ reschedule_timeout(&monc->mdsreq);
}
mutex_unlock(&monc->req_mutex);
}
/*
- * Called when we receive an mds map.
+ * Possibly cancel our desire for a new mds map: if the epoch we got is
+ * at least the one we wanted, stop retrying
*/
int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
{
} else {
dout(5, "got_mdsmap %u >= wanted %u\n", got, monc->want_mdsmap);
monc->want_mdsmap = 0;
- cancel_delayed_work_sync(&monc->mds_delayed_work);
- monc->mds_delay = BASE_DELAY_INTERVAL;
+ cancel_timeout(&monc->mdsreq);
}
mutex_unlock(&monc->req_mutex);
return ret;
/*
* osd map
*/
-
static void request_osdmap(struct ceph_mon_client *monc, int newmon)
{
struct ceph_msg *msg;
ceph_msg_send(monc->client->msgr, msg, 0);
}
-static void retry_request_osdmap(struct work_struct *work)
-{
- struct ceph_mon_client *monc =
- container_of(work, struct ceph_mon_client,
- osd_delayed_work.work);
-
- /* keep sending request until we receive osd map */
- request_osdmap(monc, 1);
- if (monc->want_osdmap)
- reschedule_timeout(&monc->osd_delayed_work, &monc->osd_delay);
-}
-
void ceph_monc_request_osdmap(struct ceph_mon_client *monc, u32 want)
{
dout(5, "request_osdmap want %u\n", want);
mutex_lock(&monc->req_mutex);
- monc->osd_delay = BASE_DELAY_INTERVAL;
+ monc->osdreq.delay = BASE_DELAY_INTERVAL;
monc->want_osdmap = want;
request_osdmap(monc, 0);
- reschedule_timeout(&monc->osd_delayed_work, &monc->osd_delay);
+ reschedule_timeout(&monc->osdreq);
mutex_unlock(&monc->req_mutex);
}
} else {
dout(5, "got_osdmap %u >= wanted %u\n", got, monc->want_osdmap);
monc->want_osdmap = 0;
- cancel_delayed_work_sync(&monc->osd_delayed_work);
- monc->osd_delay = BASE_DELAY_INTERVAL;
+ cancel_timeout(&monc->osdreq);
}
mutex_unlock(&monc->req_mutex);
return ret;
struct ceph_msg *msg;
int mon = pick_mon(monc, newmon);
- dout(5, "do_request_umount from mon%d\n", mon);
+ dout(5, "request_umount from mon%d\n", mon);
msg = ceph_msg_new(CEPH_MSG_CLIENT_UNMOUNT, 0, 0, 0, NULL);
if (IS_ERR(msg))
return;
ceph_msg_send(monc->client->msgr, msg, 0);
}
-static void retry_request_umount(struct work_struct *work)
-{
- struct ceph_mon_client *monc =
- container_of(work, struct ceph_mon_client,
- umount_delayed_work.work);
-
- request_umount(monc, 1);
- reschedule_timeout(&monc->umount_delayed_work, &monc->umount_delay);
-}
-
void ceph_monc_request_umount(struct ceph_mon_client *monc)
{
struct ceph_client *client = monc->client;
return;
mutex_lock(&monc->req_mutex);
- monc->umount_delay = BASE_DELAY_INTERVAL;
+ monc->umountreq.delay = BASE_DELAY_INTERVAL;
request_umount(monc, 0);
- reschedule_timeout(&monc->umount_delayed_work, &monc->umount_delay);
+ reschedule_timeout(&monc->umountreq);
mutex_unlock(&monc->req_mutex);
}
-/*
- * Handle monitor umount ack.
- */
void ceph_monc_handle_umount(struct ceph_mon_client *monc,
struct ceph_msg *msg)
{
dout(5, "handle_umount\n");
mutex_lock(&monc->req_mutex);
- cancel_delayed_work_sync(&monc->umount_delayed_work);
- monc->client->mount_state = CEPH_MOUNT_UNMOUNTED;
+ cancel_timeout(&monc->umountreq);
mutex_unlock(&monc->req_mutex);
wake_up(&monc->client->mount_wq);
}
return req.result;
}
+/*
+ * Resend any statfs requests that have timed out.
+ */
static void do_statfs_check(struct work_struct *work)
{
struct ceph_mon_client *monc =
next_tid = req->tid + 1;
if (time_after(jiffies, req->last_attempt + req->delay)) {
req->last_attempt = jiffies;
- if (req->delay < HZ*60)
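+ /* use the same backoff cap as the generic monitor requests */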
+ if (req->delay < MAX_DELAY_INTERVAL)
req->delay *= 2;
send_statfs(monc, req->tid, newmon);
newmon = 0;
round_jiffies_relative(1*HZ));
}
+
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
dout(5, "init\n");
INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_ATOMIC);
monc->num_statfs_requests = 0;
monc->last_tid = 0;
- INIT_DELAYED_WORK(&monc->mds_delayed_work, retry_request_mdsmap);
- INIT_DELAYED_WORK(&monc->osd_delayed_work, retry_request_osdmap);
- INIT_DELAYED_WORK(&monc->umount_delayed_work, retry_request_umount);
INIT_DELAYED_WORK(&monc->statfs_delayed_work, do_statfs_check);
- monc->mds_delay = monc->osd_delay = monc->umount_delay = 0;
+ init_request_type(monc, &monc->mdsreq, request_mdsmap);
+ init_request_type(monc, &monc->osdreq, request_osdmap);
+ init_request_type(monc, &monc->umountreq, request_umount);
mutex_init(&monc->req_mutex);
monc->want_mdsmap = 0;
monc->want_osdmap = 0;
void ceph_monc_stop(struct ceph_mon_client *monc)
{
dout(5, "stop\n");
- cancel_delayed_work_sync(&monc->mds_delayed_work);
- cancel_delayed_work_sync(&monc->osd_delayed_work);
- cancel_delayed_work_sync(&monc->umount_delayed_work);
+ cancel_timeout(&monc->mdsreq);
+ cancel_timeout(&monc->osdreq);
+ cancel_timeout(&monc->umountreq);
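+ /* the statfs check uses its own delayed work; cancel it too */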
+ cancel_delayed_work_sync(&monc->statfs_delayed_work);
kfree(monc->monmap);
}
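
For reference, the mon_client.h side of this change is not shown in this hunk. Judging from how the fields are used above, the header presumably gains something like the following; this is a sketch inferred from the diff, not the actual header hunk, and the comments and field order are guesses:

	typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc,
						 int newmon);

	struct ceph_mon_request_type {
		struct ceph_mon_client *monc;
		struct delayed_work delayed_work;
		unsigned long delay;		/* backoff, in jiffies */
		ceph_monc_request_func_t do_request;
	};

	/* and, in struct ceph_mon_client, in place of the per-request work items: */
	struct ceph_mon_request_type mdsreq, osdreq, umountreq;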