/*
* Generic timeout mechanism for monitor requests
*/
-static void reschedule_timeout(struct ceph_mon_request_type *req)
+static void reschedule_timeout(struct ceph_mon_request *req)
{
schedule_delayed_work(&req->delayed_work, req->delay);
if (req->delay < MAX_DELAY_INTERVAL)
static void retry_request(struct work_struct *work)
{
- struct ceph_mon_request_type *req =
- container_of(work, struct ceph_mon_request_type,
+ struct ceph_mon_request *req =
+ container_of(work, struct ceph_mon_request,
delayed_work.work);
/*
schedule_delayed_work(&req->delayed_work, BASE_DELAY_INTERVAL);
}
+/*
+ * Cancel any pending resend work for this monitor request and reset
+ * its retry backoff to the base interval.
+ */
-static void cancel_timeout(struct ceph_mon_request_type *req)
+static void cancel_timeout(struct ceph_mon_request *req)
{
cancel_delayed_work_sync(&req->delayed_work);
req->delay = BASE_DELAY_INTERVAL;
}
static void init_request_type(struct ceph_mon_client *monc,
- struct ceph_mon_request_type *req,
+ struct ceph_mon_request *req,
ceph_monc_request_func_t func)
{
req->monc = monc;
/*
* (re)send a statfs request
*/
+/*
+ * Build and send a statfs request to a monitor.  Picks a (possibly
+ * new) monitor, fills in the request's tid and our fsid, registers the
+ * in-flight request in sysfs, and hands the message to the messenger.
+ * Returns 0 or the error from message allocation.
+ */
-static int send_statfs(struct ceph_mon_client *monc, u64 tid, int newmon)
+static int send_statfs(struct ceph_mon_client *monc,
+ struct ceph_mon_statfs_request *req,
+ int newmon)
{
struct ceph_msg *msg;
struct ceph_mon_statfs *h;
int mon = pick_mon(monc, newmon ? 1:-1);
- dout(10, "send_statfs to mon%d tid %llu\n", mon, tid);
+ dout(10, "send_statfs to mon%d tid %llu\n", mon, req->tid);
msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
if (IS_ERR(msg))
return PTR_ERR(msg);
h = msg->front.iov_base;
h->fsid = monc->monmap->fsid;
- h->tid = cpu_to_le64(tid);
+ h->tid = cpu_to_le64(req->tid);
msg->hdr.dst = monc->monmap->mon_inst[mon];
+ /*
+ * NOTE(review): the int return of ceph_sysfs_mon_statfs_req_init()
+ * is ignored here; a sysfs registration failure is presumably
+ * treated as non-fatal -- confirm that is intended.
+ */
+ ceph_sysfs_mon_statfs_req_init(monc, req, msg);
ceph_msg_send(monc->client->msgr, msg, 0);
return 0;
}
req.tid = ++monc->last_tid;
req.last_attempt = jiffies;
req.delay = BASE_DELAY_INTERVAL;
+ memset(&req.kobj, 0, sizeof(req.kobj));
if (radix_tree_insert(&monc->statfs_request_tree, req.tid, &req) < 0) {
mutex_unlock(&monc->statfs_mutex);
derr(10, "ENOMEM in do_statfs\n");
mutex_unlock(&monc->statfs_mutex);
/* send request and wait */
- err = send_statfs(monc, req.tid, 0);
+ err = send_statfs(monc, &req, 0);
if (!err)
err = wait_for_completion_interruptible(&req.completion);
mutex_lock(&monc->statfs_mutex);
+ ceph_sysfs_mon_statfs_req_cleanup(&req);
radix_tree_delete(&monc->statfs_request_tree, req.tid);
monc->num_statfs_requests--;
if (monc->num_statfs_requests == 0)
req->last_attempt = jiffies;
if (req->delay < MAX_DELAY_INTERVAL)
req->delay *= 2;
- send_statfs(monc, req->tid, newmon);
+ send_statfs(monc, req, newmon);
newmon = 0;
}
}
};
struct ceph_mon_client;
+struct ceph_mon_statfs_request;
+
+/*
+ * sysfs attribute wrapper for a ceph_mon_client.  show/store receive
+ * the owning client and the attribute itself, per the usual kobject
+ * sysfs_ops dispatch pattern.
+ */
+struct ceph_mon_client_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct ceph_mon_client *,
+ struct ceph_mon_client_attr *, char *);
+ ssize_t (*store)(struct ceph_mon_client *,
+ struct ceph_mon_client_attr *,
+ const char *, size_t);
+};
+
+/*
+ * sysfs attribute wrapper for an in-flight statfs request.  dst caches
+ * the monitor instance the request was sent to, so the "mon" attribute
+ * can still be shown after the message itself is gone.
+ */
+struct ceph_mon_statfs_request_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct ceph_mon_statfs_request *,
+ struct ceph_mon_statfs_request_attr *, char *);
+ ssize_t (*store)(struct ceph_mon_statfs_request *,
+ struct ceph_mon_statfs_request_attr *,
+ const char *, size_t);
+ struct ceph_entity_inst dst;
+};
/*
* Generic mechanism for resending monitor requests.
*/
typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc,
int newmon);
-struct ceph_mon_request_type {
+struct ceph_mon_request {
+ struct kobject kobj;
struct ceph_mon_client *monc;
struct delayed_work delayed_work;
unsigned long delay;
/* statfs() is done a bit differently */
struct ceph_mon_statfs_request {
u64 tid;
+ struct kobject kobj;
+ struct ceph_mon_statfs_request_attr k_op, k_mon;
int result;
struct ceph_statfs *buf;
struct completion completion;
/* mds/osd map or umount requests */
struct mutex req_mutex;
- struct ceph_mon_request_type mdsreq, osdreq, umountreq;
+ struct ceph_mon_request mdsreq, osdreq, umountreq;
u32 want_mdsmap;
u32 want_osdmap;
+
+ struct kobject kobj;
+ struct ceph_mon_client_attr k_want_osdmap, k_want_mdsmap;
};
extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
extern void ceph_sysfs_mds_req_cleanup(struct ceph_mds_request *req);
extern int ceph_sysfs_osd_req_init(struct ceph_osd_client *osdc, struct ceph_osd_request *req);
extern void ceph_sysfs_osd_req_cleanup(struct ceph_osd_request *req);
+/* register/unregister an in-flight statfs request in sysfs */
+extern int ceph_sysfs_mon_statfs_req_init(struct ceph_mon_client *monc,
+ struct ceph_mon_statfs_request *req,
+ struct ceph_msg *msg);
+extern void ceph_sysfs_mon_statfs_req_cleanup(
+ struct ceph_mon_statfs_request *req);
+
static inline struct inode *get_dentry_parent_inode(struct dentry *dentry)
{
DEF_ATTR_OP(ceph_client)
DEF_ATTR_OP(ceph_mds_request)
DEF_ATTR_OP(ceph_osd_request)
+DEF_ATTR_OP(ceph_mon_statfs_request)
/*
* per-client attributes
return pos;
}
+/* sysfs: show monc->want_osdmap (presumably the osdmap epoch we are
+ * asking the monitor for -- confirm against the map-request code) */
+static ssize_t req_mon_want_osdmap_show(struct ceph_mon_client *monc,
+ struct ceph_mon_client_attr *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", monc->want_osdmap);
+}
+
+/* sysfs: show monc->want_mdsmap (see note on want_osdmap above) */
+static ssize_t req_mon_want_mdsmap_show(struct ceph_mon_client *monc,
+ struct ceph_mon_client_attr *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", monc->want_mdsmap);
+}
+
static struct kobj_type entity_ops = {
.sysfs_ops = &ceph_client_sysfs_ops,
};
if (ret)
goto out;
+ ret = kobject_init_and_add(&client->monc.kobj, &entity_ops,
+ &client->kobj, "monc");
+ if (ret)
+ goto out;
+
ADD_ENTITY_ATTR(client, k_fsid, "fsid", 0400, fsid_show, NULL);
ADD_ENTITY_ATTR(client, k_monmap, "monmap", 0400, monmap_show, NULL);
ADD_ENTITY_ATTR(client, k_mdsmap, "mdsmap", 0400, mdsmap_show, NULL);
ADD_ENTITY_ATTR(client, k_osdmap, "osdmap", 0400, osdmap_show, NULL);
+ ADD_ENTITY_ATTR((&client->monc), k_want_osdmap, "want_osdmap", 0400, req_mon_want_osdmap_show, NULL);
+ ADD_ENTITY_ATTR((&client->monc), k_want_mdsmap, "want_mdsmap", 0400, req_mon_want_mdsmap_show, NULL);
return 0;
out:
#endif
}
+/*
+ * sysfs: show the monitor this statfs request was sent to, using the
+ * destination cached in the attribute at send time.
+ * NOTE(review): the "%u.%u.%u.%u:%u (%s%d)" format must match the
+ * argument counts expanded by IPQUADPORT and ENTITY_NAME -- confirm.
+ */
+static ssize_t req_mon_show(struct ceph_mon_statfs_request *req,
+ struct ceph_mon_statfs_request_attr *attr, char *buf)
+{
+ return sprintf(buf, "%u.%u.%u.%u:%u (%s%d)\n",
+ IPQUADPORT(attr->dst.addr.ipaddr),
+ ENTITY_NAME(attr->dst.name));
+}
+
+/* sysfs: show the request's operation type (always "statfs" here) */
+static ssize_t req_mon_op_show(struct ceph_mon_statfs_request *req,
+ struct ceph_mon_statfs_request_attr *attr, char *buf)
+{
+ return sprintf(buf, "statfs\n");
+}
+
+/*
+ * Register an in-flight statfs request in sysfs under the mon client's
+ * kobject, named by its tid, with "mon" and "op" attributes.
+ * Returns 0 on success or a negative errno from kobject registration.
+ */
+int ceph_sysfs_mon_statfs_req_init(struct ceph_mon_client *monc,
+ struct ceph_mon_statfs_request *req,
+ struct ceph_msg *msg)
+{
+ int ret = 0;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+ /* tid is u64: use %llu with a cast, not %d, to avoid a printf
+ * vararg type mismatch (and wrong names once tid > INT_MAX) */
+ ret = kobject_init_and_add(&req->kobj, &ceph_mon_statfs_request_ops,
+ &monc->kobj, "%llu",
+ (unsigned long long)req->tid);
+ if (ret)
+ goto out;
+
+ /* remember where the request went so req_mon_show can report it */
+ req->k_mon.dst = msg->hdr.dst;
+ ADD_ENTITY_ATTR(req, k_mon, "mon", 0400, req_mon_show, NULL);
+ ADD_ENTITY_ATTR(req, k_op, "op", 0400, req_mon_op_show, NULL);
+
+ return 0;
+out:
+ /*
+ * NOTE(review): the kobject API requires kobject_put() after a
+ * failed kobject_init_and_add(); it is not called here.  That is
+ * only safe if ceph_mon_statfs_request_ops has no release (the
+ * request is caller-owned) -- confirm before adding the put.
+ */
+#endif
+ return ret;
+}
+
+/*
+ * Remove the statfs request's kobject from sysfs once the request
+ * completes.
+ * NOTE(review): only kobject_del() is called, never kobject_put(), so
+ * the reference taken by kobject_init_and_add() is never dropped.
+ * That is deliberate only if the ktype has no release function (the
+ * request lives in the caller's frame) -- confirm against
+ * ceph_mon_statfs_request_ops.
+ */
+void ceph_sysfs_mon_statfs_req_cleanup(struct ceph_mon_statfs_request *req)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
+ kobject_del(&req->kobj);
+#endif
+}
+
/*
* ceph attrs
*/