From: Sage Weil Date: Mon, 20 Oct 2008 17:06:08 +0000 (-0700) Subject: kclient: some osd, mds client comments X-Git-Tag: v0.5~261 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=e7aa663bda0ece2e3db31a1715df6b4112b138e0;p=ceph.git kclient: some osd, mds client comments --- diff --git a/src/kernel/mds_client.h b/src/kernel/mds_client.h index 1aca03275e1a..9fbfa6f6aba7 100644 --- a/src/kernel/mds_client.h +++ b/src/kernel/mds_client.h @@ -11,6 +11,31 @@ #include "messenger.h" #include "mdsmap.h" +/* + * A cluster of MDS (metadata server) daemons is responsible for + * managing the file system namespace (the directory hierarchy and + * inodes) and for coordinating shared access to storage. Metadata is + * partitioned hierarchically across a number of servers, and that + * partition varies over time as the cluster adjusts the distribution + * in order to balance load. + * + * The MDS client is primarily responsible for managing synchronous + * metadata requests for operations like open, unlink, and so forth. + * If there is an MDS failure, we find out about it when we (possibly + * request and) receive a new MDS map, and can resubmit affected + * requests. + * + * For the most part, though, we take advantage of a lossless + * communications channel to the MDS, and do not need to worry about + * timing out or resubmitting requests. + * + * We maintain a stateful "session" with each MDS we interact with. + * Within each session, we send periodic heartbeat messages to ensure + * any capabilities or leases we have been issued remain valid. If + * the session times out and goes stale, our leases and capabilities + * are no longer valid. 
+ */ + struct ceph_client; struct ceph_cap; diff --git a/src/kernel/osd_client.h b/src/kernel/osd_client.h index cdc50016a369..201d93aef5b5 100644 --- a/src/kernel/osd_client.h +++ b/src/kernel/osd_client.h @@ -8,6 +8,22 @@ #include "types.h" #include "osdmap.h" +/* + * All data objects are stored within a cluster/cloud of OSDs, or + * "object storage devices." (Note that Ceph OSDs have _nothing_ to + * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply + * remote daemons serving up and coordinating consistent and safe + * access to storage. + * + * Cluster membership and the mapping of data objects onto storage devices + * are described by the osd map. + * + * We keep track of pending OSD requests (read, write), resubmit + * requests to different OSDs when the cluster topology/data layout + * changes, or retry the affected requests when the communications + * channel with an OSD is reset. + */ + struct ceph_msg; struct ceph_snap_context; struct ceph_osd_request;