if (!is_delayed)
__cap_delay_requeue(mdsc, ci);
-
+
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
cap = rb_entry(p, struct ceph_cap, ci_node);
}
have = __ceph_caps_issued(ci, &implemented);
/* HACK: force sync writes...
- have &= ~CEPH_CAP_WRBUFFER;
+ have &= ~CEPH_CAP_WRBUFFER;
implemented &= ~CEPH_CAP_WRBUFFER;
*/
if ((have & need) == need) {
if (snapc == ci->i_snap_realm->cached_context) {
ci->i_wrbuffer_ref_head -= nr;
dout(30, "put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
- inode,
+ inode,
ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
last ? " LAST":"");
{DOUT_MASK_IOCTL, "ioctl"},
{DOUT_MASK_MDSC, "mdsc"},
{DOUT_MASK_MDSMAP, "mdsmap"},
- {DOUT_MASK_MSGR, "msgr"},
+ {DOUT_MASK_MSGR, "msgr"},
{DOUT_MASK_MON, "mon"},
{DOUT_MASK_OSDC, "osdc"},
{DOUT_MASK_OSDMAP, "osdmap"},
{DOUT_MASK_SUPER, "super"},
{DOUT_MASK_PROTOCOL, "protocol"},
{DOUT_MASK_PROC, "proc"},
- {0, NULL}
+ {0, NULL}
};
static inline int ceph_get_debug_mask(char *name)
#define IPSZ (sizeof(struct ceph_inopath_item) / sizeof(u32))
-static int ceph_encode_fh(struct dentry *dentry, __u32 *rawfh, int *max_len,
+static int ceph_encode_fh(struct dentry *dentry, __u32 *rawfh, int *max_len,
int connectable)
{
int type = 1;
dout(10, "encode_fh %p max_len %d u32s (%d inopath items)%s\n", dentry,
*max_len, max, connectable ? " connectable":"");
-
+
if (max < 1 || (connectable && max < 2))
return -ENOSPC;
if (!inode) {
struct ceph_mds_request *req;
derr(10, "__fh_to_dentry %llx.%x -- no inode\n", vino.ino,hash);
-
+
req = ceph_mdsc_create_request(mdsc,
CEPH_MDS_OP_FINDINODE,
len, (char *)fh, 0, NULL,
return ERR_PTR(PTR_ERR(req));
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
-
+
inode = ceph_find_inode(sb, vino);
if (!inode)
return ERR_PTR(err ? err : -ESTALE);
}
dout(10, "__fh_to_dentry %llx.%x -- inode %p dentry %p\n", vino.ino,
hash, inode, dentry);
- return dentry;
+ return dentry;
}
{
struct ceph_inode_info *ci = ceph_inode(inode);
int check = 0;
-
+
/* do we need to explicitly request a larger max_size? */
spin_lock(&inode->i_lock);
if ((endoff >= ci->i_max_size ||
&iocb->ki_pos);
else
ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
-
+
out:
dout(10, "aio_write %p %llu~%u dropping cap refs on %d\n",
inode, pos, (unsigned)iov->iov_len, got);
static const struct inode_operations ceph_symlink_iops;
+
/*
* find or create an inode, given the ceph ino number
*/
return err;
}
+
+/*
+ * initialize a newly allocated inode.
+ *
+ * Allocates a ceph_inode_info from ceph_inode_cachep and resets all
+ * per-inode client state (leases, caps, snap linkage, deferred work),
+ * returning the embedded VFS inode.  Returns NULL on allocation failure.
+ * Paired with ceph_destroy_inode().
+ */
+struct inode *ceph_alloc_inode(struct super_block *sb)
+{
+	struct ceph_inode_info *ci;
+	int i;
+
+	/* GFP_NOFS: avoid recursing into the filesystem during reclaim */
+	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
+	if (!ci)
+		return NULL;
+
+	dout(10, "alloc_inode %p\n", &ci->vfs_inode);
+
+	ci->i_version = 0;
+	ci->i_truncate_seq = 0;
+	ci->i_time_warp_seq = 0;
+	ci->i_symlink = NULL;
+
+	/* MDS lease state */
+	ci->i_lease_session = NULL;
+	ci->i_lease_mask = 0;
+	ci->i_lease_ttl = 0;
+	INIT_LIST_HEAD(&ci->i_lease_item);
+
+	ci->i_fragtree = RB_ROOT;
+	mutex_init(&ci->i_fragtree_mutex);
+
+	ci->i_xattr_len = 0;
+	ci->i_xattr_data = NULL;
+
+	/* capability bookkeeping */
+	ci->i_caps = RB_ROOT;
+	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
+		ci->i_nr_by_mode[i] = 0;
+	init_waitqueue_head(&ci->i_cap_wq);
+	INIT_LIST_HEAD(&ci->i_cap_snaps);
+	ci->i_snap_caps = 0;
+
+	ci->i_wanted_max_size = 0;
+	ci->i_requested_max_size = 0;
+
+	/* cap migration (export to another MDS) state */
+	ci->i_cap_exporting_mds = 0;
+	ci->i_cap_exporting_mseq = 0;
+	ci->i_cap_exporting_issued = 0;
+
+	/* read/write/buffer reference counts */
+	ci->i_rd_ref = ci->i_rdcache_ref = 0;
+	ci->i_wr_ref = 0;
+	ci->i_wrbuffer_ref = 0;
+	ci->i_wrbuffer_ref_head = 0;
+	ci->i_hold_caps_until = 0;
+	INIT_LIST_HEAD(&ci->i_cap_delay_list);
+
+	ci->i_snap_realm = NULL;
+
+	/* deferred writeback and truncate work items */
+	INIT_WORK(&ci->i_wb_work, ceph_inode_writeback);
+
+	ci->i_vmtruncate_to = -1;	/* -1: no pending vmtruncate */
+	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
+
+	return &ci->vfs_inode;
+}
+
+/*
+ * free per-inode client state: symlink target, frag tree nodes, and
+ * the xattr blob, then return the ceph_inode_info to its slab cache.
+ * Counterpart of ceph_alloc_inode().
+ */
+void ceph_destroy_inode(struct inode *inode)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_inode_frag *frag;
+	struct rb_node *n;
+
+	dout(30, "destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+	kfree(ci->i_symlink);
+	/* tear down the fragtree, freeing each node as it is unlinked */
+	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
+		frag = rb_entry(n, struct ceph_inode_frag, node);
+		rb_erase(n, &ci->i_fragtree);
+		kfree(frag);
+	}
+	kfree(ci->i_xattr_data);
+	kmem_cache_free(ceph_inode_cachep, ci);
+}
+
+
/*
* Helper to fill in size, ctime, mtime, and atime. We have to be
* careful because either the client or MDS may have more up to date
return err;
}
+
+
/*
* caller must hold session s_mutex.
*/
if (snapdir) {
iput(snapdir);
dput(parent);
- }
+ }
dout(10, "readdir_prepopulate done\n");
return err;
}
size_t size)
{
return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
- (long)ci->i_rctime.tv_nsec);
+ (long)ci->i_rctime.tv_nsec);
}
static struct _ceph_vir_xattr_cb _ceph_vir_xattr_recs[] = {
mdsc->mdsmap = newmap;
check_new_map(mdsc, newmap, oldmap);
ceph_mdsmap_destroy(oldmap);
-
+
/* reconnect? */
if (from < newmap->m_max_mds) {
newstate = ceph_mdsmap_get_state(newmap, from);
int max_sessions; /* len of s_mds_sessions */
int stopping; /* true if shutting down */
- /*
+ /*
* snap_rwsem will cover cap linkage into snaprealms, and realm
* snap contexts. (later, we can do per-realm snap contexts locks..)
*/
/*
- * mds map
+ * mds map
*/
static void do_request_mdsmap(struct work_struct *work)
{
dout(5, "got_mdsmap got %u > wanted %u\n",
got, monc->want_mdsmap);
monc->want_mdsmap = 0;
-
+
/* we got map so take map request out of queue */
cancel_delayed_work_sync(&monc->mds_delayed_work);
monc->mds_delay = BASE_DELAY_INTERVAL;
};
extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
-extern int ceph_monmap_contains(struct ceph_monmap *m,
+extern int ceph_monmap_contains(struct ceph_monmap *m,
struct ceph_entity_addr *addr);
extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, __u32 have);
extern void ceph_monc_request_umount(struct ceph_mon_client *monc);
-extern void ceph_monc_report_failure(struct ceph_mon_client *monc,
+extern void ceph_monc_report_failure(struct ceph_mon_client *monc,
struct ceph_entity_inst *who);
-extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
+extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
struct ceph_statfs *buf);
-extern void ceph_monc_handle_statfs_reply(struct ceph_mon_client *monc,
+extern void ceph_monc_handle_statfs_reply(struct ceph_mon_client *monc,
struct ceph_msg *msg);
extern void ceph_monc_request_umount(struct ceph_mon_client *monc);
-extern void ceph_monc_handle_umount(struct ceph_mon_client *monc,
+extern void ceph_monc_handle_umount(struct ceph_mon_client *monc,
struct ceph_msg *msg);
#endif
/* skip new_max_snap, removed_snaps */
*p += sizeof(__u64);
ceph_decode_32_safe(p, end, len, bad);
- *p += len * 2 * sizeof(__u64);
+ *p += len * 2 * sizeof(__u64);
if (*p != end) {
derr(10, "osdmap incremental has trailing gunk?\n");
do_div(t, su);
bl = t;
dout(80, "off %llu / su %u = bl %u\n", off, su, bl);
-
+
stripeno = bl / sc;
stripepos = bl % sc;
objsetno = stripeno / su_per_object;
}
static inline struct ceph_entity_addr *
-ceph_osd_addr(struct ceph_osdmap *map, int osd)
+ceph_osd_addr(struct ceph_osdmap *map, int osd)
{
if (osd >= map->max_osd)
return 0;
*debug &= ~mask;
else
*debug |= mask;
-
+
}
} while (mask_str);
realm->cached_context->seq <= parent->cached_context->seq)) {
dout(10, "build_snap_context %llx %p: %p seq %lld (%d snaps)"
" (unchanged)\n",
- realm->ino, realm, realm->cached_context,
+ realm->ino, realm, realm->cached_context,
realm->cached_context->seq,
realm->cached_context->num_snaps);
return 0;
if (used & CEPH_CAP_WR) {
dout(10, "queue_cap_snap %p cap_snap %p snapc %p"
- " seq %llu used WR, now pending\n", inode,
+ " seq %llu used WR, now pending\n", inode,
capsnap, snapc, snapc->seq);
capsnap->writing = 1;
} else {
struct list_head *pi;
dout(10, "update_snap_trace updating %llx %p %lld -> %lld\n",
realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
-
+
list_for_each(pi, &realm->inodes_with_caps) {
struct ceph_inode_info *ci =
list_entry(pi, struct ceph_inode_info,
invalidate += adjust_snap_realm_parent(mdsc, realm,
le64_to_cpu(ri->parent));
-
+
if (le64_to_cpu(ri->seq) > realm->seq) {
realm->seq = le64_to_cpu(ri->seq);
realm->created = le64_to_cpu(ri->created);
realm->num_snaps = le32_to_cpu(ri->num_snaps);
if (dup_array(&realm->snaps, snaps, realm->num_snaps) < 0)
goto fail;
-
+
realm->num_prior_parent_snaps =
le32_to_cpu(ri->num_prior_parent_snaps);
if (dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
err = -EINVAL;
fail:
derr(10, "update_snap_trace error %d\n", err);
- return ERR_PTR(err);
+ return ERR_PTR(err);
}
/*
* inode cache
*/
-static struct kmem_cache *ceph_inode_cachep;
+struct kmem_cache *ceph_inode_cachep;
-static struct inode *ceph_alloc_inode(struct super_block *sb)
-{
- struct ceph_inode_info *ci;
- int i;
-
- ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
- if (!ci)
- return NULL;
-
- dout(10, "alloc_inode %p vfsi %p\n", ci, &ci->vfs_inode);
-
- ci->i_version = 0;
- ci->i_truncate_seq = 0;
- ci->i_time_warp_seq = 0;
- ci->i_symlink = NULL;
-
- ci->i_lease_session = NULL;
- ci->i_lease_mask = 0;
- ci->i_lease_ttl = 0;
- INIT_LIST_HEAD(&ci->i_lease_item);
-
- ci->i_fragtree = RB_ROOT;
- mutex_init(&ci->i_fragtree_mutex);
-
- ci->i_xattr_len = 0;
- ci->i_xattr_data = NULL;
-
- ci->i_caps = RB_ROOT;
- for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
- ci->i_nr_by_mode[i] = 0;
- init_waitqueue_head(&ci->i_cap_wq);
- INIT_LIST_HEAD(&ci->i_cap_snaps);
- ci->i_snap_caps = 0;
-
- ci->i_wanted_max_size = 0;
- ci->i_requested_max_size = 0;
-
- ci->i_cap_exporting_mds = 0;
- ci->i_cap_exporting_mseq = 0;
- ci->i_cap_exporting_issued = 0;
-
- ci->i_rd_ref = ci->i_rdcache_ref = 0;
- ci->i_wr_ref = 0;
- ci->i_wrbuffer_ref = 0;
- ci->i_wrbuffer_ref_head = 0;
- ci->i_hold_caps_until = 0;
- INIT_LIST_HEAD(&ci->i_cap_delay_list);
-
- ci->i_snap_realm = NULL;
-
- INIT_WORK(&ci->i_wb_work, ceph_inode_writeback);
-
- ci->i_vmtruncate_to = -1;
- INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
-
- return &ci->vfs_inode;
-}
-
-static void ceph_destroy_inode(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_inode_frag *frag;
- struct rb_node *n;
-
- dout(30, "destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
- kfree(ci->i_symlink);
- while ((n = rb_first(&ci->i_fragtree)) != NULL) {
- frag = rb_entry(n, struct ceph_inode_frag, node);
- rb_erase(n, &ci->i_fragtree);
- kfree(frag);
- }
- kfree(ci->i_xattr_data);
- kmem_cache_free(ceph_inode_cachep, ci);
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
static void init_once(void *foo)
#else
static void init_once(struct kmem_cache *cachep, void *foo)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
client->client_kobj = kobject_create_and_add(name, ceph_kobj);
- //client->fsid_kobj = kobject_create_and_add("fsid",
+ //client->fsid_kobj = kobject_create_and_add("fsid",
//client->client_kobj);
#endif
}
}
-
-
const char *ceph_msg_type_name(int type)
{
switch (type) {
/*
* mount options
*/
-
enum {
Opt_fsidmajor,
Opt_fsidminor,
client->msgr->prepare_pages = ceph_osdc_prepare_pages;
client->msgr->peer_reset = ceph_peer_reset;
}
-
+
while (!have_all_maps(client)) {
err = -EIO;
if (timeout && time_after_eq(jiffies, started + timeout))
return 0;
out_icache:
- destroy_inodecache();
+ destroy_inodecache();
out_msgr:
ceph_msgr_exit();
out_proc:
struct super_block *sb;
- unsigned long mount_state;
+ unsigned long mount_state;
wait_queue_head_t mount_wq;
struct ceph_messenger *msgr; /* messenger instance */
unsigned i_cap_exporting_issued;
struct list_head i_cap_snaps; /* snapped state pending flush to mds */
unsigned i_snap_caps; /* cap bits for snapped files */
-
+
int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */
loff_t i_max_size; /* max file size authorized by mds */
* are organized into a hierarchy, such that children inherit (some of)
* the snapshots of their parents.
*
- * All inodes within the realm that have capabilities are linked into a
+ * All inodes within the realm that have capabilities are linked into a
* per-realm list.
*/
struct ceph_snap_realm {
int num_prior_parent_snaps; /* had prior to parent_since */
u64 *snaps; /* snaps specific to this realm */
int num_snaps;
-
+
struct ceph_snap_realm *parent;
struct list_head children; /* list of child realms */
struct list_head child_item;
/* inode.c */
extern const struct inode_operations ceph_file_iops;
+extern struct kmem_cache *ceph_inode_cachep;
+
+extern struct inode *ceph_alloc_inode(struct super_block *sb);
+extern void ceph_destroy_inode(struct inode *inode);
extern struct inode *ceph_get_inode(struct super_block *sb,
struct ceph_vino vino);
ceph_snapdir_dentry_ops;
extern char *ceph_build_path(struct dentry *dn, int *len, __u64 *base, int min);
-extern struct dentry *ceph_do_lookup(struct super_block *sb,
- struct dentry *dentry,
+extern struct dentry *ceph_do_lookup(struct super_block *sb,
+ struct dentry *dentry,
int mask, int on_inode, int locked_dir);
extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err);