* @is_delayed indicates caller is delayed work and we should not
* delay further.
*/
-void ceph_check_caps(struct ceph_inode_info *ci, int is_delayed)
+void ceph_check_caps(struct ceph_inode_info *ci, int is_delayed, int drop)
{
struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
struct ceph_mds_client *mdsc = &client->mdsc;
retain |= CEPH_CAP_PIN |
(S_ISDIR(inode->i_mode) ? CEPH_CAP_ANY_RDCACHE :
CEPH_CAP_ANY_RD);
+ retain &= ~drop;
dout(10, "check_caps %p file_wanted %s used %s retain %s issued %s\n",
inode, ceph_cap_string(file_wanted), ceph_cap_string(used),
dout(10, "mds%d revoking %s\n", cap->mds,
ceph_cap_string(revoking));
-	/* request larger max_size from MDS? */
+	/* request larger max_size from MDS? */
if (ci->i_wanted_max_size > ci->i_max_size &&
ci->i_wanted_max_size > ci->i_requested_max_size)
goto ack;
up_read(&mdsc->snap_rwsem);
}
-
/*
* Track references to capabilities we hold, so that we don't release
* them to the MDS prematurely.
last ? "last" : "");
if (last && !flushsnaps)
- ceph_check_caps(ci, 0);
+ ceph_check_caps(ci, 0, 0);
else if (flushsnaps)
ceph_flush_snaps(ci);
if (wake)
spin_unlock(&inode->i_lock);
if (last) {
- ceph_check_caps(ci, 0);
+ ceph_check_caps(ci, 0, 0);
} else if (last_snap) {
ceph_flush_snaps(ci);
wake_up(&ci->i_cap_wq);
ceph_put_mds_session(session);
if (check_caps)
- ceph_check_caps(ceph_inode(inode), 1);
+ ceph_check_caps(ceph_inode(inode), 1, 0);
if (inode)
iput(inode);
return;
list_del_init(&ci->i_cap_delay_list);
spin_unlock(&mdsc->cap_delay_lock);
dout(10, "check_delayed_caps on %p\n", &ci->vfs_inode);
- ceph_check_caps(ci, 1);
+ ceph_check_caps(ci, 1, 0);
iput(&ci->vfs_inode);
}
spin_unlock(&mdsc->cap_delay_lock);
}
-void ceph_caps_release(struct inode *inode, int mask)
-{
- /* TODO */
-}
-
rhead = req->r_request->front.iov_base;
rhead->args.mknod.mode = cpu_to_le32(mode);
rhead->args.mknod.rdev = cpu_to_le32(rdev);
- ceph_caps_release(dir, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(dir, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
if (!err && req->r_reply_info.trace_numd == 0) {
/*
return PTR_ERR(req);
}
req->r_locked_dir = dir;
- ceph_caps_release(dir, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(dir, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
if (err)
rhead = req->r_request->front.iov_base;
rhead->args.mkdir.mode = cpu_to_le32(mode);
- ceph_caps_release(dir, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(dir, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
if (err < 0)
req->r_last_dentry = dget(dentry); /* use this dentry in fill_trace */
req->r_locked_dir = old_dentry->d_inode;
- ceph_caps_release(dir, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(dir, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
if (err) {
d_drop(dentry);
req->r_locked_dir = dir; /* by VFS */
- ceph_caps_release(dir, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(dir, CEPH_CAP_FILE_RDCACHE);
ceph_mdsc_lease_release(mdsc, dir, dentry,
CEPH_LOCK_DN);
- ceph_caps_release(inode, CEPH_CAP_LINK_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_LINK_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
req->r_old_dentry = dget(old_dentry);
req->r_last_dentry = dget(new_dentry);
req->r_locked_dir = new_dir;
- ceph_caps_release(old_dir, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(old_dir, CEPH_CAP_FILE_RDCACHE);
ceph_mdsc_lease_release(mdsc, old_dir, old_dentry,
CEPH_LOCK_DN);
if (new_dentry->d_inode)
- ceph_caps_release(new_dentry->d_inode, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(new_dentry->d_inode, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
if (!err && req->r_reply_info.trace_numd == 0) {
/*
dentry = d_find_alias(inode);
if (!dentry)
return -ESTALE; /* yuck */
- ceph_caps_release(inode, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_FILE_RDCACHE);
req = prepare_open_request(inode->i_sb, dentry, flags, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
if (IS_ERR(req))
return ERR_PTR(PTR_ERR(req));
if (flags & O_CREAT)
- ceph_caps_release(dir, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(dir, CEPH_CAP_FILE_RDCACHE);
req->r_last_dentry = dget(dentry); /* use this dentry in fill_trace */
req->r_locked_dir = dir; /* caller holds dir->i_mutex */
err = ceph_mdsc_do_request(mdsc, req);
}
spin_unlock(&inode->i_lock);
if (check)
- ceph_check_caps(ci, 0);
+ ceph_check_caps(ci, 0, 0);
}
/*
if ((size << 1) >= ci->i_max_size &&
(ci->i_reported_size << 1) < ci->i_max_size) {
spin_unlock(&inode->i_lock);
- ceph_check_caps(ci, 0);
+ ceph_check_caps(ci, 0, 0);
} else {
spin_unlock(&inode->i_lock);
}
spin_unlock(&ci->vfs_inode.i_lock);
if (last && ci->i_vino.snap == CEPH_NOSNAP)
- ceph_check_caps(ci, 0);
+ ceph_check_caps(ci, 0, 0);
}
spin_unlock(&inode->i_lock);
if (check)
- ceph_check_caps(ci, 0);
+ ceph_check_caps(ci, 0, 0);
out:
iput(inode);
}
dout(10, "__do_pending_vmtruncate %p to %lld\n", inode, to);
truncate_inode_pages(inode->i_mapping, to);
if (wrbuffer_refs == 0)
- ceph_check_caps(ci, 0);
+ ceph_check_caps(ci, 0, 0);
} else {
dout(10, "__do_pending_vmtruncate %p nothing to do\n", inode);
}
mask |= CEPH_CHOWN_GID;
}
reqh->args.chown.mask = cpu_to_le32(mask);
- ceph_caps_release(inode, CEPH_CAP_AUTH_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_AUTH_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
dout(10, "chown result %d\n", err);
return PTR_ERR(req);
reqh = req->r_request->front.iov_base;
reqh->args.chmod.mode = cpu_to_le32(attr->ia_mode);
- ceph_caps_release(inode, CEPH_CAP_AUTH_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_AUTH_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
dout(10, "chmod result %d\n", err);
if (ia_valid & ATTR_MTIME)
reqh->args.utime.mask |= cpu_to_le32(CEPH_UTIME_MTIME);
- ceph_caps_release(inode, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
dout(10, "utime result %d\n", err);
return PTR_ERR(req);
reqh = req->r_request->front.iov_base;
reqh->args.truncate.length = cpu_to_le64(attr->ia_size);
- ceph_caps_release(inode, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
dout(10, "truncate result %d\n", err);
req->r_request->hdr.data_len = cpu_to_le32(size);
req->r_request->hdr.data_off = cpu_to_le32(0);
- ceph_caps_release(inode, CEPH_CAP_XATTR_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_XATTR_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
if (IS_ERR(req))
return PTR_ERR(req);
- ceph_caps_release(inode, CEPH_CAP_XATTR_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_XATTR_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
return err;
kfree(path);
reqh = req->r_request->front.iov_base;
reqh->args.setlayout.layout = layout;
- ceph_caps_release(inode, CEPH_CAP_FILE_RDCACHE);
+ ceph_release_caps(inode, CEPH_CAP_FILE_RDCACHE);
err = ceph_mdsc_do_request(mdsc, req);
ceph_mdsc_put_request(req);
return err;
igrab(inode);
mutex_unlock(&session->s_mutex);
- ceph_check_caps(ceph_inode(inode), 1);
+ ceph_check_caps(ceph_inode(inode), 1, 0);
mutex_lock(&session->s_mutex);
iput(inode);
}
struct ceph_snap_context *snapc);
extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession);
-extern void ceph_check_caps(struct ceph_inode_info *ci, int delayed);
+extern void ceph_check_caps(struct ceph_inode_info *ci, int delayed, int drop);
extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
-extern void ceph_caps_release(struct inode *inode, int mask);
+/*
+ * Release caps immediately: re-run the cap check with @mask removed
+ * from the retained set, so those caps are returned to the MDS.
+ *
+ * static inline, not extern inline: this is defined in a header, so
+ * extern inline would either emit no out-of-line body (gnu89) or
+ * produce duplicate definitions in every including TU (C99).
+ */
+static inline void ceph_release_caps(struct inode *inode, int mask)
+{
+	ceph_check_caps(ceph_inode(inode), 1, mask);
+}
/* addr.c */
extern const struct address_space_operations ceph_aops;