* @flags is a bitmask of CHECK_CAPS_* values; CHECK_CAPS_NODELAY indicates
* the caller is delayed work and we should not delay further.
*/
-void ceph_check_caps(struct ceph_inode_info *ci, int is_delayed,
+void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session)
{
struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
struct rb_node *p;
int tried_invalidate = 0;
int delayed = 0, sent = 0, force_requeue = 0, num;
+ int is_delayed = flags & CHECK_CAPS_NODELAY;
/* if we are unmounting, flush any unused caps immediately. */
if (mdsc->stopping)
ceph_msg_get(msg);
ceph_send_msg_mds(mdsc, msg, mds);
} else if (r == 2) {
- ceph_check_caps(ceph_inode(inode), 1, session);
+ ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
+ session);
}
break;
kfree(xattr_data);
if (check_caps)
- ceph_check_caps(ceph_inode(inode), 1, NULL);
+ ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL);
if (inode)
iput(inode);
return;
list_del_init(&ci->i_cap_delay_list);
spin_unlock(&mdsc->cap_delay_lock);
dout(10, "check_delayed_caps on %p\n", &ci->vfs_inode);
- ceph_check_caps(ci, 1, NULL);
+ ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
}
spin_unlock(&mdsc->cap_delay_lock);
}
struct list_head caps_item;
};
+#define CHECK_CAPS_NODELAY 1 /* do not delay any further */
+#define CHECK_CAPS_AUTHONLY 2 /* only check auth cap */
+
/*
* Snapped cap state that is pending flush to mds. When a snapshot occurs,
* we first complete any in-process sync writes and writeback any dirty
struct ceph_snap_context *snapc);
extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession);
-extern void ceph_check_caps(struct ceph_inode_info *ci, int delayed,
+extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session);
extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);