spin_unlock(&mdsc->cap_delay_lock);
}
+/*
+ * Common issue checks for add_cap, handle_cap_grant.
+ */
+static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
+                              unsigned issued)
+{
+        unsigned had = __ceph_caps_issued(ci, NULL);
+
+        /*
+         * Each time we receive FILE_CACHE anew, we increment
+         * i_rdcache_gen.
+         */
+        if ((issued & CEPH_CAP_FILE_CACHE) &&
+            (had & CEPH_CAP_FILE_CACHE) == 0)
+                ci->i_rdcache_gen++;
+
+        /*
+         * If we are newly issued FILE_SHARED, bump i_shared_gen; for a
+         * directory, also clear I_COMPLETE: we don't know what happened
+         * to this directory while we didn't have the cap.
+         */
+        if ((issued & CEPH_CAP_FILE_SHARED) &&
+            (had & CEPH_CAP_FILE_SHARED) == 0) {
+                ci->i_shared_gen++;
+                if (S_ISDIR(ci->vfs_inode.i_mode)) {
+                        dout(" marking %p NOT complete\n", &ci->vfs_inode);
+                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+                }
+        }
+}
+
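
/*
 * Illustrative sketch (not part of the patch, and not ceph code): the
 * generation-counter pattern that __check_cap_issue() above relies on.
 * When a cap bit is granted that we did not already hold, a generation
 * counter is bumped; anything stamped with an older generation is thereby
 * invalidated without being visited.  All names below (dir_state,
 * grant_caps, CAP_*) are invented for the example.
 */
#include <stdio.h>

#define CAP_CACHE  0x1
#define CAP_SHARED 0x2

struct dir_state {
        unsigned caps_held;     /* cap bits we currently hold */
        unsigned shared_gen;    /* bumped when CAP_SHARED is (re)granted */
        unsigned rdcache_gen;   /* bumped when CAP_CACHE is (re)granted */
};

static void grant_caps(struct dir_state *d, unsigned granted)
{
        if ((granted & CAP_CACHE) && !(d->caps_held & CAP_CACHE))
                d->rdcache_gen++;
        if ((granted & CAP_SHARED) && !(d->caps_held & CAP_SHARED))
                d->shared_gen++;
        d->caps_held |= granted;
}

int main(void)
{
        struct dir_state d = { 0, 0, 0 };
        unsigned stamp = d.shared_gen;  /* e.g. a cached dentry lease */

        grant_caps(&d, CAP_SHARED);     /* SHARED gained anew: gen bumps */
        printf("stamp still current? %s\n",
               stamp == d.shared_gen ? "yes" : "no");
        return 0;
}
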
/*
* Add a capability under the given MDS session.
*
}
}
-        /*
-         * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
-         * don't know what happened to this directory while we didn't
-         * have the cap.
-         */
-        if (S_ISDIR(inode->i_mode) &&
-            (issued & CEPH_CAP_FILE_SHARED) &&
-            (cap->issued & CEPH_CAP_FILE_SHARED) == 0) {
-                dout(" marking %p NOT complete\n", inode);
-                ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
-        }
+        __check_cap_issue(ci, cap, issued);
/*
* If we are issued caps we don't want, or the mds' wanted
cap->gen = session->s_cap_gen;
-        /*
-         * Each time we receive CACHE anew, we increment i_rdcache_gen.
-         * Also clear I_COMPLETE: we don't know what happened to this directory
-         */
-        if ((newcaps & CEPH_CAP_FILE_CACHE) &&          /* got RDCACHE */
-            (cap->issued & CEPH_CAP_FILE_CACHE) == 0 && /* but not before */
-            (__ceph_caps_issued(ci, NULL) & CEPH_CAP_FILE_CACHE) == 0) {
-                ci->i_rdcache_gen++;
-
-                if (S_ISDIR(inode->i_mode)) {
-                        dout(" marking %p NOT complete\n", inode);
-                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
-                }
-        }
+        __check_cap_issue(ci, cap, newcaps);
/*
* If CACHE is being revoked, and we have no dirty buffers,
spin_unlock(&dir->i_lock);
dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
- di->lease_rdcache_gen = ci->i_rdcache_gen;
+ di->lease_shared_gen = ci->i_shared_gen;
return NULL;
}
spin_unlock(&dir->i_lock);
int valid = 0;
spin_lock(&dir->i_lock);
- if (ci->i_rdcache_gen == di->lease_rdcache_gen)
+ if (ci->i_shared_gen == di->lease_shared_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
spin_unlock(&dir->i_lock);
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
- dir, (unsigned)ci->i_rdcache_gen, dentry,
- (unsigned)di->lease_rdcache_gen, valid);
+ dir, (unsigned)ci->i_shared_gen, dentry,
+ (unsigned)di->lease_shared_gen, valid);
return valid;
}
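
/*
 * Illustrative sketch (not part of the patch, and not ceph code): the
 * comparison dir_lease_is_valid() now makes, in isolation.  Stamping each
 * dentry with the directory's shared_gen lets a single counter bump lazily
 * invalidate every stamped dentry at once; the real check additionally
 * requires that FILE_SHARED is still held.  Names below are invented.
 */
#include <stdio.h>

struct dir_gen    { unsigned shared_gen; };
struct dentry_gen { unsigned lease_shared_gen; };

static int lease_valid(const struct dir_gen *dir, const struct dentry_gen *de)
{
        return dir->shared_gen == de->lease_shared_gen;
}

int main(void)
{
        struct dir_gen d = { .shared_gen = 7 };
        struct dentry_gen a = { .lease_shared_gen = 7 };
        struct dentry_gen b = { .lease_shared_gen = 7 };

        d.shared_gen++;         /* FILE_SHARED dropped and regained once */
        printf("a=%d b=%d\n", lease_valid(&d, &a), lease_valid(&d, &b));
        return 0;
}
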
struct ceph_inode_info *ci = ceph_inode(parent_inode);
spin_lock(&parent_inode->i_lock);
- if (ci->i_rdcache_gen == di->lease_rdcache_gen) {
+ if (ci->i_shared_gen == di->lease_shared_gen) {
dout(" clearing %p complete (d_release)\n",
parent_inode);
ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
ci->i_wr_ref = 0;
ci->i_wrbuffer_ref = 0;
ci->i_wrbuffer_ref_head = 0;
+ ci->i_shared_gen = 0;
ci->i_rdcache_gen = 0;
ci->i_rdcache_revoking = 0;
queue_trunc = ceph_fill_file_size(inode, issued,
le32_to_cpu(info->truncate_seq),
le64_to_cpu(info->truncate_size),
+ S_ISDIR(inode->i_mode) ?
+ ci->i_rbytes :
le64_to_cpu(info->size));
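
/*
 * Illustrative sketch (not part of the patch, and not ceph code): what the
 * S_ISDIR() ternary above amounts to: a directory reports its recursive
 * byte count (i_rbytes) as its size, while a regular file reports the
 * MDS-provided size.  The helper name is invented for the example.
 */
static long long reported_size(int is_dir, long long rbytes, long long size)
{
        return is_dir ? rbytes : size;
}
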
ceph_fill_file_time(inode, issued,
le32_to_cpu(info->time_warp_seq),
- /* make lease_rdcache_gen match directory */
+ /* make lease_shared_gen match directory */
dir = dentry->d_parent->d_inode;
- di->lease_rdcache_gen = ceph_inode(dir)->i_rdcache_gen;
+ di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
if (lease->mask == 0)
goto out_unlock;
int i_pin_ref;
int i_rd_ref, i_rdcache_ref, i_wr_ref;
int i_wrbuffer_ref, i_wrbuffer_ref_head;
- u32 i_rdcache_gen;  /* we increment this each time we get RDCACHE.
-                        If it's non-zero, we _may_ have cached
-                        pages. */
+ u32 i_shared_gen;   /* increment each time we get FILE_SHARED */
+ u32 i_rdcache_gen;  /* we increment this each time we get
+                        FILE_CACHE. If it's non-zero, we
+                        _may_ have cached pages. */
u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */
struct list_head i_unsafe_writes; /* uncommitted sync writes */
*/
struct ceph_dentry_info {
struct ceph_mds_session *lease_session;
- u32 lease_gen, lease_rdcache_gen;
+ u32 lease_gen, lease_shared_gen;
u32 lease_seq;
unsigned long lease_renew_after, lease_renew_from;
struct list_head lru;