}
};
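+// need_snapflush_inodes is an elist threaded through CInode::item_caps, hence
+// the member_offset passed at construction.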
+Locker::Locker(MDSRank *m, MDCache *c) :
+ mds(m), mdcache(c), need_snapflush_inodes(member_offset(CInode, item_caps)) {}
+
+
/* This function DOES put the passed message before returning */
void Locker::dispatch(Message *m)
{
}
}
if (gather) {
- if (in->client_snap_caps.empty())
+ if (in->client_snap_caps.empty()) {
in->item_open_file.remove_myself();
+ in->item_caps.remove_myself();
+ }
eval_cap_gather(in, &need_issue);
}
} else {
}
}
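+
+// Nudge a snap inode whose snapflush has stalled: kick a lock on the head
+// inode so its state moves forward and the pending snapflush gets going again.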
+void Locker::snapflush_nudge(CInode *in)
+{
+ assert(in->last != CEPH_NOSNAP);
+ if (in->client_snap_caps.empty())
+ return;
+
+ CInode *head = mdcache->get_inode(in->ino());
+ assert(head);
+ assert(head->is_auth());
+ if (head->client_need_snapflush.empty())
+ return;
+
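+ // prefer kicking the file lock; if it is already SYNC or mid-transition,
+ // fall back to any other stable lock that is not in SYNC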
+ SimpleLock *hlock = head->get_lock(CEPH_LOCK_IFILE);
+ if (hlock->get_state() == LOCK_SYNC || !hlock->is_stable()) {
+ hlock = NULL;
+ for (int i = 0; i < num_cinode_locks; i++) {
+ SimpleLock *lock = head->get_lock(cinode_lock_info[i].lock);
+ if (lock->get_state() != LOCK_SYNC && lock->is_stable()) {
+ hlock = lock;
+ break;
+ }
+ }
+ }
+ if (hlock) {
+ _rdlock_kick(hlock, true);
+ } else {
+ // no suitable lock to kick; requeue in case an unstable lock settles later
+ need_snapflush_inodes.push_back(&in->item_caps);
+ }
+}
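+
+// Queue a snap inode that still owes a snapflush, remembering when it was added.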
+void Locker::mark_need_snapflush_inode(CInode *in)
+{
+ assert(in->last != CEPH_NOSNAP);
+ if (!in->item_caps.is_on_list()) {
+ need_snapflush_inodes.push_back(&in->item_caps);
+ utime_t now = ceph_clock_now();
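+ // reuse last_dirstat_prop as the enqueue time; the tick path only nudges
+ // inodes older than its cutoff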
+ in->last_dirstat_prop = now;
+ dout(10) << "mark_need_snapflush_inode " << *in << " - added at " << now << dendl;
+ }
+}
void Locker::_do_null_snapflush(CInode *head_in, client_t client, snapid_t last)
{
{
utime_t now = ceph_clock_now();
+ if (!need_snapflush_inodes.empty()) {
+ // snap inodes that need a flush are auth pinned; they affect
+ // subtree/dirfrag freeze.
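+ // only nudge inodes that have been queued longer than a third of the
+ // freeze timeout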
+ utime_t cutoff = now;
+ cutoff -= g_conf->mds_freeze_tree_timeout / 3;
+
+ CInode *last = need_snapflush_inodes.back();
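+ // snapflush_nudge() may requeue entries, so remember the current tail and
+ // stop once it has been processed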
+ while (!need_snapflush_inodes.empty()) {
+ CInode *in = need_snapflush_inodes.front();
+ if (in->last_dirstat_prop >= cutoff)
+ break;
+ in->item_caps.remove_myself();
+ snapflush_nudge(in);
+ if (in == last)
+ break;
+ }
+ }
+
dout(20) << __func__ << " " << revoking_caps.size() << " revoking caps" << dendl;
- int i = 0;
+ now = ceph_clock_now();
+ int n = 0;
for (xlist<Capability*>::iterator p = revoking_caps.begin(); !p.end(); ++p) {
Capability *cap = *p;
dout(20) << __func__ << " age below timeout " << g_conf->mds_revoke_cap_timeout << dendl;
break;
} else {
- ++i;
- if (i > MAX_WARN_CAPS) {
+ ++n;
+ if (n > MAX_WARN_CAPS) {
dout(1) << __func__ << " more than " << MAX_WARN_CAPS << " caps are late "
<< "revoking, ignoring subsequent caps" << dendl;
break;
MDCache *mdcache;
public:
- Locker(MDSRank *m, MDCache *c) : mds(m), mdcache(c) {}
+ Locker(MDSRank *m, MDCache *c);
SimpleLock *get_lock(int lock_type, MDSCacheObjectInfo &info);
void get_late_revoking_clients(std::list<client_t> *result) const;
bool any_late_revoking_caps(xlist<Capability*> const &revoking) const;
- protected:
+protected:
bool _need_flush_mdlog(CInode *in, int wanted_caps);
void adjust_cap_wanted(Capability *cap, int wanted, int issue_seq);
void handle_client_caps(class MClientCaps *m);
// Maintain a per-client list to find clients responsible for late ones quickly
std::map<client_t, xlist<Capability*> > revoking_caps_by_client;
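+ // snap inodes with pending snapflushes, threaded through CInode::item_caps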
+ elist<CInode*> need_snapflush_inodes;
+public:
+ void snapflush_nudge(CInode *in);
+ void mark_need_snapflush_inode(CInode *in);
+
// local
public:
void local_wrlock_grab(LocalLock *lock, MutationRef& mut);
export_pin_queue.erase(o);
// remove from inode map
- if (o->last == CEPH_NOSNAP)
+ if (o->last == CEPH_NOSNAP) {
inode_map.erase(o->ino());
- else
+ } else {
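+ // drop the snap inode from the Locker's need_snapflush_inodes list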
+ o->item_caps.remove_myself();
snap_inode_map.erase(o->vino());
+ }
if (o->ino() < MDS_INO_SYSTEM_BASE) {
if (o == root) root = 0;
oldin->inode.version = olddn->pre_dirty();
dout(10) << " olddn " << *olddn << dendl;
bool need_snapflush = !oldin->client_snap_caps.empty();
- if (need_snapflush)
+ if (need_snapflush) {
mut->ls->open_files.push_back(&oldin->item_open_file);
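+ // track the cow'd snap inode so the Locker can nudge a stalled snapflush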
+ mds->locker->mark_need_snapflush_inode(oldin);
+ }
metablob->add_primary_dentry(olddn, 0, true, false, false, need_snapflush);
mut->add_cow_dentry(olddn);
} else {
lock->set_state(LOCK_SNAP_SYNC);
lock->get_wrlock(true);
}
-
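+ // record the snap inode so a stalled snapflush can be nudged later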
+ mds->locker->mark_need_snapflush_inode(in);
}
}