From e075c27c3554380c59dce0cc17ef0944eb415025 Mon Sep 17 00:00:00 2001
From: "Yan, Zheng"
Date: Thu, 2 Oct 2014 18:02:50 +0800
Subject: [PATCH] common: link mds/flock.o to libcommon

A later commit will use this code to track file locks held by the cephfs
client.

Signed-off-by: Yan, Zheng
---
 src/common/Makefile.am |  3 +-
 src/mds/CInode.h       |  1 +
 src/mds/Makefile.am    |  1 -
 src/mds/flock.cc       | 66 +++++++++++++++++++++---------------------
 src/mds/flock.h        |  2 ++
 5 files changed, 38 insertions(+), 35 deletions(-)

diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index 0b99c6e3f1589..be53051c064bf 100644
--- a/src/common/Makefile.am
+++ b/src/common/Makefile.am
@@ -85,7 +85,8 @@ libcommon_la_SOURCES += \
 	osd/HitSet.cc \
 	mds/MDSMap.cc \
 	mds/inode_backtrace.cc \
-	mds/mdstypes.cc
+	mds/mdstypes.cc \
+	mds/flock.cc
 
 # inject crc in common
 libcommon_crc_la_SOURCES = \
diff --git a/src/mds/CInode.h b/src/mds/CInode.h
index ff19bd6b57df7..5b27718350b84 100644
--- a/src/mds/CInode.h
+++ b/src/mds/CInode.h
@@ -455,6 +455,7 @@ public:
     parent(0),
     inode_auth(CDIR_AUTH_DEFAULT),
     replica_caps_wanted(0),
+    fcntl_locks(g_ceph_context), flock_locks(g_ceph_context),
     item_dirty(this), item_caps(this), item_open_file(this),
     item_dirty_parent(this), item_dirty_dirfrag_dir(this),
     item_dirty_dirfrag_nest(this),
diff --git a/src/mds/Makefile.am b/src/mds/Makefile.am
index 8bed0e15b11ff..6ce5a5b563902 100644
--- a/src/mds/Makefile.am
+++ b/src/mds/Makefile.am
@@ -2,7 +2,6 @@ libmds_la_SOURCES = \
 	mds/Capability.cc \
 	mds/MDS.cc \
 	mds/Beacon.cc \
-	mds/flock.cc \
 	mds/locks.c \
 	mds/journal.cc \
 	mds/Server.cc \
diff --git a/src/mds/flock.cc b/src/mds/flock.cc
index 7b58c3ac321df..45659804226d4 100644
--- a/src/mds/flock.cc
+++ b/src/mds/flock.cc
@@ -44,33 +44,33 @@ void ceph_lock_state_t::remove_waiting(ceph_filelock& fl)
 bool ceph_lock_state_t::add_lock(ceph_filelock& new_lock,
                                  bool wait_on_fail, bool replay)
 {
-  dout(15) << "add_lock " << new_lock << dendl;
+  ldout(cct,15) << "add_lock " << new_lock << dendl;
   bool ret = false;
   list<multimap<uint64_t, ceph_filelock>::iterator>
     overlapping_locks, self_overlapping_locks, neighbor_locks;
 
   // first, get any overlapping locks and split them into owned-by-us and not
   if (get_overlapping_locks(new_lock, overlapping_locks, &neighbor_locks)) {
-    dout(15) << "got overlapping lock, splitting by owner" << dendl;
+    ldout(cct,15) << "got overlapping lock, splitting by owner" << dendl;
     split_by_owner(new_lock, overlapping_locks, self_overlapping_locks);
   }
   if (!overlapping_locks.empty()) { //overlapping locks owned by others :(
     if (CEPH_LOCK_EXCL == new_lock.type) { //can't set, we want an exclusive
-      dout(15) << "overlapping lock, and this lock is exclusive, can't set"
+      ldout(cct,15) << "overlapping lock, and this lock is exclusive, can't set"
                << dendl;
       if (wait_on_fail && !replay) {
         waiting_locks.insert(pair<uint64_t, ceph_filelock>
                              (new_lock.start, new_lock));
       }
     } else { //shared lock, check for any exclusive locks blocking us
       if (contains_exclusive_lock(overlapping_locks)) { //blocked :(
-        dout(15) << " blocked by exclusive lock in overlapping_locks" << dendl;
+        ldout(cct,15) << " blocked by exclusive lock in overlapping_locks" << dendl;
         if (wait_on_fail && !replay) {
           waiting_locks.insert(pair<uint64_t, ceph_filelock>
                                (new_lock.start, new_lock));
         }
       } else { //yay, we can insert a shared lock
-        dout(15) << "inserting shared lock" << dendl;
+        ldout(cct,15) << "inserting shared lock" << dendl;
         remove_waiting(new_lock);
         adjust_locks(self_overlapping_locks, new_lock, neighbor_locks);
         held_locks.insert(pair<uint64_t, ceph_filelock>(new_lock.start, new_lock));
@@ -80,7 +80,7 @@ bool ceph_lock_state_t::add_lock(ceph_filelock& new_lock,
   } else { //no overlapping locks except our own
     remove_waiting(new_lock);
     adjust_locks(self_overlapping_locks, new_lock, neighbor_locks);
-    dout(15) << "no conflicts, inserting " << new_lock << dendl;
+    ldout(cct,15) << "no conflicts, inserting " << new_lock << dendl;
     held_locks.insert(pair<uint64_t, ceph_filelock>
                       (new_lock.start, new_lock));
     ret = true;
@@ -123,9 +123,9 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
   list<multimap<uint64_t, ceph_filelock>::iterator> overlapping_locks,
     self_overlapping_locks;
   if (get_overlapping_locks(removal_lock, overlapping_locks)) {
-    dout(15) << "splitting by owner" << dendl;
+    ldout(cct,15) << "splitting by owner" << dendl;
     split_by_owner(removal_lock, overlapping_locks, self_overlapping_locks);
-  } else dout(15) << "attempt to remove lock at " << removal_lock.start
+  } else ldout(cct,15) << "attempt to remove lock at " << removal_lock.start
                   << " but no locks there!" << dendl;
   bool remove_to_end = (0 == removal_lock.length);
   uint64_t removal_start = removal_lock.start;
@@ -134,13 +134,13 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
   __s64 old_lock_client = 0;
   ceph_filelock *old_lock;
 
-  dout(15) << "examining " << self_overlapping_locks.size()
+  ldout(cct,15) << "examining " << self_overlapping_locks.size()
            << " self-overlapping locks for removal" << dendl;
   for (list<multimap<uint64_t, ceph_filelock>::iterator>::iterator
          iter = self_overlapping_locks.begin();
        iter != self_overlapping_locks.end();
        ++iter) {
-    dout(15) << "self overlapping lock " << (*iter)->second << dendl;
+    ldout(cct,15) << "self overlapping lock " << (*iter)->second << dendl;
     old_lock = &(*iter)->second;
     bool old_lock_to_end = (0 == old_lock->length);
     old_lock_end = old_lock->start + old_lock->length - 1;
@@ -149,7 +149,7 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
       if (old_lock->start < removal_start) {
         old_lock->length = removal_start - old_lock->start;
       } else {
-        dout(15) << "erasing " << (*iter)->second << dendl;
+        ldout(cct,15) << "erasing " << (*iter)->second << dendl;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       }
@@ -160,7 +160,7 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
           (append_lock.start, append_lock));
         ++client_held_lock_counts[(client_t)old_lock->client];
         if (old_lock->start >= removal_start) {
-          dout(15) << "erasing " << (*iter)->second << dendl;
+          ldout(cct,15) << "erasing " << (*iter)->second << dendl;
           held_locks.erase(*iter);
           --client_held_lock_counts[old_lock_client];
         } else old_lock->length = removal_start - old_lock->start;
@@ -176,7 +176,7 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
       if (old_lock->start < removal_start) {
         old_lock->length = removal_start - old_lock->start;
       } else {
-        dout(15) << "erasing " << (*iter)->second << dendl;
+        ldout(cct,15) << "erasing " << (*iter)->second << dendl;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       }
@@ -207,7 +207,7 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
                                      list<multimap<uint64_t,
                                          ceph_filelock>::iterator> neighbor_locks)
 {
-  dout(15) << "adjust_locks" << dendl;
+  ldout(cct,15) << "adjust_locks" << dendl;
   bool new_lock_to_end = (0 == new_lock.length);
   uint64_t new_lock_start, new_lock_end;
   uint64_t old_lock_start, old_lock_end;
@@ -218,7 +218,7 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
        iter != old_locks.end();
        ++iter) {
     old_lock = &(*iter)->second;
-    dout(15) << "adjusting lock: " << *old_lock << dendl;
+    ldout(cct,15) << "adjusting lock: " << *old_lock << dendl;
     bool old_lock_to_end = (0 == old_lock->length);
     old_lock_start = old_lock->start;
     old_lock_end = old_lock->start + old_lock->length - 1;
@@ -227,17 +227,17 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
     old_lock_client = old_lock->client;
     if (new_lock_to_end || old_lock_to_end) {
       //special code path to deal with a length set at 0
-      dout(15) << "one lock extends forever" << dendl;
+      ldout(cct,15) << "one lock extends forever" << dendl;
       if (old_lock->type == new_lock.type) {
         //just unify them in new lock, remove old lock
-        dout(15) << "same lock type, unifying" << dendl;
+        ldout(cct,15) << "same lock type, unifying" << dendl;
         new_lock.start = (new_lock_start < old_lock_start) ? new_lock_start :
           old_lock_start;
         new_lock.length = 0;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       } else { //not same type, have to keep any remains of old lock around
-        dout(15) << "shrinking old lock" << dendl;
+        ldout(cct,15) << "shrinking old lock" << dendl;
         if (new_lock_to_end) {
           if (old_lock_start < new_lock_start) {
             old_lock->length = new_lock_start - old_lock_start;
@@ -261,17 +261,17 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
       }
     } else {
       if (old_lock->type == new_lock.type) { //just merge them!
-        dout(15) << "merging locks, they're the same type" << dendl;
+        ldout(cct,15) << "merging locks, they're the same type" << dendl;
         new_lock.start = (old_lock_start < new_lock_start ) ? old_lock_start :
           new_lock_start;
         int new_end = (new_lock_end > old_lock_end) ? new_lock_end :
           old_lock_end;
         new_lock.length = new_end - new_lock.start + 1;
-        dout(15) << "erasing lock " << (*iter)->second << dendl;
+        ldout(cct,15) << "erasing lock " << (*iter)->second << dendl;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       } else { //we'll have to update sizes and maybe make new locks
-        dout(15) << "locks aren't same type, changing sizes" << dendl;
+        ldout(cct,15) << "locks aren't same type, changing sizes" << dendl;
         if (old_lock_end > new_lock_end) { //add extra lock after new_lock
           ceph_filelock appended_lock = *old_lock;
           appended_lock.start = new_lock_end + 1;
@@ -301,7 +301,7 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
        ++iter) {
     old_lock = &(*iter)->second;
     old_lock_client = old_lock->client;
-    dout(15) << "lock to coalesce: " << *old_lock << dendl;
+    ldout(cct,15) << "lock to coalesce: " << *old_lock << dendl;
     /* because if it's a neighboring lock there can't be any self-overlapping
        locks that covered it */
     if (old_lock->type == new_lock.type) { //merge them
@@ -353,8 +353,8 @@ ceph_lock_state_t::get_lower_bound(uint64_t start,
       && (start != 0)
       && (lower_bound != lock_map.begin())) --lower_bound;
   if (lock_map.end() == lower_bound)
-    dout(15) << "get_lower_dout(15)eturning end()" << dendl;
-  else dout(15) << "get_lower_bound returning iterator pointing to "
+    ldout(cct,15) << "get_lower_dout(15)eturning end()" << dendl;
+  else ldout(cct,15) << "get_lower_bound returning iterator pointing to "
                 << lower_bound->second << dendl;
   return lower_bound;
 }
@@ -367,8 +367,8 @@ ceph_lock_state_t::get_last_before(uint64_t end,
     lock_map.upper_bound(end);
   if (last != lock_map.begin()) --last;
   if (lock_map.end() == last)
-    dout(15) << "get_last_before returning end()" << dendl;
-  else dout(15) << "get_last_before returning iterator pointing to "
+    ldout(cct,15) << "get_last_before returning end()" << dendl;
+  else ldout(cct,15) << "get_last_before returning iterator pointing to "
                 << last->second << dendl;
   return last;
 }
@@ -381,7 +381,7 @@ bool ceph_lock_state_t::share_space(
               ((iter->first < start) &&
                (((iter->first + iter->second.length - 1) >= start) ||
                 (0 == iter->second.length))));
-  dout(15) << "share_space got start: " << start << ", end: " << end
+  ldout(cct,15) << "share_space got start: " << start << ", end: " << end
            << ", lock: " << iter->second << ", returning " << ret << dendl;
   return ret;
 }
@@ -392,7 +392,7 @@ bool ceph_lock_state_t::get_overlapping_locks(ceph_filelock& lock,
                                               list<multimap<uint64_t,
                                                   ceph_filelock>::iterator> *self_neighbors)
 {
-  dout(15) << "get_overlapping_locks" << dendl;
+  ldout(cct,15) << "get_overlapping_locks" << dendl;
   // create a lock starting one earlier and ending one later
   // to check for neighbors
   ceph_filelock neighbor_check_lock = lock;
@@ -437,7 +437,7 @@ bool ceph_lock_state_t::get_waiting_overlaps(ceph_filelock& lock,
                                              list<multimap<uint64_t,
                                                  ceph_filelock>::iterator>& overlaps)
 {
-  dout(15) << "get_waiting_overlaps" << dendl;
+  ldout(cct,15) << "get_waiting_overlaps" << dendl;
   multimap<uint64_t, ceph_filelock>::iterator iter =
     get_last_before(lock.start + lock.length - 1, waiting_locks);
   bool cont = iter != waiting_locks.end();
@@ -458,15 +458,15 @@ void ceph_lock_state_t::split_by_owner(ceph_filelock& owner,
 {
   list<multimap<uint64_t, ceph_filelock>::iterator>::iterator
     iter = locks.begin();
-  dout(15) << "owner lock: " << owner << dendl;
+  ldout(cct,15) << "owner lock: " << owner << dendl;
   while (iter != locks.end()) {
-    dout(15) << "comparing to " << (*iter)->second << dendl;
+    ldout(cct,15) << "comparing to " << (*iter)->second << dendl;
     if (ceph_filelock_owner_equal((*iter)->second, owner)) {
-      dout(15) << "success, pushing to owned_locks" << dendl;
+      ldout(cct,15) << "success, pushing to owned_locks" << dendl;
       owned_locks.push_back(*iter);
       iter = locks.erase(iter);
     } else {
-      dout(15) << "failure, something not equal in this group "
+      ldout(cct,15) << "failure, something not equal in this group "
                << (*iter)->second.client << ":" << owner.client << ","
                << (*iter)->second.owner << ":" << owner.owner << ","
                << (*iter)->second.pid << ":" << owner.pid << dendl;
diff --git a/src/mds/flock.h b/src/mds/flock.h
index 4791b850c4f4f..bf3980d094b3c 100644
--- a/src/mds/flock.h
+++ b/src/mds/flock.h
@@ -37,7 +37,9 @@ inline bool operator==(ceph_filelock& l, ceph_filelock& r) {
 }
 
 class ceph_lock_state_t {
+  CephContext *cct;
 public:
+  ceph_lock_state_t(CephContext *cct_) : cct(cct_) {}
   multimap<uint64_t, ceph_filelock> held_locks;    // current locks
   multimap<uint64_t, ceph_filelock> waiting_locks; // locks waiting for other locks
   // both of the above are keyed by starting offset
-- 
2.39.5
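
Note (not part of the patch above): with flock.cc linked into libcommon and ceph_lock_state_t now carrying a CephContext, client-side code can instantiate the lock-state tracker directly. The sketch below is only illustrative; the function name example_track_lock and the literal owner/pid values are hypothetical, and it relies solely on the constructor and add_lock() interface visible in the hunks above.

// Minimal sketch of a libcommon consumer (hypothetical example, not from the patch).
#include <cstring>
#include "common/ceph_context.h"
#include "mds/flock.h"

void example_track_lock(CephContext *cct)   // cct supplied by the caller, e.g. the client's context
{
  ceph_lock_state_t lock_state(cct);        // CephContext is now required by the constructor

  ceph_filelock fl;
  memset(&fl, 0, sizeof(fl));
  fl.start = 0;                             // byte range starting at offset 0
  fl.length = 1024;                         // 0 would mean "to end of file"
  fl.client = 1;                            // owner identity fields (hypothetical values)
  fl.owner = 1;
  fl.pid = 42;
  fl.type = CEPH_LOCK_SHARED;

  // add_lock() returns false when the range conflicts with a lock held by another owner;
  // wait_on_fail=false means a conflicting request is not queued on waiting_locks.
  if (!lock_state.add_lock(fl, false /*wait_on_fail*/, false /*replay*/)) {
    // conflict with an exclusive lock held by someone else
  }
}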