common: link mds/flock.o to libcommon
author     Yan, Zheng <zyan@redhat.com>    Thu, 2 Oct 2014 10:02:50 +0000 (18:02 +0800)
committer  Yan, Zheng <zyan@redhat.com>    Fri, 10 Oct 2014 13:08:53 +0000 (21:08 +0800)
A later commit will use this code to track file locks held by the cephfs
client.

Signed-off-by: Yan, Zheng <zyan@redhat.com>
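
Most of this change is mechanical: ceph_lock_state_t gains a CephContext
pointer, and every dout(15) in flock.cc becomes ldout(cct,15). dout() logs
through the ambient, process-global context that code inside the MDS can
assume exists; code linked into libcommon must log through a CephContext
handed to it explicitly. A minimal sketch of the pattern, assuming Ceph's
common/debug.h macros (the function name here is illustrative, not part of
the commit):

    #include "common/ceph_context.h"   // CephContext
    #include "common/debug.h"          // dout/ldout/dendl macros

    #define dout_subsys ceph_subsys_mds  // subsystem tag for log filtering

    void log_example(CephContext *cct) {
      // Before (MDS-only): dout() resolves against the global context,
      // so it cannot be used from code that may run in any process.
      //   dout(15) << "add_lock" << dendl;

      // After: the context travels with the object (ceph_lock_state_t::cct).
      ldout(cct, 15) << "add_lock" << dendl;
    }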
src/common/Makefile.am
src/mds/CInode.h
src/mds/Makefile.am
src/mds/flock.cc
src/mds/flock.h

diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index 0b99c6e3f1589dd8121364c4dc42f6fddf2ad485..be53051c064bf7a25585d9c1e2aeb190986402a2 100644
@@ -85,7 +85,8 @@ libcommon_la_SOURCES += \
        osd/HitSet.cc \
        mds/MDSMap.cc \
        mds/inode_backtrace.cc \
-       mds/mdstypes.cc 
+       mds/mdstypes.cc \
+       mds/flock.cc
 
 # inject crc in common
 libcommon_crc_la_SOURCES = \
diff --git a/src/mds/CInode.h b/src/mds/CInode.h
index ff19bd6b57df7bd1823618ccd142bce0f8eb1165..5b27718350b848697d95817c34a81f3d908fab5c 100644
@@ -455,6 +455,7 @@ public:
     parent(0),
     inode_auth(CDIR_AUTH_DEFAULT),
     replica_caps_wanted(0),
+    fcntl_locks(g_ceph_context), flock_locks(g_ceph_context),
     item_dirty(this), item_caps(this), item_open_file(this), item_dirty_parent(this),
     item_dirty_dirfrag_dir(this), 
     item_dirty_dirfrag_nest(this), 
diff --git a/src/mds/Makefile.am b/src/mds/Makefile.am
index 8bed0e15b11ffcb00057172becc178a2c402e7b0..6ce5a5b56390216985c63888d9eb57b5c67d9ea6 100644
@@ -2,7 +2,6 @@ libmds_la_SOURCES = \
        mds/Capability.cc \
        mds/MDS.cc \
        mds/Beacon.cc \
-       mds/flock.cc \
        mds/locks.c \
        mds/journal.cc \
        mds/Server.cc \
diff --git a/src/mds/flock.cc b/src/mds/flock.cc
index 7b58c3ac321df32e03b07485b86535beb04770c3..45659804226d488fb23cc4e353e81edc11e900c7 100644
@@ -44,33 +44,33 @@ void ceph_lock_state_t::remove_waiting(ceph_filelock& fl)
 bool ceph_lock_state_t::add_lock(ceph_filelock& new_lock,
                                  bool wait_on_fail, bool replay)
 {
-  dout(15) << "add_lock " << new_lock << dendl;
+  ldout(cct,15) << "add_lock " << new_lock << dendl;
   bool ret = false;
   list<multimap<uint64_t, ceph_filelock>::iterator>
     overlapping_locks, self_overlapping_locks, neighbor_locks;
 
   // first, get any overlapping locks and split them into owned-by-us and not
   if (get_overlapping_locks(new_lock, overlapping_locks, &neighbor_locks)) {
-    dout(15) << "got overlapping lock, splitting by owner" << dendl;
+    ldout(cct,15) << "got overlapping lock, splitting by owner" << dendl;
     split_by_owner(new_lock, overlapping_locks, self_overlapping_locks);
   }
   if (!overlapping_locks.empty()) { //overlapping locks owned by others :(
     if (CEPH_LOCK_EXCL == new_lock.type) {
       //can't set, we want an exclusive
-      dout(15) << "overlapping lock, and this lock is exclusive, can't set"
+      ldout(cct,15) << "overlapping lock, and this lock is exclusive, can't set"
               << dendl;
       if (wait_on_fail && !replay) {
         waiting_locks.insert(pair<uint64_t, ceph_filelock>(new_lock.start, new_lock));
       }
     } else { //shared lock, check for any exclusive locks blocking us
       if (contains_exclusive_lock(overlapping_locks)) { //blocked :(
-        dout(15) << " blocked by exclusive lock in overlapping_locks" << dendl;
+        ldout(cct,15) << " blocked by exclusive lock in overlapping_locks" << dendl;
         if (wait_on_fail && !replay) {
           waiting_locks.insert(pair<uint64_t, ceph_filelock>(new_lock.start, new_lock));
         }
       } else {
         //yay, we can insert a shared lock
-        dout(15) << "inserting shared lock" << dendl;
+        ldout(cct,15) << "inserting shared lock" << dendl;
         remove_waiting(new_lock);
         adjust_locks(self_overlapping_locks, new_lock, neighbor_locks);
         held_locks.insert(pair<uint64_t, ceph_filelock>(new_lock.start, new_lock));
@@ -80,7 +80,7 @@ bool ceph_lock_state_t::add_lock(ceph_filelock& new_lock,
   } else { //no overlapping locks except our own
     remove_waiting(new_lock);
     adjust_locks(self_overlapping_locks, new_lock, neighbor_locks);
-    dout(15) << "no conflicts, inserting " << new_lock << dendl;
+    ldout(cct,15) << "no conflicts, inserting " << new_lock << dendl;
     held_locks.insert(pair<uint64_t, ceph_filelock>
                       (new_lock.start, new_lock));
     ret = true;
@@ -123,9 +123,9 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
   list<multimap<uint64_t, ceph_filelock>::iterator> overlapping_locks,
     self_overlapping_locks;
   if (get_overlapping_locks(removal_lock, overlapping_locks)) {
-    dout(15) << "splitting by owner" << dendl;
+    ldout(cct,15) << "splitting by owner" << dendl;
     split_by_owner(removal_lock, overlapping_locks, self_overlapping_locks);
-  } else dout(15) << "attempt to remove lock at " << removal_lock.start
+  } else ldout(cct,15) << "attempt to remove lock at " << removal_lock.start
                  << " but no locks there!" << dendl;
   bool remove_to_end = (0 == removal_lock.length);
   uint64_t removal_start = removal_lock.start;
@@ -134,13 +134,13 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
   __s64 old_lock_client = 0;
   ceph_filelock *old_lock;
 
-  dout(15) << "examining " << self_overlapping_locks.size()
+  ldout(cct,15) << "examining " << self_overlapping_locks.size()
           << " self-overlapping locks for removal" << dendl;
   for (list<multimap<uint64_t, ceph_filelock>::iterator>::iterator
          iter = self_overlapping_locks.begin();
        iter != self_overlapping_locks.end();
        ++iter) {
-    dout(15) << "self overlapping lock " << (*iter)->second << dendl;
+    ldout(cct,15) << "self overlapping lock " << (*iter)->second << dendl;
     old_lock = &(*iter)->second;
     bool old_lock_to_end = (0 == old_lock->length);
     old_lock_end = old_lock->start + old_lock->length - 1;
@@ -149,7 +149,7 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
       if (old_lock->start < removal_start) {
         old_lock->length = removal_start - old_lock->start;
       } else {
-        dout(15) << "erasing " << (*iter)->second << dendl;
+        ldout(cct,15) << "erasing " << (*iter)->second << dendl;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       }
@@ -160,7 +160,7 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
                         (append_lock.start, append_lock));
       ++client_held_lock_counts[(client_t)old_lock->client];
       if (old_lock->start >= removal_start) {
-        dout(15) << "erasing " << (*iter)->second << dendl;
+        ldout(cct,15) << "erasing " << (*iter)->second << dendl;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       } else old_lock->length = removal_start - old_lock->start;
@@ -176,7 +176,7 @@ void ceph_lock_state_t::remove_lock(ceph_filelock removal_lock,
       if (old_lock->start < removal_start) {
         old_lock->length = removal_start - old_lock->start;
       } else {
-        dout(15) << "erasing " << (*iter)->second << dendl;
+        ldout(cct,15) << "erasing " << (*iter)->second << dendl;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       }
@@ -207,7 +207,7 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
                   list<multimap<uint64_t, ceph_filelock>::iterator>
                   neighbor_locks)
 {
-  dout(15) << "adjust_locks" << dendl;
+  ldout(cct,15) << "adjust_locks" << dendl;
   bool new_lock_to_end = (0 == new_lock.length);
   uint64_t new_lock_start, new_lock_end;
   uint64_t old_lock_start, old_lock_end;
@@ -218,7 +218,7 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
        iter != old_locks.end();
        ++iter) {
     old_lock = &(*iter)->second;
-    dout(15) << "adjusting lock: " << *old_lock << dendl;
+    ldout(cct,15) << "adjusting lock: " << *old_lock << dendl;
     bool old_lock_to_end = (0 == old_lock->length);
     old_lock_start = old_lock->start;
     old_lock_end = old_lock->start + old_lock->length - 1;
@@ -227,17 +227,17 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
     old_lock_client = old_lock->client;
     if (new_lock_to_end || old_lock_to_end) {
       //special code path to deal with a length set at 0
-      dout(15) << "one lock extends forever" << dendl;
+      ldout(cct,15) << "one lock extends forever" << dendl;
       if (old_lock->type == new_lock.type) {
         //just unify them in new lock, remove old lock
-        dout(15) << "same lock type, unifying" << dendl;
+        ldout(cct,15) << "same lock type, unifying" << dendl;
         new_lock.start = (new_lock_start < old_lock_start) ? new_lock_start :
           old_lock_start;
         new_lock.length = 0;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       } else { //not same type, have to keep any remains of old lock around
-        dout(15) << "shrinking old lock" << dendl;
+        ldout(cct,15) << "shrinking old lock" << dendl;
         if (new_lock_to_end) {
           if (old_lock_start < new_lock_start) {
             old_lock->length = new_lock_start - old_lock_start;
@@ -261,17 +261,17 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
       }
     } else {
       if (old_lock->type == new_lock.type) { //just merge them!
-        dout(15) << "merging locks, they're the same type" << dendl;
+        ldout(cct,15) << "merging locks, they're the same type" << dendl;
         new_lock.start = (old_lock_start < new_lock_start ) ? old_lock_start :
           new_lock_start;
         int new_end = (new_lock_end > old_lock_end) ? new_lock_end :
           old_lock_end;
         new_lock.length = new_end - new_lock.start + 1;
-        dout(15) << "erasing lock " << (*iter)->second << dendl;
+        ldout(cct,15) << "erasing lock " << (*iter)->second << dendl;
         held_locks.erase(*iter);
         --client_held_lock_counts[old_lock_client];
       } else { //we'll have to update sizes and maybe make new locks
-        dout(15) << "locks aren't same type, changing sizes" << dendl;
+        ldout(cct,15) << "locks aren't same type, changing sizes" << dendl;
         if (old_lock_end > new_lock_end) { //add extra lock after new_lock
           ceph_filelock appended_lock = *old_lock;
           appended_lock.start = new_lock_end + 1;
@@ -301,7 +301,7 @@ void ceph_lock_state_t::adjust_locks(list<multimap<uint64_t, ceph_filelock>::ite
        ++iter) {
     old_lock = &(*iter)->second;
     old_lock_client = old_lock->client;
-    dout(15) << "lock to coalesce: " << *old_lock << dendl;
+    ldout(cct,15) << "lock to coalesce: " << *old_lock << dendl;
     /* because if it's a neighboring lock there can't be any self-overlapping
        locks that covered it */
     if (old_lock->type == new_lock.type) { //merge them
@@ -353,8 +353,8 @@ ceph_lock_state_t::get_lower_bound(uint64_t start,
        && (start != 0)
        && (lower_bound != lock_map.begin())) --lower_bound;
    if (lock_map.end() == lower_bound)
-     dout(15) << "get_lower_dout(15)eturning end()" << dendl;
-   else dout(15) << "get_lower_bound returning iterator pointing to "
+     ldout(cct,15) << "get_lower_dout(15)eturning end()" << dendl;
+   else ldout(cct,15) << "get_lower_bound returning iterator pointing to "
                 << lower_bound->second << dendl;
    return lower_bound;
  }
@@ -367,8 +367,8 @@ ceph_lock_state_t::get_last_before(uint64_t end,
     lock_map.upper_bound(end);
   if (last != lock_map.begin()) --last;
   if (lock_map.end() == last)
-    dout(15) << "get_last_before returning end()" << dendl;
-  else dout(15) << "get_last_before returning iterator pointing to "
+    ldout(cct,15) << "get_last_before returning end()" << dendl;
+  else ldout(cct,15) << "get_last_before returning iterator pointing to "
                << last->second << dendl;
   return last;
 }
@@ -381,7 +381,7 @@ bool ceph_lock_state_t::share_space(
               ((iter->first < start) &&
                (((iter->first + iter->second.length - 1) >= start) ||
                 (0 == iter->second.length))));
-  dout(15) << "share_space got start: " << start << ", end: " << end
+  ldout(cct,15) << "share_space got start: " << start << ", end: " << end
           << ", lock: " << iter->second << ", returning " << ret << dendl;
   return ret;
 }
@@ -392,7 +392,7 @@ bool ceph_lock_state_t::get_overlapping_locks(ceph_filelock& lock,
                            list<multimap<uint64_t,
                                ceph_filelock>::iterator> *self_neighbors)
 {
-  dout(15) << "get_overlapping_locks" << dendl;
+  ldout(cct,15) << "get_overlapping_locks" << dendl;
   // create a lock starting one earlier and ending one later
   // to check for neighbors
   ceph_filelock neighbor_check_lock = lock;
@@ -437,7 +437,7 @@ bool ceph_lock_state_t::get_waiting_overlaps(ceph_filelock& lock,
                                                ceph_filelock>::iterator>&
                                                overlaps)
 {
-  dout(15) << "get_waiting_overlaps" << dendl;
+  ldout(cct,15) << "get_waiting_overlaps" << dendl;
   multimap<uint64_t, ceph_filelock>::iterator iter =
     get_last_before(lock.start + lock.length - 1, waiting_locks);
   bool cont = iter != waiting_locks.end();
@@ -458,15 +458,15 @@ void ceph_lock_state_t::split_by_owner(ceph_filelock& owner,
 {
   list<multimap<uint64_t, ceph_filelock>::iterator>::iterator
     iter = locks.begin();
-  dout(15) << "owner lock: " << owner << dendl;
+  ldout(cct,15) << "owner lock: " << owner << dendl;
   while (iter != locks.end()) {
-    dout(15) << "comparing to " << (*iter)->second << dendl;
+    ldout(cct,15) << "comparing to " << (*iter)->second << dendl;
     if (ceph_filelock_owner_equal((*iter)->second, owner)) {
-      dout(15) << "success, pushing to owned_locks" << dendl;
+      ldout(cct,15) << "success, pushing to owned_locks" << dendl;
       owned_locks.push_back(*iter);
       iter = locks.erase(iter);
     } else {
-      dout(15) << "failure, something not equal in this group "
+      ldout(cct,15) << "failure, something not equal in this group "
               << (*iter)->second.client << ":" << owner.client << ","
              << (*iter)->second.owner << ":" << owner.owner << ","
              << (*iter)->second.pid << ":" << owner.pid << dendl;
diff --git a/src/mds/flock.h b/src/mds/flock.h
index 4791b850c4f4f2a30ac437bbbc011077552bd7e1..bf3980d094b3cc8a8e9182bdc969565ade9a1f19 100644
@@ -37,7 +37,9 @@ inline bool operator==(ceph_filelock& l, ceph_filelock& r) {
 }
 
 class ceph_lock_state_t {
+  CephContext *cct;
 public:
+  ceph_lock_state_t(CephContext *cct_) : cct(cct_) {}
   multimap<uint64_t, ceph_filelock> held_locks;    // current locks
   multimap<uint64_t, ceph_filelock> waiting_locks; // locks waiting for other locks
   // both of the above are keyed by starting offset
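
With flock.cc linked into libcommon and the constructor taking a
CephContext*, client-side code can now maintain its own lock table. A hedged
usage sketch, not part of this commit, using only the API visible in the
diffs above (the helper name and zeroed owner fields are illustrative):

    #include <cstring>
    #include "common/ceph_context.h"   // CephContext
    #include "mds/flock.h"             // ceph_lock_state_t, ceph_filelock

    // Try to take an exclusive lock over [start, start+len).
    // A length of 0 means "lock through end of file".
    bool try_exclusive_lock(CephContext *cct, uint64_t start, uint64_t len) {
      ceph_lock_state_t lock_state(cct);  // constructor added by this commit

      ceph_filelock fl;
      memset(&fl, 0, sizeof(fl));         // client/owner/pid zeroed for the sketch
      fl.start = start;
      fl.length = len;
      fl.type = CEPH_LOCK_EXCL;

      // wait_on_fail=false: on conflict, fail rather than queue the lock on
      // waiting_locks; replay=false: this is not journal replay.
      return lock_state.add_lock(fl, false, false);
    }

On success the lock lands in held_locks, keyed by its starting offset, as
add_lock() does in the flock.cc diff above.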