git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mds: Move lock print function; add debugging; fix fencepost errors.
authorGreg Farnum <gregf@hq.newdream.net>
Mon, 10 May 2010 22:42:49 +0000 (15:42 -0700)
committerGreg Farnum <gregf@hq.newdream.net>
Mon, 2 Aug 2010 17:39:55 +0000 (10:39 -0700)
src/mds/Server.cc
src/mds/Server.h
src/mds/mdstypes.h

index ce4c721e9c54951880c18a4e5840a27782458153..824e68fc4a044a506a5185647eee860e4272fdce 100644 (file)
@@ -2580,6 +2580,8 @@ void Server::handle_client_file_setlock(MDRequest *mdr)
   set_lock.type = req->head.args.filelock_change.type;
   bool will_wait = req->head.args.filelock_change.wait;
 
+  dout(0) << "handle_client_file_setlock: " << set_lock << dendl;
+
   ceph_lock_state_t *lock_state = NULL;
 
   // get the appropriate lock state
@@ -2598,7 +2600,9 @@ void Server::handle_client_file_setlock(MDRequest *mdr)
     return;
   }
 
+  dout(0) << "state prior to lock change: " << *lock_state << dendl;
   if (CEPH_LOCK_UNLOCK == set_lock.type) {
+    dout(0) << "got unlock" << dendl;
     list<ceph_filelock> activated_locks;
     lock_state->remove_lock(set_lock, activated_locks);
     reply_request(mdr, 0);
@@ -2609,17 +2613,23 @@ void Server::handle_client_file_setlock(MDRequest *mdr)
     cur->take_waiting(CInode::WAIT_FLOCK, waiters);
     mds->queue_waiters(waiters);
   } else {
+    dout(0) << "got lock" << dendl;
     if (lock_state->add_lock(set_lock, will_wait)) {
       // lock set successfully
+      dout(0) << "it succeeded" << dendl;
       reply_request(mdr, 0);
     } else {
+      dout(0) << "it failed on this attempt" << dendl;
       // couldn't set lock right now
       if (!will_wait)
        reply_request(mdr, -1);
-      else
+      else {
+       dout(0) << "but it's a wait" << dendl;
        cur->add_waiter(CInode::WAIT_FLOCK, new C_MDS_RetryRequest(mdcache, mdr));
+      }
     }
   }
+  dout(0) << "state after lock change: " << *lock_state << dendl;
 }
 
 void Server::handle_client_file_readlock(MDRequest *mdr)
index 0d4a0c1db5b42fee4f5e5b1b0699236ecc0cb6a8..07596cfd476f22e930dcd8a2f7f9a2646f37b5aa 100644 (file)
@@ -211,7 +211,23 @@ public:
 
 };
 
-
-
+inline ostream& operator<<(ostream& out, ceph_lock_state_t& l) {
+  out << "ceph_lock_state_t. held_locks.size()=" << l.held_locks.size()
+      << ", waiting_locks.size()=" << l.waiting_locks.size()
+      << ", client_held_lock_counts -- " << l.client_held_lock_counts
+      << "\n client_waiting_lock_counts -- " << l.client_waiting_lock_counts
+      << "\n held_locks -- ";
+    for (multimap<uint64_t, ceph_filelock>::iterator iter = l.held_locks.begin();
+        iter != l.held_locks.end();
+        ++iter)
+      out << iter->second;
+    out << "\n waiting_locks -- ";
+    for (multimap<uint64_t, ceph_filelock>::iterator iter = l.waiting_locks.begin();
+        iter != l.waiting_locks.end();
+        ++iter)
+      out << iter->second << "\n";
+    out << std::endl;
+  return out;
+}
 
 #endif
index cbd9387957c1faf2f06288ed870d8c1b8286ef11..dc06d2e3b67da40d1cf586bf406e436b0dfaf81a 100644 (file)
@@ -334,6 +334,13 @@ inline bool operator==(const byte_range_t& l, const byte_range_t& r) {
   return l.first == r.first && l.last == r.last;
 }
 
+inline ostream& operator<<(ostream& out, ceph_filelock& l) {
+  out << "start: " << l.start << ", length: " << l.length
+      << ", client: " << l.client << ", pid: " << l.pid
+      << ", type: " << (int)l.type
+      << std::endl;
+  return out;
+}
 
 struct ceph_lock_state_t {
   multimap<__u64, ceph_filelock> held_locks;    // current locks
@@ -586,7 +593,7 @@ private:
          if (old_lock_end > new_lock_end) { //add extra lock after new_lock
            ceph_filelock appended_lock = *old_lock;
            appended_lock.start = new_lock_end + 1;
-           appended_lock.length = old_lock_end - appended_lock.start;
+           appended_lock.length = old_lock_end - appended_lock.start + 1;
            held_locks.insert(pair<__u64, ceph_filelock>
                              (appended_lock.start, appended_lock));
            ++client_held_lock_counts[old_lock->client];
@@ -634,6 +641,10 @@ private:
     if ((lower_bound->first != start)
        && (start != 0)
        && (lower_bound != lock_map.begin())) --lower_bound;
+    if (lock_map.end() == lower_bound)
+      dout(0) << "get_lower_bound returning end()" << dendl;
+    else dout(0) << "get_lower_bound returning iterator pointing to "
+                << lower_bound->second << dendl;
     return lower_bound;
   }
 
@@ -643,6 +654,10 @@ private:
     multimap<__u64, ceph_filelock>::iterator last =
       lock_map.upper_bound(end);
     if (last != lock_map.begin()) --last;
+    if (lock_map.end() == last)
+      dout(0) << "get_last_before returning end()" << dendl;
+    else dout(0) << "get_last_before returning iterator pointing to "
+                << last->second << dendl;
     return last;
   }
 
@@ -654,10 +669,13 @@ private:
    */
   bool share_space(multimap<__u64, ceph_filelock>::iterator& iter,
                   __u64 start, __u64 end) {
-    return ((iter->first > start && iter->first < end) ||
-           ((iter->first < start) &&
-            (((iter->first + iter->second.length - 1) > start) ||
-             (0 == iter->second.length))));
+    bool ret = ((iter->first >= start && iter->first <= end) ||
+               ((iter->first < start) &&
+                (((iter->first + iter->second.length - 1) >= start) ||
+                 (0 == iter->second.length))));
+    dout(0) << "share_space got start: " << start << ", end: " << end
+           << ", lock: " << iter->second << ", returning " << ret << dendl;
+    return ret;
   }
   bool share_space(multimap<__u64, ceph_filelock>::iterator& iter,
                   ceph_filelock& lock) {
@@ -672,6 +690,7 @@ private:
    */
   bool get_overlapping_locks(ceph_filelock& lock,
                             list<ceph_filelock*>& overlaps) {
+    dout(0) << "get_overlapping_locks" << dendl;
     multimap<__u64, ceph_filelock>::iterator iter =
       get_last_before(lock.start + lock.length - 1, held_locks);
     bool cont = iter != held_locks.end();
@@ -696,9 +715,10 @@ private:
    */
   bool get_waiting_overlaps(ceph_filelock& lock,
                            list<ceph_filelock*>& overlaps) {
+    dout(0) << "get_waiting_overlaps" << dendl;
     multimap<__u64, ceph_filelock>::iterator iter =
       get_last_before(lock.start + lock.length - 1, waiting_locks);
-    bool cont = iter != held_locks.end();
+    bool cont = iter != waiting_locks.end();
     while(cont) {
       if (share_space(iter, lock)) overlaps.push_front(&iter->second);
      if (waiting_locks.begin() == iter) cont = false;