git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mds: debug output and some comments.
author: Greg Farnum <gregf@hq.newdream.net>
Mon, 17 May 2010 22:18:24 +0000 (15:18 -0700)
committer: Greg Farnum <gregf@hq.newdream.net>
Mon, 2 Aug 2010 17:39:56 +0000 (10:39 -0700)
src/mds/Server.cc
src/mds/mdstypes.h
src/messages/MClientRequest.h

index fb0d4a71abda56fae2a3b3942f8da4efa462b441..112f1e98996e5f400584d14bd47b6da62a192338 100644 (file)
@@ -2568,8 +2568,10 @@ void Server::handle_client_file_setlock(MDRequest *mdr)
   /* acquire_locks will return true if it gets the locks. If it fails,
      it will redeliver this request at a later date, so drop the request.
    */
-  if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
+  if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks)) {
+    dout(0) << "handle_client_file_setlock could not get locks!" << dendl;
     return;
+  }
 
   // copy the lock change into a ceph_filelock so we can store/apply it
   ceph_filelock set_lock;
@@ -2577,6 +2579,7 @@ void Server::handle_client_file_setlock(MDRequest *mdr)
   set_lock.length = req->head.args.filelock_change.length;
   set_lock.client = req->get_orig_source().num();
   set_lock.pid = req->head.args.filelock_change.pid;
+  set_lock.pid_namespace = req->head.args.filelock_change.pid_namespace;
   set_lock.type = req->head.args.filelock_change.type;
   bool will_wait = req->head.args.filelock_change.wait;
 
@@ -2648,8 +2651,10 @@ void Server::handle_client_file_readlock(MDRequest *mdr)
      it will redeliver this request at a later date, so drop the request.
   */
   rdlocks.insert(&cur->flocklock);
-  if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
+  if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks)) {
+    dout(0) << "handle_client_file_readlock could not get locks!" << dendl;
     return;
+  }
   
   // copy the lock change into a ceph_filelock so we can store/apply it
   ceph_filelock checking_lock;
index 5d3a96c9a535fa9e9b87dad0ed363cc1e4084c43..148dd37aae3fd8b4bff5144b5d0a47c4cc17d730 100644 (file)
@@ -357,16 +357,20 @@ struct ceph_lock_state_t {
    * Returns true if set, false if not set.
    */
   bool add_lock(ceph_filelock& new_lock, bool wait_on_fail) {
+    dout(0) << "add_lock " << new_lock << dendl;
     bool ret = false;
     list<multimap<uint64_t, ceph_filelock>::iterator>
       overlapping_locks, self_overlapping_locks, neighbor_locks;
     // first, get any overlapping locks and split them into owned-by-us and not
     if(get_overlapping_locks(new_lock, overlapping_locks, &neighbor_locks)) {
+      dout(0) << "got overlapping lock, splitting by owner" << dendl;
       split_by_owner(new_lock, overlapping_locks, self_overlapping_locks);
     }
     if (!overlapping_locks.empty()) { //overlapping locks owned by others :(
       if (CEPH_LOCK_EXCL == new_lock.type) {
        //can't set, we want an exclusive
+       dout(0) << "overlapping lock, and this lock is exclusive, can't set"
+               << dendl;
        if (wait_on_fail) {
          waiting_locks.
            insert(pair<uint64_t, ceph_filelock>(new_lock.start, new_lock));
@@ -374,6 +378,7 @@ struct ceph_lock_state_t {
        ret = false;
       } else { //shared lock, check for any exclusive locks blocking us
        if (contains_exclusive_lock(overlapping_locks)) { //blocked :(
+         dout(0) << " blocked by exclusive lock in overlapping_locks" << dendl;
          if (wait_on_fail) {
            waiting_locks.
              insert(pair<uint64_t, ceph_filelock>(new_lock.start, new_lock));
@@ -381,6 +386,7 @@ struct ceph_lock_state_t {
          ret = false;
        } else {
          //yay, we can insert a shared lock
+         dout(0) << "inserting shared lock" << dendl;
          adjust_locks(self_overlapping_locks, new_lock, neighbor_locks);
          held_locks.
            insert(pair<uint64_t, ceph_filelock>(new_lock.start, new_lock));
@@ -389,6 +395,7 @@ struct ceph_lock_state_t {
       }
     } else { //no overlapping locks except our own
       adjust_locks(self_overlapping_locks, new_lock, neighbor_locks);
+      dout(0) << "no conflicts, inserting " << new_lock << dendl;
       held_locks.insert(pair<uint64_t, ceph_filelock>
                        (new_lock.start, new_lock));
       ret = true;
@@ -545,11 +552,14 @@ private:
    * new_lock: The new lock the process has requested.
    * old_locks: list of all locks currently held by same
    *    client/process that overlap new_lock.
+   * neighbor_locks: locks owned by same process that neighbor new_lock on
+   *    left or right side.
    */
   void adjust_locks(list<multimap<uint64_t, ceph_filelock>::iterator> old_locks,
                    ceph_filelock& new_lock,
                    list<multimap<uint64_t, ceph_filelock>::iterator>
                    neighbor_locks) {
+    dout(0) << "adjust_locks" << dendl;
     bool new_lock_to_end = (0 == new_lock.length);
     bool old_lock_to_end;
     uint64_t new_lock_start = new_lock.start;
@@ -562,6 +572,7 @@ private:
         iter != old_locks.end();
         ++iter) {
       old_lock = &(*iter)->second;
+      dout(0) << "adjusting lock: " << *old_lock << dendl;
       old_lock_to_end = (0 == old_lock->length);
       old_lock_start = old_lock->start;
       old_lock_end = old_lock->start + old_lock->length - 1;
@@ -570,14 +581,17 @@ private:
       old_lock_client = old_lock->client;
       if (new_lock_to_end || old_lock_to_end) {
        //special code path to deal with a length set at 0
+       dout(0) << "one lock extends forever" << dendl;
        if (old_lock->type == new_lock.type) {
          //just unify them in new lock, remove old lock
+         dout(0) << "same lock type, unifying" << dendl;
          new_lock.start = (new_lock_start < old_lock_start) ? new_lock_start :
            old_lock_start;
          new_lock.length = 0;
          held_locks.erase(*iter);
          --client_held_lock_counts[old_lock_client];
        } else { //not same type, have to keep any remains of old lock around
+         dout(0) << "shrinking old lock" << dendl;
          if (new_lock_to_end) {
            if (old_lock_start < new_lock_start) {
              old_lock->length = new_lock_start - old_lock_start;
@@ -601,14 +615,17 @@ private:
        }
       } else {
        if (old_lock->type == new_lock.type) { //just merge them!
+         dout(0) << "merging locks, they're the same type" << dendl;
          new_lock.start = (old_lock_start < new_lock_start ) ? old_lock_start :
            new_lock_start;
          int new_end = (new_lock_end > old_lock_end) ? new_lock_end :
            old_lock_end;
          new_lock.length = new_end - new_lock.start + 1;
+         dout(0) << "erasing lock " << (*iter)->second << dendl;
          held_locks.erase(*iter);
          --client_held_lock_counts[old_lock_client];
        } else { //we'll have to update sizes and maybe make new locks
+         dout(0) << "locks aren't same type, changing sizes" << dendl;
          if (old_lock_end > new_lock_end) { //add extra lock after new_lock
            ceph_filelock appended_lock = *old_lock;
            appended_lock.start = new_lock_end + 1;
@@ -638,6 +655,7 @@ private:
         ++iter) {
       old_lock = &(*iter)->second;
       old_lock_client = old_lock->client;
+      dout(0) << "lock to coalesce: " << *old_lock << dendl;
       /* because if it's a neibhoring lock there can't be any self-overlapping
         locks that covered it */
       if (old_lock->type == new_lock.type) { //merge them
index d4ec62943c5ecadd1c0018f94b8ce2b6b553301a..5614741841947de5be13d06a4b634e78c9e69164 100644 (file)
@@ -183,6 +183,7 @@ public:
       out << "rule " << (int)head.args.filelock_change.rule
          << ", type " << (int)head.args.filelock_change.type
          << ", pid " << head.args.filelock_change.pid
+         << ", pid_ns " << head.args.filelock_change.pid_namespace
          << ", start " << head.args.filelock_change.start
          << ", length " << head.args.filelock_change.length
          << ", wait " << (int)head.args.filelock_change.wait;