From: Greg Farnum Date: Mon, 10 May 2010 22:42:49 +0000 (-0700) Subject: mds: Move lock print function; add debugging; fix fencepost errors. X-Git-Tag: v0.22~346^2~18 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=8315b5dbce7d7248d0b30c01a74793b8bd1b5ab8;p=ceph.git mds: Move lock print function; add debugging; fix fencepost errors. --- diff --git a/src/mds/Server.cc b/src/mds/Server.cc index ce4c721e9c5..824e68fc4a0 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -2580,6 +2580,8 @@ void Server::handle_client_file_setlock(MDRequest *mdr) set_lock.type = req->head.args.filelock_change.type; bool will_wait = req->head.args.filelock_change.wait; + dout(0) << "handle_client_file_setlock: " << set_lock << dendl; + ceph_lock_state_t *lock_state = NULL; // get the appropriate lock state @@ -2598,7 +2600,9 @@ void Server::handle_client_file_setlock(MDRequest *mdr) return; } + dout(0) << "state prior to lock change: " << *lock_state << dendl;; if (CEPH_LOCK_UNLOCK == set_lock.type) { + dout(0) << "got unlock" << dendl; list<ceph_filelock*> activated_locks; lock_state->remove_lock(set_lock, activated_locks); reply_request(mdr, 0); @@ -2609,17 +2613,23 @@ void Server::handle_client_file_setlock(MDRequest *mdr) cur->take_waiting(CInode::WAIT_FLOCK, waiters); mds->queue_waiters(waiters); } else { + dout(0) << "got lock" << dendl; if (lock_state->add_lock(set_lock, will_wait)) { // lock set successfully + dout(0) << "it succeeded" << dendl; reply_request(mdr, 0); } else { + dout(0) << "it failed on this attempt" << dendl; // couldn't set lock right now if (!will_wait) reply_request(mdr, -1); - else + else { + dout(0) << "but it's a wait" << dendl; cur->add_waiter(CInode::WAIT_FLOCK, new C_MDS_RetryRequest(mdcache, mdr)); + } } } + dout(0) << "state after lock change: " << *lock_state << dendl; } void Server::handle_client_file_readlock(MDRequest *mdr) diff --git a/src/mds/Server.h b/src/mds/Server.h index 0d4a0c1db5b..07596cfd476 100644 --- 
a/src/mds/Server.h +++ b/src/mds/Server.h @@ -211,7 +211,23 @@ public: }; - - +inline ostream& operator<<(ostream& out, ceph_lock_state_t& l) { + out << "ceph_lock_state_t. held_locks.size()=" << l.held_locks.size() + << ", waiting_locks.size()=" << l.waiting_locks.size() + << ", client_held_lock_counts -- " << l.client_held_lock_counts + << "\n client_waiting_lock_counts -- " << l.client_waiting_lock_counts + << "\n held_locks -- "; + for (multimap<__u64, ceph_filelock>::iterator iter = l.held_locks.begin(); + iter != l.held_locks.end(); + ++iter) + out << iter->second; + out << "\n waiting_locks -- "; + for (multimap<__u64, ceph_filelock>::iterator iter =l.waiting_locks.begin(); + iter != l.waiting_locks.end(); + ++iter) + out << iter->second << "\n"; + out << std::endl; + return out; +} #endif diff --git a/src/mds/mdstypes.h b/src/mds/mdstypes.h index cbd9387957c..dc06d2e3b67 100644 --- a/src/mds/mdstypes.h +++ b/src/mds/mdstypes.h @@ -334,6 +334,13 @@ inline bool operator==(const byte_range_t& l, const byte_range_t& r) { return l.first == r.first && l.last == r.last; } +inline ostream& operator<<(ostream& out, ceph_filelock& l) { + out << "start: " << l.start << ", length: " << l.length + << ", client: " << l.client << ", pid: " << l.pid + << ", type: " << (int)l.type + << std::endl; + return out; +} struct ceph_lock_state_t { multimap<__u64, ceph_filelock> held_locks; // current locks @@ -586,7 +593,7 @@ private: if (old_lock_end > new_lock_end) { //add extra lock after new_lock ceph_filelock appended_lock = *old_lock; appended_lock.start = new_lock_end + 1; - appended_lock.length = old_lock_end - appended_lock.start; + appended_lock.length = old_lock_end - appended_lock.start + 1; held_locks.insert(pair<__u64, ceph_filelock> (appended_lock.start, appended_lock)); ++client_held_lock_counts[old_lock->client]; @@ -634,6 +641,10 @@ private: if ((lower_bound->first != start) && (start != 0) && (lower_bound != lock_map.begin())) --lower_bound; + if (lock_map.end() == lower_bound) + dout(0) << 
"get_lower_bound returning end()" << dendl; + else dout(0) << "get_lower_bound returning iterator pointing to " + << lower_bound->second << dendl; return lower_bound; } @@ -643,6 +654,10 @@ private: multimap<__u64, ceph_filelock>::iterator last = lock_map.upper_bound(end); if (last != lock_map.begin()) --last; + if (lock_map.end() == last) + dout(0) << "get_last_before returning end()" << dendl; + else dout(0) << "get_last_before returning iterator pointing to " + << last->second << dendl; return last; } @@ -654,10 +669,13 @@ private: */ bool share_space(multimap<__u64, ceph_filelock>::iterator& iter, __u64 start, __u64 end) { - return ((iter->first > start && iter->first < end) || - ((iter->first < start) && - (((iter->first + iter->second.length - 1) > start) || - (0 == iter->second.length)))); + bool ret = ((iter->first >= start && iter->first <= end) || + ((iter->first < start) && + (((iter->first + iter->second.length - 1) >= start) || + (0 == iter->second.length)))); + dout(0) << "share_space got start: " << start << ", end: " << end + << ", lock: " << iter->second << ", returning " << ret << dendl; + return ret; } bool share_space(multimap<__u64, ceph_filelock>::iterator& iter, ceph_filelock& lock) { @@ -672,6 +690,7 @@ private: */ bool get_overlapping_locks(ceph_filelock& lock, list<ceph_filelock*>& overlaps) { + dout(0) << "get_overlapping_locks" << dendl; multimap<__u64, ceph_filelock>::iterator iter = get_last_before(lock.start + lock.length - 1, held_locks); bool cont = iter != held_locks.end(); @@ -696,9 +715,10 @@ */ bool get_waiting_overlaps(ceph_filelock& lock, list<ceph_filelock*>& overlaps) { + dout(0) << "get_waiting_overlaps" << dendl; multimap<__u64, ceph_filelock>::iterator iter = get_last_before(lock.start + lock.length - 1, waiting_locks); - bool cont = iter != waiting_locks.end(); + bool cont = iter != waiting_locks.end(); while(cont) { if (share_space(iter, lock)) overlaps.push_front(&iter->second); if (held_locks.begin() == iter) cont = false;