]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
os/bluestore/BlueStore: Fix wrong usage of std::unique_lock for OpSequencer 9279/head
authorJianpeng Ma <jianpeng.ma@intel.com>
Tue, 24 May 2016 09:22:32 +0000 (17:22 +0800)
committerJianpeng Ma <jianpeng.ma@intel.com>
Tue, 24 May 2016 09:22:32 +0000 (17:22 +0800)
When set bluestore_sync_wal_apply = false, the osd must be crashed.
I add some debug message in WALWQ::_dequeue():
// preserve wal ordering for this sequencer by taking the lock
// while still holding the queue lock
-      i->osr->wal_apply_lock.lock();
+      try {
+       i->osr->wal_apply_lock.lock();
+      } catch (const system_error& e) {
+       if (e.code() == std::errc::resource_deadlock_would_occur) {
+         assert(0 == "would deadlock");
+       } else
+         assert(0 == "other system_error");
+      }
       return i;
     }

2016-05-24 00:02:29.107165 7f0f32ea6700 -1 os/bluestore/BlueStore.h: In
function 'virtual BlueStore::TransContext* BlueStore::WALWQ::_dequeue()'
thread 7f0f32ea6700 time 2016-05-24 00:02:29.103360
os/bluestore/BlueStore.h: 489: FAILED assert(0 == "would deadlock")

 ceph version 10.2.0-1232-g21c7599 (21c759936ddcbfec50cf177a577206a2c443c212)
 1: (ceph::__ceph_assert_fail(char const*, char const*, int, char const*)+0x80) [0x560196215ac0]
 2: (ThreadPool::WorkQueue<BlueStore::TransContext>::_void_dequeue()+0x234)[0x560195e618b4]
 3: (ThreadPool::worker(ThreadPool::WorkThread*)+0x654) [0x560196207324]
 4: (ThreadPool::WorkThread::entry()+0x10) [0x560196208d70]
 5: (()+0x760a) [0x7f0f411d260a]
 6: (clone()+0x6d) [0x7f0f3f17a59d]
 NOTE: a copy of the executable, or `objdump -rdS <executable>` is
needed to interpret this.

This is because of the wrong usage of std::unique_lock.

Signed-off-by: Jianpeng Ma <jianpeng.ma@intel.com>
src/os/bluestore/BlueStore.h

index 6628bcfbc190ef2eab3781dabfcaa7d05152d57c..6600b1cea5008bef41c31ea03e4a979963d031e7 100644 (file)
@@ -369,14 +369,12 @@ public:
     Sequencer *parent;
 
     std::mutex wal_apply_mutex;
-    std::unique_lock<std::mutex> wal_apply_lock;
 
     uint64_t last_seq = 0;
 
     OpSequencer()
        //set the qlock to PTHREAD_MUTEX_RECURSIVE mode
-      : parent(NULL),
-       wal_apply_lock(wal_apply_mutex, std::defer_lock) {
+      : parent(NULL) {
     }
     ~OpSequencer() {
       assert(q.empty());
@@ -482,12 +480,12 @@ public:
 
       // preserve wal ordering for this sequencer by taking the lock
       // while still holding the queue lock
-      i->osr->wal_apply_lock.lock();
+      i->osr->wal_apply_mutex.lock();
       return i;
     }
     void _process(TransContext *i, ThreadPool::TPHandle &) override {
       store->_wal_apply(i);
-      i->osr->wal_apply_lock.unlock();
+      i->osr->wal_apply_mutex.unlock();
     }
     void _clear() {
       assert(wal_queue.empty());