test/osdc: s/Mutex/ceph::mutex/
author     Kefu Chai <kchai@redhat.com>
           Sun, 7 Jul 2019 04:43:18 +0000 (12:43 +0800)
committer  Kefu Chai <kchai@redhat.com>
           Sat, 3 Aug 2019 03:27:19 +0000 (11:27 +0800)
Signed-off-by: Kefu Chai <kchai@redhat.com>
src/test/osdc/FakeWriteback.cc
src/test/osdc/FakeWriteback.h
src/test/osdc/MemWriteback.cc
src/test/osdc/MemWriteback.h
src/test/osdc/object_cacher_stress.cc

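The change applied across all five files follows one pattern: the legacy Mutex (constructed with a name string and locked via explicit Lock()/Unlock() calls) is replaced by ceph::mutex from common/ceph_mutex.h, created with ceph::make_mutex() and held through RAII guards. A minimal before/after sketch of that pattern, using illustrative identifiers that are not taken from the hunks below:

  #include "common/ceph_mutex.h"  // ceph::mutex, ceph::make_mutex

  // old style, removed by this commit:
  //   Mutex example_lock("example::lock");
  //   example_lock.Lock();
  //   /* critical section */
  //   example_lock.Unlock();

  // new style: named mutex plus scope-bound locking
  ceph::mutex example_lock = ceph::make_mutex("example::lock");

  void update_shared_state() {
    std::lock_guard locker{example_lock};  // released automatically at scope exit
    /* critical section */
  }

Where the lock must be held across statements that do not share a convenient scope (as in stress_test() and correctness_test() below), the diff keeps explicit lock()/unlock() calls on the ceph::mutex itself.
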
index 56d208b7d6a07beabc307a0fb6cade128f884124..2f58965cc4bbdac8aca7baf993bd921b0d3fb610 100644 (file)
@@ -8,7 +8,7 @@
 #include "common/debug.h"
 #include "common/Cond.h"
 #include "common/Finisher.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "include/ceph_assert.h"
 #include "common/ceph_time.h"
 
@@ -22,12 +22,12 @@ class C_Delay : public Context {
   CephContext *m_cct;
   Context *m_con;
   ceph::timespan m_delay;
-  Mutex *m_lock;
+  ceph::mutex *m_lock;
   bufferlist *m_bl;
   uint64_t m_off;
 
 public:
-  C_Delay(CephContext *cct, Context *c, Mutex *lock, uint64_t off,
+  C_Delay(CephContext *cct, Context *c, ceph::mutex *lock, uint64_t off,
          bufferlist *pbl, uint64_t delay_ns=0)
     : m_cct(cct), m_con(c), m_delay(delay_ns * std::chrono::nanoseconds(1)),
       m_lock(lock), m_bl(pbl), m_off(off) {}
@@ -39,13 +39,12 @@ public:
       m_bl->append(bp);
       ldout(m_cct, 20) << "finished read " << m_off << "~" << r << dendl;
     }
-    m_lock->Lock();
+    std::lock_guard locker{*m_lock};
     m_con->complete(r);
-    m_lock->Unlock();
   }
 };
 
-FakeWriteback::FakeWriteback(CephContext *cct, Mutex *lock, uint64_t delay_ns)
+FakeWriteback::FakeWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns)
   : m_cct(cct), m_lock(lock), m_delay_ns(delay_ns)
 {
   m_finisher = new Finisher(cct);
index 0d3705c87fd58e1b56ba557943f6e343cb0c00b3..11f78e813cc6285c7e812cb524a3a4b1bfa68a8d 100644 (file)
 #include <atomic>
 
 class Finisher;
-class Mutex;
 
 class FakeWriteback : public WritebackHandler {
 public:
-  FakeWriteback(CephContext *cct, Mutex *lock, uint64_t delay_ns);
+  FakeWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns);
   ~FakeWriteback() override;
 
   void read(const object_t& oid, uint64_t object_no,
@@ -39,7 +38,7 @@ public:
                                 snapid_t) override;
 private:
   CephContext *m_cct;
-  Mutex *m_lock;
+  ceph::mutex *m_lock;
   uint64_t m_delay_ns;
   std::atomic<unsigned> m_tid = { 0 };
   Finisher *m_finisher;
index 32d3a38f7595d6350268595c6bb5cbeda464fd5d..4cb11291a988d94610b931712d813f84283804c5 100644 (file)
@@ -8,7 +8,7 @@
 #include "common/debug.h"
 #include "common/Cond.h"
 #include "common/Finisher.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "include/ceph_assert.h"
 #include "common/ceph_time.h"
 
@@ -24,14 +24,14 @@ class C_DelayRead : public Context {
   CephContext *m_cct;
   Context *m_con;
   ceph::timespan m_delay;
-  Mutex *m_lock;
+  ceph::mutex *m_lock;
   object_t m_oid;
   uint64_t m_off;
   uint64_t m_len;
   bufferlist *m_bl;
 
 public:
-  C_DelayRead(MemWriteback *mwb, CephContext *cct, Context *c, Mutex *lock,
+  C_DelayRead(MemWriteback *mwb, CephContext *cct, Context *c, ceph::mutex *lock,
              const object_t& oid, uint64_t off, uint64_t len, bufferlist *pbl,
              uint64_t delay_ns=0)
     : wb(mwb), m_cct(cct), m_con(c),
@@ -39,11 +39,10 @@ public:
       m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(pbl) {}
   void finish(int r) override {
     std::this_thread::sleep_for(m_delay);
-    m_lock->Lock();
+    std::lock_guard locker{*m_lock};
     r = wb->read_object_data(m_oid, m_off, m_len, m_bl);
     if (m_con)
       m_con->complete(r);
-    m_lock->Unlock();
   }
 };
 
@@ -52,14 +51,14 @@ class C_DelayWrite : public Context {
   CephContext *m_cct;
   Context *m_con;
   ceph::timespan m_delay;
-  Mutex *m_lock;
+  ceph::mutex *m_lock;
   object_t m_oid;
   uint64_t m_off;
   uint64_t m_len;
   const bufferlist& m_bl;
 
 public:
-  C_DelayWrite(MemWriteback *mwb, CephContext *cct, Context *c, Mutex *lock,
+  C_DelayWrite(MemWriteback *mwb, CephContext *cct, Context *c, ceph::mutex *lock,
               const object_t& oid, uint64_t off, uint64_t len,
               const bufferlist& bl, uint64_t delay_ns=0)
     : wb(mwb), m_cct(cct), m_con(c),
@@ -67,15 +66,14 @@ public:
       m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(bl) {}
   void finish(int r) override {
     std::this_thread::sleep_for(m_delay);
-    m_lock->Lock();
+    std::lock_guard locker{*m_lock};
     wb->write_object_data(m_oid, m_off, m_len, m_bl);
     if (m_con)
       m_con->complete(r);
-    m_lock->Unlock();
   }
 };
 
-MemWriteback::MemWriteback(CephContext *cct, Mutex *lock, uint64_t delay_ns)
+MemWriteback::MemWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns)
   : m_cct(cct), m_lock(lock), m_delay_ns(delay_ns)
 {
   m_finisher = new Finisher(cct);
index 84ea78f5a2c4e34876f2dbf907d0957f32cac116..12c1ac3c0b53704b53076ffc0ac2dfc3ee9862da 100644 (file)
 #include <atomic>
 
 class Finisher;
-class Mutex;
 
 class MemWriteback : public WritebackHandler {
 public:
-  MemWriteback(CephContext *cct, Mutex *lock, uint64_t delay_ns);
+  MemWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns);
   ~MemWriteback() override;
 
   void read(const object_t& oid, uint64_t object_no,
@@ -44,7 +43,7 @@ public:
 private:
   std::map<object_t, bufferlist> object_data;
   CephContext *m_cct;
-  Mutex *m_lock;
+  ceph::mutex *m_lock;
   uint64_t m_delay_ns;
   std::atomic<unsigned> m_tid = { 0 };
   Finisher *m_finisher;
index cd65e10ad37328855e85d9a0126ee009730cac22..065ea8c453f64c95cd94ab4020375d0fcb2934db 100644 (file)
@@ -9,9 +9,9 @@
 #include <boost/scoped_ptr.hpp>
 
 #include "common/ceph_argparse.h"
+#include "common/ceph_mutex.h"
 #include "common/common_init.h"
 #include "common/config.h"
-#include "common/Mutex.h"
 #include "common/snap_types.h"
 #include "global/global_init.h"
 #include "include/buffer.h"
@@ -56,7 +56,7 @@ int stress_test(uint64_t num_ops, uint64_t num_objs,
                uint64_t max_obj_size, uint64_t delay_ns,
                uint64_t max_op_len, float percent_reads)
 {
-  Mutex lock("object_cacher_stress::object_cacher");
+  ceph::mutex lock = ceph::make_mutex("object_cacher_stress::object_cacher");
   FakeWriteback writeback(g_ceph_context, &lock, delay_ns);
 
   ObjectCacher obc(g_ceph_context, "test", writeback, lock, NULL, NULL,
@@ -103,9 +103,9 @@ int stress_test(uint64_t num_ops, uint64_t num_objs,
       rd->extents.push_back(op->extent);
       outstanding_reads++;
       Context *completion = new C_Count(op.get(), &outstanding_reads);
-      lock.Lock();
+      lock.lock();
       int r = obc.readx(rd, &object_set, completion);
-      lock.Unlock();
+      lock.unlock();
       ceph_assert(r >= 0);
       if ((uint64_t)r == length)
        completion->complete(r);
@@ -116,9 +116,9 @@ int stress_test(uint64_t num_ops, uint64_t num_objs,
                                                     ceph::real_time::min(), 0,
                                                     ++journal_tid);
       wr->extents.push_back(op->extent);
-      lock.Lock();
+      lock.lock();
       obc.writex(wr, &object_set, NULL);
-      lock.Unlock();
+      lock.unlock();
     }
   }
 
@@ -140,28 +140,26 @@ int stress_test(uint64_t num_ops, uint64_t num_objs,
     }
   }
 
-  lock.Lock();
+  lock.lock();
   obc.release_set(&object_set);
-  lock.Unlock();
+  lock.unlock();
 
   int r = 0;
-  Mutex mylock("librbd::ImageCtx::flush_cache");
-  Cond cond;
+  ceph::mutex mylock = ceph::make_mutex("librbd::ImageCtx::flush_cache");
+  ceph::condition_variable cond;
   bool done;
-  Context *onfinish = new C_SafeCond(&mylock, &cond, &done, &r);
-  lock.Lock();
+  Context *onfinish = new C_SafeCond(mylock, cond, &done, &r);
+  lock.lock();
   bool already_flushed = obc.flush_set(&object_set, onfinish);
   std::cout << "already flushed = " << already_flushed << std::endl;
-  lock.Unlock();
-  mylock.Lock();
-  while (!done) {
-    cond.Wait(mylock);
+  lock.unlock();
+  {
+    std::unique_lock locker{mylock};
+    cond.wait(locker, [&done] { return done; });
   }
-  mylock.Unlock();
-
-  lock.Lock();
+  lock.lock();
   bool unclean = obc.release_set(&object_set);
-  lock.Unlock();
+  lock.unlock();
 
   if (unclean) {
     std::cout << "unclean buffers left over!" << std::endl;
@@ -178,7 +176,7 @@ int stress_test(uint64_t num_ops, uint64_t num_objs,
 int correctness_test(uint64_t delay_ns)
 {
   std::cerr << "starting correctness test" << std::endl;
-  Mutex lock("object_cacher_stress::object_cacher");
+  ceph::mutex lock = ceph::make_mutex("object_cacher_stress::object_cacher");
   MemWriteback writeback(g_ceph_context, &lock, delay_ns);
 
   ObjectCacher obc(g_ceph_context, "test", writeback, lock, NULL, NULL,
@@ -209,9 +207,9 @@ int correctness_test(uint64_t delay_ns)
     extent.oloc.pool = 0;
     extent.buffer_extents.push_back(make_pair(0, 1<<20));
     wr->extents.push_back(extent);
-    lock.Lock();
+    lock.lock();
     obc.writex(wr, &object_set, &create_finishers[i]);
-    lock.Unlock();
+    lock.unlock();
   }
 
   // write some 1-valued bits at 256-KB intervals for checking consistency
@@ -228,25 +226,25 @@ int correctness_test(uint64_t delay_ns)
     extent.oloc.pool = 0;
     extent.buffer_extents.push_back(make_pair(0, 1<<16));
     wr->extents.push_back(extent);
-    lock.Lock();
+    lock.lock();
     obc.writex(wr, &object_set, &create_finishers[i]);
-    lock.Unlock();
+    lock.unlock();
   }
 
   for (auto i = create_finishers.begin(); i != create_finishers.end(); ++i) {
     i->second.wait();
   }
   std::cout << "Finished setting up object" << std::endl;
-  lock.Lock();
+  lock.lock();
   C_SaferCond flushcond;
   bool done = obc.flush_all(&flushcond);
   if (!done) {
     std::cout << "Waiting for flush" << std::endl;
-    lock.Unlock();
+    lock.unlock();
     flushcond.wait();
-    lock.Lock();
+    lock.lock();
   }
-  lock.Unlock();
+  lock.unlock();
 
   /* now read the back half of the object in, check consistency,
    */
@@ -258,9 +256,9 @@ int correctness_test(uint64_t delay_ns)
   back_half_extent.oloc.pool = 0;
   back_half_extent.buffer_extents.push_back(make_pair(0, 1<<21));
   back_half_rd->extents.push_back(back_half_extent);
-  lock.Lock();
+  lock.lock();
   int r = obc.readx(back_half_rd, &object_set, &backreadcond);
-  lock.Unlock();
+  lock.unlock();
   ceph_assert(r >= 0);
   if (r == 0) {
     std::cout << "Waiting to read data into cache" << std::endl;
@@ -282,7 +280,7 @@ int correctness_test(uint64_t delay_ns)
   whole_extent.oloc.pool = 0;
   whole_extent.buffer_extents.push_back(make_pair(0, 1<<22));
   whole_rd->extents.push_back(whole_extent);
-  lock.Lock();
+  lock.lock();
   r = obc.readx(whole_rd, &object_set, &frontreadcond);
   // we cleared out the cache by reading back half, it shouldn't pass immediately!
   ceph_assert(r == 0);
@@ -297,7 +295,7 @@ int correctness_test(uint64_t delay_ns)
   verify_wr->extents.push_back(verify_extent);
   C_SaferCond verify_finisher;
   obc.writex(verify_wr, &object_set, &verify_finisher);
-  lock.Unlock();
+  lock.unlock();
   std::cout << "wrote dirtying data" << std::endl;
 
   std::cout << "Waiting to read data into cache" << std::endl;
@@ -317,14 +315,14 @@ int correctness_test(uint64_t delay_ns)
 
   std::cout << "validated that data is 0xff where it should be" << std::endl;
   
-  lock.Lock();
+  lock.lock();
   C_SaferCond flushcond2;
   done = obc.flush_all(&flushcond2);
   if (!done) {
     std::cout << "Waiting for final write flush" << std::endl;
-    lock.Unlock();
+    lock.unlock();
     flushcond2.wait();
-    lock.Lock();
+    lock.lock();
   }
 
   bool unclean = obc.release_set(&object_set);
@@ -336,11 +334,11 @@ int correctness_test(uint64_t delay_ns)
       discard_extents.emplace_back(oid, i++, 0, 1<<22, 0);
     }
     obc.discard_set(&object_set, discard_extents);
-    lock.Unlock();
+    lock.unlock();
     obc.stop();
     goto fail;
   }
-  lock.Unlock();
+  lock.unlock();
 
   obc.stop();
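
The flush wait in object_cacher_stress.cc moves from the old Cond/Mutex pair (Cond::Wait() in a hand-written loop) to ceph::condition_variable with std::unique_lock and a predicate, and C_SafeCond is now handed the mutex and condition variable by reference. A rough sketch of the resulting wait shape, assuming a bool flag that the completion side flips under the same mutex (identifiers illustrative, not from the diff):

  ceph::mutex mylock = ceph::make_mutex("example::flush");
  ceph::condition_variable cond;
  bool done = false;

  // waiting side
  {
    std::unique_lock locker{mylock};
    cond.wait(locker, [&done] { return done; });  // sleeps until done becomes true
  }

  // completion side (roughly what a completion such as C_SafeCond does)
  {
    std::lock_guard locker{mylock};
    done = true;
  }
  cond.notify_all();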