git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
test/librados_test_stub: separate mock cluster from connection
author: Jason Dillaman <dillaman@redhat.com>
Wed, 1 Mar 2017 21:21:20 +0000 (16:21 -0500)
committer: Jason Dillaman <dillaman@redhat.com>
Fri, 10 Mar 2017 01:25:18 +0000 (20:25 -0500)
The plan is to support independent connections sharing the same
view of the mocked cluster.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
25 files changed:
src/test/journal/RadosTestFixture.cc
src/test/librados_test_stub/CMakeLists.txt
src/test/librados_test_stub/LibradosTestStub.cc
src/test/librados_test_stub/LibradosTestStub.h
src/test/librados_test_stub/MockTestMemCluster.h [new file with mode: 0644]
src/test/librados_test_stub/MockTestMemIoCtxImpl.h
src/test/librados_test_stub/MockTestMemRadosClient.h
src/test/librados_test_stub/TestCluster.h [new file with mode: 0644]
src/test/librados_test_stub/TestIoCtxImpl.cc
src/test/librados_test_stub/TestMemCluster.cc [new file with mode: 0644]
src/test/librados_test_stub/TestMemCluster.h [new file with mode: 0644]
src/test/librados_test_stub/TestMemIoCtxImpl.cc
src/test/librados_test_stub/TestMemIoCtxImpl.h
src/test/librados_test_stub/TestMemRadosClient.cc
src/test/librados_test_stub/TestMemRadosClient.h
src/test/librados_test_stub/TestRadosClient.cc
src/test/librados_test_stub/TestRadosClient.h
src/test/librados_test_stub/TestWatchNotify.cc
src/test/librados_test_stub/TestWatchNotify.h
src/test/librbd/managed_lock/test_mock_BreakRequest.cc
src/test/librbd/test_mock_fixture.cc
src/test/librbd/test_mock_fixture.h
src/test/rbd_mirror/test_mock_fixture.cc
src/test/rbd_mirror/test_mock_fixture.h
src/tools/rbd_mirror/image_sync/ImageCopyRequest.cc

index 0a71a859043897e639b30f2133630d5bd0be9eb3..1c4ed84075ed1f16d8ec5e8d8918f2d1ed941f39 100644 (file)
@@ -22,10 +22,10 @@ void RadosTestFixture::SetUpTestCase() {
 }
 
 void RadosTestFixture::TearDownTestCase() {
-  ASSERT_EQ(0, destroy_one_pool_pp(_pool_name, _rados));
-
   _thread_pool->stop();
   delete _thread_pool;
+
+  ASSERT_EQ(0, destroy_one_pool_pp(_pool_name, _rados));
 }
 
 std::string RadosTestFixture::get_temp_oid() {
index ef333cc97063a4f8eb93513bfa8dc3ac6779cfa2..ba30a4356c9b2b76496366ecdcb54e240ae42ed8 100644 (file)
@@ -2,9 +2,10 @@ set(librados_test_stub_srcs
   LibradosTestStub.cc
   TestClassHandler.cc
   TestIoCtxImpl.cc
+  TestMemCluster.cc
   TestMemIoCtxImpl.cc
   TestMemRadosClient.cc
   TestRadosClient.cc
   TestWatchNotify.cc)
 add_library(rados_test_stub STATIC ${librados_test_stub_srcs})
-  
+
index 16c0f4da3682fcf2209bd7e5b1cd103a520c8995..085fee67f9d9e6cf3d0c3623ff7d9960b202d759 100644 (file)
@@ -8,11 +8,11 @@
 #include "common/config.h"
 #include "common/debug.h"
 #include "common/snap_types.h"
-#include "global/global_context.h"
 #include "librados/AioCompletionImpl.h"
 #include "test/librados_test_stub/TestClassHandler.h"
 #include "test/librados_test_stub/TestIoCtxImpl.h"
 #include "test/librados_test_stub/TestRadosClient.h"
+#include "test/librados_test_stub/TestMemCluster.h"
 #include "test/librados_test_stub/TestMemRadosClient.h"
 #include "objclass/objclass.h"
 #include "osd/osd_types.h"
 #define dout_context g_ceph_context
 #define dout_subsys ceph_subsys_rados
 
+namespace librados {
+
+MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx) {
+  MockTestMemIoCtxImpl **mock =
+    reinterpret_cast<MockTestMemIoCtxImpl **>(&ioctx);
+  return **mock;
+}
+
+} // namespace librados
+
+namespace librados_test_stub {
+
+TestClusterRef &cluster() {
+  static TestClusterRef s_cluster;
+  return s_cluster;
+}
+
+void set_cluster(TestClusterRef cluster_ref) {
+  cluster() = cluster_ref;
+}
+
+TestClusterRef get_cluster() {
+  auto &cluster_ref = cluster();
+  if (cluster_ref.get() == nullptr) {
+    cluster_ref.reset(new librados::TestMemCluster());
+  }
+  return cluster_ref;
+}
+
+} // namespace librados_test_stub
+
 namespace {
 
 librados::TestClassHandler *get_class_handler() {
@@ -66,52 +97,19 @@ void do_out_buffer(string& outbl, char **outbuf, size_t *outbuflen) {
   }
 }
 
-} // anonymous namespace
-
-namespace librados {
+librados::TestRadosClient *create_rados_client() {
+  CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
+  CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
+  cct->_conf->parse_env();
+  cct->_conf->apply_changes(nullptr);
 
-MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx) {
-  MockTestMemIoCtxImpl **mock =
-    reinterpret_cast<MockTestMemIoCtxImpl **>(&ioctx);
-  return **mock;
+  auto rados_client =
+    librados_test_stub::get_cluster()->create_rados_client(cct);
+  cct->put();
+  return rados_client;
 }
 
-}
-
-namespace librados_test_stub {
-
-TestRadosClientPtr *rados_client() {
-  // force proper destruction order by delaying construction
-  static TestRadosClientPtr s_rados_client;
-  return &s_rados_client;
-}
-
-void set_rados_client(
-    const boost::shared_ptr<librados::TestRadosClient> &new_client) {
-  assert(new_client.get() != nullptr);
-  *rados_client() = new_client;
-}
-
-TestRadosClientPtr get_rados_client() {
-  // TODO: use factory to allow tests to swap out impl
-  TestRadosClientPtr *client = rados_client();
-  if (client->get() == nullptr) {
-    CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
-    CephContext *cct = common_preinit(iparams, CODE_ENVIRONMENT_LIBRARY, 0);
-    cct->_conf->parse_env();
-    cct->_conf->apply_changes(NULL);
-    client->reset(new librados::TestMemRadosClient(cct),
-                  &librados::TestRadosClient::Deallocate);
-    if (g_ceph_context == NULL) {
-      g_ceph_context = cct;
-    }
-    cct->put();
-  }
-  (*client)->get();
-  return *client;
-}
-
-} // namespace librados_test_stub
+} // anonymous namespace
 
 extern "C" int rados_aio_create_completion(void *cb_arg,
                                            rados_callback_t cb_complete,
@@ -184,7 +182,7 @@ extern "C" int rados_connect(rados_t cluster) {
 }
 
 extern "C" int rados_create(rados_t *cluster, const char * const id) {
-  *cluster = librados_test_stub::get_rados_client().get();
+  *cluster = create_rados_client();
   return 0;
 }
 
index 4432ec30e5c4f0b215da3c61b721da5d2c611eaa..eb3757a84be59faf0a953df46ff0a1bf09bebd29 100644 (file)
@@ -7,21 +7,23 @@
 #include <boost/shared_ptr.hpp>
 
 namespace librados {
+
 class IoCtx;
-class TestRadosClient;
 class MockTestMemIoCtxImpl;
+class TestCluster;
 
 MockTestMemIoCtxImpl &get_mock_io_ctx(IoCtx &ioctx);
-}
 
-namespace librados_test_stub {
+} // namespace librados
 
-typedef boost::shared_ptr<librados::TestRadosClient> TestRadosClientPtr;
+namespace librados_test_stub {
 
-void set_rados_client(const TestRadosClientPtr &rados_client);
+typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
 
-TestRadosClientPtr get_rados_client();
+void set_cluster(TestClusterRef cluster);
+TestClusterRef get_cluster();
 
 } // namespace librados_test_stub
 
+
 #endif // LIBRADOS_TEST_STUB_H
diff --git a/src/test/librados_test_stub/MockTestMemCluster.h b/src/test/librados_test_stub/MockTestMemCluster.h
new file mode 100644 (file)
index 0000000..0cf2a7b
--- /dev/null
@@ -0,0 +1,31 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
+#define LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
+
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "gmock/gmock.h"
+
+struct CephContext;
+
+namespace librados {
+
+class TestRadosClient;
+
+class MockTestMemCluster : public TestCluster {
+public:
+  TestRadosClient *create_rados_client(CephContext *cct) override {
+    return new ::testing::NiceMock<librados::MockTestMemRadosClient>(
+      cct, &m_mem_cluster);
+  }
+
+private:
+  TestMemCluster m_mem_cluster;
+
+};
+
+} // namespace librados
+
+#endif // LIBRADOS_MOCK_TEST_MEM_CLUSTER_H
index bc1250d84a9f0a0fdedd5e2ef4f9f462881ded1b..3d06d4fd8069cdd3a8a239d072652a7ef3ee3636 100644 (file)
@@ -5,6 +5,7 @@
 #define LIBRADOS_TEST_STUB_MOCK_TEST_MEM_IO_CTX_IMPL_H
 
 #include "test/librados_test_stub/TestMemIoCtxImpl.h"
+#include "test/librados_test_stub/TestMemCluster.h"
 #include "gmock/gmock.h"
 
 namespace librados {
@@ -16,7 +17,7 @@ public:
   MockTestMemIoCtxImpl(MockTestMemRadosClient *mock_client,
                        TestMemRadosClient *client, int64_t pool_id,
                        const std::string& pool_name,
-                       TestMemRadosClient::Pool *pool)
+                       TestMemCluster::Pool *pool)
     : TestMemIoCtxImpl(client, pool_id, pool_name, pool),
       m_mock_client(mock_client), m_client(client) {
     default_to_parent();
index 9afde338f62fd49303c9b77ec1a28b04ee163d81..c1f1756e2d1e09c833bb943ae7561a4e30a7258b 100644 (file)
 
 namespace librados {
 
+class TestMemCluster;
+
 class MockTestMemRadosClient : public TestMemRadosClient {
 public:
-  MockTestMemRadosClient(CephContext *cct) : TestMemRadosClient(cct) {
+  MockTestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster)
+    : TestMemRadosClient(cct, test_mem_cluster) {
     default_to_dispatch();
   }
 
@@ -21,7 +24,8 @@ public:
   TestIoCtxImpl *do_create_ioctx(int64_t pool_id,
                                  const std::string &pool_name) {
     return new ::testing::NiceMock<MockTestMemIoCtxImpl>(
-      this, this, pool_id, pool_name, get_pool(pool_name));
+      this, this, pool_id, pool_name,
+      get_mem_cluster()->get_pool(pool_name));
   }
 
   MOCK_METHOD2(blacklist_add, int(const std::string& client_address,
diff --git a/src/test/librados_test_stub/TestCluster.h b/src/test/librados_test_stub/TestCluster.h
new file mode 100644 (file)
index 0000000..1a60cc1
--- /dev/null
@@ -0,0 +1,34 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_CLUSTER_H
+#define CEPH_TEST_CLUSTER_H
+
+#include "test/librados_test_stub/TestWatchNotify.h"
+
+class CephContext;
+
+namespace librados {
+
+class TestRadosClient;
+class TestWatchNotify;
+
+class TestCluster {
+public:
+  virtual ~TestCluster() {
+  }
+
+  virtual TestRadosClient *create_rados_client(CephContext *cct) = 0;
+
+  TestWatchNotify *get_watch_notify() {
+    return &m_watch_notify;
+  }
+
+private:
+  TestWatchNotify m_watch_notify;
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_CLUSTER_H
index 77e1ed909f1be8035c1722b98d9e325f53fc303f..1ccc860563f07ff94b893142d9dc1cb2341baf5f 100644 (file)
@@ -97,7 +97,8 @@ void TestIoCtxImpl::aio_notify(const std::string& oid, AioCompletionImpl *c,
   m_pending_ops.inc();
   c->get();
   C_AioNotify *ctx = new C_AioNotify(this, c);
-  m_client->get_watch_notify().aio_notify(oid, bl, timeout_ms, pbl, ctx);
+  m_client->get_watch_notify()->aio_notify(m_client, oid, bl, timeout_ms, pbl,
+                                           ctx);
 }
 
 int TestIoCtxImpl::aio_operate(const std::string& oid, TestObjectOperationImpl &ops,
@@ -130,8 +131,8 @@ int TestIoCtxImpl::aio_watch(const std::string& o, AioCompletionImpl *c,
   m_pending_ops.inc();
   c->get();
   C_AioNotify *ctx = new C_AioNotify(this, c);
-  m_client->get_watch_notify().aio_watch(o, get_instance_id(), handle,
-                                         watch_ctx, ctx);
+  m_client->get_watch_notify()->aio_watch(m_client, o, get_instance_id(),
+                                          handle, watch_ctx, ctx);
   return 0;
 }
 
@@ -139,7 +140,7 @@ int TestIoCtxImpl::aio_unwatch(uint64_t handle, AioCompletionImpl *c) {
   m_pending_ops.inc();
   c->get();
   C_AioNotify *ctx = new C_AioNotify(this, c);
-  m_client->get_watch_notify().aio_unwatch(handle, ctx);
+  m_client->get_watch_notify()->aio_unwatch(m_client, handle, ctx);
   return 0;
 }
 
@@ -158,18 +159,18 @@ int TestIoCtxImpl::exec(const std::string& oid, TestClassHandler *handler,
 
 int TestIoCtxImpl::list_watchers(const std::string& o,
                                  std::list<obj_watch_t> *out_watchers) {
-  return m_client->get_watch_notify().list_watchers(o, out_watchers);
+  return m_client->get_watch_notify()->list_watchers(o, out_watchers);
 }
 
 int TestIoCtxImpl::notify(const std::string& o, bufferlist& bl,
                           uint64_t timeout_ms, bufferlist *pbl) {
-  return m_client->get_watch_notify().notify(o, bl, timeout_ms, pbl);
+  return m_client->get_watch_notify()->notify(m_client, o, bl, timeout_ms, pbl);
 }
 
 void TestIoCtxImpl::notify_ack(const std::string& o, uint64_t notify_id,
                                uint64_t handle, bufferlist& bl) {
-  m_client->get_watch_notify().notify_ack(o, notify_id, handle,
-                                          m_client->get_instance_id(), bl);
+  m_client->get_watch_notify()->notify_ack(m_client, o, notify_id, handle,
+                                           m_client->get_instance_id(), bl);
 }
 
 int TestIoCtxImpl::operate(const std::string& oid, TestObjectOperationImpl &ops) {
@@ -291,13 +292,13 @@ int TestIoCtxImpl::tmap_update(const std::string& oid, bufferlist& cmdbl) {
 }
 
 int TestIoCtxImpl::unwatch(uint64_t handle) {
-  return m_client->get_watch_notify().unwatch(handle);
+  return m_client->get_watch_notify()->unwatch(m_client, handle);
 }
 
 int TestIoCtxImpl::watch(const std::string& o, uint64_t *handle,
                          librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2) {
-  return m_client->get_watch_notify().watch(o, get_instance_id(), handle, ctx,
-                                            ctx2);
+  return m_client->get_watch_notify()->watch(m_client, o, get_instance_id(),
+                                             handle, ctx, ctx2);
 }
 
 int TestIoCtxImpl::execute_operation(const std::string& oid,
diff --git a/src/test/librados_test_stub/TestMemCluster.cc b/src/test/librados_test_stub/TestMemCluster.cc
new file mode 100644 (file)
index 0000000..c1b07ab
--- /dev/null
@@ -0,0 +1,133 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "test/librados_test_stub/TestMemCluster.h"
+#include "test/librados_test_stub/TestMemRadosClient.h"
+
+namespace librados {
+
+TestMemCluster::File::File()
+  : snap_id(), exists(true), lock("TestMemCluster::File::lock") {
+}
+
+TestMemCluster::File::File(const File &rhs)
+  : data(rhs.data),
+    mtime(rhs.mtime),
+    snap_id(rhs.snap_id),
+    exists(rhs.exists),
+    lock("TestMemCluster::File::lock") {
+}
+
+TestMemCluster::Pool::Pool()
+  : file_lock("TestMemCluster::Pool::file_lock") {
+}
+
+TestMemCluster::TestMemCluster()
+  : m_lock("TestMemCluster::m_lock") {
+}
+
+TestMemCluster::~TestMemCluster() {
+  for (auto pool_pair : m_pools) {
+    pool_pair.second->put();
+  }
+}
+
+TestRadosClient *TestMemCluster::create_rados_client(CephContext *cct) {
+  return new TestMemRadosClient(cct, this);
+}
+
+int TestMemCluster::pool_create(const std::string &pool_name) {
+  Mutex::Locker locker(m_lock);
+  if (m_pools.find(pool_name) != m_pools.end()) {
+    return -EEXIST;
+  }
+  Pool *pool = new Pool();
+  pool->pool_id = ++m_pool_id;
+  m_pools[pool_name] = pool;
+  return 0;
+}
+
+int TestMemCluster::pool_delete(const std::string &pool_name) {
+  Mutex::Locker locker(m_lock);
+  Pools::iterator iter = m_pools.find(pool_name);
+  if (iter == m_pools.end()) {
+    return -ENOENT;
+  }
+  iter->second->put();
+  m_pools.erase(iter);
+  return 0;
+}
+
+int TestMemCluster::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
+  // TODO
+  *base_tier = pool_id;
+  return 0;
+}
+
+int TestMemCluster::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
+  Mutex::Locker locker(m_lock);
+  v.clear();
+  for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
+    v.push_back(std::make_pair(iter->second->pool_id, iter->first));
+  }
+  return 0;
+}
+
+int64_t TestMemCluster::pool_lookup(const std::string &pool_name) {
+  Mutex::Locker locker(m_lock);
+  Pools::iterator iter = m_pools.find(pool_name);
+  if (iter == m_pools.end()) {
+    return -ENOENT;
+  }
+  return iter->second->pool_id;
+}
+
+int TestMemCluster::pool_reverse_lookup(int64_t id, std::string *name) {
+  Mutex::Locker locker(m_lock);
+  for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
+    if (iter->second->pool_id == id) {
+      *name = iter->first;
+      return 0;
+    }
+  }
+  return -ENOENT;
+}
+
+TestMemCluster::Pool *TestMemCluster::get_pool(int64_t pool_id) {
+  Mutex::Locker locker(m_lock);
+  for (auto &pool_pair : m_pools) {
+    if (pool_pair.second->pool_id == pool_id) {
+      return pool_pair.second;
+    }
+  }
+  return nullptr;
+}
+
+TestMemCluster::Pool *TestMemCluster::get_pool(const std::string &pool_name) {
+  Mutex::Locker locker(m_lock);
+  Pools::iterator iter = m_pools.find(pool_name);
+  if (iter != m_pools.end()) {
+    return iter->second;
+  }
+  return nullptr;
+}
+
+void TestMemCluster::transaction_start(const std::string &oid) {
+  Mutex::Locker locker(m_lock);
+  while (m_transactions.count(oid)) {
+    m_transaction_cond.Wait(m_lock);
+  }
+  std::pair<std::set<std::string>::iterator, bool> result =
+    m_transactions.insert(oid);
+  assert(result.second);
+}
+
+void TestMemCluster::transaction_finish(const std::string &oid) {
+  Mutex::Locker locker(m_lock);
+  size_t count = m_transactions.erase(oid);
+  assert(count == 1);
+  m_transaction_cond.Signal();
+}
+
+} // namespace librados
+
diff --git a/src/test/librados_test_stub/TestMemCluster.h b/src/test/librados_test_stub/TestMemCluster.h
new file mode 100644 (file)
index 0000000..12c8184
--- /dev/null
@@ -0,0 +1,99 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_TEST_MEM_CLUSTER_H
+#define CEPH_TEST_MEM_CLUSTER_H
+
+#include "test/librados_test_stub/TestCluster.h"
+#include "include/buffer.h"
+#include "include/interval_set.h"
+#include "include/int_types.h"
+#include "common/Cond.h"
+#include "common/Mutex.h"
+#include "common/RefCountedObj.h"
+#include "common/RWLock.h"
+#include <boost/shared_ptr.hpp>
+#include <list>
+#include <map>
+#include <string>
+
+namespace librados {
+
+class TestMemCluster : public TestCluster {
+public:
+  typedef std::map<std::string, bufferlist> OMap;
+  typedef std::map<std::string, OMap> FileOMaps;
+  typedef std::map<std::string, bufferlist> FileTMaps;
+  typedef std::map<std::string, bufferlist> XAttrs;
+  typedef std::map<std::string, XAttrs> FileXAttrs;
+
+  struct File {
+    File();
+    File(const File &rhs);
+
+    bufferlist data;
+    time_t mtime;
+
+    uint64_t snap_id;
+    std::vector<uint64_t> snaps;
+    interval_set<uint64_t> snap_overlap;
+
+    bool exists;
+    RWLock lock;
+  };
+  typedef boost::shared_ptr<File> SharedFile;
+
+  typedef std::list<SharedFile> FileSnapshots;
+  typedef std::map<std::string, FileSnapshots> Files;
+
+  typedef std::set<uint64_t> SnapSeqs;
+  struct Pool : public RefCountedObject {
+    Pool();
+
+    int64_t pool_id = 0;
+
+    SnapSeqs snap_seqs;
+    uint64_t snap_id = 1;
+
+    RWLock file_lock;
+    Files files;
+    FileOMaps file_omaps;
+    FileTMaps file_tmaps;
+    FileXAttrs file_xattrs;
+  };
+
+  TestMemCluster();
+  ~TestMemCluster() override;
+
+  TestRadosClient *create_rados_client(CephContext *cct) override;
+
+  int pool_create(const std::string &pool_name);
+  int pool_delete(const std::string &pool_name);
+  int pool_get_base_tier(int64_t pool_id, int64_t* base_tier);
+  int pool_list(std::list<std::pair<int64_t, std::string> >& v);
+  int64_t pool_lookup(const std::string &name);
+  int pool_reverse_lookup(int64_t id, std::string *name);
+
+  Pool *get_pool(int64_t pool_id);
+  Pool *get_pool(const std::string &pool_name);
+
+  void transaction_start(const std::string &oid);
+  void transaction_finish(const std::string &oid);
+
+private:
+
+  typedef std::map<std::string, Pool*>         Pools;
+
+  mutable Mutex m_lock;
+
+  Pools        m_pools;
+  int64_t m_pool_id = 0;
+
+  Cond m_transaction_cond;
+  std::set<std::string> m_transactions;
+
+};
+
+} // namespace librados
+
+#endif // CEPH_TEST_MEM_CLUSTER_H
index 3df216adabbd7b9889469f36b9f3db82cc94d657..72532583b5bbf9ea9079f0be2f2ebcb3357bef72 100644 (file)
@@ -4,6 +4,7 @@
 #include "test/librados_test_stub/TestMemIoCtxImpl.h"
 #include "test/librados_test_stub/TestMemRadosClient.h"
 #include "common/Clock.h"
+#include "common/RWLock.h"
 #include <boost/algorithm/string/predicate.hpp>
 #include <boost/bind.hpp>
 #include <errno.h>
@@ -30,7 +31,7 @@ TestMemIoCtxImpl::TestMemIoCtxImpl(const TestMemIoCtxImpl& rhs)
 
 TestMemIoCtxImpl::TestMemIoCtxImpl(TestMemRadosClient *client, int64_t pool_id,
                                    const std::string& pool_name,
-                                   TestMemRadosClient::Pool *pool)
+                                   TestMemCluster::Pool *pool)
     : TestIoCtxImpl(client, pool_id, pool_name), m_client(client),
       m_pool(pool) {
   m_pool->get();
@@ -58,7 +59,7 @@ int TestMemIoCtxImpl::append(const std::string& oid, const bufferlist &bl,
     return -EROFS;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, true, snapc);
@@ -71,8 +72,7 @@ int TestMemIoCtxImpl::append(const std::string& oid, const bufferlist &bl,
 
 int TestMemIoCtxImpl::assert_exists(const std::string &oid) {
   RWLock::RLocker l(m_pool->file_lock);
-  TestMemRadosClient::SharedFile file = get_file(oid, false,
-                                                 get_snap_context());
+  TestMemCluster::SharedFile file = get_file(oid, false, get_snap_context());
   if (file == NULL) {
     return -ENOENT;
   }
@@ -94,20 +94,20 @@ int TestMemIoCtxImpl::list_snaps(const std::string& oid, snap_set_t *out_snaps)
   out_snaps->clones.clear();
 
   RWLock::RLocker l(m_pool->file_lock);
-  TestMemRadosClient::Files::iterator it = m_pool->files.find(oid);
+  TestMemCluster::Files::iterator it = m_pool->files.find(oid);
   if (it == m_pool->files.end()) {
     return -ENOENT;
   }
 
   bool include_head = false;
-  TestMemRadosClient::FileSnapshots &file_snaps = it->second;
-  for (TestMemRadosClient::FileSnapshots::iterator s_it = file_snaps.begin();
+  TestMemCluster::FileSnapshots &file_snaps = it->second;
+  for (TestMemCluster::FileSnapshots::iterator s_it = file_snaps.begin();
        s_it != file_snaps.end(); ++s_it) {
-    TestMemRadosClient::File &file = *s_it->get();
+    TestMemCluster::File &file = *s_it->get();
 
     if (file_snaps.size() > 1) {
       out_snaps->seq = file.snap_id;
-      TestMemRadosClient::FileSnapshots::iterator next_it(s_it);
+      TestMemCluster::FileSnapshots::iterator next_it(s_it);
       ++next_it;
       if (next_it == file_snaps.end()) {
         include_head = true;
@@ -120,7 +120,7 @@ int TestMemIoCtxImpl::list_snaps(const std::string& oid, snap_set_t *out_snaps)
       }
 
       // update the overlap with the next version's overlap metadata
-      TestMemRadosClient::File &next_file = *next_it->get();
+      TestMemCluster::File &next_file = *next_it->get();
       interval_set<uint64_t> overlap;
       if (next_file.exists) {
         overlap = next_file.snap_overlap;
@@ -139,7 +139,7 @@ int TestMemIoCtxImpl::list_snaps(const std::string& oid, snap_set_t *out_snaps)
       include_head)
   {
     // Include the SNAP_HEAD
-    TestMemRadosClient::File &file = *file_snaps.back();
+    TestMemCluster::File &file = *file_snaps.back();
     if (file.exists) {
       RWLock::RLocker l2(file.lock);
       if (out_snaps->seq == 0 && !include_head) {
@@ -164,7 +164,7 @@ int TestMemIoCtxImpl::omap_get_vals(const std::string& oid,
     return -EINVAL;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::RLocker l(m_pool->file_lock);
     file = get_file(oid, false, get_snap_context());
@@ -176,13 +176,13 @@ int TestMemIoCtxImpl::omap_get_vals(const std::string& oid,
   out_vals->clear();
 
   RWLock::RLocker l(file->lock);
-  TestMemRadosClient::FileOMaps::iterator o_it = m_pool->file_omaps.find(oid);
+  TestMemCluster::FileOMaps::iterator o_it = m_pool->file_omaps.find(oid);
   if (o_it == m_pool->file_omaps.end()) {
     return 0;
   }
 
-  TestMemRadosClient::OMap &omap = o_it->second;
-  TestMemRadosClient::OMap::iterator it = omap.begin();
+  TestMemCluster::OMap &omap = o_it->second;
+  TestMemCluster::OMap::iterator it = omap.begin();
   if (!start_after.empty()) {
     it = omap.upper_bound(start_after);
   }
@@ -204,7 +204,7 @@ int TestMemIoCtxImpl::omap_rm_keys(const std::string& oid,
     return -EROFS;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, true, get_snap_context());
@@ -227,7 +227,7 @@ int TestMemIoCtxImpl::omap_set(const std::string& oid,
     return -EROFS;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, true, get_snap_context());
@@ -249,7 +249,7 @@ int TestMemIoCtxImpl::omap_set(const std::string& oid,
 
 int TestMemIoCtxImpl::read(const std::string& oid, size_t len, uint64_t off,
                            bufferlist *bl) {
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::RLocker l(m_pool->file_lock);
     file = get_file(oid, false, get_snap_context());
@@ -277,7 +277,7 @@ int TestMemIoCtxImpl::remove(const std::string& oid, const SnapContext &snapc) {
   }
 
   RWLock::WLocker l(m_pool->file_lock);
-  TestMemRadosClient::SharedFile file = get_file(oid, false, snapc);
+  TestMemCluster::SharedFile file = get_file(oid, false, snapc);
   if (file == NULL) {
     return -ENOENT;
   }
@@ -286,7 +286,7 @@ int TestMemIoCtxImpl::remove(const std::string& oid, const SnapContext &snapc) {
   RWLock::WLocker l2(file->lock);
   file->exists = false;
 
-  TestMemRadosClient::Files::iterator it = m_pool->files.find(oid);
+  TestMemCluster::Files::iterator it = m_pool->files.find(oid);
   assert(it != m_pool->files.end());
   if (it->second.size() == 1) {
     m_pool->files.erase(it);
@@ -304,7 +304,7 @@ int TestMemIoCtxImpl::selfmanaged_snap_create(uint64_t *snapid) {
 
 int TestMemIoCtxImpl::selfmanaged_snap_remove(uint64_t snapid) {
   RWLock::WLocker l(m_pool->file_lock);
-  TestMemRadosClient::SnapSeqs::iterator it =
+  TestMemCluster::SnapSeqs::iterator it =
     m_pool->snap_seqs.find(snapid);
   if (it == m_pool->snap_seqs.end()) {
     return -ENOENT;
@@ -319,19 +319,19 @@ int TestMemIoCtxImpl::selfmanaged_snap_rollback(const std::string& oid,
                                                 uint64_t snapid) {
   RWLock::WLocker l(m_pool->file_lock);
 
-  TestMemRadosClient::SharedFile file;
-  TestMemRadosClient::Files::iterator f_it = m_pool->files.find(oid);
+  TestMemCluster::SharedFile file;
+  TestMemCluster::Files::iterator f_it = m_pool->files.find(oid);
   if (f_it == m_pool->files.end()) {
     return 0;
   }
 
-  TestMemRadosClient::FileSnapshots &snaps = f_it->second;
+  TestMemCluster::FileSnapshots &snaps = f_it->second;
   file = snaps.back();
 
   size_t versions = 0;
-  for (TestMemRadosClient::FileSnapshots::reverse_iterator it = snaps.rbegin();
+  for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
       it != snaps.rend(); ++it) {
-    TestMemRadosClient::SharedFile file = *it;
+    TestMemCluster::SharedFile file = *it;
     if (file->snap_id < get_snap_read()) {
       if (versions == 0) {
         // already at the snapshot version
@@ -342,13 +342,13 @@ int TestMemIoCtxImpl::selfmanaged_snap_rollback(const std::string& oid,
           snaps.erase(it.base());
         } else {
           // overwrite contents of current HEAD
-          file = TestMemRadosClient::SharedFile (new TestMemRadosClient::File(**it));
+          file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
           file->snap_id = CEPH_NOSNAP;
           *it = file;
         }
       } else {
         // create new head version
-        file = TestMemRadosClient::SharedFile (new TestMemRadosClient::File(**it));
+        file = TestMemCluster::SharedFile (new TestMemCluster::File(**it));
         file->snap_id = m_pool->snap_id;
         snaps.push_back(file);
       }
@@ -364,7 +364,7 @@ int TestMemIoCtxImpl::sparse_read(const std::string& oid, uint64_t off,
                                   std::map<uint64_t,uint64_t> *m,
                                   bufferlist *data_bl) {
   // TODO verify correctness
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::RLocker l(m_pool->file_lock);
     file = get_file(oid, false, get_snap_context());
@@ -391,7 +391,7 @@ int TestMemIoCtxImpl::sparse_read(const std::string& oid, uint64_t off,
 
 int TestMemIoCtxImpl::stat(const std::string& oid, uint64_t *psize,
                            time_t *pmtime) {
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::RLocker l(m_pool->file_lock);
     file = get_file(oid, false, get_snap_context());
@@ -416,7 +416,7 @@ int TestMemIoCtxImpl::truncate(const std::string& oid, uint64_t size,
     return -EROFS;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, true, snapc);
@@ -452,7 +452,7 @@ int TestMemIoCtxImpl::write(const std::string& oid, bufferlist& bl, size_t len,
     return -EROFS;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, true, snapc);
@@ -477,7 +477,7 @@ int TestMemIoCtxImpl::write_full(const std::string& oid, bufferlist& bl,
     return -EROFS;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, true, snapc);
@@ -509,7 +509,7 @@ int TestMemIoCtxImpl::writesame(const std::string& oid, bufferlist& bl, size_t l
     return -EINVAL;
   }
 
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, true, snapc);
@@ -534,9 +534,9 @@ int TestMemIoCtxImpl::writesame(const std::string& oid, bufferlist& bl, size_t l
 
 int TestMemIoCtxImpl::xattr_get(const std::string& oid,
                                 std::map<std::string, bufferlist>* attrset) {
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   RWLock::RLocker l(m_pool->file_lock);
-  TestMemRadosClient::FileXAttrs::iterator it = m_pool->file_xattrs.find(oid);
+  TestMemCluster::FileXAttrs::iterator it = m_pool->file_xattrs.find(oid);
   if (it == m_pool->file_xattrs.end()) {
     return -ENODATA;
   }
@@ -553,7 +553,7 @@ int TestMemIoCtxImpl::xattr_set(const std::string& oid, const std::string &name,
 
 int TestMemIoCtxImpl::zero(const std::string& oid, uint64_t off, uint64_t len) {
   bool truncate_redirect = false;
-  TestMemRadosClient::SharedFile file;
+  TestMemCluster::SharedFile file;
   {
     RWLock::WLocker l(m_pool->file_lock);
     file = get_file(oid, false, get_snap_context());
@@ -605,23 +605,23 @@ void TestMemIoCtxImpl::ensure_minimum_length(size_t len, bufferlist *bl) {
   }
 }
 
-TestMemRadosClient::SharedFile TestMemIoCtxImpl::get_file(
+TestMemCluster::SharedFile TestMemIoCtxImpl::get_file(
     const std::string &oid, bool write, const SnapContext &snapc) {
   assert(m_pool->file_lock.is_locked() || m_pool->file_lock.is_wlocked());
   assert(!write || m_pool->file_lock.is_wlocked());
 
-  TestMemRadosClient::SharedFile file;
-  TestMemRadosClient::Files::iterator it = m_pool->files.find(oid);
+  TestMemCluster::SharedFile file;
+  TestMemCluster::Files::iterator it = m_pool->files.find(oid);
   if (it != m_pool->files.end()) {
     file = it->second.back();
   } else if (!write) {
-    return TestMemRadosClient::SharedFile();
+    return TestMemCluster::SharedFile();
   }
 
   if (write) {
     bool new_version = false;
     if (!file || !file->exists) {
-      file = TestMemRadosClient::SharedFile(new TestMemRadosClient::File());
+      file = TestMemCluster::SharedFile(new TestMemCluster::File());
       new_version = true;
     } else {
       if (!snapc.snaps.empty() && file->snap_id < snapc.seq) {
@@ -634,8 +634,8 @@ TestMemRadosClient::SharedFile TestMemIoCtxImpl::get_file(
         }
 
         bufferlist prev_data = file->data;
-        file = TestMemRadosClient::SharedFile(
-          new TestMemRadosClient::File(*file));
+        file = TestMemCluster::SharedFile(
+          new TestMemCluster::File(*file));
         file->data.clear();
         append_clone(prev_data, &file->data);
         if (prev_data.length() > 0) {
@@ -656,23 +656,23 @@ TestMemRadosClient::SharedFile TestMemIoCtxImpl::get_file(
   if (get_snap_read() == CEPH_NOSNAP) {
     if (!file->exists) {
       assert(it->second.size() > 1);
-      return TestMemRadosClient::SharedFile();
+      return TestMemCluster::SharedFile();
     }
     return file;
   }
 
-  TestMemRadosClient::FileSnapshots &snaps = it->second;
-  for (TestMemRadosClient::FileSnapshots::reverse_iterator it = snaps.rbegin();
+  TestMemCluster::FileSnapshots &snaps = it->second;
+  for (TestMemCluster::FileSnapshots::reverse_iterator it = snaps.rbegin();
       it != snaps.rend(); ++it) {
-    TestMemRadosClient::SharedFile file = *it;
+    TestMemCluster::SharedFile file = *it;
     if (file->snap_id < get_snap_read()) {
       if (!file->exists) {
-        return TestMemRadosClient::SharedFile();
+        return TestMemCluster::SharedFile();
       }
       return file;
     }
   }
-  return TestMemRadosClient::SharedFile();
+  return TestMemCluster::SharedFile();
 }
 
 } // namespace librados
index bfe45df5d1b36babcea1dc0c7e8ef44fbf0c9c0d..9c9519f803b815f96a295baa19804215240777c7 100644 (file)
@@ -5,16 +5,18 @@
 #define CEPH_TEST_MEM_IO_CTX_IMPL_H
 
 #include "test/librados_test_stub/TestIoCtxImpl.h"
-#include "test/librados_test_stub/TestMemRadosClient.h"
+#include "test/librados_test_stub/TestMemCluster.h"
 
 namespace librados {
 
+class TestMemRadosClient;
+
 class TestMemIoCtxImpl : public TestIoCtxImpl {
 public:
   TestMemIoCtxImpl();
   TestMemIoCtxImpl(TestMemRadosClient *client, int64_t m_pool_id,
                    const std::string& pool_name,
-                   TestMemRadosClient::Pool *pool);
+                   TestMemCluster::Pool *pool);
   ~TestMemIoCtxImpl() override;
 
   TestIoCtxImpl *clone() override;
@@ -43,26 +45,26 @@ public:
   int selfmanaged_snap_create(uint64_t *snapid) override;
   int selfmanaged_snap_remove(uint64_t snapid) override;
   int selfmanaged_snap_rollback(const std::string& oid,
-                                        uint64_t snapid) override;
+                                uint64_t snapid) override;
   int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
-                          std::map<uint64_t,uint64_t> *m, bufferlist *data_bl) override;
+                  std::map<uint64_t,uint64_t> *m, bufferlist *data_bl) override;
   int stat(const std::string& oid, uint64_t *psize, time_t *pmtime) override;
   int truncate(const std::string& oid, uint64_t size,
-                       const SnapContext &snapc) override;
+               const SnapContext &snapc) override;
   int write(const std::string& oid, bufferlist& bl, size_t len,
-                    uint64_t off, const SnapContext &snapc) override;
+            uint64_t off, const SnapContext &snapc) override;
   int write_full(const std::string& oid, bufferlist& bl,
-                         const SnapContext &snapc) override;
+                 const SnapContext &snapc) override;
   int writesame(const std::string& oid, bufferlist& bl, size_t len,
-                        uint64_t off, const SnapContext &snapc) override;
+                uint64_t off, const SnapContext &snapc) override;
   int xattr_get(const std::string& oid,
-                        std::map<std::string, bufferlist>* attrset) override;
+                std::map<std::string, bufferlist>* attrset) override;
   int xattr_set(const std::string& oid, const std::string &name,
-                        bufferlist& bl) override;
+                bufferlist& bl) override;
   int zero(const std::string& oid, uint64_t off, uint64_t len) override;
 
 protected:
-  TestMemRadosClient::Pool *get_pool() {
+  TestMemCluster::Pool *get_pool() {
     return m_pool;
   }
 
@@ -70,14 +72,14 @@ private:
   TestMemIoCtxImpl(const TestMemIoCtxImpl&);
 
   TestMemRadosClient *m_client;
-  TestMemRadosClient::Pool *m_pool;
+  TestMemCluster::Pool *m_pool;
 
   void append_clone(bufferlist& src, bufferlist* dest);
   size_t clip_io(size_t off, size_t len, size_t bl_len);
   void ensure_minimum_length(size_t len, bufferlist *bl);
 
-  TestMemRadosClient::SharedFile get_file(const std::string &oid, bool write,
-                                          const SnapContext &snapc);
+  TestMemCluster::SharedFile get_file(const std::string &oid, bool write,
+                                      const SnapContext &snapc);
 
 };
 
index 6492d25b76b3d33f0f11ec174f8238a966ee87f0..b4f3c1f96735508dccc40afb0fd23e9a98d0aa52 100644 (file)
@@ -2,82 +2,45 @@
 // vim: ts=8 sw=2 smarttab
 
 #include "test/librados_test_stub/TestMemRadosClient.h"
+#include "test/librados_test_stub/TestMemCluster.h"
 #include "test/librados_test_stub/TestMemIoCtxImpl.h"
 #include <errno.h>
 
 namespace librados {
 
-TestMemRadosClient::TestMemRadosClient(CephContext *cct)
-  : TestRadosClient(cct), m_pool_id() {
-}
-
-TestMemRadosClient::~TestMemRadosClient() {
-  for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
-    delete iter->second;
-  }
-}
-
-TestMemRadosClient::File::File()
-  : snap_id(), exists(true), lock("TestMemRadosClient::File::lock")
-{
-}
-
-TestMemRadosClient::File::File(const File &rhs)
-  : data(rhs.data),
-    mtime(rhs.mtime),
-    snap_id(rhs.snap_id),
-    exists(rhs.exists),
-    lock("TestMemRadosClient::File::lock")
-{
-}
-
-TestMemRadosClient::Pool::Pool()
-  : pool_id(), snap_id(1), file_lock("TestMemRadosClient::Pool::file_lock")
-{
+TestMemRadosClient::TestMemRadosClient(CephContext *cct,
+                                       TestMemCluster *test_mem_cluster)
+  : TestRadosClient(cct, test_mem_cluster->get_watch_notify()),
+    m_mem_cluster(test_mem_cluster) {
 }
 
 TestIoCtxImpl *TestMemRadosClient::create_ioctx(int64_t pool_id,
                                                const std::string &pool_name) {
-  return new TestMemIoCtxImpl(this, pool_id, pool_name, get_pool(pool_name));
+  return new TestMemIoCtxImpl(this, pool_id, pool_name,
+                              m_mem_cluster->get_pool(pool_name));
 }
 
 void TestMemRadosClient::object_list(int64_t pool_id,
                                     std::list<librados::TestRadosClient::Object> *list) {
   list->clear();
 
-  for (Pools::iterator p_it = m_pools.begin(); p_it != m_pools.end(); ++p_it) {
-    Pool *pool = p_it->second;
-    if (pool->pool_id == pool_id) {
-      RWLock::RLocker l(pool->file_lock);
-      for (Files::iterator it = pool->files.begin();
-          it != pool->files.end(); ++it) {
-       Object obj;
-       obj.oid = it->first;
-       list->push_back(obj);
-      } 
-      break;
+  auto pool = m_mem_cluster->get_pool(pool_id);
+  if (pool != nullptr) {
+    RWLock::RLocker file_locker(pool->file_lock);
+    for (auto &file_pair : pool->files) {
+      Object obj;
+      obj.oid = file_pair.first;
+      list->push_back(obj);
     }
   }
-} 
+}
 
 int TestMemRadosClient::pool_create(const std::string &pool_name) {
-  if (m_pools.find(pool_name) != m_pools.end()) {
-    return -EEXIST;
-  }
-  Pool *pool = new Pool();
-  pool->pool_id = ++m_pool_id;
-  m_pools[pool_name] = pool;
-  return 0;
+  return m_mem_cluster->pool_create(pool_name);
 }
 
 int TestMemRadosClient::pool_delete(const std::string &pool_name) {
-  Pools::iterator iter = m_pools.find(pool_name);
-  if (iter == m_pools.end()) {
-    return -ENOENT;
-  }
-  iter->second->put();
-  m_pools.erase(iter);
-  return 0;
+  return m_mem_cluster->pool_delete(pool_name);
 }
 
 int TestMemRadosClient::pool_get_base_tier(int64_t pool_id, int64_t* base_tier) {
@@ -87,33 +50,19 @@ int TestMemRadosClient::pool_get_base_tier(int64_t pool_id, int64_t* base_tier)
 }
 
 int TestMemRadosClient::pool_list(std::list<std::pair<int64_t, std::string> >& v) {
-  v.clear();
-  for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
-    v.push_back(std::make_pair(iter->second->pool_id, iter->first));
-  }
-  return 0;
+  return m_mem_cluster->pool_list(v);
 }
 
 int64_t TestMemRadosClient::pool_lookup(const std::string &pool_name) {
-  Pools::iterator iter = m_pools.find(pool_name);
-  if (iter == m_pools.end()) {
-    return -ENOENT;
-  }
-  return iter->second->pool_id;
+  return m_mem_cluster->pool_lookup(pool_name);
 }
 
 int TestMemRadosClient::pool_reverse_lookup(int64_t id, std::string *name) {
-  for (Pools::iterator iter = m_pools.begin(); iter != m_pools.end(); ++iter) {
-    if (iter->second->pool_id == id) {
-      *name = iter->first;
-      return 0;
-    }
-  }
-  return -ENOENT;
+  return m_mem_cluster->pool_reverse_lookup(id, name);
 }
 
 int TestMemRadosClient::watch_flush() {
-  get_watch_notify().flush();
+  get_watch_notify()->flush(this);
   return 0;
 }
 
@@ -122,11 +71,12 @@ int TestMemRadosClient::blacklist_add(const std::string& client_address,
   return 0;
 }
 
-TestMemRadosClient::Pool *TestMemRadosClient::get_pool(
-    const std::string &pool_name) {
-  Pools::iterator iter = m_pools.find(pool_name);
-  assert(iter != m_pools.end());
-  return iter->second;
+void TestMemRadosClient::transaction_start(const std::string &oid) {
+  m_mem_cluster->transaction_start(oid);
+}
+
+void TestMemRadosClient::transaction_finish(const std::string &oid) {
+  m_mem_cluster->transaction_finish(oid);
 }
 
 } // namespace librados
index e574a1361b45d07d0eb772e3544beb7e5ecfd095..dbe18fe09ddc830c07580ee3a751b8be0a434992 100644 (file)
@@ -5,73 +5,24 @@
 #define CEPH_TEST_MEM_RADOS_CLIENT_H
 
 #include "test/librados_test_stub/TestRadosClient.h"
-#include "include/atomic.h"
 #include "include/assert.h"
-#include "include/buffer.h"
-#include "include/interval_set.h"
-#include "common/RefCountedObj.h"
-#include "common/RWLock.h"
-#include <boost/shared_ptr.hpp>
 #include <list>
-#include <map>
-#include <set>
 #include <string>
 
 namespace librados {
 
 class AioCompletionImpl;
+class TestMemCluster;
 
 class TestMemRadosClient : public TestRadosClient {
 public:
-
-  typedef std::map<std::string, bufferlist> OMap;
-  typedef std::map<std::string, OMap> FileOMaps;
-  typedef std::map<std::string, bufferlist> FileTMaps;
-  typedef std::map<std::string, bufferlist> XAttrs;
-  typedef std::map<std::string, XAttrs> FileXAttrs;
-
-  struct File {
-    File();
-    File(const File &rhs);
-
-    bufferlist data;
-    time_t mtime;
-
-    uint64_t snap_id;
-    std::vector<uint64_t> snaps;
-    interval_set<uint64_t> snap_overlap;
-
-    bool exists;
-    RWLock lock;
-  };
-  typedef boost::shared_ptr<File> SharedFile;
-
-  typedef std::list<SharedFile> FileSnapshots;
-  typedef std::map<std::string, FileSnapshots> Files;
-
-  typedef std::set<uint64_t> SnapSeqs;
-  struct Pool : public RefCountedObject {
-    Pool();
-
-    int64_t pool_id;
-
-    SnapSeqs snap_seqs;
-    uint64_t snap_id;
-
-    RWLock file_lock;
-    Files files;
-    FileOMaps file_omaps;
-    FileTMaps file_tmaps;
-    FileXAttrs file_xattrs;
-  };
-
-  TestMemRadosClient(CephContext *cct);
+  TestMemRadosClient(CephContext *cct, TestMemCluster *test_mem_cluster);
 
   TestIoCtxImpl *create_ioctx(int64_t pool_id,
                                       const std::string &pool_name) override;
 
-  void object_list(int64_t pool_id, 
-                          std::list<librados::TestRadosClient::Object> *list) override;
+  void object_list(int64_t pool_id,
+                   std::list<librados::TestRadosClient::Object> *list) override;
 
   int pool_create(const std::string &pool_name) override;
   int pool_delete(const std::string &pool_name) override;
@@ -83,18 +34,18 @@ public:
   int watch_flush() override;
 
   int blacklist_add(const std::string& client_address,
-                           uint32_t expire_seconds) override;
+                    uint32_t expire_seconds) override;
 protected:
-  ~TestMemRadosClient() override;
+  TestMemCluster *get_mem_cluster() {
+    return m_mem_cluster;
+  }
 
-  Pool *get_pool(const std::string &pool_name);
+protected:
+  void transaction_start(const std::string &oid) override;
+  void transaction_finish(const std::string &oid) override;
 
 private:
-
-  typedef std::map<std::string, Pool*>         Pools;
-
-  Pools        m_pools;
-  int64_t m_pool_id;
+  TestMemCluster *m_mem_cluster;
 
 };
 
index 48a47f84dd29aedbfe4322b1934a7d8a1c782331..d8f1e32a303b7aaee19dcf37b301d788345974f1 100644 (file)
@@ -81,11 +81,10 @@ private:
   AioCompletionImpl *m_comp;
 };
 
-TestRadosClient::TestRadosClient(CephContext *cct)
-  : m_cct(cct->get()),
-    m_aio_finisher(new Finisher(m_cct)),
-    m_watch_notify(m_cct, m_aio_finisher),
-    m_transaction_lock("TestRadosClient::m_transaction_lock")
+TestRadosClient::TestRadosClient(CephContext *cct,
+                                 TestWatchNotify *watch_notify)
+  : m_cct(cct->get()), m_watch_notify(watch_notify),
+    m_aio_finisher(new Finisher(m_cct))
 {
   get();
 
@@ -225,7 +224,7 @@ int TestRadosClient::aio_watch_flush(AioCompletionImpl *c) {
   c->get();
   Context *ctx = new FunctionContext(boost::bind(
     &TestRadosClient::finish_aio_completion, this, c, _1));
-  get_watch_notify().aio_flush(ctx);
+  get_watch_notify()->aio_flush(this, ctx);
   return 0;
 }
 
@@ -238,21 +237,4 @@ Finisher *TestRadosClient::get_finisher(const std::string &oid) {
   return m_finishers[h % m_finishers.size()];
 }
 
-void TestRadosClient::transaction_start(const std::string &oid) {
-  Mutex::Locker locker(m_transaction_lock);
-  while (m_transactions.count(oid)) {
-    m_transaction_cond.Wait(m_transaction_lock);
-  }
-  std::pair<std::set<std::string>::iterator, bool> result =
-    m_transactions.insert(oid);
-  assert(result.second);
-}
-
-void TestRadosClient::transaction_finish(const std::string &oid) {
-  Mutex::Locker locker(m_transaction_lock);
-  size_t count = m_transactions.erase(oid);
-  assert(count == 1);
-  m_transaction_cond.Signal();
-}
-
 } // namespace librados
index a4fd00d864900099f536dd82e35d4ac8e3e33bea..51efb71ff61ef8e9e3d0858f42abf4ac60456a82 100644 (file)
@@ -6,8 +6,6 @@
 
 #include "include/rados/librados.hpp"
 #include "common/config.h"
-#include "common/Cond.h"
-#include "common/Mutex.h"
 #include "include/atomic.h"
 #include "include/buffer_fwd.h"
 #include "test/librados_test_stub/TestWatchNotify.h"
@@ -15,7 +13,6 @@
 #include <boost/functional/hash.hpp>
 #include <list>
 #include <map>
-#include <set>
 #include <string>
 #include <vector>
 
@@ -55,7 +52,7 @@ public:
     std::string oid;
   };
 
-  TestRadosClient(CephContext *cct);
+  TestRadosClient(CephContext *cct, TestWatchNotify *watch_notify);
 
   void get();
   void put();
@@ -91,7 +88,10 @@ public:
   virtual int blacklist_add(const std::string& client_address,
                            uint32_t expire_seconds) = 0;
 
-  TestWatchNotify &get_watch_notify() {
+  Finisher *get_aio_finisher() {
+    return m_aio_finisher;
+  }
+  TestWatchNotify *get_watch_notify() {
     return m_watch_notify;
   }
 
@@ -105,26 +105,22 @@ public:
 protected:
   virtual ~TestRadosClient();
 
+  virtual void transaction_start(const std::string &oid) = 0;
+  virtual void transaction_finish(const std::string &oid) = 0;
+
 private:
 
   CephContext *m_cct;
   atomic_t m_refcount;
 
+  TestWatchNotify *m_watch_notify;
+
   Finisher *get_finisher(const std::string& oid);
 
   Finisher *m_aio_finisher;
   std::vector<Finisher *> m_finishers;
   boost::hash<std::string> m_hash;
 
-  TestWatchNotify m_watch_notify;
-
-  Mutex m_transaction_lock;
-  Cond m_transaction_cond;
-  std::set<std::string> m_transactions;
-
-  void transaction_start(const std::string &oid);
-  void transaction_finish(const std::string &oid);
-
 };
 
 } // namespace librados
index 6e749a7b92c386faeb13d5b4d10d62844dd06d7a..73383b5265496edef0d0d416156c4d3320419783 100644 (file)
@@ -4,8 +4,10 @@
 #include "test/librados_test_stub/TestWatchNotify.h"
 #include "include/Context.h"
 #include "common/Finisher.h"
+#include "test/librados_test_stub/TestRadosClient.h"
 #include <boost/bind.hpp>
 #include <boost/function.hpp>
+#include "include/assert.h"
 
 #define dout_subsys ceph_subsys_rados
 #undef dout_prefix
@@ -19,19 +21,14 @@ std::ostream& operator<<(std::ostream& out,
   return out;
 }
 
-TestWatchNotify::TestWatchNotify(CephContext *cct, Finisher *finisher)
-  : m_cct(cct), m_finisher(finisher), m_handle(), m_notify_id(),
-    m_lock("librados::TestWatchNotify::m_lock"),
-    m_pending_notifies(0) {
-  m_cct->get();
+TestWatchNotify::TestWatchNotify()
+  : m_lock("librados::TestWatchNotify::m_lock") {
 }
 
-TestWatchNotify::~TestWatchNotify() {
-  m_cct->put();
-}
+void TestWatchNotify::flush(TestRadosClient *rados_client) {
+  CephContext *cct = rados_client->cct();
 
-void TestWatchNotify::flush() {
-  ldout(m_cct, 20) << "enter" << dendl;
+  ldout(cct, 20) << "enter" << dendl;
   // block until we know no additional async notify callbacks will occur
   Mutex::Locker locker(m_lock);
   while (m_pending_notifies > 0) {
@@ -58,31 +55,37 @@ int TestWatchNotify::list_watchers(const std::string& o,
   return 0;
 }
 
-void TestWatchNotify::aio_flush(Context *on_finish) {
-  m_finisher->queue(on_finish);
+void TestWatchNotify::aio_flush(TestRadosClient *rados_client,
+                                Context *on_finish) {
+  rados_client->get_aio_finisher()->queue(on_finish);
 }
 
-void TestWatchNotify::aio_watch(const std::string& o, uint64_t gid,
+void TestWatchNotify::aio_watch(TestRadosClient *rados_client,
+                                const std::string& o, uint64_t gid,
                                 uint64_t *handle,
                                 librados::WatchCtx2 *watch_ctx,
                                 Context *on_finish) {
-  int r = watch(o, gid, handle, nullptr, watch_ctx);
-  m_finisher->queue(on_finish, r);
+  int r = watch(rados_client, o, gid, handle, nullptr, watch_ctx);
+  rados_client->get_aio_finisher()->queue(on_finish, r);
 }
 
-void TestWatchNotify::aio_unwatch(uint64_t handle, Context *on_finish) {
-  unwatch(handle);
-  m_finisher->queue(on_finish);
+void TestWatchNotify::aio_unwatch(TestRadosClient *rados_client,
+                                  uint64_t handle, Context *on_finish) {
+  unwatch(rados_client, handle);
+  rados_client->get_aio_finisher()->queue(on_finish);
 }
 
-void TestWatchNotify::aio_notify(const std::string& oid, bufferlist& bl,
+void TestWatchNotify::aio_notify(TestRadosClient *rados_client,
+                                 const std::string& oid, bufferlist& bl,
                                  uint64_t timeout_ms, bufferlist *pbl,
                                  Context *on_notify) {
+  CephContext *cct = rados_client->cct();
+
   Mutex::Locker lock(m_lock);
   ++m_pending_notifies;
   uint64_t notify_id = ++m_notify_id;
 
-  ldout(m_cct, 20) << "oid=" << oid << ": notify_id=" << notify_id << dendl;
+  ldout(cct, 20) << "oid=" << oid << ": notify_id=" << notify_id << dendl;
 
   SharedWatcher watcher = get_watcher(oid);
 
@@ -97,31 +100,38 @@ void TestWatchNotify::aio_notify(const std::string& oid, bufferlist& bl,
   watcher->notify_handles[notify_id] = notify_handle;
 
   FunctionContext *ctx = new FunctionContext(
-    boost::bind(&TestWatchNotify::execute_notify, this, oid, bl, notify_id));
-  m_finisher->queue(ctx);
+    boost::bind(&TestWatchNotify::execute_notify, this, rados_client, oid, bl,
+                notify_id));
+  rados_client->get_aio_finisher()->queue(ctx);
 }
 
-int TestWatchNotify::notify(const std::string& oid, bufferlist& bl,
+int TestWatchNotify::notify(TestRadosClient *rados_client,
+                            const std::string& oid, bufferlist& bl,
                             uint64_t timeout_ms, bufferlist *pbl) {
   C_SaferCond cond;
-  aio_notify(oid, bl, timeout_ms, pbl, &cond);
+  aio_notify(rados_client, oid, bl, timeout_ms, pbl, &cond);
   return cond.wait();
 }
 
-void TestWatchNotify::notify_ack(const std::string& o, uint64_t notify_id,
+void TestWatchNotify::notify_ack(TestRadosClient *rados_client,
+                                 const std::string& o, uint64_t notify_id,
                                  uint64_t handle, uint64_t gid,
                                  bufferlist& bl) {
-  ldout(m_cct, 20) << "notify_id=" << notify_id << ", handle=" << handle
-                  << ", gid=" << gid << dendl;
+  CephContext *cct = rados_client->cct();
+  ldout(cct, 20) << "notify_id=" << notify_id << ", handle=" << handle
+                << ", gid=" << gid << dendl;
   Mutex::Locker lock(m_lock);
   WatcherID watcher_id = std::make_pair(gid, handle);
-  ack_notify(o, notify_id, watcher_id, bl);
-  finish_notify(o, notify_id);
+  ack_notify(rados_client, o, notify_id, watcher_id, bl);
+  finish_notify(rados_client, o, notify_id);
 }
 
-int TestWatchNotify::watch(const std::string& o, uint64_t gid,
+int TestWatchNotify::watch(TestRadosClient *rados_client,
+                           const std::string& o, uint64_t gid,
                            uint64_t *handle, librados::WatchCtx *ctx,
                            librados::WatchCtx2 *ctx2) {
+  CephContext *cct = rados_client->cct();
+
   Mutex::Locker lock(m_lock);
   SharedWatcher watcher = get_watcher(o);
 
@@ -134,13 +144,16 @@ int TestWatchNotify::watch(const std::string& o, uint64_t gid,
 
   *handle = watch_handle.handle;
 
-  ldout(m_cct, 20) << "oid=" << o << ", gid=" << gid << ": handle=" << *handle
-                  << dendl;
+  ldout(cct, 20) << "oid=" << o << ", gid=" << gid << ": handle=" << *handle
+                << dendl;
   return 0;
 }
 
-int TestWatchNotify::unwatch(uint64_t handle) {
-  ldout(m_cct, 20) << "handle=" << handle << dendl;
+int TestWatchNotify::unwatch(TestRadosClient *rados_client,
+                             uint64_t handle) {
+  CephContext *cct = rados_client->cct();
+
+  ldout(cct, 20) << "handle=" << handle << dendl;
   Mutex::Locker locker(m_lock);
   for (FileWatchers::iterator it = m_file_watchers.begin();
        it != m_file_watchers.end(); ++it) {
@@ -168,9 +181,12 @@ TestWatchNotify::SharedWatcher TestWatchNotify::get_watcher(
   return watcher;
 }
 
-void TestWatchNotify::execute_notify(const std::string &oid,
+void TestWatchNotify::execute_notify(TestRadosClient *rados_client,
+                                     const std::string &oid,
                                      bufferlist &bl, uint64_t notify_id) {
-  ldout(m_cct, 20) << "oid=" << oid << ", notify_id=" << notify_id << dendl;
+  CephContext *cct = rados_client->cct();
+
+  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id << dendl;
 
   Mutex::Locker lock(m_lock);
   SharedWatcher watcher = get_watcher(oid);
@@ -178,8 +194,8 @@ void TestWatchNotify::execute_notify(const std::string &oid,
 
   NotifyHandles::iterator n_it = watcher->notify_handles.find(notify_id);
   if (n_it == watcher->notify_handles.end()) {
-    ldout(m_cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
-                   << ": not found" << dendl;
+    ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
+                 << ": not found" << dendl;
     return;
   }
 
@@ -211,32 +227,35 @@ void TestWatchNotify::execute_notify(const std::string &oid,
 
       if (watch_handle.watch_ctx2 == NULL) {
         // auto ack old-style watch/notify clients
-        ack_notify(oid, notify_id, watcher_id, bufferlist());
+        ack_notify(rados_client, oid, notify_id, watcher_id, bufferlist());
       }
     }
   }
 
-  finish_notify(oid, notify_id);
+  finish_notify(rados_client, oid, notify_id);
 
   if (--m_pending_notifies == 0) {
     m_file_watcher_cond.Signal();
   }
 }
 
-void TestWatchNotify::ack_notify(const std::string &oid,
+void TestWatchNotify::ack_notify(TestRadosClient *rados_client,
+                                 const std::string &oid,
                                  uint64_t notify_id,
                                  const WatcherID &watcher_id,
                                  const bufferlist &bl) {
-  ldout(m_cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
-                  << ", WatcherID=" << watcher_id << dendl;
+  CephContext *cct = rados_client->cct();
+
+  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
+                << ", WatcherID=" << watcher_id << dendl;
 
   assert(m_lock.is_locked());
   SharedWatcher watcher = get_watcher(oid);
 
   NotifyHandles::iterator it = watcher->notify_handles.find(notify_id);
   if (it == watcher->notify_handles.end()) {
-    ldout(m_cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
-                   << ", WatcherID=" << watcher_id << ": not found" << dendl;
+    ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
+                 << ", WatcherID=" << watcher_id << ": not found" << dendl;
     return;
   }
 
@@ -248,29 +267,32 @@ void TestWatchNotify::ack_notify(const std::string &oid,
   notify_handle->pending_watcher_ids.erase(watcher_id);
 }
 
-void TestWatchNotify::finish_notify(const std::string &oid,
+void TestWatchNotify::finish_notify(TestRadosClient *rados_client,
+                                    const std::string &oid,
                                     uint64_t notify_id) {
-  ldout(m_cct, 20) << "oid=" << oid << ", notify_id=" << notify_id << dendl;
+  CephContext *cct = rados_client->cct();
+
+  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id << dendl;
 
   assert(m_lock.is_locked());
   SharedWatcher watcher = get_watcher(oid);
 
   NotifyHandles::iterator it = watcher->notify_handles.find(notify_id);
   if (it == watcher->notify_handles.end()) {
-    ldout(m_cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
-                   << ": not found" << dendl;
+    ldout(cct, 1) << "oid=" << oid << ", notify_id=" << notify_id
+                 << ": not found" << dendl;
     return;
   }
 
   SharedNotifyHandle notify_handle = it->second;
   if (!notify_handle->pending_watcher_ids.empty()) {
-    ldout(m_cct, 10) << "oid=" << oid << ", notify_id=" << notify_id
-                    << ": pending watchers, returning" << dendl;
+    ldout(cct, 10) << "oid=" << oid << ", notify_id=" << notify_id
+                  << ": pending watchers, returning" << dendl;
     return;
   }
 
-  ldout(m_cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
-                  << ": completing" << dendl;
+  ldout(cct, 20) << "oid=" << oid << ", notify_id=" << notify_id
+                << ": completing" << dendl;
 
   if (notify_handle->pbl != NULL) {
     ::encode(notify_handle->notify_responses, *notify_handle->pbl);
index 9cd52306c365963b3bdaee292cb375563a5455fb..bd52add17929d6777c3305b56163fffc0709bca2 100644 (file)
 #include <list>
 #include <map>
 
-class CephContext;
 class Cond;
 class Finisher;
 
 namespace librados {
 
+class TestRadosClient;
+
 class TestWatchNotify : boost::noncopyable {
 public:
   typedef std::pair<uint64_t, uint64_t> WatcherID;
@@ -34,6 +35,7 @@ public:
   typedef std::map<uint64_t, SharedNotifyHandle> NotifyHandles;
 
   struct WatchHandle {
+    TestRadosClient *rados_client;
     uint64_t gid;
     uint64_t handle;
     librados::WatchCtx* watch_ctx;
@@ -48,51 +50,54 @@ public:
   };
   typedef boost::shared_ptr<Watcher> SharedWatcher;
 
-  TestWatchNotify(CephContext *cct, Finisher *finisher);
-  ~TestWatchNotify();
+  TestWatchNotify();
 
   int list_watchers(const std::string& o,
                     std::list<obj_watch_t> *out_watchers);
 
-  void aio_flush(Context *on_finish);
-  void aio_watch(const std::string& o, uint64_t gid, uint64_t *handle,
-                 librados::WatchCtx2 *watch_ctx, Context *on_finish);
-  void aio_unwatch(uint64_t handle, Context *on_finish);
-  void aio_notify(const std::string& oid, bufferlist& bl, uint64_t timeout_ms,
-                  bufferlist *pbl, Context *on_notify);
-
-  void flush();
-  int notify(const std::string& o, bufferlist& bl,
-             uint64_t timeout_ms, bufferlist *pbl);
-  void notify_ack(const std::string& o, uint64_t notify_id,
-                  uint64_t handle, uint64_t gid, bufferlist& bl);
-  int watch(const std::string& o, uint64_t gid, uint64_t *handle,
-            librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2);
-  int unwatch(uint64_t handle);
+  void aio_flush(TestRadosClient *rados_client, Context *on_finish);
+  void aio_watch(TestRadosClient *rados_client, const std::string& o,
+                 uint64_t gid, uint64_t *handle, librados::WatchCtx2 *watch_ctx,
+                 Context *on_finish);
+  void aio_unwatch(TestRadosClient *rados_client, uint64_t handle,
+                   Context *on_finish);
+  void aio_notify(TestRadosClient *rados_client, const std::string& oid,
+                  bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl,
+                  Context *on_notify);
+
+  void flush(TestRadosClient *rados_client);
+  int notify(TestRadosClient *rados_client, const std::string& o,
+             bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl);
+  void notify_ack(TestRadosClient *rados_client, const std::string& o,
+                  uint64_t notify_id, uint64_t handle, uint64_t gid,
+                  bufferlist& bl);
+  int watch(TestRadosClient *rados_client, const std::string& o, uint64_t gid,
+            uint64_t *handle, librados::WatchCtx *ctx,
+            librados::WatchCtx2 *ctx2);
+  int unwatch(TestRadosClient *rados_client, uint64_t handle);
 
 private:
 
   typedef std::map<std::string, SharedWatcher> FileWatchers;
 
-  CephContext *m_cct;
-  Finisher *m_finisher;
-
-  uint64_t m_handle;
-  uint64_t m_notify_id;
+  uint64_t m_handle = 0;
+  uint64_t m_notify_id = 0;
 
   Mutex m_lock;
-  uint64_t m_pending_notifies;
+  uint64_t m_pending_notifies = 0;
 
   Cond m_file_watcher_cond;
   FileWatchers m_file_watchers;
 
   SharedWatcher get_watcher(const std::string& oid);
 
-  void execute_notify(const std::string &oid, bufferlist &bl,
-                      uint64_t notify_id);
-  void ack_notify(const std::string &oid, uint64_t notify_id,
-                  const WatcherID &watcher_id, const bufferlist &bl);
-  void finish_notify(const std::string &oid, uint64_t notify_id);
+  void execute_notify(TestRadosClient *rados_client, const std::string &oid,
+                      bufferlist &bl, uint64_t notify_id);
+  void ack_notify(TestRadosClient *rados_client, const std::string &oid,
+                  uint64_t notify_id, const WatcherID &watcher_id,
+                  const bufferlist &bl);
+  void finish_notify(TestRadosClient *rados_client, const std::string &oid,
+                     uint64_t notify_id);
 };
 
 } // namespace librados
index 3ebc233024fdb45e9f28f6ff74900a1cc6530d55..c1dff856a0df318bcfa537cc64bf7e81f3142035 100644 (file)
@@ -62,7 +62,8 @@ public:
   }
 
   void expect_blacklist_add(MockTestImageCtx &mock_image_ctx, int r) {
-    EXPECT_CALL(get_mock_rados_client(), blacklist_add(_, _))
+    EXPECT_CALL(*get_mock_io_ctx(mock_image_ctx.md_ctx).get_mock_rados_client(),
+                blacklist_add(_, _))
                   .WillOnce(Return(r));
   }
 
index 97335d49b30da5f92a81978fbec50083f1b3747c..fba57b739ccec6f4a7f78ae18a9d19a02940dfd5 100644 (file)
@@ -4,7 +4,7 @@
 #include "test/librbd/test_mock_fixture.h"
 #include "test/librbd/mock/MockImageCtx.h"
 #include "test/librados_test_stub/LibradosTestStub.h"
-#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "test/librados_test_stub/MockTestMemCluster.h"
 
 // template definitions
 #include "librbd/AsyncRequest.cc"
@@ -21,37 +21,32 @@ using ::testing::Return;
 using ::testing::StrEq;
 using ::testing::WithArg;
 
-TestMockFixture::TestRadosClientPtr TestMockFixture::s_test_rados_client;
-::testing::NiceMock<librados::MockTestMemRadosClient> *
-  TestMockFixture::s_mock_rados_client = NULL;
+TestMockFixture::TestClusterRef TestMockFixture::s_test_cluster;
 
 void TestMockFixture::SetUpTestCase() {
-  s_test_rados_client = librados_test_stub::get_rados_client();
+  s_test_cluster = librados_test_stub::get_cluster();
 
-  // use a mock version of the in-memory rados client
-  s_mock_rados_client = new ::testing::NiceMock<librados::MockTestMemRadosClient>(
-      s_test_rados_client->cct());
-  librados_test_stub::set_rados_client(TestRadosClientPtr(s_mock_rados_client));
+  // use a mock version of the in-memory cluster
+  librados_test_stub::set_cluster(boost::shared_ptr<librados::TestCluster>(
+    new librados::MockTestMemCluster()));
   TestFixture::SetUpTestCase();
 }
 
 void TestMockFixture::TearDownTestCase() {
   TestFixture::TearDownTestCase();
-  librados_test_stub::set_rados_client(s_test_rados_client);
-  s_test_rados_client->put();
-  s_test_rados_client.reset();
-}
-
-void TestMockFixture::SetUp() {
-  TestFixture::SetUp();
+  librados_test_stub::set_cluster(s_test_cluster);
 }
 
 void TestMockFixture::TearDown() {
-  TestFixture::TearDown();
-
   // Mock rados client lives across tests -- reset it to initial state
-  ::testing::Mock::VerifyAndClear(s_mock_rados_client);
-  s_mock_rados_client->default_to_dispatch();
+  librados::MockTestMemRadosClient *mock_rados_client =
+    get_mock_io_ctx(m_ioctx).get_mock_rados_client();
+  ASSERT_TRUE(mock_rados_client != nullptr);
+
+  ::testing::Mock::VerifyAndClear(mock_rados_client);
+  mock_rados_client->default_to_dispatch();
+
+  TestFixture::TearDown();
 }
 
 void TestMockFixture::expect_unlock_exclusive_lock(librbd::ImageCtx &ictx) {
index 6146a3686107233045b7040d971e638208c38035..c0bbb3d2536328649ebdc88d04bd6ec335756ab2 100644 (file)
@@ -12,7 +12,8 @@
 #include <gmock/gmock.h>
 
 namespace librados {
-class TestRadosClient;
+class TestCluster;
+class MockTestMemCluster;
 class MockTestMemIoCtxImpl;
 class MockTestMemRadosClient;
 }
@@ -59,18 +60,13 @@ MATCHER_P(ContentsEqual, bl, "") {
 
 class TestMockFixture : public TestFixture {
 public:
-  typedef boost::shared_ptr<librados::TestRadosClient> TestRadosClientPtr;
+  typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
 
   static void SetUpTestCase();
   static void TearDownTestCase();
 
-  void SetUp() override;
   void TearDown() override;
 
-  ::testing::NiceMock<librados::MockTestMemRadosClient> &get_mock_rados_client() {
-    return *s_mock_rados_client;
-  }
-
   void expect_op_work_queue(librbd::MockImageCtx &mock_image_ctx);
   void expect_unlock_exclusive_lock(librbd::ImageCtx &ictx);
 
@@ -90,8 +86,7 @@ public:
   void expect_commit_op_event(librbd::MockImageCtx &mock_image_ctx, int r);
 
 private:
-  static TestRadosClientPtr s_test_rados_client;
-  static ::testing::NiceMock<librados::MockTestMemRadosClient> *s_mock_rados_client;
+  static TestClusterRef s_test_cluster;
 };
 
 #endif // CEPH_TEST_LIBRBD_TEST_MOCK_FIXTURE_H
index 6f45ca9528d5eaedf4098999e2368e0c3ccd9996..d5c6c7caf278744435594577693d1b105122ce07 100644 (file)
@@ -4,6 +4,7 @@
 #include "test/rbd_mirror/test_mock_fixture.h"
 #include "include/rbd/librbd.hpp"
 #include "test/librados_test_stub/LibradosTestStub.h"
+#include "test/librados_test_stub/MockTestMemCluster.h"
 #include "test/librados_test_stub/MockTestMemRadosClient.h"
 #include "test/librbd/mock/MockImageCtx.h"
 
@@ -14,37 +15,32 @@ using ::testing::_;
 using ::testing::Invoke;
 using ::testing::WithArg;
 
-TestMockFixture::TestRadosClientPtr TestMockFixture::s_test_rados_client;
-::testing::NiceMock<librados::MockTestMemRadosClient> *
-  TestMockFixture::s_mock_rados_client = NULL;
+TestMockFixture::TestClusterRef TestMockFixture::s_test_cluster;
 
 void TestMockFixture::SetUpTestCase() {
-  s_test_rados_client = librados_test_stub::get_rados_client();
+  s_test_cluster = librados_test_stub::get_cluster();
 
   // use a mock version of the in-memory rados client
-  s_mock_rados_client = new ::testing::NiceMock<librados::MockTestMemRadosClient>(
-      s_test_rados_client->cct());
-  librados_test_stub::set_rados_client(TestRadosClientPtr(s_mock_rados_client));
+  librados_test_stub::set_cluster(boost::shared_ptr<librados::TestCluster>(
+    new librados::MockTestMemCluster()));
   TestFixture::SetUpTestCase();
 }
 
 void TestMockFixture::TearDownTestCase() {
   TestFixture::TearDownTestCase();
-  librados_test_stub::set_rados_client(s_test_rados_client);
-  s_test_rados_client->put();
-  s_test_rados_client.reset();
-}
-
-void TestMockFixture::SetUp() {
-  TestFixture::SetUp();
+  librados_test_stub::set_cluster(s_test_cluster);
 }
 
 void TestMockFixture::TearDown() {
-  TestFixture::TearDown();
-
   // Mock rados client lives across tests -- reset it to initial state
-  ::testing::Mock::VerifyAndClear(s_mock_rados_client);
-  s_mock_rados_client->default_to_dispatch();
+  librados::MockTestMemRadosClient *mock_rados_client =
+    get_mock_io_ctx(m_local_io_ctx).get_mock_rados_client();
+  ASSERT_TRUE(mock_rados_client != nullptr);
+
+  ::testing::Mock::VerifyAndClear(mock_rados_client);
+  mock_rados_client->default_to_dispatch();
+
+  TestFixture::TearDown();
 }
 
 void TestMockFixture::expect_test_features(librbd::MockImageCtx &mock_image_ctx) {
index 18ee6146763d94ea07241c6ac69d5c327fe286a6..7855f284546f6aebffb904d2265fbb84ce9b0543 100644 (file)
@@ -9,6 +9,7 @@
 #include "common/WorkQueue.h"
 #include <boost/shared_ptr.hpp>
 #include <gmock/gmock.h>
+#include "include/assert.h"
 
 namespace librados {
 class TestRadosClient;
@@ -40,23 +41,17 @@ namespace mirror {
 
 class TestMockFixture : public TestFixture {
 public:
-  typedef boost::shared_ptr<librados::TestRadosClient> TestRadosClientPtr;
+  typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
 
   static void SetUpTestCase();
   static void TearDownTestCase();
 
-  void SetUp() override;
   void TearDown() override;
 
   void expect_test_features(librbd::MockImageCtx &mock_image_ctx);
 
-  ::testing::NiceMock<librados::MockTestMemRadosClient> &get_mock_rados_client() {
-    return *s_mock_rados_client;
-  }
-
 private:
-  static TestRadosClientPtr s_test_rados_client;
-  static ::testing::NiceMock<librados::MockTestMemRadosClient> *s_mock_rados_client;
+  static TestClusterRef s_test_cluster;
 };
 
 } // namespace mirror
index 734ed829f77c56bc1152adf5227de858cd6b4a58..66f7542e6127564abe0a5bfdc4b2f6ee01afe4f2 100644 (file)
@@ -39,7 +39,7 @@ ImageCopyRequest<I>::ImageCopyRequest(I *local_image_ctx, I *remote_image_ctx,
     m_progress_ctx(progress_ctx),
     m_lock(unique_lock_name("ImageCopyRequest::m_lock", this)),
     m_updating_sync_point(false), m_update_sync_ctx(nullptr),
-    m_update_sync_point_interval(g_ceph_context->_conf->rbd_mirror_sync_point_update_age),
+    m_update_sync_point_interval(m_local_image_ctx->cct->_conf->rbd_mirror_sync_point_update_age),
     m_client_meta_copy(*client_meta) {
   assert(!m_client_meta_copy.sync_points.empty());
 }