]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tools: some modification at RO
authorshangdehao1 <dehao.shang@intel.com>
Tue, 22 Jan 2019 01:39:37 +0000 (09:39 +0800)
committerYuan Zhou <yuan.zhou@intel.com>
Thu, 21 Mar 2019 16:16:27 +0000 (00:16 +0800)
1) : add new data member to ObjectCacheRequest,
     including m_pool_namespace, m_pool_id, m_snap_id,
     m_cache_path
2) : add some log at CacheSession
3) : change pool name to pool_nspace + pool_id + snap_id
4) : change cache file name
5) : add generate_cache_file_name method
6) : modify UT interface

Signed-off-by: Dehao Shang <dehao.shang@intel.com>
12 files changed:
src/test/immutable_object_cache/test_DomainSocket.cc
src/test/immutable_object_cache/test_ObjectCacheFile.cc
src/test/immutable_object_cache/test_multi_session.cc
src/test/immutable_object_cache/test_object_store.cc
src/tools/immutable_object_cache/CacheClient.cc
src/tools/immutable_object_cache/CacheClient.h
src/tools/immutable_object_cache/CacheController.cc
src/tools/immutable_object_cache/CacheSession.cc
src/tools/immutable_object_cache/ObjectCacheStore.cc
src/tools/immutable_object_cache/ObjectCacheStore.h
src/tools/immutable_object_cache/Types.cc
src/tools/immutable_object_cache/Types.h

index 944e535e574e6a94dd7acaea98593920e03ac10e..a1b25bc44fa0b8f403fa35feb34ac097bb7b6bff 100644 (file)
@@ -120,7 +120,7 @@ public:
         usleep(1);
       }
 
-      m_cache_client->lookup_object("test_pool", "123456", ctx);
+      m_cache_client->lookup_object("test_pool", 1, 2, "123456", ctx);
       m_send_request_index++;
     }
     m_wait_event.wait();
@@ -133,7 +133,7 @@ public:
        hit = ack->m_head.type == RBDSC_READ_REPLY;
        m_wait_event.signal();
     });
-    m_cache_client->lookup_object(pool_name, object_id, ctx);
+    m_cache_client->lookup_object(pool_name, 1, 2, object_id, ctx);
     m_wait_event.wait();
     return hit;
   }
index 29d90b2a026beb6ec418b485bd0a7d901f38d418..1ddbfa2e473b8b59a89407fa2400126fdcd0c15a 100644 (file)
@@ -105,7 +105,7 @@ TEST_F(TestObjectCacheFile, test_read_object_from_file) {
   sub_bl.substr_of(*buf_1, 2, 4);
   ASSERT_EQ(0, (strncmp(sub_bl.c_str(), buf_5->c_str(), 4)));
 
-  ASSERT_EQ(m_cache_file_1->read_object_from_file(buf_6, 12, 4), -1);
+  ASSERT_EQ(m_cache_file_1->read_object_from_file(buf_6, 12, 4), 0);
   ASSERT_EQ(0, buf_6->length());
 
 
index d8cc2ce2391bdeb46a4fe141cc878b86cb0c3662..a9e0c8fa17bf0d1f6a08cb8c0e4f13f5fdf3adde 100644 (file)
@@ -123,7 +123,7 @@ public:
       });
       m_send_request_index++;
       // here just for concurrently testing register + lookup, so fix object id.
-      m_cache_client_vec[index]->lookup_object(pool, "1234", ctx);
+      m_cache_client_vec[index]->lookup_object(pool, 1, 2, "1234", ctx);
     }
 
     if (is_last) {
index 534677f6f587de987009b5fe6fcc2e628a53a59f..caa14dc9a1a3231ce60261fe388ddf43cec9495d 100644 (file)
@@ -67,7 +67,7 @@ public:
   }
 
   void lookup_object_cache_store(std::string pool_name, std::string vol_name, std::string obj_name, int& ret) {
-    ret = m_object_cache_store->lookup_object(pool_name, obj_name);
+    ret = m_object_cache_store->lookup_object(pool_name, 1, 2, obj_name);
   }
 
   void TearDown() override {
index de373f93d968d2a4aac4679d59efe63e9726be27..71bc6d11abc48d147eeb8d1715a384bef1971784 100644 (file)
@@ -99,8 +99,8 @@ namespace immutable_obj_cache {
     return 0;
   }
 
-  void CacheClient::lookup_object(std::string pool_name, std::string oid,
-                                  GenContext<ObjectCacheRequest*>* on_finish) {
+  void CacheClient::lookup_object(std::string pool_nspace, uint64_t pool_id, uint64_t snap_id,
+                                  std::string oid, GenContext<ObjectCacheRequest*>* on_finish) {
 
     ObjectCacheRequest* req = new ObjectCacheRequest();
     req->m_head.version = 0;
@@ -109,7 +109,10 @@ namespace immutable_obj_cache {
     req->m_head.padding = 0;
     req->m_head.seq = ++m_sequence_id;
 
-    req->m_data.m_pool_name = pool_name;
+    req->m_data.m_pool_id = pool_id;
+    req->m_data.m_snap_id = snap_id;
+    req->m_data.m_pool_name = "";
+    req->m_data.m_pool_namespace = pool_nspace;
     req->m_data.m_oid = oid;
     req->m_process_msg = on_finish;
     req->encode();
index a30ce155847202cf1ecfba25cf5e8a10c8fdc625..56ef42321fa4a622f2732a6828f8b8a8d39f30ff 100644 (file)
@@ -31,7 +31,8 @@ public:
   void close();
   int stop();
   int connect();
-  void lookup_object(std::string pool_name, std::string oid, GenContext<ObjectCacheRequest*>* on_finish);
+  void lookup_object(std::string pool_nspace, uint64_t pool_id, uint64_t snap_id,
+                     std::string oid, GenContext<ObjectCacheRequest*>* on_finish);
   int register_client(Context* on_finish);
 
 private:
index 9b439f6c980ab276a80004e16d07b3997debeb3f..8e07aea930b342a9bfd77c2cb916a43d58d292dd 100644 (file)
@@ -74,7 +74,10 @@ void CacheController::handle_request(uint64_t session_id, ObjectCacheRequest* re
     }
     case RBDSC_READ: {
       // lookup object in local cache store
-      int ret = m_object_cache_store->lookup_object(req->m_data.m_pool_name, req->m_data.m_oid);
+      int ret = m_object_cache_store->lookup_object(req->m_data.m_pool_namespace,
+                                                    req->m_data.m_pool_id,
+                                                    req->m_data.m_snap_id,
+                                                    req->m_data.m_oid);
       if (ret < 0) {
         req->m_head.type = RBDSC_READ_RADOS;
       } else {
index 52098f33cc1d2e1190971bf911e886331a172243..f58bd8163c3c0844a7be1ce496b2ce6f89e1201c 100644 (file)
@@ -46,6 +46,7 @@ void CacheSession::start() {
 }
 
 void CacheSession::read_request_header() {
+  ldout(cct, 20) << dendl;
   boost::asio::async_read(m_dm_socket,
                           boost::asio::buffer(m_head_buffer, sizeof(ObjectCacheMsgHeader)),
                           boost::asio::transfer_exactly(sizeof(ObjectCacheMsgHeader)),
@@ -56,7 +57,8 @@ void CacheSession::read_request_header() {
 }
 
 void CacheSession::handle_request_header(const boost::system::error_code& err,
-                               size_t bytes_transferred) {
+                                         size_t bytes_transferred) {
+  ldout(cct, 20) << dendl;
   if(err || bytes_transferred != sizeof(ObjectCacheMsgHeader)) {
     fault();
     return;
@@ -72,6 +74,7 @@ void CacheSession::handle_request_header(const boost::system::error_code& err,
 }
 
 void CacheSession::read_request_data(uint64_t data_len) {
+  ldout(cct, 20) << dendl;
   bufferptr bp_data(buffer::create(data_len));
   boost::asio::async_read(m_dm_socket,
                           boost::asio::buffer(bp_data.c_str(), bp_data.length()),
@@ -85,6 +88,7 @@ void CacheSession::read_request_data(uint64_t data_len) {
 void CacheSession::handle_request_data(bufferptr bp, uint64_t data_len,
                                       const boost::system::error_code& err,
                                       size_t bytes_transferred) {
+  ldout(cct, 20) << dendl;
   if(err || bytes_transferred != data_len) {
     fault();
     return;
@@ -99,10 +103,12 @@ void CacheSession::handle_request_data(bufferptr bp, uint64_t data_len,
 }
 
 void CacheSession::process(ObjectCacheRequest* req) {
+  ldout(cct, 20) << dendl;
    m_server_process_msg(m_session_id, req);
 }
 
 void CacheSession::send(ObjectCacheRequest* reply) {
+  ldout(cct, 20) << dendl;
   reply->m_head_buffer.clear();
   reply->m_data_buffer.clear();
   reply->encode();
@@ -123,6 +129,7 @@ void CacheSession::send(ObjectCacheRequest* reply) {
 }
 
 void CacheSession::fault() {
+  ldout(cct, 20) << dendl;
   // TODO
 }
 
index cb5a84e9c05c69dacbe16c208697c64fa121d600..9f27d9f50e7b98efe6d96fba19897c06909cc89e 100644 (file)
@@ -45,7 +45,7 @@ int ObjectCacheStore::init(bool reset) {
 
   ret = m_rados->connect();
   if(ret < 0 ) {
-    lderr(m_cct) << "fail to conect to cluster" << dendl;
+    lderr(m_cct) << "fail to connect to cluster" << dendl;
     return ret;
   }
 
@@ -87,29 +87,32 @@ int ObjectCacheStore::init_cache() {
   return 0;
 }
 
-int ObjectCacheStore::do_promote(std::string pool_name, std::string object_name) {
+int ObjectCacheStore::do_promote(std::string pool_nspace,
+                                  uint64_t pool_id, uint64_t snap_id,
+                                  std::string object_name) {
   ldout(m_cct, 20) << "to promote object = "
-                   << object_name << " from pool: "
-                   << pool_name << dendl;
+                   << object_name << " from pool ID : "
+                   << pool_id << dendl;
 
   int ret = 0;
-  std::string cache_file_name =  pool_name + object_name;
+  std::string cache_file_name = std::move(generate_cache_file_name(pool_nspace,
+                                          pool_id, snap_id, object_name));
   {
     Mutex::Locker _locker(m_ioctxs_lock);
-    if (m_ioctxs.find(pool_name) == m_ioctxs.end()) {
+    if (m_ioctxs.find(pool_id) == m_ioctxs.end()) {
       librados::IoCtx* io_ctx = new librados::IoCtx();
-      ret = m_rados->ioctx_create(pool_name.c_str(), *io_ctx);
+      ret = m_rados->ioctx_create2(pool_id, *io_ctx);
       if (ret < 0) {
         lderr(m_cct) << "fail to create ioctx" << dendl;
         return ret;
       }
-      m_ioctxs.emplace(pool_name, io_ctx);
+      m_ioctxs.emplace(pool_id, io_ctx);
     }
   }
 
-  ceph_assert(m_ioctxs.find(pool_name) != m_ioctxs.end());
+  ceph_assert(m_ioctxs.find(pool_id) != m_ioctxs.end());
 
-  librados::IoCtx* ioctx = m_ioctxs[pool_name];
+  librados::IoCtx* ioctx = m_ioctxs[pool_id];
 
   librados::bufferlist* read_buf = new librados::bufferlist();
 
@@ -169,17 +172,19 @@ int ObjectCacheStore::handle_promote_callback(int ret, bufferlist* read_buf,
   return ret;
 }
 
-int ObjectCacheStore::lookup_object(std::string pool_name,
+int ObjectCacheStore::lookup_object(std::string pool_nspace,
+                                    uint64_t pool_id, uint64_t snap_id,
                                     std::string object_name) {
   ldout(m_cct, 20) << "object name = " << object_name
-                   << " in pool: " << pool_name << dendl;
+                   << " in pool ID : " << pool_id << dendl;
 
   int pret = -1;
-  cache_status_t ret = m_policy->lookup_object(pool_name + object_name);
+  cache_status_t ret = m_policy->lookup_object(
+                        generate_cache_file_name(pool_nspace, pool_id, snap_id, object_name));
 
   switch(ret) {
     case OBJ_CACHE_NONE: {
-      pret = do_promote(pool_name, object_name);
+      pret = do_promote(pool_nspace, pool_id, snap_id, object_name);
       if (pret < 0) {
         lderr(m_cct) << "fail to start promote" << dendl;
       }
@@ -250,5 +255,14 @@ int ObjectCacheStore::do_evict(std::string cache_file) {
   return ret;
 }
 
+std::string ObjectCacheStore::generate_cache_file_name(std::string pool_nspace,
+                                                       uint64_t pool_id,
+                                                       uint64_t snap_id,
+                                                       std::string oid) {
+  return pool_nspace + ":" +
+         std::to_string(pool_id) + ":" +
+         std::to_string(snap_id) + ":" + oid;
+}
+
 } // namespace immutable_obj_cache
 } // namespace ceph
index 2db9f95e08900daee203f15e7783d30abba0f622..01bd7d61e3077a0e0fb1fe09e1687ebb79f740fc 100644 (file)
@@ -27,38 +27,32 @@ class ObjectCacheStore
   public:
     ObjectCacheStore(CephContext *cct, ContextWQ* work_queue);
     ~ObjectCacheStore();
-
     int init(bool reset);
-
     int shutdown();
-
     int init_cache();
-    int lookup_object(std::string pool_name, std::string object_name);
+    int lookup_object(std::string pool_nspace,
+                      uint64_t pool_id, uint64_t snap_id,
+                      std::string object_name);
 
   private:
+    std::string generate_cache_file_name(std::string pool_nspace, uint64_t pool_id,
+                                         uint64_t snap_id, std::string oid);
     int evict_objects();
-
-    int do_promote(std::string pool_name, std::string object_name);
-
+    int do_promote(std::string pool_nspace, uint64_t pool_id,
+                    uint64_t snap_id, std::string object_name);
     int promote_object(librados::IoCtx*, std::string object_name,
                        librados::bufferlist* read_buf,
                        Context* on_finish);
-
    int handle_promote_callback(int, bufferlist*, std::string);
    int do_evict(std::string cache_file);
 
     CephContext *m_cct;
     ContextWQ* m_work_queue;
     RadosRef m_rados;
-
-
-    std::map<std::string, librados::IoCtx*> m_ioctxs;
+    std::map<uint64_t, librados::IoCtx*> m_ioctxs;
     Mutex m_ioctxs_lock;
-
     ObjectCacheFile *m_cache_file;
-
     Policy* m_policy;
-
     //TODO(): make this configurable
     int m_dir_num = 10;
     uint64_t object_cache_max_size;
index dba1488db0d58da70f23583ad307b51641d2deab..a6748a5862766a6dcb19fb0cbe7f07d3e8b8b31d 100644 (file)
@@ -32,9 +32,13 @@ void ObjectCacheMsgData::encode(bufferlist& bl) {
   ceph::encode(m_image_size, bl);
   ceph::encode(m_read_offset, bl);
   ceph::encode(m_read_len, bl);
+  ceph::encode(m_pool_id, bl);
+  ceph::encode(m_snap_id, bl);
   ceph::encode(m_pool_name, bl);
   ceph::encode(m_image_name, bl);
   ceph::encode(m_oid, bl);
+  ceph::encode(m_pool_namespace, bl);
+  ceph::encode(m_cache_path, bl);
 }
 
 void ObjectCacheMsgData::decode(bufferlist& bl) {
@@ -42,9 +46,13 @@ void ObjectCacheMsgData::decode(bufferlist& bl) {
   ceph::decode(m_image_size, i);
   ceph::decode(m_read_offset, i);
   ceph::decode(m_read_len, i);
+  ceph::decode(m_pool_id, i);
+  ceph::decode(m_snap_id, i);
   ceph::decode(m_pool_name, i);
   ceph::decode(m_image_name, i);
   ceph::decode(m_oid, i);
+  ceph::decode(m_pool_namespace, i);
+  ceph::decode(m_cache_path, i);
 }
 
 void ObjectCacheRequest::encode() {
index 943a5f2aaa6b06d7c53149f9b424ad4ce6f48dd7..675dc84ed29fc6be5bdb9a4f8d0a7fa35ec88cf2 100644 (file)
@@ -22,14 +22,19 @@ struct ObjectCacheMsgHeader {
     void decode(bufferlist::const_iterator& it);
 };
 
+// TODO : cleanup useless data
 class ObjectCacheMsgData {
 public:
   uint64_t m_image_size;
   uint64_t m_read_offset;
   uint64_t m_read_len;
+  uint64_t m_pool_id;
+  uint64_t m_snap_id;
   std::string m_pool_name;
   std::string m_image_name;
   std::string m_oid;
+  std::string m_pool_namespace; // TODO : Jason suggestion
+  std::string m_cache_path;
 
   void encode(bufferlist& bl);
   void decode(bufferlist& bl);