librbd/cache/pwl: clean code and update comments
author Yin Congmin <congmin.yin@intel.com>
Mon, 29 Nov 2021 03:47:52 +0000 (11:47 +0800)
committer Yin Congmin <congmin.yin@intel.com>
Fri, 31 Dec 2021 08:04:09 +0000 (16:04 +0800)
Some comments are outdated and some are no longer needed; update them.
Also remove a redundant ";", a redundant log message, and redundant "" at the end of stream output.

Signed-off-by: Yin Congmin <congmin.yin@intel.com>
src/librbd/cache/pwl/AbstractWriteLog.cc
src/librbd/cache/pwl/Request.cc
src/librbd/cache/pwl/SyncPoint.cc
src/librbd/cache/pwl/Types.h
src/librbd/cache/pwl/rwl/Request.cc
src/librbd/cache/pwl/rwl/WriteLog.cc
src/librbd/cache/pwl/ssd/Request.cc
src/librbd/cache/pwl/ssd/WriteLog.cc

index 7c1fa44d724c9f0d2f83bd4fe3a8cbda97398804..d19a6f4a26b90cc9ef84c197eb0a1819804debad 100644 (file)
@@ -412,7 +412,7 @@ void AbstractWriteLog<I>::update_sync_points(std::map<uint64_t, bool> &missing_s
     ceph_assert(kv.first == m_current_sync_gen+1);
     init_flush_new_sync_point(later);
     ceph_assert(kv.first == m_current_sync_gen);
-    sync_point_entries[kv.first] = m_current_sync_point->log_entry;;
+    sync_point_entries[kv.first] = m_current_sync_point->log_entry;
   }
 
   /*
@@ -519,7 +519,7 @@ void AbstractWriteLog<I>::pwl_init(Context *on_finish, DeferredContexts &later)
     ldout(cct, 5) << "There's an existing pool file " << m_log_pool_name
                   << ", While there's no cache in the image metatata." << dendl;
     if (remove(m_log_pool_name.c_str()) != 0) {
-      lderr(cct) << "Failed to remove the pool file " << m_log_pool_name
+      lderr(cct) << "failed to remove the pool file " << m_log_pool_name
                  << dendl;
       on_finish->complete(-errno);
       return;
@@ -528,7 +528,8 @@ void AbstractWriteLog<I>::pwl_init(Context *on_finish, DeferredContexts &later)
     }
   } else if ((m_cache_state->present) &&
              (access(m_log_pool_name.c_str(), F_OK) != 0)) {
-    ldout(cct, 5) << "Can't find the existed pool file " << m_log_pool_name << dendl;
+    lderr(cct) << "can't find the existed pool file: " << m_log_pool_name
+               << ". error: " << cpp_strerror(-errno) << dendl;
     on_finish->complete(-errno);
     return;
   }
@@ -828,11 +829,9 @@ void AbstractWriteLog<I>::write(Extents &&image_extents,
 
   ceph_assert(m_initialized);
 
-  /* Split images because PMDK's space management is not perfect, there are
-   * fragment problems. The larger the block size difference of the block,
-   * the easier the fragmentation problem will occur, resulting in the
-   * remaining space can not be allocated in large size. We plan to manage
-   * pmem space and allocation by ourselves in the future.
+  /* Split image extents larger than 1M. This isn't strictly necessary but
+   * makes libpmemobj allocator's job easier and reduces pmemobj_defrag() cost.
+   * We plan to manage pmem space and allocation by ourselves in the future.
    */
   Extents split_image_extents;
   uint64_t max_extent_size = get_max_extent();
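
(Not part of the commit: the updated comment above says image extents larger than 1M are split to make the libpmemobj allocator's job easier. A minimal standalone sketch of that splitting idea, assuming Extents is a vector of (offset, length) pairs; split_extents() is a hypothetical helper, not the committed loop in AbstractWriteLog<I>::write():)

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for librbd's extent type: (image offset, length).
    using Extents = std::vector<std::pair<uint64_t, uint64_t>>;

    // Illustrative only: cap every extent at max_extent_size, emitting
    // full-sized chunks plus the remainder.
    Extents split_extents(const Extents &image_extents, uint64_t max_extent_size) {
      Extents split;
      for (auto [offset, len] : image_extents) {
        while (len > max_extent_size) {
          split.emplace_back(offset, max_extent_size);
          offset += max_extent_size;
          len -= max_extent_size;
        }
        if (len > 0) {
          split.emplace_back(offset, len);
        }
      }
      return split;
    }

With a 1M cap, as the comment implies, a 3.5M write would reach the allocator as three 1M chunks and one 0.5M chunk.
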
@@ -2136,7 +2135,7 @@ bool AbstractWriteLog<I>::can_retire_entry(std::shared_ptr<GenericLogEntry> log_
 template <typename I>
 void AbstractWriteLog<I>::check_image_cache_state_clean() {
   ceph_assert(m_deferred_ios.empty());
-  ceph_assert(m_ops_to_append.empty());;
+  ceph_assert(m_ops_to_append.empty());
   ceph_assert(m_async_flush_ops == 0);
   ceph_assert(m_async_append_ops == 0);
   ceph_assert(m_dirty_log_entries.empty());
index 4770e498fba543962eac75960317b94b8f9035eb..d69ea725379c72273c6ce7082e9c3906c942c2c2 100644 (file)
@@ -35,16 +35,16 @@ C_BlockIORequest<T>::~C_BlockIORequest() {
 template <typename T>
 std::ostream &operator<<(std::ostream &os,
                          const C_BlockIORequest<T> &req) {
-   os << "image_extents=[" << req.image_extents << "], "
-      << "image_extents_summary=[" << req.image_extents_summary << "], "
-      << "bl=" << req.bl << ", "
-      << "user_req=" << req.user_req << ", "
-      << "m_user_req_completed=" << req.m_user_req_completed << ", "
-      << "m_deferred=" << req.m_deferred << ", "
-      << "detained=" << req.detained << ", "
-      << "waited_lanes=" << req.waited_lanes << ", "
-      << "waited_entries=" << req.waited_entries << ", "
-      << "waited_buffers=" << req.waited_buffers << "";
+   os << "image_extents=" << req.image_extents << ","
+      << " image_extents_summary=[" << req.image_extents_summary << "],"
+      << " bl=" << req.bl << ","
+      << " user_req=" << req.user_req << ","
+      << " m_user_req_completed=" << req.m_user_req_completed << ","
+      << " m_deferred=" << req.m_deferred << ","
+      << " detained=" << req.detained << ","
+      << " waited_lanes=" << req.waited_lanes << ","
+      << " waited_entries=" << req.waited_entries << ","
+      << " waited_buffers=" << req.waited_buffers;
    return os;
 }
 
@@ -141,7 +141,7 @@ std::ostream &operator<<(std::ostream &os,
   os << (C_BlockIORequest<T>&)req
      << " m_resources.allocated=" << req.m_resources.allocated;
   if (req.op_set) {
-     os << "op_set=" << *req.op_set;
+     os << " op_set=" << *req.op_set;
   }
   return os;
 }
@@ -211,7 +211,8 @@ void C_WriteRequest<T>::setup_log_operations(DeferredContexts &on_exit) {
                                         current_sync_point,
                                         pwl.get_persist_on_flush(),
                                         pwl.get_context(), this);
-    ldout(pwl.get_context(), 20) << "write_req=" << *this << " op_set=" << op_set.get() << dendl;
+    ldout(pwl.get_context(), 20) << "write_req=[" << *this << "]"
+                                 << " op_set=" << op_set.get() << dendl;
     ceph_assert(m_resources.allocated);
     /* op_set->operations initialized differently for plain write or write same */
     auto allocation = m_resources.buffers.begin();
@@ -222,7 +223,8 @@ void C_WriteRequest<T>::setup_log_operations(DeferredContexts &on_exit) {
       this->op_set->operations.emplace_back(operation);
 
       /* A WS is also a write */
-      ldout(pwl.get_context(), 20) << "write_req=" << *this << " op_set=" << op_set.get()
+      ldout(pwl.get_context(), 20) << "write_req=[" << *this << "]"
+                                   << " op_set=" << op_set.get()
                                    << " operation=" << operation << dendl;
       log_entries.emplace_back(operation->log_entry);
       if (!op_set->persist_on_flush) {
index 8fb2f82052e630abe7826a0f7596fbf34334e41a..e0e13edf6fa1725bd78152159f3dfc87205c3667 100644 (file)
@@ -38,7 +38,7 @@ std::ostream &operator<<(std::ostream &os,
      << "m_append_scheduled=" << p.m_append_scheduled << ", "
      << "appending=" << p.appending << ", "
      << "on_sync_point_appending=" << p.on_sync_point_appending.size() << ", "
-     << "on_sync_point_persisted=" << p.on_sync_point_persisted.size() << "";
+     << "on_sync_point_persisted=" << p.on_sync_point_persisted.size();
   return os;
 }
 
index 863292e16117270f9779f4f5493416ae8db4156d..445be8262732cacec70688d2aa2e74f4213cc04d 100644 (file)
@@ -283,7 +283,7 @@ struct WriteLogPoolRoot {
   #ifdef WITH_RBD_RWL
   union {
     struct {
-      uint8_t layout_version;    /* Version of this structure (RWL_LAYOUT_VERSION) */
+      uint8_t layout_version;
     };
     uint64_t _u64;
   } header;
@@ -291,15 +291,16 @@ struct WriteLogPoolRoot {
   #endif
   #ifdef WITH_RBD_SSD_CACHE
   uint64_t layout_version = 0;
-  uint64_t cur_sync_gen = 0;
+  uint64_t cur_sync_gen = 0;    /* TODO: remove it when changing disk format */
   #endif
   uint64_t pool_size;
-  uint64_t flushed_sync_gen;     /* All writing entries with this or a lower
-                                  * sync gen number are flushed. */
-  uint32_t block_size;           /* block size */
+  uint64_t flushed_sync_gen;    /* All writing entries with this or a lower
+                                 * sync gen number are flushed. */
+  uint32_t block_size;
   uint32_t num_log_entries;
-  uint64_t first_free_entry;     /* Entry following the newest valid entry */
-  uint64_t first_valid_entry;    /* Index of the oldest valid entry in the log */
+  uint64_t first_free_entry;    /* The free entry following the latest valid
+                                 * entry, which is going to be written */
+  uint64_t first_valid_entry;   /* The oldest valid entry to be retired */
 
   #ifdef WITH_RBD_SSD_CACHE
   DENC(WriteLogPoolRoot, v, p) {
@@ -346,7 +347,7 @@ public:
                                   const ExtentsSummary &s) {
     os << "total_bytes=" << s.total_bytes << ", "
        << "first_image_byte=" << s.first_image_byte << ", "
-       << "last_image_byte=" << s.last_image_byte << "";
+       << "last_image_byte=" << s.last_image_byte;
     return os;
   }
   BlockExtent block_extent() {
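
(Not part of the commit: the reworded comments above describe WriteLogPoolRoot as tracking a ring of num_log_entries slots, where first_free_entry is the slot the next entry will be written to and first_valid_entry is the oldest entry still awaiting retirement. A small sketch of the occupancy arithmetic such a layout implies; used_log_entries() and free_log_entries() are hypothetical helpers, not functions from this commit:)

    #include <cstdint>

    // In-use slots in a circular log: from first_valid_entry (oldest valid)
    // up to, but not including, first_free_entry (next slot to be written).
    uint64_t used_log_entries(uint64_t first_valid_entry,
                              uint64_t first_free_entry,
                              uint64_t num_log_entries) {
      if (first_free_entry >= first_valid_entry) {
        return first_free_entry - first_valid_entry;
      }
      // The log has wrapped around the end of the ring.
      return num_log_entries - (first_valid_entry - first_free_entry);
    }

    // Slots still available before new entries would reach valid ones.
    uint64_t free_log_entries(uint64_t first_valid_entry,
                              uint64_t first_free_entry,
                              uint64_t num_log_entries) {
      return num_log_entries - used_log_entries(first_valid_entry,
                                                first_free_entry,
                                                num_log_entries);
    }

This is the same ring-order view the retire path in rwl/WriteLog.cc relies on below: entries are retired strictly from first_valid_entry, so the front of m_log_entries is expected to carry that index.
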
index 09158127241a33ded3d6b6460e167277e4faf86b..093e09a94f5c67d6d745cd80cbae68ec79cf49ab 100644 (file)
@@ -49,10 +49,10 @@ template <typename T>
 std::ostream &operator<<(std::ostream &os,
                          const C_CompAndWriteRequest<T> &req) {
   os << (C_WriteRequest<T>&)req
-     << "cmp_bl=" << req.cmp_bl << ", "
-     << "read_bl=" << req.read_bl << ", "
-     << "compare_succeeded=" << req.compare_succeeded << ", "
-     << "mismatch_offset=" << req.mismatch_offset;
+     << " cmp_bl=" << req.cmp_bl << ","
+     << " read_bl=" << req.read_bl << ","
+     << " compare_succeeded=" << req.compare_succeeded << ","
+     << " mismatch_offset=" << req.mismatch_offset;
   return os;
 }
 
index c340fe6a8c8d739cb51d490d9a72ab651ea56e3e..44ca037f82e1b9a1ac183a1d1c2ef49a469fda00 100644 (file)
@@ -160,9 +160,6 @@ int WriteLog<I>::append_op_log_entries(GenericLogOperations &ops)
                                << "from " << &operation->get_log_entry()->ram_entry << " "
                                << "to " << operation->get_log_entry()->cache_entry << " "
                                << "operation=[" << *operation << "]" << dendl;
-    ldout(m_image_ctx.cct, 05) << "APPENDING: index="
-                               << operation->get_log_entry()->log_entry_index << " "
-                               << "operation=[" << *operation << "]" << dendl;
     operation->log_append_start_time = now;
     *operation->get_log_entry()->cache_entry = operation->get_log_entry()->ram_entry;
     ldout(m_image_ctx.cct, 20) << "APPENDING: index="
@@ -270,8 +267,8 @@ bool WriteLog<I>::initialize_pool(Context *on_finish, pwl::DeferredContexts &lat
                         this->m_pwl_pool_layout_name,
                         this->m_log_pool_size,
                         (S_IWUSR | S_IRUSR))) == NULL) {
-      lderr(cct) << "failed to create pool (" << this->m_log_pool_name << ")"
-                 << pmemobj_errormsg() << dendl;
+      lderr(cct) << "failed to create pool: " << this->m_log_pool_name
+                 << ". error: " << pmemobj_errormsg() << dendl;
       m_cache_state->present = false;
       m_cache_state->clean = true;
       m_cache_state->empty = true;
@@ -317,7 +314,8 @@ bool WriteLog<I>::initialize_pool(Context *on_finish, pwl::DeferredContexts &lat
     } TX_ONABORT {
       this->m_total_log_entries = 0;
       this->m_free_log_entries = 0;
-      lderr(cct) << "failed to initialize pool (" << this->m_log_pool_name << ")" << dendl;
+      lderr(cct) << "failed to initialize pool: " << this->m_log_pool_name
+                 << ". pmemobj TX errno: " << pmemobj_tx_errno() << dendl;
       r = -pmemobj_tx_errno();
       goto err_close_pool;
     } TX_FINALLY {
@@ -336,13 +334,13 @@ bool WriteLog<I>::initialize_pool(Context *on_finish, pwl::DeferredContexts &lat
     pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
     if (D_RO(pool_root)->header.layout_version != RWL_LAYOUT_VERSION) {
       // TODO: will handle upgrading version in the future
-      lderr(cct) << "Pool layout version is "
+      lderr(cct) << "pool layout version is "
                  << D_RO(pool_root)->header.layout_version
                  << " expected " << RWL_LAYOUT_VERSION << dendl;
       goto err_close_pool;
     }
     if (D_RO(pool_root)->block_size != MIN_WRITE_ALLOC_SIZE) {
-      lderr(cct) << "Pool block size is " << D_RO(pool_root)->block_size
+      lderr(cct) << "pool block size is " << D_RO(pool_root)->block_size
                  << " expected " << MIN_WRITE_ALLOC_SIZE << dendl;
       goto err_close_pool;
     }
@@ -482,7 +480,7 @@ bool WriteLog<I>::retire_entries(const unsigned long int frees_per_tx) {
            this->can_retire_entry(m_log_entries.front())) {
       auto entry = m_log_entries.front();
       if (entry->log_entry_index != first_valid_entry) {
-        lderr(cct) << "Retiring entry index (" << entry->log_entry_index
+        lderr(cct) << "retiring entry index (" << entry->log_entry_index
                    << ") and first valid log entry index (" << first_valid_entry
                    << ") must be ==." << dendl;
       }
index e92e547c8e60782ccbda78acad8de4a140a6b43f..308db78f7c36378af5abf058a58168ab2b402d65 100644 (file)
@@ -34,10 +34,10 @@ template <typename T>
 std::ostream &operator<<(std::ostream &os,
                          const C_CompAndWriteRequest<T> &req) {
   os << (C_WriteRequest<T>&)req
-     << "cmp_bl=" << req.cmp_bl << ", "
-     << "read_bl=" << req.read_bl << ", "
-     << "compare_succeeded=" << req.compare_succeeded << ", "
-     << "mismatch_offset=" << req.mismatch_offset;
+     << " cmp_bl=" << req.cmp_bl << ","
+     << " read_bl=" << req.read_bl << ","
+     << " compare_succeeded=" << req.compare_succeeded << ","
+     << " mismatch_offset=" << req.mismatch_offset;
   return os;
 }
 
index b537ce948a65d4cb25805809a8ba3f62a06b6de5..8ffdb7e94d2c3e86293ccb5ee239f03b7ab9991b 100644 (file)
@@ -209,7 +209,7 @@ bool WriteLog<I>::initialize_pool(Context *on_finish,
     ::IOContext ioctx(cct, nullptr);
     r = bdev->read(0, MIN_WRITE_ALLOC_SSD_SIZE, &bl, &ioctx, false);
     if (r < 0) {
-      lderr(cct) << "Read ssd cache superblock failed " << dendl;
+      lderr(cct) << "read ssd cache superblock failed " << dendl;
       goto err_close_bdev;
     }
     auto p = bl.cbegin();
@@ -222,14 +222,14 @@ bool WriteLog<I>::initialize_pool(Context *on_finish,
                   << dendl;
     ceph_assert(is_valid_pool_root(pool_root));
     if (pool_root.layout_version != SSD_LAYOUT_VERSION) {
-      lderr(cct) << "Pool layout version is "
+      lderr(cct) << "pool layout version is "
                  << pool_root.layout_version
                  << " expected " << SSD_LAYOUT_VERSION
                  << dendl;
       goto err_close_bdev;
     }
     if (pool_root.block_size != MIN_WRITE_ALLOC_SSD_SIZE) {
-      lderr(cct) << "Pool block size is " << pool_root.block_size
+      lderr(cct) << "pool block size is " << pool_root.block_size
                  << " expected " << MIN_WRITE_ALLOC_SSD_SIZE
                  << dendl;
       goto err_close_bdev;
@@ -446,8 +446,8 @@ void WriteLog<I>::append_scheduled_ops(void) {
   GenericLogOperations ops;
   ldout(m_image_ctx.cct, 20) << dendl;
 
-  bool ops_remain = false; //no-op variable for SSD
-  bool appending = false; //no-op variable for SSD
+  bool ops_remain = false;  // unused, no-op variable for SSD
+  bool appending = false;   // unused, no-op variable for SSD
   this->append_scheduled(ops, ops_remain, appending);
 
   if (ops.size()) {
@@ -1026,7 +1026,7 @@ void WriteLog<I>::update_root_scheduled_ops() {
   });
   Context *append_ctx = new LambdaContext([this, ctx](int r) {
     ldout(m_image_ctx.cct, 15) << "Finish the update of pool root." << dendl;
-    bool need_finisher = false;;
+    bool need_finisher = false;
     assert(r == 0);
     {
       std::lock_guard locker(m_lock);