librbd/cache/pwl/ssd: add layout version control
author Yin Congmin <congmin.yin@intel.com>
Tue, 6 Jul 2021 11:35:42 +0000 (19:35 +0800)
committer Yin Congmin <congmin.yin@intel.com>
Fri, 5 Nov 2021 03:36:57 +0000 (11:36 +0800)
Signed-off-by: Yin Congmin <congmin.yin@intel.com>
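
The substantive change is in ssd/WriteLog.cc: initialize_pool() now stamps SSD_LAYOUT_VERSION into the superblock root when a new cache is created, and when opening an existing cache it reads and decodes the superblock itself (work previously done in load_existing_entries()) and rejects the pool with -EINVAL if layout_version or block_size does not match what this build expects. Below is a minimal standalone sketch of that validation pattern; the PoolRoot struct and the constant values are simplified stand-ins for illustration, not the actual librbd types.

    // Simplified model of the superblock check added by this commit.
    // PoolRoot and the constants below are stand-ins, not the real
    // WriteLogPoolRoot / Types.h definitions.
    #include <cstdint>
    #include <iostream>

    constexpr uint8_t  SSD_LAYOUT_VERSION       = 1;
    constexpr uint64_t MIN_WRITE_ALLOC_SSD_SIZE = 4096;  // assumed value

    struct PoolRoot {                 // stand-in for WriteLogPoolRoot
      uint8_t  layout_version;
      uint64_t block_size;
      uint64_t pool_size;
    };

    // Mirrors the checks initialize_pool() performs after decoding the
    // superblock: returns 0 when the on-media layout is usable,
    // -EINVAL (-22) otherwise.
    int validate_pool_root(const PoolRoot &root) {
      if (root.layout_version != SSD_LAYOUT_VERSION) {
        std::cerr << "Pool layout version is " << unsigned(root.layout_version)
                  << " expected " << unsigned(SSD_LAYOUT_VERSION) << std::endl;
        return -22;
      }
      if (root.block_size != MIN_WRITE_ALLOC_SSD_SIZE) {
        std::cerr << "Pool block size is " << root.block_size
                  << " expected " << MIN_WRITE_ALLOC_SSD_SIZE << std::endl;
        return -22;
      }
      return 0;
    }

    int main() {
      PoolRoot current{SSD_LAYOUT_VERSION, MIN_WRITE_ALLOC_SSD_SIZE, 1ull << 30};
      PoolRoot stale{0, MIN_WRITE_ALLOC_SSD_SIZE, 1ull << 30};
      std::cout << validate_pool_root(current) << " "        // 0: accepted
                << validate_pool_root(stale) << std::endl;   // -22: rejected
    }

With the RWL and SSD formats tracked by separate constants (RWL_LAYOUT_VERSION and SSD_LAYOUT_VERSION), either on-media layout can have its version bumped independently; the existing TODO in rwl/WriteLog.cc notes that handling an actual version upgrade is left for later.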
src/librbd/cache/pwl/Types.h
src/librbd/cache/pwl/rwl/WriteLog.cc
src/librbd/cache/pwl/ssd/WriteLog.cc

index ce6f6ab9da87019031075ee37e7e1e841ba5244a..863292e16117270f9779f4f5493416ae8db4156d 100644 (file)
@@ -177,7 +177,8 @@ const uint64_t MIN_POOL_SIZE = DEFAULT_POOL_SIZE;
 const uint64_t POOL_SIZE_ALIGN = 1 << 20;
 constexpr double USABLE_SIZE = (7.0 / 10);
 const uint64_t BLOCK_ALLOC_OVERHEAD_BYTES = 16;
-const uint8_t RWL_POOL_VERSION = 1;
+const uint8_t RWL_LAYOUT_VERSION = 1;
+const uint8_t SSD_LAYOUT_VERSION = 1;
 const uint64_t MAX_LOG_ENTRIES = (1024 * 1024);
 const double AGGRESSIVE_RETIRE_HIGH_WATER = 0.75;
 const double RETIRE_HIGH_WATER = 0.50;
@@ -282,7 +283,7 @@ struct WriteLogPoolRoot {
   #ifdef WITH_RBD_RWL
   union {
     struct {
-      uint8_t layout_version;    /* Version of this structure (RWL_POOL_VERSION) */
+      uint8_t layout_version;    /* Version of this structure (RWL_LAYOUT_VERSION) */
     };
     uint64_t _u64;
   } header;
index f635015329a6c073f49a1ac8b0f744d3742bf059..e90f2fa8eba0b8867aa7888e9966023a988cf86e 100644 (file)
@@ -301,7 +301,7 @@ bool WriteLog<I>::initialize_pool(Context *on_finish, pwl::DeferredContexts &lat
     m_first_valid_entry = 0;
     TX_BEGIN(m_log_pool) {
       TX_ADD(pool_root);
-      D_RW(pool_root)->header.layout_version = RWL_POOL_VERSION;
+      D_RW(pool_root)->header.layout_version = RWL_LAYOUT_VERSION;
       D_RW(pool_root)->log_entries =
         TX_ZALLOC(struct WriteLogCacheEntry,
                   sizeof(struct WriteLogCacheEntry) * num_small_writes);
@@ -334,11 +334,11 @@ bool WriteLog<I>::initialize_pool(Context *on_finish, pwl::DeferredContexts &lat
       return false;
     }
     pool_root = POBJ_ROOT(m_log_pool, struct WriteLogPoolRoot);
-    if (D_RO(pool_root)->header.layout_version != RWL_POOL_VERSION) {
+    if (D_RO(pool_root)->header.layout_version != RWL_LAYOUT_VERSION) {
       // TODO: will handle upgrading version in the future
       lderr(cct) << "Pool layout version is "
                  << D_RO(pool_root)->header.layout_version
-                 << " expected " << RWL_POOL_VERSION << dendl;
+                 << " expected " << RWL_LAYOUT_VERSION << dendl;
       on_finish->complete(-EINVAL);
       return false;
     }
index 6a30c6f422c3013aabb85d420cdbae17cf357627..a0cb80d4eb672e38787fe9082b92d536f64ba335 100644 (file)
@@ -133,6 +133,7 @@ template <typename I>
 bool WriteLog<I>::initialize_pool(Context *on_finish,
                                   pwl::DeferredContexts &later) {
   int r;
+  CephContext *cct = m_image_ctx.cct;
 
   ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
   if (access(this->m_log_pool_name.c_str(), F_OK) != 0) {
@@ -177,6 +178,7 @@ bool WriteLog<I>::initialize_pool(Context *on_finish,
     m_first_valid_entry = DATA_RING_BUFFER_OFFSET;
 
     auto new_root = std::make_shared<WriteLogPoolRoot>(pool_root);
+    new_root->layout_version = SSD_LAYOUT_VERSION;
     new_root->pool_size = this->m_log_pool_size;
     new_root->flushed_sync_gen = this->m_flushed_sync_gen;
     new_root->block_size = MIN_WRITE_ALLOC_SSD_SIZE;
@@ -187,8 +189,8 @@ bool WriteLog<I>::initialize_pool(Context *on_finish,
 
     r = update_pool_root_sync(new_root);
     if (r != 0) {
-      lderr(m_image_ctx.cct) << "failed to initialize pool ("
-                             << this->m_log_pool_name << ")" << dendl;
+      lderr(cct) << "failed to initialize pool ("
+                 << this->m_log_pool_name << ")" << dendl;
       bdev->close();
       delete bdev;
       on_finish->complete(r);
@@ -201,11 +203,57 @@ bool WriteLog<I>::initialize_pool(Context *on_finish,
       on_finish->complete(r);
       return false;
     }
+
+    bufferlist bl;
+    SuperBlock superblock;
+    ::IOContext ioctx(cct, nullptr);
+    r = bdev->read(0, MIN_WRITE_ALLOC_SSD_SIZE, &bl, &ioctx, false);
+    if (r < 0) {
+      lderr(cct) << "Read ssd cache superblock failed " << dendl;
+      goto error_handle;
+    }
+    auto p = bl.cbegin();
+    decode(superblock, p);
+    pool_root = superblock.root;
+    ldout(cct, 1) << "Decoded root: pool_size=" << pool_root.pool_size
+                  << " first_valid_entry=" << pool_root.first_valid_entry
+                  << " first_free_entry=" << pool_root.first_free_entry
+                  << " flushed_sync_gen=" << pool_root.flushed_sync_gen
+                  << dendl;
+    ceph_assert(is_valid_pool_root(pool_root));
+    if (pool_root.layout_version != SSD_LAYOUT_VERSION) {
+      lderr(cct) << "Pool layout version is "
+                 << pool_root.layout_version
+                 << " expected " << SSD_LAYOUT_VERSION
+                 << dendl;
+      goto error_handle;
+    }
+    if (pool_root.block_size != MIN_WRITE_ALLOC_SSD_SIZE) {
+      lderr(cct) << "Pool block size is " << pool_root.block_size
+                 << " expected " << MIN_WRITE_ALLOC_SSD_SIZE
+                 << dendl;
+      goto error_handle;
+    }
+
+    this->m_log_pool_size = pool_root.pool_size;
+    this->m_flushed_sync_gen = pool_root.flushed_sync_gen;
+    this->m_first_valid_entry = pool_root.first_valid_entry;
+    this->m_first_free_entry = pool_root.first_free_entry;
+    this->m_bytes_allocated_cap = this->m_log_pool_size -
+                                  DATA_RING_BUFFER_OFFSET -
+                                  MIN_WRITE_ALLOC_SSD_SIZE;
+
     load_existing_entries(later);
     m_cache_state->clean = this->m_dirty_log_entries.empty();
     m_cache_state->empty = m_log_entries.empty();
   }
   return true;
+
+error_handle:
+  bdev->close();
+  delete bdev;
+  on_finish->complete(-EINVAL);
+  return false;
 }
 
 template <typename I>
@@ -235,33 +283,8 @@ void WriteLog<I>::remove_pool_file() {
 
 template <typename I>
 void WriteLog<I>::load_existing_entries(pwl::DeferredContexts &later) {
-  bufferlist bl;
   CephContext *cct = m_image_ctx.cct;
-  ::IOContext ioctx(cct, nullptr);
-  bdev->read(0, MIN_WRITE_ALLOC_SSD_SIZE, &bl, &ioctx, false);
-  SuperBlock superblock;
-
-  auto p = bl.cbegin();
-  decode(superblock, p);
-
-  pool_root = superblock.root;
-  ldout(cct, 1) << "Decoded root: pool_size=" << pool_root.pool_size
-                << " first_valid_entry=" << pool_root.first_valid_entry
-                << " first_free_entry=" << pool_root.first_free_entry
-                << " flushed_sync_gen=" << pool_root.flushed_sync_gen
-                << dendl;
-  ceph_assert(is_valid_pool_root(pool_root));
-
-  this->m_log_pool_size = pool_root.pool_size;
-  this->m_flushed_sync_gen = pool_root.flushed_sync_gen;
-  this->m_first_valid_entry = pool_root.first_valid_entry;
-  this->m_first_free_entry = pool_root.first_free_entry;
-
-  this->m_bytes_allocated_cap = this->m_log_pool_size -
-      DATA_RING_BUFFER_OFFSET - MIN_WRITE_ALLOC_SSD_SIZE;
-
   std::map<uint64_t, std::shared_ptr<SyncPointLogEntry>> sync_point_entries;
-
   std::map<uint64_t, bool> missing_sync_points;
 
   // Iterate through the log_entries and append all the write_bytes