git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
Merge pull request #47969 from myoungwon/wip-rbm-integration-step1
author Yingxin <yingxin.cheng@intel.com>
Wed, 30 Nov 2022 01:23:46 +0000 (09:23 +0800)
committer GitHub <noreply@github.com>
Wed, 30 Nov 2022 01:23:46 +0000 (09:23 +0800)
crimson/os/seastore: add RBM OOL path

Reviewed-by: Yingxin Cheng <yingxin.cheng@intel.com>
Reviewed-by: Samuel Just <sjust@redhat.com>
1  2 
src/crimson/os/seastore/async_cleaner.cc
src/crimson/os/seastore/async_cleaner.h
src/crimson/os/seastore/cache.cc
src/crimson/os/seastore/extent_placement_manager.h
src/crimson/os/seastore/random_block_manager.h
src/crimson/os/seastore/transaction.h

index 599d6631f87b9bd217fe48a58846344ba51a996a,c7b6f9fb531d935245c4b7470ab6d674ec27d92b..4e03980c942cc8ae96cb6deee4eab73e6662716b
@@@ -1284,11 -1532,120 +1532,127 @@@ private
    SegmentSeqAllocatorRef ool_segment_seq_allocator;
  };
  
+ class RBMCleaner;
+ using RBMCleanerRef = std::unique_ptr<RBMCleaner>;
+ /**
+  * RBMCleaner
+  *
+  * AsyncCleaner implementation backed by a group of RandomBlockManager
+  * (RBM) devices rather than segments.  Owns the RBMDeviceGroup and
+  * consults the BackrefManager for space tracking; several interfaces
+  * are still stubs (see the TODOs below).
+  */
+ class RBMCleaner : public AsyncCleaner {
+ public:
+   RBMCleaner(
+     RBMDeviceGroupRef&& rb_group,
+     BackrefManager &backref_manager,
+     bool detailed);
+   // Factory: takes ownership of the device group.
+   static RBMCleanerRef create(
+       RBMDeviceGroupRef&& rb_group,
+       BackrefManager &backref_manager,
+       bool detailed) {
+     return std::make_unique<RBMCleaner>(
+       std::move(rb_group), backref_manager, detailed);
+   }
+   // Non-owning accessor; lifetime is tied to this cleaner.
+   RBMDeviceGroup* get_rb_group() {
+     return rb_group.get();
+   }
+   /*
+    * AsyncCleaner interfaces
+    */
+   void set_background_callback(BackgroundListener *cb) final {
+     background_callback = cb;
+   }
+   void set_extent_callback(ExtentCallbackInterface *cb) final {
+     extent_callback = cb;
+   }
+   store_statfs_t get_stat() const final {
+     store_statfs_t st;
+     // TODO: populate st from the space-usage stats below
+     return st;
+   }
+   void print(std::ostream &, bool is_detailed) const final;
+   mount_ret mount() final;
+   void mark_space_used(paddr_t, extent_len_t) final;
+   void mark_space_free(paddr_t, extent_len_t) final;
+   void commit_space_used(paddr_t, extent_len_t) final;
+   void reserve_projected_usage(std::size_t) final;
+   void release_projected_usage(size_t) final;
+   // NOTE(review): RBM cleaning appears not implemented yet (clean_space()
+   // is declared but these predicates are hard-wired false), so background
+   // cleaning never triggers and IO never blocks on it — confirm in .cc.
+   bool should_block_io_on_clean() const final {
+     return false;
+   }
+   bool should_clean_space() const final {
+     return false;
+   }
+   clean_space_ret clean_space() final;
+   // Returns the RandomBlockManager serving paddr's device id, or
+   // nullptr if no manager in the group matches.
+   RandomBlockManager* get_rbm(paddr_t paddr) {
+     auto rbs = rb_group->get_rb_managers();
+     for (auto p : rbs) {
+       if (p->get_device_id() == paddr.get_device_id()) {
+       return p;
+       }
+     }
+     return nullptr;
+   }
+   // Allocates `length` bytes and returns the resulting paddr; currently
+   // always allocates from the first RBM in the group.
+   paddr_t alloc_paddr(extent_len_t length) {
+     // TODO: implement allocation strategy (dirty metadata and multiple devices)
+     auto rbs = rb_group->get_rb_managers();
+     return rbs[0]->alloc_extent(length);
+   }
+   // Testing interfaces
+   bool check_usage() final;
+   bool check_usage_is_empty() const final {
+     // TODO
+     return true;
+   }
+ private:
+   // (testing) compares internal space tracking against `other`; impl in .cc.
+   bool equals(const RBMSpaceTracker &other) const;
+   // Presumably enables detailed accounting/printing (cf. print()'s
+   // is_detailed) — confirm against the .cc implementation.
+   const bool detailed;
+   RBMDeviceGroupRef rb_group;
+   BackrefManager &backref_manager;
+   struct {
+     /**
+      * used_bytes
+      *
+      * Bytes occupied by live extents
+      */
+     uint64_t used_bytes = 0;
+     /**
+      * projected_used_bytes
+      *
+      * Sum of projected bytes used by each transaction between throttle
+      * acquisition and commit completion.  See reserve_projected_usage()
+      */
+     uint64_t projected_used_bytes = 0;
+   } stats;
+   ExtentCallbackInterface *extent_callback = nullptr;
+   BackgroundListener *background_callback = nullptr;
+ };
  }
 +
 +// fmt v9 stopped formatting types via operator<< implicitly; these
 +// specializations opt the seastore stat/printer types back in so
 +// existing LOG/fmt call sites keep compiling.
 +#if FMT_VERSION >= 90000
 +template <> struct fmt::formatter<crimson::os::seastore::segment_info_t> : fmt::ostream_formatter {};
 +template <> struct fmt::formatter<crimson::os::seastore::segments_info_t> : fmt::ostream_formatter {};
 +template <> struct fmt::formatter<crimson::os::seastore::AsyncCleaner::stat_printer_t> : fmt::ostream_formatter {};
 +template <> struct fmt::formatter<crimson::os::seastore::JournalTrimmerImpl::stat_printer_t> : fmt::ostream_formatter {};
 +#endif
Simple merge
index d94a52f46c3c445befd5926da1cb2d31f764b0b5,d6eaa975b27bf10aa4e221f380a79dbe49e97126..2141a3a971bdc1563e4241ed8c07b9c012f5f37f
@@@ -149,11 -194,31 +194,31 @@@ public
      assert(is_target_rewrite_generation(gen));
      assert(gen == INIT_GENERATION || hint == placement_hint_t::REWRITE);
  
 -    // XXX: bp might be extended to point to differnt memory (e.g. PMem)
 +    // XXX: bp might be extended to point to different memory (e.g. PMem)
      // according to the allocator.
-     auto bp = ceph::bufferptr(
+     auto alloc_paddr = [this](rewrite_gen_t gen, 
+       data_category_t category, extent_len_t length) 
+       -> alloc_result_t {
+       auto bp = ceph::bufferptr(
        buffer::create_page_aligned(length));
-     bp.zero();
+       bp.zero();
+       paddr_t addr;
+       if (gen == INLINE_GENERATION) {
+       addr = make_record_relative_paddr(0);
+       } else if (category == data_category_t::DATA) {
+       assert(data_writers_by_gen[generation_to_writer(gen)]);
+       addr = data_writers_by_gen[
+         generation_to_writer(gen)]->alloc_paddr(length);
+       } else {
+       assert(category == data_category_t::METADATA);
+       assert(md_writers_by_gen[generation_to_writer(gen)]);
+       addr = md_writers_by_gen[
+         generation_to_writer(gen)]->alloc_paddr(length);
+       }
+       return {addr,
+             std::move(bp),
+             gen};
+     };
  
      if (!is_logical_type(type)) {
        // TODO: implement out-of-line strategy for physical extent.
Simple merge