git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson: add ObjectContext/Registry
author: Samuel Just <sjust@redhat.com>
Mon, 7 Oct 2019 18:37:10 +0000 (11:37 -0700)
committer: Samuel Just <sjust@redhat.com>
Tue, 3 Dec 2019 05:35:36 +0000 (21:35 -0800)
ObjectContext will hold the ephemeral state and lock status
for an object as in classic osd.

Signed-off-by: Samuel Just <sjust@redhat.com>
src/common/options.cc
src/crimson/osd/CMakeLists.txt
src/crimson/osd/object_context.cc [new file with mode: 0644]
src/crimson/osd/object_context.h [new file with mode: 0644]
src/crimson/osd/ops_executer.h
src/crimson/osd/osd_operation.h
src/crimson/osd/pg_backend.h
src/crimson/osd/shard_services.cc
src/crimson/osd/shard_services.h

index 560e5ebfc5b1c622f1944c1d3762222e64a0e157..3cc2a8600894ed6995d66475de06f39e66710106 100644 (file)
@@ -5248,6 +5248,14 @@ std::vector<Option> get_global_options() {
     Option("debug_heartbeat_testing_span", Option::TYPE_INT, Option::LEVEL_DEV)
     .set_default(0)
     .set_description("Override 60 second periods for testing only"),
+
+    // ----------------------------
+    // Crimson specific options
+
+    Option("crimson_osd_obc_lru_size", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+    .set_default(10)
+    .set_description("Number of obcs to cache")
+
   });
 }
 
index e872d5f3f3983af9a11b192f253478d71a561721..e976b9a7d76ec54731d0cbbeeb6e5088a071b4df 100644 (file)
@@ -10,6 +10,7 @@ add_executable(crimson-osd
   pg_meta.cc
   replicated_backend.cc
   shard_services.cc
+  object_context.cc
   ops_executer.cc
   osd_operation.cc
   osd_operations/client_request.cc
diff --git a/src/crimson/osd/object_context.cc b/src/crimson/osd/object_context.cc
new file mode 100644 (file)
index 0000000..0c70df5
--- /dev/null
@@ -0,0 +1,38 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "crimson/osd/object_context.h"
+
+namespace crimson::osd {
+
+// Blocker interface: emit this obc's identifying state (the object info)
+// for operation/blocker introspection via the given Formatter.
+void ObjectContext::dump_detail(Formatter *f) const
+{
+  f->open_object_section("ObjectContext");
+  obs.oi.dump(f);
+  f->close_section();
+}
+
+// Size the obc LRU from the crimson_osd_obc_lru_size option, then register
+// as a config observer so later changes resize it (see handle_conf_change).
+// NOTE(review): the observer is never removed (no remove_observer call in
+// view) -- confirm the registry outlives the config proxy.
+ObjectContextRegistry::ObjectContextRegistry(crimson::common::ConfigProxy &conf)
+{
+  obc_lru.set_target_size(conf.get_val<uint64_t>("crimson_osd_obc_lru_size"));
+  conf.add_observer(this);
+}
+
+// md_config_obs_t interface: null-terminated list of config keys this
+// observer wants change notifications for.
+const char** ObjectContextRegistry::get_tracked_conf_keys() const
+{
+  static const char* KEYS[] = {
+    "crimson_osd_obc_lru_size",
+    nullptr
+  };
+  return KEYS;
+}
+
+// md_config_obs_t interface: re-read the lru target size on config change.
+// `changed` is intentionally unused -- only one key is tracked.
+void ObjectContextRegistry::handle_conf_change(
+  const ConfigProxy& conf,
+  const std::set <std::string> &changed)
+{
+  obc_lru.set_target_size(conf.get_val<uint64_t>("crimson_osd_obc_lru_size"));
+}
+
+
+}
diff --git a/src/crimson/osd/object_context.h b/src/crimson/osd/object_context.h
new file mode 100644 (file)
index 0000000..69af42c
--- /dev/null
@@ -0,0 +1,225 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <seastar/core/shared_future.hh>
+
+#include <boost/intrusive_ptr.hpp>
+#include <boost/intrusive/list.hpp>
+#include <boost/intrusive/set.hpp>
+#include <boost/smart_ptr/intrusive_ref_counter.hpp>
+
+#include "common/intrusive_lru.h"
+#include "osd/object_state.h"
+#include "crimson/common/config_proxy.h"
+#include "crimson/osd/osd_operation.h"
+
+namespace crimson::osd {
+
+// Key-extraction functor for the intrusive LRU: maps an ObjectContext to
+// the hobject_t it caches state for (the soid stored in its ObjectState).
+template <typename OBC>
+struct obc_to_hoid {
+  using type = hobject_t;
+  // const-qualified so it is callable through a const functor instance,
+  // as key extractors typically are inside the container.
+  const type &operator()(const OBC &obc) const {
+    return obc.obs.oi.soid;
+  }
+
+class ObjectContext : public Blocker,
+                     public ceph::common::intrusive_lru_base<
+  ceph::common::intrusive_lru_config<
+    hobject_t, ObjectContext, obc_to_hoid<ObjectContext>>>
+{
+public:
+  Ref head; // Ref defined as part of ceph::common::intrusive_lru_base
+  ObjectState obs;
+  std::optional<SnapSet> ss;
+  bool loaded : 1;
+  ObjectContext(const hobject_t &hoid) : obs(hoid), loaded(false) {}
+
+  const hobject_t &get_oid() const {
+    return obs.oi.soid;
+  }
+
+  bool is_head() const {
+    return get_oid().is_head();
+  }
+
+  const SnapSet &get_ro_ss() const {
+    if (is_head()) {
+      ceph_assert(ss);
+      return *ss;
+    } else {
+      ceph_assert(head);
+      return head->get_ro_ss();
+    }
+  }
+
+  void set_head_state(ObjectState &&_obs, SnapSet &&_ss) {
+    ceph_assert(is_head());
+    obs = std::move(_obs);
+    ss = std::move(_ss);
+    loaded = true;
+  }
+
+  void set_clone_state(ObjectState &&_obs, Ref &&_head) {
+    ceph_assert(!is_head());
+    obs = std::move(_obs);
+    head = _head;
+    loaded = true;
+  }
+
+private:
+  RWState rwstate;
+  seastar::shared_mutex wait_queue;
+  std::optional<seastar::shared_promise<>> wake;
+
+  template <typename F>
+  seastar::future<> with_queue(F &&f) {
+    return wait_queue.lock().then([this, f=std::move(f)] {
+      ceph_assert(!wake);
+      return seastar::repeat([this, f=std::move(f)]() {
+       if (f()) {
+         wait_queue.unlock();
+         return seastar::make_ready_future<seastar::stop_iteration>(
+           seastar::stop_iteration::yes);
+       } else {
+         rwstate.inc_waiters();
+         wake = seastar::shared_promise<>();
+         return wake->get_shared_future().then([this, f=std::move(f)] {
+           wake = std::nullopt;
+           rwstate.dec_waiters(1);
+           return seastar::make_ready_future<seastar::stop_iteration>(
+             seastar::stop_iteration::no);
+         });
+       }
+      });
+    });
+  }
+
+
+  const char *get_type_name() const final {
+    return "ObjectContext";
+  }
+  void dump_detail(Formatter *f) const final;
+
+  template <typename LockF>
+  seastar::future<> get_lock(
+    Operation *op,
+    LockF &&lockf) {
+    return op->with_blocking_future(
+      make_blocking_future(with_queue(std::forward<LockF>(lockf))));
+  }
+
+  template <typename UnlockF>
+  void put_lock(
+    UnlockF &&unlockf) {
+    if (unlockf() && wake) wake->set_value();
+  }
+public:
+  seastar::future<> get_lock_type(Operation *op, RWState::State type) {
+    switch (type) {
+    case RWState::RWWRITE:
+      return get_lock(op, [this] { return rwstate.get_write_lock(); });
+    case RWState::RWREAD:
+      return get_lock(op, [this] { return rwstate.get_read_lock(); });
+    case RWState::RWEXCL:
+      return get_lock(op, [this] { return rwstate.get_excl_lock(); });
+    case RWState::RWNONE:
+      return seastar::now();
+    default:
+      ceph_abort_msg("invalid lock type");
+      return seastar::now();
+    }
+  }
+
+  void put_lock_type(RWState::State type) {
+    switch (type) {
+    case RWState::RWWRITE:
+      return put_lock([this] { return rwstate.put_write(); });
+    case RWState::RWREAD:
+      return put_lock([this] { return rwstate.put_read(); });
+    case RWState::RWEXCL:
+      return put_lock([this] { return rwstate.put_excl(); });
+    case RWState::RWNONE:
+      return;
+    default:
+      ceph_abort_msg("invalid lock type");
+      return;
+    }
+  }
+
+  void degrade_excl_to(RWState::State type) {
+    // assume we already hold an excl lock
+    bool put = rwstate.put_excl();
+    bool success = false;
+    switch (type) {
+    case RWState::RWWRITE:
+      success = rwstate.get_write_lock();
+      break;
+    case RWState::RWREAD:
+      success = rwstate.get_read_lock();
+      break;
+    case RWState::RWEXCL:
+      success = rwstate.get_excl_lock();
+      break;
+    case RWState::RWNONE:
+      success = true;
+      break;
+    default:
+      ceph_abort_msg("invalid lock type");
+      break;
+    }
+    ceph_assert(success);
+    if (put && wake) {
+      wake->set_value();
+    }
+  }
+
+  bool empty() const { return rwstate.empty(); }
+
+  template <typename F>
+  seastar::future<> get_write_greedy(Operation *op) {
+    return get_lock(op, [this] { return rwstate.get_write_lock(true); });
+  }
+
+  bool try_get_read_lock() {
+    return rwstate.get_read_lock();
+  }
+  void drop_read() {
+    return put_lock_type(RWState::RWREAD);
+  }
+  bool get_recovery_read() {
+    return rwstate.get_recovery_read();
+  }
+  void drop_recovery_read() {
+    ceph_assert(rwstate.recovery_read_marker);
+    drop_read();
+    rwstate.recovery_read_marker = false;
+  }
+  bool maybe_get_excl() {
+    return rwstate.get_excl_lock();
+  }
+
+  bool is_request_pending() const {
+    return !rwstate.empty();
+  }
+};
+using ObjectContextRef = ObjectContext::Ref;
+
+// Cache of ObjectContexts keyed by hobject_t, backed by the intrusive LRU
+// declared on ObjectContext.  Tracks the crimson_osd_obc_lru_size config
+// option so the LRU target size can be changed at runtime.
+class ObjectContextRegistry : public md_config_obs_t  {
+  ObjectContext::lru_t obc_lru;
+
+public:
+  ObjectContextRegistry(crimson::common::ConfigProxy &conf);
+
+  // Return the obc for hoid, creating it if absent; the bool reflects
+  // get_or_create's hit/create result -- confirm polarity in intrusive_lru.
+  std::pair<ObjectContextRef, bool> get_cached_obc(const hobject_t &hoid) {
+    return obc_lru.get_or_create(hoid);
+  }
+
+  // md_config_obs_t interface (defined in object_context.cc)
+  const char** get_tracked_conf_keys() const final;
+  void handle_conf_change(const ConfigProxy& conf,
+                          const std::set <std::string> &changed) final;
+};
+
+}
index 0696443501e068f3ea9c16f50fb928c9c64a682a..66ab7ad8b663b0d194239697570126cc05aae863 100644 (file)
@@ -17,7 +17,7 @@
 #include "crimson/net/Fwd.h"
 #include "os/Transaction.h"
 #include "osd/osd_types.h"
-#include "osd/osd_internal_types.h"
+#include "crimson/osd/object_context.h"
 
 #include "crimson/common/errorator.h"
 #include "crimson/common/type_helpers.h"
index 6fe98d6df08a279c0013283e297ec9d00e3af25f..7e8a9c2fb54b184dcf640698d01553bca73d6f99 100644 (file)
@@ -13,6 +13,8 @@
 #include <seastar/core/shared_mutex.hh>
 #include <seastar/core/future.hh>
 
+#include "include/ceph_assert.h"
+
 namespace ceph {
   class Formatter;
 }
index 6530ef11602d83aa221b86fc4728d3b43bbbad6f..d355ab02598eb772d0abf8ff5445aeffcfbade2a 100644 (file)
@@ -13,7 +13,7 @@
 #include "crimson/osd/acked_peers.h"
 #include "crimson/common/shared_lru.h"
 #include "osd/osd_types.h"
-#include "osd/osd_internal_types.h"
+#include "crimson/osd/object_context.h"
 
 struct hobject_t;
 class MOSDRepOpReply;
index e282df5ce97be97d45512795612d31114333eb9b..818006a2901bbb8d5a28e08d30a7e8adf2b468a5 100644 (file)
@@ -37,7 +37,8 @@ ShardServices::ShardServices(
       public_msgr(public_msgr),
       monc(monc),
       mgrc(mgrc),
-      store(store)
+      store(store),
+      obc_registry(crimson::common::local_conf())
 {
   perf = build_osd_logger(&cct);
   cct.get_perfcounters_collection()->add(perf);
index d19aa648d8516a3825c274ab8a1b810a42f897d9..48b5b94d4a2342fa3ad311df5e3970fb2d7f7ab5 100644 (file)
@@ -11,6 +11,7 @@
 #include "crimson/os/futurized_collection.h"
 #include "osd/PeeringState.h"
 #include "crimson/osd/osdmap_service.h"
+#include "crimson/osd/object_context.h"
 
 namespace crimson::net {
   class Messenger;
@@ -158,6 +159,8 @@ public:
   }
   HeartbeatStampsRef get_hb_stamps(int peer);
   std::map<int, HeartbeatStampsRef> heartbeat_stamps;
+
+  crimson::osd::ObjectContextRegistry obc_registry;
 };
 
 }