+#include "crimson/common/coroutine.h"
#include "crimson/osd/object_context_loader.h"
#include "osd/osd_types_fmt.h"
#include "osd/object_state_fmt.h"
using crimson::common::local_conf;
+
+// Load and lock a head object: obtain (or create) the cached obc for
+// manager.target, ensure it is loaded, and acquire lock_type on it.
+// On success both manager.head_state and manager.target_state refer to
+// the same obc and ownership of the lock stays with the manager.
+ObjectContextLoader::load_and_lock_fut
+ObjectContextLoader::load_and_lock_head(Manager &manager, RWState::State lock_type)
+{
+ LOG_PREFIX(ObjectContextLoader::load_and_lock_head);
+ DEBUGDPP("{} {}", dpp, manager.target, lock_type);
+ // Scope-exit guard: releases any partially acquired obc/lock state if
+ // we exit early (error or interruption) before releaser.cancel() below.
+ auto releaser = manager.get_releaser();
+ // no users pre-populate head_state on this path, so we don't bother to
+ // handle it
+ ceph_assert(manager.target.is_head());
+ ceph_assert(manager.head_state.is_empty());
+ ceph_assert(manager.target_state.is_empty());
+ auto [obc, existed] = obc_registry.get_cached_obc(manager.target);
+ // Head target: target_state and head_state share the same obc.
+ manager.set_state_obc(manager.target_state, obc);
+ manager.set_state_obc(manager.head_state, obc);
+
+ if (existed) {
+ // Already-cached obc: just (possibly asynchronously) take the
+ // requested lock.
+ co_await manager.target_state.lock_to(lock_type);
+ } else {
+ // Newly created obc: no other user can hold its lock yet, so the
+ // synchronous exclusive acquisition is asserted to succeed; load
+ // the object under it, then demote to the requested lock type.
+ manager.target_state.lock_excl_sync();
+ co_await load_obc(manager.target_state.obc);
+ manager.target_state.demote_excl_to(lock_type);
+ }
+ // Success: the manager now owns the lock; defuse the scope-exit guard
+ // so the caller controls release (via Manager::release()/destructor).
+ releaser.cancel();
+}
+
+// Load and lock a clone (non-head) object.  First ensures the head obc
+// is loaded and read-locked (unless the caller pre-populated
+// manager.head_state), then -- if options.resolve_clone is set --
+// resolves the requested snap against the head's snapset, and finally
+// loads and locks the resolved target.
+ObjectContextLoader::load_and_lock_fut
+ObjectContextLoader::load_and_lock_clone(Manager &manager, RWState::State lock_type)
+{
+ LOG_PREFIX(ObjectContextLoader::load_and_lock_clone);
+ DEBUGDPP("{} {}", dpp, manager.target, lock_type);
+ // Scope-exit guard: releases partially acquired state on early exit;
+ // defused by releaser.cancel() on the success path below.
+ auto releaser = manager.get_releaser();
+
+ ceph_assert(!manager.target.is_head());
+ ceph_assert(manager.target_state.is_empty());
+
+ // Callers may pre-populate head_state (already loaded/locked head);
+ // only fetch and read-lock the head ourselves if they did not.
+ if (manager.head_state.is_empty()) {
+ auto [obc, existed] = obc_registry.get_cached_obc(manager.target.get_head());
+ manager.set_state_obc(manager.head_state, obc);
+
+ if (existed) {
+ co_await manager.head_state.lock_to(RWState::RWREAD);
+ } else {
+ // Newly created head obc: exclusive lock is asserted to succeed
+ // synchronously; load, then demote to read.
+ manager.head_state.lock_excl_sync();
+ co_await load_obc(manager.head_state.obc);
+ manager.head_state.demote_excl_to(RWState::RWREAD);
+ }
+ }
+
+ if (manager.options.resolve_clone) {
+ // Map the requested snap id to the actual clone (or head) using the
+ // head's snapset.
+ auto resolved_oid = resolve_oid(
+ manager.head_state.obc->get_head_ss(),
+ manager.target);
+ if (!resolved_oid) {
+ ERRORDPP("clone {} not found", dpp, manager.target);
+ // Propagate ENOENT through the errorated future; the releaser
+ // above cleans up the head lock/state.
+ co_await load_obc_iertr::future<>(
+ crimson::ct_error::enoent::make()
+ );
+ }
+ // note: might be head if snap was taken after most recent write!
+ manager.target = *resolved_oid;
+ }
+
+ if (manager.target.is_head()) {
+ /* Yes, we assert at the top that manager.target is not head. However, it's
+ * possible that the requested snap (the resolve_clone path above) actually
+ * maps to head (a read on an rbd snapshot more recent than the most recent
+ * write on this specific rbd block, for example).
+ *
+ * In such an event, it's hypothetically possible that lock_type isn't
+ * RWREAD, in which case we need to drop and reacquire the lock. However,
+ * this case is at present impossible. Actual client requests cannot write
+ * to a snapshot and will therefore always be RWREAD. The pathways that
+ * actually can mutate a clone do not set resolve_clone, so target will not
+ * become head here.
+ */
+ manager.set_state_obc(manager.target_state, manager.head_state.obc);
+ if (lock_type != manager.head_state.state) {
+ // This case isn't actually possible at the moment for the above reason.
+ manager.head_state.release_lock();
+ co_await manager.target_state.lock_to(lock_type);
+ } else {
+ // Transfer lock ownership from head_state to target_state without
+ // touching the underlying lock (both reference the same obc).
+ manager.target_state.state = manager.head_state.state;
+ manager.head_state.state = RWState::RWNONE;
+ }
+ } else {
+ auto [obc, existed] = obc_registry.get_cached_obc(manager.target);
+ manager.set_state_obc(manager.target_state, obc);
+
+ if (existed) {
+ // Clones are only read-locked here; see the comment block above
+ // regarding why writes cannot reach this path.
+ co_await manager.target_state.lock_to(RWState::RWREAD);
+ } else {
+ manager.target_state.lock_excl_sync();
+ co_await load_obc(manager.target_state.obc);
+ // Clones share the head's snapset context.
+ manager.target_state.obc->set_clone_ssc(manager.head_state.obc->ssc);
+ manager.target_state.demote_excl_to(RWState::RWREAD);
+ }
+ }
+
+ // Success: locks remain owned by the manager; defuse the guard.
+ releaser.cancel();
+}
+
+// Entry point: load manager.target's obc (and, for clones, the head
+// obc) and acquire lock_type on it, dispatching on whether the target
+// is a head or a clone object.
+ObjectContextLoader::load_and_lock_fut
+ObjectContextLoader::load_and_lock(Manager &manager, RWState::State lock_type)
+{
+ LOG_PREFIX(ObjectContextLoader::load_and_lock);
+ DEBUGDPP("{} {}", dpp, manager.target, lock_type);
+ if (manager.target.is_head()) {
+ return load_and_lock_head(manager, lock_type);
+ } else {
+ return load_and_lock_clone(manager, lock_type);
+ }
+}
+
template<RWState::State State>
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::with_head_obc(const hobject_t& oid,
#pragma once
#include <seastar/core/future.hh>
+#include <seastar/util/defer.hh>
#include "crimson/common/errorator.h"
+#include "crimson/common/log.h"
#include "crimson/osd/object_context.h"
#include "crimson/osd/pg_backend.h"
+#include "osd/object_state_fmt.h"
namespace crimson::osd {
class ObjectContextLoader {
::crimson::osd::IOInterruptCondition,
load_obc_ertr>;
+ /* RAII owner of the loading/locking state for a single target object.
+  * Holds obc references (and the lock state taken on them) for the
+  * target and, for clone targets, the associated head object; both are
+  * released via release()/the destructor.  Constructed through
+  * ObjectContextLoader::get_obc_manager() and driven by
+  * load_and_lock(). */
+ class Manager {
+ ObjectContextLoader &loader;
+ hobject_t target;
+
+ // Only constructible by the loader (see friend declaration below).
+ Manager() = delete;
+ template <typename T>
+ Manager(ObjectContextLoader &loader, T &&t)
+ : loader(loader), target(std::forward<T>(t)) {}
+ // Non-copyable: copies would double-release locks/obc registrations.
+ Manager(const Manager &) = delete;
+ Manager &operator=(const Manager &o) = delete;
+
+ struct options_t {
+ // Whether load_and_lock resolves a clone target against the head's
+ // snapset (see load_and_lock_clone).
+ bool resolve_clone = true;
+ } options;
+
+ // Pairs an obc reference with the lock state currently held on it.
+ struct state_t {
+ RWState::State state = RWState::RWNONE;
+ ObjectContextRef obc;
+ bool is_empty() const { return !obc; }
+
+ // Take the exclusive lock synchronously.  Only valid when no other
+ // user can hold the lock (e.g. a freshly created obc): try_lock is
+ // asserted to succeed.
+ void lock_excl_sync() {
+ bool locked = obc->lock.try_lock_for_excl();
+ ceph_assert(locked);
+ state = RWState::RWEXCL;
+ }
+
+ // Downgrade a held exclusive lock to lock_type (write, read, or
+ // fully released); RWEXCL is a noop.
+ void demote_excl_to(RWState::State lock_type) {
+ assert(state == RWState::RWEXCL);
+ switch (lock_type) {
+ case RWState::RWWRITE:
+ obc->lock.demote_to_write();
+ state = RWState::RWWRITE;
+ break;
+ case RWState::RWREAD:
+ obc->lock.demote_to_read();
+ state = RWState::RWREAD;
+ break;
+ case RWState::RWNONE:
+ obc->lock.unlock_for_excl();
+ state = RWState::RWNONE;
+ break;
+ case RWState::RWEXCL:
+ //noop
+ break;
+ default:
+ ceph_assert(0 == "impossible");
+ }
+ }
+
+ // Asynchronously acquire lock_type.  Requires that no lock is
+ // currently held; returns an interruptible future that resolves
+ // once the lock is taken (immediately for RWNONE).
+ auto lock_to(RWState::State lock_type) {
+ assert(state == RWState::RWNONE);
+ switch (lock_type) {
+ case RWState::RWWRITE:
+ return interruptor::make_interruptible(
+ obc->lock.lock_for_write().then([this] {
+ state = RWState::RWWRITE;
+ }));
+ case RWState::RWREAD:
+ return interruptor::make_interruptible(
+ obc->lock.lock_for_read().then([this] {
+ state = RWState::RWREAD;
+ }));
+ case RWState::RWNONE:
+ // noop
+ return interruptor::now();
+ case RWState::RWEXCL:
+ return interruptor::make_interruptible(
+ obc->lock.lock_for_excl().then([this] {
+ state = RWState::RWEXCL;
+ }));
+ default:
+ ceph_assert(0 == "impossible");
+ return interruptor::now();
+ }
+ }
+
+ // Drop whatever lock is currently held (noop for RWNONE) and reset
+ // the recorded state.
+ void release_lock() {
+ switch (state) {
+ case RWState::RWREAD:
+ obc->lock.unlock_for_read();
+ break;
+ case RWState::RWWRITE:
+ obc->lock.unlock_for_write();
+ break;
+ case RWState::RWEXCL:
+ obc->lock.unlock_for_excl();
+ break;
+ case RWState::RWNONE:
+ // noop
+ break;
+ default:
+ ceph_assert(0 == "invalid");
+ }
+ state = RWState::RWNONE;
+ }
+ };
+ // Head object's state (also aliases target_state's obc for head
+ // targets) and the target object's state.
+ state_t head_state;
+ state_t target_state;
+
+ friend ObjectContextLoader;
+
+ // Install _obc into s and register it in the loader's set of obcs
+ // currently being accessed.
+ void set_state_obc(state_t &s, ObjectContextRef _obc) {
+ s.obc = std::move(_obc);
+ s.obc->append_to(loader.obc_set_accessing);
+ }
+
+ // Unlock, deregister from the accessing set, and clear s; noop when
+ // s holds no obc.
+ void release_state(state_t &s) {
+ LOG_PREFIX(ObjectContextLoader::release_state);
+ if (s.is_empty()) return;
+
+ s.release_lock();
+ SUBDEBUGDPP(
+ osd, "released object {}, {}",
+ loader.dpp, s.obc->get_oid(), s.obc->obs);
+ s.obc->remove_from(loader.obc_set_accessing);
+ s = state_t();
+ }
+ public:
+ // Move: steal rhs's target/options/obc/lock state via swap with this
+ // object's default-initialized members, leaving rhs empty.
+ Manager(Manager &&rhs) : loader(rhs.loader) {
+ std::swap(target, rhs.target);
+ std::swap(options, rhs.options);
+ std::swap(head_state, rhs.head_state);
+ std::swap(target_state, rhs.target_state);
+ }
+
+ // Move-assign via destroy + placement-new move-construction: releases
+ // this manager's current state, then takes over o's.
+ Manager &operator=(Manager &&o) {
+ this->~Manager();
+ new(this) Manager(std::move(o));
+ return *this;
+ }
+
+ // Access the target obc; only valid after a successful load_and_lock.
+ ObjectContextRef &get_obc() {
+ ceph_assert(!target_state.is_empty());
+ return target_state.obc;
+ }
+
+ // Release all locks and obc registrations held by this manager.
+ void release() {
+ release_state(head_state);
+ release_state(target_state);
+ }
+
+ // Scope-exit guard that calls release(); cancel() it once lock
+ // ownership should persist beyond the acquiring scope.
+ auto get_releaser() {
+ return seastar::defer([this] {
+ release();
+ });
+ }
+
+ ~Manager() {
+ release();
+ }
+ };
+ // Construct a Manager targeting oid.  resolve_clone controls whether
+ // a clone target is resolved against the head's snapset during
+ // load_and_lock (default: resolve).
+ Manager get_obc_manager(hobject_t oid, bool resolve_clone = true) {
+ Manager ret(*this, oid);
+ ret.options.resolve_clone = resolve_clone;
+ return ret;
+ }
+
+ // Error/future types for the load_and_lock* family.
+ using load_and_lock_ertr = load_obc_ertr;
+ using load_and_lock_iertr = interruptible::interruptible_errorator<
+ IOInterruptCondition, load_and_lock_ertr>;
+ using load_and_lock_fut = load_and_lock_iertr::future<>;
+private:
+ // Per-target-kind implementations; dispatched by load_and_lock below.
+ load_and_lock_fut load_and_lock_head(Manager &, RWState::State);
+ load_and_lock_fut load_and_lock_clone(Manager &, RWState::State);
+public:
+ // Load the manager's target obc (and head obc where needed) and
+ // acquire the given lock type on it.
+ load_and_lock_fut load_and_lock(Manager &, RWState::State);
+
using interruptor = ::crimson::interruptible::interruptor<
::crimson::osd::IOInterruptCondition>;
load_obc_iertr::future<> load_obc(ObjectContextRef obc);
};
+
+// Convenience alias for the RAII obc manager type.
+using ObjectContextManager = ObjectContextLoader::Manager;
+
}