static_cast<tri_mutex*>(this)->unlock_for_excl();
}
-void excl_lock_from_read::lock()
-{
- static_cast<tri_mutex*>(this)->promote_from_read();
-}
-
-void excl_lock_from_read::unlock()
-{
- static_cast<tri_mutex*>(this)->demote_to_read();
-}
-
-void excl_lock_from_write::lock()
-{
- static_cast<tri_mutex*>(this)->promote_from_write();
-}
-
-void excl_lock_from_write::unlock()
-{
- static_cast<tri_mutex*>(this)->demote_to_write();
-}
-
tri_mutex::~tri_mutex()
{
LOG_PREFIX(tri_mutex::~tri_mutex());
}
}
-void tri_mutex::promote_from_read()
-{
- LOG_PREFIX(tri_mutex::promote_from_read());
- DEBUGDPP("", *this);
- assert(readers == 1);
- --readers;
- exclusively_used = true;
-}
-
void tri_mutex::demote_to_read()
{
LOG_PREFIX(tri_mutex::demote_to_read());
}
}
-void tri_mutex::promote_from_write()
-{
- LOG_PREFIX(tri_mutex::promote_from_write());
- DEBUGDPP("", *this);
- assert(writers == 1);
- --writers;
- exclusively_used = true;
-}
-
void tri_mutex::demote_to_write()
{
LOG_PREFIX(tri_mutex::demote_to_write());
void unlock();
};
-// promote from read to excl
-class excl_lock_from_read {
-public:
- void lock();
- void unlock();
-};
-
-// promote from write to excl
-class excl_lock_from_write {
-public:
- void lock();
- void unlock();
-};
-
/// shared/exclusive mutual exclusion
///
/// Unlike a reader/writer lock, tri_mutex does not enforce exclusive access
/// for write operations: multiple writers may hold the lock at the same time,
/// just as multiple readers may. It keeps track of three kinds of lock users:
/// - readers
/// - writers
/// - exclusive users
-///
-/// For lock promotion, a read or a write lock is only allowed to be promoted
-/// atomically upon the first locking.
class tri_mutex : private read_lock,
write_lock,
- excl_lock,
- excl_lock_from_read,
- excl_lock_from_write
+ excl_lock
{
public:
tri_mutex() = default;
excl_lock& for_excl() {
return *this;
}
- excl_lock_from_read& excl_from_read() {
- return *this;
- }
- excl_lock_from_write& excl_from_write() {
- return *this;
- }
// for shared readers
std::optional<seastar::future<>> lock_for_read();
bool try_lock_for_read() noexcept;
void unlock_for_read();
- void promote_from_read();
void demote_to_read();
unsigned get_readers() const {
return readers;
std::optional<seastar::future<>> lock_for_write();
bool try_lock_for_write() noexcept;
void unlock_for_write();
- void promote_from_write();
void demote_to_write();
unsigned get_writers() const {
return writers;
friend class read_lock;
friend class write_lock;
friend class excl_lock;
- friend class excl_lock_from_read;
- friend class excl_lock_from_write;
- friend class excl_lock_from_excl;
friend std::ostream& operator<<(std::ostream &lhs, const tri_mutex &rhs);
};
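// A minimal usage sketch: with promotion gone, a caller that needs exclusive
// access takes the exclusive lock up front instead of promoting a read or
// write lock later. This assumes lock_for_excl()/unlock_for_excl() follow the
// same std::optional<seastar::future<>> pattern as the lock_for_read()/
// lock_for_write() declarations above; with_excl_lock and the include paths
// are illustrative only, and m must stay alive until the returned future
// resolves.
#include <optional>
#include <seastar/core/future.hh>
#include "crimson/common/tri_mutex.h"

seastar::future<> with_excl_lock(tri_mutex& m) {
  if (std::optional<seastar::future<>> wait = m.lock_for_excl()) {
    // contended: the returned future resolves once the exclusive lock is held
    return std::move(*wait).then([&m] {
      // ... exclusive critical section ...
      m.unlock_for_excl();
    });
  }
  // uncontended: the exclusive lock was acquired synchronously
  // ... exclusive critical section ...
  m.unlock_for_excl();
  return seastar::make_ready_future<>();
}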
});
}
- template <typename Lock, typename Func>
- auto _with_promoted_lock(Lock& lock, Func&& func) {
- Ref obc = this;
- lock.lock();
- return seastar::futurize_invoke(func).finally([&lock, obc] {
- lock.unlock();
- });
- }
-
boost::intrusive::list_member_hook<> obc_accessing_hook;
uint64_t list_link_cnt = 0;
bool fully_loaded = false;
}
}
}
- template<RWState::State Type, typename InterruptCond = void, typename Func>
- auto with_promoted_lock(Func&& func) {
- if constexpr (!std::is_void_v<InterruptCond>) {
- auto wrapper = ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(std::forward<Func>(func));
- switch (Type) {
- case RWState::RWWRITE:
- return _with_promoted_lock(lock.excl_from_write(), std::move(wrapper));
- case RWState::RWREAD:
- return _with_promoted_lock(lock.excl_from_read(), std::move(wrapper));
- case RWState::RWEXCL:
- return seastar::futurize_invoke(std::move(wrapper));
- case RWState::RWNONE:
- return _with_lock(lock.for_excl(), std::move(wrapper));
- default:
- assert(0 == "noop");
- }
- } else {
- switch (Type) {
- case RWState::RWWRITE:
- return _with_promoted_lock(lock.excl_from_write(), std::forward<Func>(func));
- case RWState::RWREAD:
- return _with_promoted_lock(lock.excl_from_read(), std::forward<Func>(func));
- case RWState::RWEXCL:
- return seastar::futurize_invoke(std::forward<Func>(func));
- case RWState::RWNONE:
- return _with_lock(lock.for_excl(), std::forward<Func>(func));
- default:
- assert(0 == "noop");
- }
- }
- }
/**
* load_then_with_lock
});
}
- template<RWState::State State>
- ObjectContextLoader::load_obc_iertr::future<ObjectContextRef>
- ObjectContextLoader::get_or_load_obc(ObjectContextRef obc,
- bool existed)
- {
- LOG_PREFIX(ObjectContextLoader::get_or_load_obc);
- DEBUGDPP("{} -- fully_loaded={}, "
- "invalidated_by_interval_change={}",
- dpp, obc->get_oid(),
- obc->fully_loaded,
- obc->invalidated_by_interval_change);
- if (existed) {
- // obc is already loaded - avoid loading_mutex usage
- DEBUGDPP("cache hit on {}", dpp, obc->get_oid());
- return get_obc(obc, existed);
- }
- // See ObjectContext::_with_lock(),
- // this function must be able to support atomicity before
- // acquiring the lock
- ceph_assert(obc->loading_mutex.try_lock());
- return _get_or_load_obc<State>(obc, existed
- ).finally([obc]{
- obc->loading_mutex.unlock();
- });
- }
-
- template<RWState::State State>
- ObjectContextLoader::load_obc_iertr::future<ObjectContextRef>
- ObjectContextLoader::_get_or_load_obc(ObjectContextRef obc,
- bool existed)
- {
- LOG_PREFIX(ObjectContextLoader::_get_or_load_obc);
- if (existed) {
- DEBUGDPP("cache hit on {}", dpp, obc->get_oid());
- return get_obc(obc, existed);
- } else {
- DEBUGDPP("cache miss on {}", dpp, obc->get_oid());
- return obc->template with_promoted_lock<State, IOInterruptCondition>(
- [obc, this] {
- return load_obc(obc);
- });
- }
- }
-
ObjectContextLoader::load_obc_iertr::future<>
ObjectContextLoader::reload_obc(ObjectContext& obc) const
{
get_or_load_obc(ObjectContextRef obc,
bool existed);
- template<RWState::State State>
- load_obc_iertr::future<ObjectContextRef>
- _get_or_load_obc(ObjectContextRef obc,
- bool existed);
-
- static inline load_obc_iertr::future<ObjectContextRef>
- get_obc(ObjectContextRef obc,
- bool existed) {
- ceph_assert(existed && obc->is_valid() && obc->is_loaded());
- return load_obc_iertr::make_ready_future<ObjectContextRef>(obc);
- }
-
load_obc_iertr::future<> load_obc(ObjectContextRef obc);
};
}