From: Samuel Just Date: Thu, 13 Jun 2024 00:47:08 +0000 (+0000) Subject: crimson/.../tri_mutex: lock() methods return normal future X-Git-Tag: v20.0.0~1674^2~6 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=dbdbbbd97163f08dc0b341772347deae3613c306;p=ceph.git crimson/.../tri_mutex: lock() methods return normal future f63d76a2 modified the lock() variants on tri_mutex so that the obc loading pathway wouldn't invoke .then() on a returned future known statically to be ready. Now that the loading pathway uses demotion mechanisms that cannot block and do not return futures, we no longer have any users like that and can drop the extra std::nullopt possibility. In a larger sense, if lock() *can* return a non-ready future in a particular pathway, there's no semantic difference between returning std::optional<future<>> and future<> as the caller would still have to deal with a possible non-ready future return even if std::nullopt is also possible. If the pathway can be demonstrated statically to be non-blocking, as with the obc loading mechanism, we really want to use a mechanism that obviously cannot block rather than relying on a mechanism with a return signature of std::optional<future<>> to return std::nullopt. 
Signed-off-by: Samuel Just --- diff --git a/src/crimson/common/tri_mutex.cc b/src/crimson/common/tri_mutex.cc index 8d8d44c11799d..83fa381920e84 100644 --- a/src/crimson/common/tri_mutex.cc +++ b/src/crimson/common/tri_mutex.cc @@ -8,8 +8,7 @@ SET_SUBSYS(osd); //TODO: SET_SUBSYS(crimson_tri_mutex); -std::optional> -read_lock::lock() +seastar::future<> read_lock::lock() { return static_cast(this)->lock_for_read(); } @@ -19,8 +18,7 @@ void read_lock::unlock() static_cast(this)->unlock_for_read(); } -std::optional> -write_lock::lock() +seastar::future<> write_lock::lock() { return static_cast(this)->lock_for_write(); } @@ -30,8 +28,7 @@ void write_lock::unlock() static_cast(this)->unlock_for_write(); } -std::optional> -excl_lock::lock() +seastar::future<> excl_lock::lock() { return static_cast(this)->lock_for_excl(); } @@ -48,14 +45,13 @@ tri_mutex::~tri_mutex() assert(!is_acquired()); } -std::optional> -tri_mutex::lock_for_read() +seastar::future<> tri_mutex::lock_for_read() { LOG_PREFIX(tri_mutex::lock_for_read()); DEBUGDPP("", *this); if (try_lock_for_read()) { DEBUGDPP("lock_for_read successfully", *this); - return std::nullopt; + return seastar::now(); } DEBUGDPP("can't lock_for_read, adding to waiters", *this); waiters.emplace_back(seastar::promise<>(), type_t::read, name); @@ -92,14 +88,13 @@ void tri_mutex::demote_to_read() ++readers; } -std::optional> -tri_mutex::lock_for_write() +seastar::future<> tri_mutex::lock_for_write() { LOG_PREFIX(tri_mutex::lock_for_write()); DEBUGDPP("", *this); if (try_lock_for_write()) { DEBUGDPP("lock_for_write successfully", *this); - return std::nullopt; + return seastar::now(); } DEBUGDPP("can't lock_for_write, adding to waiters", *this); waiters.emplace_back(seastar::promise<>(), type_t::write, name); @@ -137,14 +132,13 @@ void tri_mutex::demote_to_write() } // for exclusive users -std::optional> -tri_mutex::lock_for_excl() +seastar::future<> tri_mutex::lock_for_excl() { LOG_PREFIX(tri_mutex::lock_for_excl()); DEBUGDPP("", 
*this); if (try_lock_for_excl()) { DEBUGDPP("lock_for_excl, successfully", *this); - return std::nullopt; + return seastar::now(); } DEBUGDPP("can't lock_for_excl, adding to waiters", *this); waiters.emplace_back(seastar::promise<>(), type_t::exclusive, name); diff --git a/src/crimson/common/tri_mutex.h b/src/crimson/common/tri_mutex.h index 6e265316c4af8..bd3ed6dd4d81e 100644 --- a/src/crimson/common/tri_mutex.h +++ b/src/crimson/common/tri_mutex.h @@ -3,27 +3,25 @@ #pragma once -#include - #include #include #include "crimson/common/log.h" class read_lock { public: - std::optional> lock(); + seastar::future<> lock(); void unlock(); }; class write_lock { public: - std::optional> lock(); + seastar::future<> lock(); void unlock(); }; class excl_lock { public: - std::optional> lock(); + seastar::future<> lock(); void unlock(); }; @@ -64,7 +62,7 @@ public: } // for shared readers - std::optional> lock_for_read(); + seastar::future<> lock_for_read(); bool try_lock_for_read() noexcept; void unlock_for_read(); void demote_to_read(); @@ -73,7 +71,7 @@ public: } // for shared writers - std::optional> lock_for_write(); + seastar::future<> lock_for_write(); bool try_lock_for_write() noexcept; void unlock_for_write(); void demote_to_write(); @@ -82,7 +80,7 @@ public: } // for exclusive users - std::optional> lock_for_excl(); + seastar::future<> lock_for_excl(); bool try_lock_for_excl() noexcept; void unlock_for_excl(); bool is_excl_acquired() const { diff --git a/src/crimson/osd/object_context.h b/src/crimson/osd/object_context.h index a19279ee5ceb5..ae8d1d34a4a5f 100644 --- a/src/crimson/osd/object_context.h +++ b/src/crimson/osd/object_context.h @@ -137,34 +137,17 @@ public: private: template auto _with_lock(Lock& lock, Func&& func) { - auto maybe_fut = lock.lock(); - return seastar::futurize_invoke([ - maybe_fut=std::move(maybe_fut), - func=std::forward(func), - obc=Ref(this), - &lock]() mutable { - if (maybe_fut) { - return std::move(*maybe_fut - 
).then([func=std::forward(func), obc, &lock]() mutable { - return seastar::futurize_invoke( - func - ).finally([&lock, obc] { - /* We chain the finally block here because it's possible for - * *maybe_fut from lock.lock() above to fail due to a call to - * ObjectContext::interrupt, which calls tri_mutex::abort. - * In the event of such an error, the lock isn't actually taken - * and calling unlock() would be incorrect. */ - lock.unlock(); - }); - }); - } else { - // atomically calling func upon locking - return seastar::futurize_invoke( - func - ).finally([&lock, obc] { - lock.unlock(); - }); - } + return lock.lock( + ).then([&lock, func=std::forward(func), obc=Ref(this)]() mutable { + return seastar::futurize_invoke( + func + ).finally([&lock, obc=std::move(obc)] { + /* We chain the finally block here because it's possible for lock.lock() + * above to fail due to a call to ObjectContext::interrupt, which calls + * tri_mutex::abort. In the event of such an error, the lock isn't + * actually taken and calling unlock() would be incorrect. */ + lock.unlock(); + }); }); }