int AioCompletion::wait_for_complete() {
tracepoint(librbd, aio_wait_for_complete_enter, this);
- {
- std::unique_lock<std::mutex> locker(lock);
- while (state != AIO_STATE_COMPLETE) {
- cond.wait(locker);
- }
- }
+ completed.wait(false, std::memory_order_acquire);
tracepoint(librbd, aio_wait_for_complete_exit, 0);
return 0;
}
bool AioCompletion::is_complete() {
tracepoint(librbd, aio_is_complete_enter, this);
- bool done = (this->state == AIO_STATE_COMPLETE);
+ bool done = completed.load(std::memory_order_acquire);
tracepoint(librbd, aio_is_complete_exit, done);
return done;
}
@@ ... @@
}
void AioCompletion::mark_complete_and_notify() {
- state = AIO_STATE_COMPLETE;
+ completed.store(true, std::memory_order_release);
if (ictx != nullptr && event_notify && ictx->event_socket.is_valid()) {
ictx->event_socket_completions.push(this);
ictx->event_socket.notify();
}
- {
- std::unique_lock<std::mutex> locker(lock);
- cond.notify_all();
- }
+ completed.notify_all();
if (image_dispatcher_ctx != nullptr) {
image_dispatcher_ctx->complete(rval);
@@ ... @@
 * context or via a thread pool context for cache read hits).
 */
struct AioCompletion {
- typedef enum {
- AIO_STATE_PENDING = 0,
- AIO_STATE_COMPLETE,
- } aio_state_t;
-
mutable std::mutex lock;
- std::condition_variable cond;
callback_t complete_cb = nullptr;
void *complete_arg = nullptr;
rbd_completion_t rbd_comp = nullptr;
- /// note: only using atomic for built-in memory barrier
- std::atomic<aio_state_t> state{AIO_STATE_PENDING};
+ std::atomic<bool> completed{false};
std::atomic<ssize_t> rval{0};
std::atomic<int> error_rval{0};
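
For reference, here is a minimal, self-contained sketch of the C++20 std::atomic wait/notify pattern the patch switches to. The Completion struct, mark_complete(), and rval below are illustrative stand-ins for this write-up, not the librbd API:

#include <atomic>
#include <cstdio>
#include <thread>

struct Completion {
  std::atomic<bool> completed{false};
  long rval = 0;  // plain member; published by the release store below

  void wait_for_complete() {
    // Blocks while `completed` still equals false. std::atomic::wait
    // rechecks the value internally, so the old caller-side
    // `while (...) cond.wait(locker);` loop is no longer needed.
    completed.wait(false, std::memory_order_acquire);
  }

  void mark_complete(long r) {
    rval = r;  // written before the release store...
    completed.store(true, std::memory_order_release);
    completed.notify_all();
    // ...so the acquire side in wait_for_complete() is guaranteed to
    // observe rval == r once wait() returns.
  }
};

int main() {
  Completion c;
  std::thread worker([&c] { c.mark_complete(42); });
  c.wait_for_complete();
  std::printf("rval = %ld\n", c.rval);  // always prints 42
  worker.join();
}

Two properties make this a safe drop-in for the mutex/condvar pair: std::atomic::wait(old) only returns once the value no longer compares equal to old, and the release store paired with the acquire wait preserves the happens-before edge the mutex previously provided, so fields written before the store (such as rval above) remain visible to a woken waiter. Note that atomic wait/notify requires C++20; on older toolchains the condition_variable code removed by this patch is the fallback.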