Allow ImageFlushRequest to directly execute a flush call through
AsyncOperation. This allows the flush to be linked directly to its
preceding IOs.
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
(cherry picked from commit 09e4127d5df1e2a79f2536dc784ec3730feea9ce)
Conflicts:
src/librbd/io/AsyncOperation.cc: trivial resolution
src/test/librbd/io/test_mock_CopyupRequest.cc: DNE
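
The mechanism, in brief: in-flight AsyncOperations are tracked in a linked
list ordered newest to oldest, and a flush attaches its completion context
to the op immediately preceding it, firing only once that earlier IO has
settled. The toy model below is a minimal sketch of that linkage, assuming
hypothetical Op/OpTracker/flush_after names and std::list in place of
librbd's xlist and Mutex; it is not the librbd API.

#include <algorithm>
#include <cassert>
#include <functional>
#include <iostream>
#include <list>
#include <mutex>

// Toy model of the new flush linkage (hypothetical names, not librbd API):
// in-flight ops are tracked newest -> oldest, mirroring the xlist of
// AsyncOperations. A flush attaches its callback to the op that precedes
// it; with no older op in flight, it completes immediately.
struct Op {
  std::list<std::function<void()>> flush_contexts;
};

struct OpTracker {
  std::mutex lock;
  std::list<Op*> ops;  // front = newest

  void start(Op* op) {
    std::lock_guard<std::mutex> l(lock);
    ops.push_front(op);
  }

  // Link a flush issued by 'flush_op' to the next-oldest in-flight op.
  void flush_after(Op* flush_op, std::function<void()> on_finish) {
    {
      std::lock_guard<std::mutex> l(lock);
      auto it = std::find(ops.begin(), ops.end(), flush_op);
      assert(it != ops.end());
      if (++it != ops.end()) {       // an older op is still in flight
        (*it)->flush_contexts.push_back(std::move(on_finish));
        return;
      }
    }
    on_finish();  // no preceding IO (librbd queues this on op_work_queue)
  }

  // Completing an op fires any flushes that were linked to it.
  void finish(Op* op) {
    std::list<std::function<void()>> contexts;
    {
      std::lock_guard<std::mutex> l(lock);
      ops.remove(op);
      contexts.swap(op->flush_contexts);
    }
    for (auto& ctx : contexts) {
      ctx();
    }
  }
};

int main() {
  OpTracker tracker;
  Op write_op, flush_op;
  tracker.start(&write_op);  // a preceding write IO
  tracker.start(&flush_op);  // the flush itself
  tracker.flush_after(&flush_op, [] { std::cout << "flush done\n"; });
  tracker.finish(&flush_op); // finishing the flush op alone fires nothing
  tracker.finish(&write_op); // the preceding write settles -> flush fires
}
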
return len;
}
-void ImageCtx::flush_async_operations() {
-  C_SaferCond ctx;
-  flush_async_operations(&ctx);
-  ctx.wait();
-}
-
-void ImageCtx::flush_async_operations(Context *on_finish) {
-  {
-    Mutex::Locker l(async_ops_lock);
-    if (!async_ops.empty()) {
-      ldout(cct, 20) << "flush async operations: " << on_finish << " "
-                     << "count=" << async_ops.size() << dendl;
-      async_ops.front()->add_flush_context(on_finish);
-      return;
-    }
-  }
-  on_finish->complete(0);
-}
-
void ImageCtx::cancel_async_requests() {
C_SaferCond ctx;
cancel_async_requests(&ctx);
uint64_t prune_parent_extents(vector<pair<uint64_t,uint64_t> >& objectx,
                              uint64_t overlap);
- void flush_async_operations();
- void flush_async_operations(Context *on_finish);
-
void cancel_async_requests();
void cancel_async_requests(Context *on_finish);
  ImageCtx *image_ctx;
  std::list<Context *> flush_contexts;
-  explicit C_CompleteFlushes(ImageCtx *image_ctx, std::list<Context *> &&flush_contexts)
+  explicit C_CompleteFlushes(ImageCtx *image_ctx,
+                             std::list<Context *> &&flush_contexts)
    : image_ctx(image_ctx), flush_contexts(std::move(flush_contexts)) {
  }
  void finish(int r) override {
  }
}
-void AsyncOperation::add_flush_context(Context *on_finish) {
-  assert(m_image_ctx->async_ops_lock.is_locked());
-  ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": "
-                              << "flush=" << on_finish << dendl;
-  m_flush_contexts.push_back(on_finish);
+void AsyncOperation::flush(Context* on_finish) {
+  {
+    Mutex::Locker locker(m_image_ctx->async_ops_lock);
+    xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
+    ++iter;
+
+    // linked list stores ops newest -> oldest
+    if (!iter.end()) {
+      (*iter)->m_flush_contexts.push_back(on_finish);
+      return;
+    }
+  }
+
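+  // no preceding in-flight op: complete the flush via the op work queue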
+  m_image_ctx->op_work_queue->queue(on_finish);
}
} // namespace io
void start_op(ImageCtx &image_ctx);
void finish_op();
- void add_flush_context(Context *on_finish);
+ void flush(Context *on_finish);
private:
}
// ensure all in-flight IOs are settled if non-user flush request
- image_ctx.flush_async_operations(ctx);
aio_comp->start_op(true);
+ aio_comp->async_op.flush(ctx);
aio_comp->put();
// might be flushing during image shutdown
mock_image_ctx.image_ctx->op_work_queue->queue(&spec->dispatcher_ctx, r);
}));
}
-
-  void expect_flush_async_operations(MockImageCtx &mock_image_ctx, int r) {
-    EXPECT_CALL(mock_image_ctx, flush_async_operations(_))
-      .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
-  }
};
TEST_F(TestMockIoImageRequest, AioWriteJournalAppendDisabled) {
  InSequence seq;
  expect_is_journal_appending(mock_journal, false);
-  expect_flush_async_operations(mock_image_ctx, 0);
  expect_object_request_send(mock_image_ctx, 0);
  C_SaferCond aio_comp_ctx;
librados::snap_t id));
MOCK_METHOD0(user_flushed, void());
- MOCK_METHOD1(flush_async_operations, void(Context *));
MOCK_METHOD1(flush_copyup, void(Context *));
MOCK_CONST_METHOD1(test_features, bool(uint64_t test_features));