template <typename I>
bool ObjectCacherObjectDispatch<I>::flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
- io::DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) {
+ uint64_t* journal_tid, io::DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
- io::DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) override;
+ uint64_t* journal_tid, io::DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) override;
bool invalidate_cache(Context* on_finish) override;
bool reset_existence_cache(Context* on_finish) override;
// ensure no locks are held when flush is complete
ctx = librbd::util::create_async_context_callback(image_ctx, ctx);
+ uint64_t journal_tid = 0;
if (journaling) {
// in-flight ops are flushed prior to closing the journal
- uint64_t journal_tid = image_ctx.journal->append_io_event(
+ ceph_assert(image_ctx.journal != NULL);
+ journal_tid = image_ctx.journal->append_io_event(
journal::EventEntry(journal::AioFlushEvent()), 0, 0, false, 0);
-
- ctx = new FunctionContext(
- [&image_ctx, journal_tid, ctx](int r) {
- image_ctx.journal->commit_io_event(journal_tid, r);
- ctx->complete(r);
- });
- ctx = new FunctionContext(
- [&image_ctx, journal_tid, ctx](int r) {
- image_ctx.journal->flush_event(journal_tid, ctx);
- });
- } else {
- // flush rbd cache only when journaling is not enabled
- auto object_dispatch_spec = ObjectDispatchSpec::create_flush(
- &image_ctx, OBJECT_DISPATCH_LAYER_NONE, m_flush_source, this->m_trace,
- ctx);
- ctx = new FunctionContext([object_dispatch_spec](int r) {
- object_dispatch_spec->send();
- });
}
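+  // flushes now always flow through the object dispatcher; the journal
+  // dispatch layer commits/flushes the event identified by journal_tid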
+ auto object_dispatch_spec = ObjectDispatchSpec::create_flush(
+ &image_ctx, OBJECT_DISPATCH_LAYER_NONE, m_flush_source, journal_tid,
+ this->m_trace, ctx);
+ ctx = new FunctionContext([object_dispatch_spec](int r) {
+ object_dispatch_spec->send();
+ });
+
// ensure all in-flight IOs are settled if non-user flush request
image_ctx.flush_async_operations(ctx);
aio_comp->start_op(true);
bool flush(
FlushSource flush_source, const ZTracer::Trace &parent_trace,
- DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) override {
+ uint64_t* journal_tid, DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) override {
return false;
}
virtual bool flush(
FlushSource flush_source, const ZTracer::Trace &parent_trace,
- DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) = 0;
+ uint64_t* journal_tid, DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) = 0;
virtual bool invalidate_cache(Context* on_finish) = 0;
virtual bool reset_existence_cache(Context* on_finish) = 0;
struct FlushRequest {
FlushSource flush_source;
+ uint64_t journal_tid;
- FlushRequest(FlushSource flush_source) : flush_source(flush_source) {
+ FlushRequest(FlushSource flush_source, uint64_t journal_tid)
+ : flush_source(flush_source), journal_tid(journal_tid) {
}
};
template <typename ImageCtxT>
static ObjectDispatchSpec* create_flush(
ImageCtxT* image_ctx, ObjectDispatchLayer object_dispatch_layer,
- FlushSource flush_source, const ZTracer::Trace &parent_trace,
- Context *on_finish) {
+ FlushSource flush_source, uint64_t journal_tid,
+ const ZTracer::Trace &parent_trace, Context *on_finish) {
return new ObjectDispatchSpec(image_ctx->io_object_dispatcher,
object_dispatch_layer,
- FlushRequest{flush_source}, 0,
+ FlushRequest{flush_source, journal_tid}, 0,
parent_trace, on_finish);
}
bool operator()(ObjectDispatchSpec::FlushRequest& flush) const {
return object_dispatch->flush(
flush.flush_source, object_dispatch_spec->parent_trace,
+ &flush.journal_tid,
&object_dispatch_spec->dispatch_result,
&object_dispatch_spec->dispatcher_ctx.on_finish,
&object_dispatch_spec->dispatcher_ctx);
template <typename I>
bool SimpleSchedulerObjectDispatch<I>::flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
- io::DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) {
+ uint64_t* journal_tid, io::DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
- io::DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) override;
+ uint64_t* journal_tid, io::DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) override;
bool invalidate_cache(Context* on_finish) override {
return false;
return true;
}
+template <typename I>
+bool ObjectDispatch<I>::flush(
+ io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
+ uint64_t* journal_tid, io::DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) {
+ if (*journal_tid == 0) {
+ // non-journaled IO
+ return false;
+ }
+
+ auto cct = m_image_ctx->cct;
+ ldout(cct, 20) << dendl;
+
+ auto ctx = *on_finish;
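+  // chain the journal event commit onto the caller's completion context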
+ *on_finish = new FunctionContext(
+ [image_ctx=m_image_ctx, ctx, journal_tid=*journal_tid](int r) {
+ image_ctx->journal->commit_io_event(journal_tid, r);
+ ctx->complete(r);
+ });
+
+ *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
+ wait_or_flush_event(*journal_tid, io::OBJECT_DISPATCH_FLAG_FLUSH,
+ on_dispatched);
+ return true;
+}
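For illustration, callers of the updated interface now pass the journal tid by pointer so a layer like the one above can consume it. A minimal sketch against the new six-argument signature, assuming a concrete dispatch-layer instance named `dispatch` and an image context with an open journal (the names and setup are hypothetical, not part of this change):

// Hypothetical call against the new flush() signature; `dispatch` and the
// surrounding setup are assumed for illustration only.
uint64_t journal_tid = 123;  // non-zero tid marks a journaled flush
io::DispatchResult dispatch_result = io::DISPATCH_RESULT_INVALID;
Context* on_finish = new FunctionContext([](int r) { /* completion */ });
C_SaferCond on_dispatched;
bool handled = dispatch.flush(io::FLUSH_SOURCE_USER, {}, &journal_tid,
                              &dispatch_result, &on_finish, &on_dispatched);
// if handled, on_finish has been re-wrapped to commit the journal event and
// on_dispatched fires once the flush event is safe in the journal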
+
template <typename I>
void ObjectDispatch<I>::extent_overwritten(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
- io::DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) override {
- return false;
- }
+ uint64_t* journal_tid, io::DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) override;
bool invalidate_cache(Context* on_finish) override {
return false;
C_SaferCond cond;
Context *on_finish = &cond;
ASSERT_FALSE(mock_simple_scheduler_object_dispatch.flush(
- FLUSH_SOURCE_USER, {}, nullptr, &on_finish, nullptr));
+ FLUSH_SOURCE_USER, {}, nullptr, nullptr, &on_finish, nullptr));
ASSERT_EQ(on_finish, &cond); // not modified
on_finish->complete(0);
ASSERT_EQ(0, cond.wait());
C_SaferCond cond3;
Context *on_finish3 = &cond3;
ASSERT_FALSE(mock_simple_scheduler_object_dispatch.flush(
- FLUSH_SOURCE_USER, {}, nullptr, &on_finish3, nullptr));
+ FLUSH_SOURCE_USER, {}, nullptr, nullptr, &on_finish3, nullptr));
ASSERT_EQ(on_finish3, &cond3);
on_finish1->complete(0);
dispatch_result, on_dispatched);
}
- MOCK_METHOD3(execute_flush, bool(FlushSource, DispatchResult*,
+ MOCK_METHOD4(execute_flush, bool(FlushSource, uint64_t*, DispatchResult*,
Context*));
bool flush(FlushSource flush_source, const ZTracer::Trace &parent_trace,
- DispatchResult* dispatch_result, Context** on_finish,
- Context* on_dispatched) {
- return execute_flush(flush_source, dispatch_result, on_dispatched);
+ uint64_t* journal_tid, DispatchResult* dispatch_result,
+ Context** on_finish, Context* on_dispatched) {
+ return execute_flush(flush_source, journal_tid, dispatch_result,
+ on_dispatched);
}
MOCK_METHOD1(invalidate_cache, bool(Context*));
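Test expectations that previously matched the three-argument execute_flush would move to the four-argument form. A hedged sketch, assuming the mock instance is named `mock_object_dispatch` and the usual `using namespace ::testing` in the test file (both assumptions for illustration only):

// Hypothetical expectation on the updated mock; the instance name and the
// enclosing test fixture are not part of this change.
EXPECT_CALL(mock_object_dispatch, execute_flush(FLUSH_SOURCE_USER, _, _, _))
  .WillOnce(WithArg<3>(Invoke([](Context* on_dispatched) {
      on_dispatched->complete(0);  // finish the dispatch immediately
      return true;                 // report the flush as handled
    })));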