auto aio_comp = io::AioCompletion::create_and_start(on_finish, &m_image_ctx,
io::AIO_TYPE_FLUSH);
- io::ImageFlushRequest<I> req(m_image_ctx, aio_comp, {});
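+ // internal flush: bypasses the journal event and the user-flush cache hook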
+ io::ImageFlushRequest<I> req(m_image_ctx, aio_comp, io::FLUSH_SOURCE_INTERNAL,
+ {});
req.set_bypass_image_cache();
req.send();
}
void operator()(Flush& flush) const {
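+ // queued flushes only originate from the public API, hence FLUSH_SOURCE_USER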
ImageRequest<I>::aio_flush(
- &spec->m_image_ctx, spec->m_aio_comp,
+ &spec->m_image_ctx, spec->m_aio_comp, FLUSH_SOURCE_USER,
spec->m_parent_trace);
}
};
template <typename I>
void ImageRequest<I>::aio_flush(I *ictx, AioCompletion *c,
- const ZTracer::Trace &parent_trace) {
- ImageFlushRequest<I> req(*ictx, c, parent_trace);
+ FlushSource flush_source,
+ const ZTracer::Trace &parent_trace) {
+ ImageFlushRequest<I> req(*ictx, c, flush_source, parent_trace);
req.send();
}
template <typename I>
void ImageFlushRequest<I>::send_request() {
I &image_ctx = this->m_image_ctx;
- image_ctx.user_flushed();
+ if (m_flush_source == FLUSH_SOURCE_USER) {
+ // flag cache for writeback mode if configured
+ image_ctx.user_flushed();
+ }
bool journaling = false;
{
RWLock::RLocker snap_locker(image_ctx.snap_lock);
- journaling = (image_ctx.journal != nullptr &&
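+ // only user-initiated flushes are recorded as journal events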
+ journaling = (m_flush_source == FLUSH_SOURCE_USER &&
+ image_ctx.journal != nullptr &&
image_ctx.journal->is_journal_appending());
}
AioCompletion *aio_comp = this->m_aio_comp;
+ aio_comp->set_request_count(1);
+
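+ // build the completion callback chain back-to-front; it ends by completing aio_comp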
+ Context *ctx;
if (journaling) {
// in-flight ops are flushed prior to closing the journal
uint64_t journal_tid = image_ctx.journal->append_io_event(
journal::EventEntry(journal::AioFlushEvent()),
ObjectRequests(), 0, 0, false, 0);
-
- aio_comp->set_request_count(1);
aio_comp->associate_journal_event(journal_tid);
- FunctionContext *flush_ctx = new FunctionContext(
- [aio_comp, &image_ctx, journal_tid] (int r) {
- auto ctx = new C_FlushJournalCommit<I>(image_ctx, aio_comp,
- journal_tid);
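+ // once in-flight ops settle, flush the journal event, then commit it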
+ ctx = new C_FlushJournalCommit<I>(image_ctx, aio_comp, journal_tid);
+ ctx = new FunctionContext(
+ [&image_ctx, journal_tid, ctx] (int r) {
image_ctx.journal->flush_event(journal_tid, ctx);
-
- // track flush op for block writes
- aio_comp->start_op(true);
- aio_comp->put();
- });
-
- image_ctx.flush_async_operations(flush_ctx);
+ });
} else {
// flush rbd cache only when journaling is not enabled
- aio_comp->set_request_count(1);
- C_AioRequest *req_comp = new C_AioRequest(aio_comp);
- image_ctx.flush(req_comp);
-
- aio_comp->start_op(true);
- aio_comp->put();
+ ctx = new C_AioRequest(aio_comp);
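+ // skip the cache flush entirely when the object cacher is disabled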
+ if (image_ctx.object_cacher != nullptr) {
+ ctx = new FunctionContext([&image_ctx, ctx](int r) {
+ image_ctx.flush_cache(ctx);
+ });
+ }
}
+ // ensure all in-flight IOs are settled before the flush callback chain runs
+ image_ctx.flush_async_operations(ctx);
+ aio_comp->start_op(true);
+ aio_comp->put();
+
image_ctx.perfcounter->inc(l_librbd_flush);
}
Extents &&image_extents, bool skip_partial_discard,
const ZTracer::Trace &parent_trace);
static void aio_flush(ImageCtxT *ictx, AioCompletion *c,
- const ZTracer::Trace &parent_trace);
+ FlushSource flush_source,
+ const ZTracer::Trace &parent_trace);
static void aio_writesame(ImageCtxT *ictx, AioCompletion *c,
Extents &&image_extents, bufferlist &&bl,
int op_flags, const ZTracer::Trace &parent_trace);
class ImageFlushRequest : public ImageRequest<ImageCtxT> {
public:
ImageFlushRequest(ImageCtxT &image_ctx, AioCompletion *aio_comp,
- const ZTracer::Trace &parent_trace)
- : ImageRequest<ImageCtxT>(image_ctx, aio_comp, {}, "flush", parent_trace) {
+ FlushSource flush_source,
+ const ZTracer::Trace &parent_trace)
+ : ImageRequest<ImageCtxT>(image_ctx, aio_comp, {}, "flush", parent_trace),
+ m_flush_source(flush_source) {
}
protected:
const char *get_request_type() const override {
return "aio_flush";
}
+
+private:
+ FlushSource m_flush_source;
};
template <typename ImageCtxT = ImageCtx>
if (m_image_ctx.non_blocking_aio || writes_blocked() || !writes_empty()) {
queue(ImageDispatchSpec<I>::create_flush_request(m_image_ctx, c, trace));
} else {
- ImageRequest<I>::aio_flush(&m_image_ctx, c, trace);
+ ImageRequest<I>::aio_flush(&m_image_ctx, c, FLUSH_SOURCE_USER, trace);
finish_in_flight_io();
}
trace.event("finish");
AIO_TYPE_COMPARE_AND_WRITE,
} aio_type_t;
+enum FlushSource {
+ FLUSH_SOURCE_USER,     // flush issued through the public librbd API
+ FLUSH_SOURCE_INTERNAL, // flush issued by an internal state machine
+ FLUSH_SOURCE_SHUTDOWN  // flush issued while the image is shutting down
+};
+
enum Direction {
DIRECTION_READ,
DIRECTION_WRITE,
// execute the following outside of lock scope
if (flush_comp != nullptr) {
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp, {});
+ io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
+ io::FLUSH_SOURCE_INTERNAL, {});
}
if (on_finish != nullptr) {
on_finish->complete(0);
}
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- io::ImageRequest<I>::aio_flush(&m_image_ctx, aio_comp, {});
+ io::ImageRequest<I>::aio_flush(&m_image_ctx, aio_comp,
+ io::FLUSH_SOURCE_INTERNAL, {});
}
template <typename I>
m_lock.Unlock();
if (flush_comp != nullptr) {
- io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp, {});
+ io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
+ io::FLUSH_SOURCE_INTERNAL, {});
}
}
}
m_lock.Unlock();
if (flush_comp != nullptr) {
- io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp, {});
+ io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
+ io::FLUSH_SOURCE_INTERNAL, {});
}
}
}
}
if (aio_comp != nullptr) {
- io::ImageRequest<I>::aio_flush(&m_image_ctx, aio_comp, {});
+ io::ImageRequest<I>::aio_flush(&m_image_ctx, aio_comp,
+ io::FLUSH_SOURCE_INTERNAL, {});
}
on_ready->complete(0);
}
m_lock.Unlock();
if (flush_comp != nullptr) {
- io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp, {});
+ io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
+ io::FLUSH_SOURCE_INTERNAL, {});
}
}
}
auto flush_comp = create_aio_flush_completion(nullptr);
m_lock.Unlock();
- io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp, {});
+ io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
+ io::FLUSH_SOURCE_INTERNAL, {});
}
}
}
void expect_flush(MockImageCtx &mock_image_ctx, int r) {
- EXPECT_CALL(mock_image_ctx, flush(_))
+ EXPECT_CALL(mock_image_ctx, flush_cache(_))
+ .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
+ }
+
+ void expect_flush_async_operations(MockImageCtx &mock_image_ctx, int r) {
+ EXPECT_CALL(mock_image_ctx, flush_async_operations(_))
.WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
}
};
InSequence seq;
expect_user_flushed(mock_image_ctx);
expect_is_journal_appending(mock_journal, false);
+ expect_flush_async_operations(mock_image_ctx, 0);
expect_flush(mock_image_ctx, 0);
C_SaferCond aio_comp_ctx;
AioCompletion *aio_comp = AioCompletion::create_and_start(
&aio_comp_ctx, ictx, AIO_TYPE_FLUSH);
- MockImageFlushRequest mock_aio_image_flush(mock_image_ctx, aio_comp, {});
+ MockImageFlushRequest mock_aio_image_flush(mock_image_ctx, aio_comp,
+ FLUSH_SOURCE_USER, {});
{
RWLock::RLocker owner_locker(mock_image_ctx.owner_lock);
mock_aio_image_flush.send();
MOCK_METHOD1(aio_flush, void(AioCompletion *c));
static void aio_flush(MockReplayImageCtx *ictx, AioCompletion *c,
- const ZTracer::Trace &parent_trace) {
+ FlushSource, const ZTracer::Trace &parent_trace) {
assert(s_instance != nullptr);
s_instance->aio_flush(c);
}