} else {
lderr(cct) << "could not release all objects from cache: "
<< unclean << " bytes remain" << dendl;
- r = -EBUSY;
+ if (r == 0) {
+ r = -EBUSY;
+ }
}
if (reentrant_safe) {
return result;
}
- void ImageCtx::invalidate_cache(Context *on_finish) {
+ void ImageCtx::invalidate_cache(bool purge_on_error, Context *on_finish) {
if (object_cacher == NULL) {
op_work_queue->queue(on_finish, 0);
return;
object_cacher->release_set(object_set);
cache_lock.Unlock();
- flush_cache(new C_InvalidateCache(this, false, false, on_finish));
+ flush_cache(new C_InvalidateCache(this, purge_on_error, false, on_finish));
}
// Drop the cacher's record of known-nonexistent objects; no dirty data
// is flushed or discarded by this call.
void ImageCtx::clear_nonexistence_cache() {
object_cacher->clear_nonexistence(object_set);
}
+ // Returns true when the object cacher holds no entries for this
+ // image's object set. Used by the exclusive-lock release path to
+ // decide whether a forced purge is still required after the client
+ // has been blacklisted.
+ bool ImageCtx::is_cache_empty() {
+ Mutex::Locker locker(cache_lock);
+ return object_cacher->set_is_empty(object_set);
+ }
+
void ImageCtx::register_watch(Context *on_finish) {
assert(image_watcher == NULL);
image_watcher = new ImageWatcher<>(*this);
void user_flushed();
void flush_cache(Context *onfinish);
void shut_down_cache(Context *on_finish);
- int invalidate_cache(bool purge_on_error=false);
- void invalidate_cache(Context *on_finish);
+ int invalidate_cache(bool purge_on_error);
+ void invalidate_cache(bool purge_on_error, Context *on_finish);
void clear_nonexistence_cache();
+ bool is_cache_empty();
void register_watch(Context *on_finish);
uint64_t prune_parent_extents(vector<pair<uint64_t,uint64_t> >& objectx,
uint64_t overlap);
virtual void complete(int r) {
if (request_sent || r < 0) {
- commit_io_event_extent(r);
+ if (request_sent && r == 0) {
+ // only commit IO events that are safely recorded to the backing image
+ // since the cache will retry all IOs that fail
+ commit_io_event_extent(0);
+ }
+
req_comp->complete(r);
delete this;
} else {
if (*ret_val == -EBLACKLISTED) {
// allow clean shut down if blacklisted
- lderr(cct) << "failed to block writes: " << cpp_strerror(*ret_val) << dendl;
- *ret_val = 0;
+ lderr(cct) << "failed to block writes because client is blacklisted"
+ << dendl;
} else if (*ret_val < 0) {
+ lderr(cct) << "failed to block writes: " << cpp_strerror(*ret_val) << dendl;
+ m_image_ctx.aio_work_queue->unblock_writes();
+ return m_on_finish;
+ }
+
+ send_invalidate_cache(false);
+ return nullptr;
+}
+
+// Flush and invalidate the in-memory object cache before releasing the
+// exclusive lock. purge_on_error=true asks ImageCtx::invalidate_cache
+// to discard buffers that cannot be written back (used on the retry
+// path after the client is blacklisted -- see handle_invalidate_cache).
+template <typename I>
+void ReleaseRequest<I>::send_invalidate_cache(bool purge_on_error) {
+ if (m_image_ctx.object_cacher == nullptr) {
+ // caching disabled: nothing to invalidate, advance to next state
+ send_flush_notifies();
+ return;
+ }
+
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << ": purge_on_error=" << purge_on_error << dendl;
+
+ RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ // async wrapper avoids completing the state machine in the caller's
+ // context while owner_lock is held
+ Context *ctx = create_async_context_callback(
+ m_image_ctx, create_context_callback<
+ ReleaseRequest<I>,
+ &ReleaseRequest<I>::handle_invalidate_cache>(this));
+ m_image_ctx.invalidate_cache(purge_on_error, ctx);
+}
+
+template <typename I>
+Context *ReleaseRequest<I>::handle_invalidate_cache(int *ret_val) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << ": r=" << *ret_val << dendl;
+
+ if (*ret_val == -EBLACKLISTED) {
+ lderr(cct) << "failed to invalidate cache because client is blacklisted"
+ << dendl;
+ if (!m_image_ctx.is_cache_empty()) {
+ // force purge the cache after after being blacklisted
+ send_invalidate_cache(true);
+ return nullptr;
+ }
+ } else if (*ret_val < 0 && *ret_val != -EBUSY) {
+ lderr(cct) << "failed to invalidate cache: " << cpp_strerror(*ret_val)
+ << dendl;
m_image_ctx.aio_work_queue->unblock_writes();
return m_on_finish;
}
* BLOCK_WRITES
* |
* v
+ * INVALIDATE_CACHE
+ * |
+ * v
* FLUSH_NOTIFIES . . . . . . . . . . . . . .
* | .
* v .
void send_block_writes();
Context *handle_block_writes(int *ret_val);
+ void send_invalidate_cache(bool purge_on_error);
+ Context *handle_invalidate_cache(int *ret_val);
+
void send_flush_notifies();
Context *handle_flush_notifies(int *ret_val);
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker md_locker(ictx->md_lock);
- r = ictx->invalidate_cache();
+ r = ictx->invalidate_cache(false);
ictx->perfcounter->inc(l_librbd_invalidate_cache);
return r;
}
// need to invalidate since we're deleting objects, and
// ObjectCacher doesn't track non-existent objects
RWLock::RLocker owner_locker(image_ctx.owner_lock);
- image_ctx.invalidate_cache(create_async_context_callback(
+ image_ctx.invalidate_cache(false, create_async_context_callback(
image_ctx, create_context_callback<
ResizeRequest<I>, &ResizeRequest<I>::handle_invalidate_cache>(this)));
}
Context *ctx = create_context_callback<
SnapshotRollbackRequest<I>,
&SnapshotRollbackRequest<I>::handle_invalidate_cache>(this);
- image_ctx.invalidate_cache(ctx);
+ image_ctx.invalidate_cache(true, ctx);
return nullptr;
}
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
+using ::testing::WithArg;
static const std::string TEST_COOKIE("auto 123");
.WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
}
+ // Expect one invalidate_cache(purge, _) call completing with r; no-op
+ // expectation when the mock image has no object cacher configured,
+ // mirroring the production code's early-out.
+ void expect_invalidate_cache(MockImageCtx &mock_image_ctx, bool purge,
+ int r) {
+ if (mock_image_ctx.object_cacher != nullptr) {
+ EXPECT_CALL(mock_image_ctx, invalidate_cache(purge, _))
+ .WillOnce(WithArg<1>(CompleteContext(r, NULL)));
+ }
+ }
+
+ // Expect one is_cache_empty() call returning 'empty'; skipped when no
+ // object cacher is configured (the query is never issued in that case).
+ void expect_is_cache_empty(MockImageCtx &mock_image_ctx, bool empty) {
+ if (mock_image_ctx.object_cacher != nullptr) {
+ EXPECT_CALL(mock_image_ctx, is_cache_empty())
+ .WillOnce(Return(empty));
+ }
+ }
+
void expect_flush_notifies(MockImageCtx &mock_image_ctx) {
EXPECT_CALL(*mock_image_ctx.image_watcher, flush(_))
.WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
expect_prepare_lock(mock_image_ctx);
expect_cancel_op_requests(mock_image_ctx, 0);
expect_block_writes(mock_image_ctx, 0);
+ expect_invalidate_cache(mock_image_ctx, false, 0);
expect_flush_notifies(mock_image_ctx);
MockJournal *mock_journal = new MockJournal();
InSequence seq;
expect_prepare_lock(mock_image_ctx);
expect_cancel_op_requests(mock_image_ctx, 0);
+ expect_invalidate_cache(mock_image_ctx, false, 0);
expect_flush_notifies(mock_image_ctx);
MockObjectMap *mock_object_map = new MockObjectMap();
InSequence seq;
expect_cancel_op_requests(mock_image_ctx, 0);
+ expect_invalidate_cache(mock_image_ctx, false, 0);
expect_flush_notifies(mock_image_ctx);
expect_unlock(mock_image_ctx, 0);
expect_prepare_lock(mock_image_ctx);
expect_cancel_op_requests(mock_image_ctx, 0);
expect_block_writes(mock_image_ctx, -EBLACKLISTED);
+ expect_invalidate_cache(mock_image_ctx, false, -EBLACKLISTED);
+ expect_is_cache_empty(mock_image_ctx, false);
+ expect_invalidate_cache(mock_image_ctx, true, -EBLACKLISTED);
+ expect_is_cache_empty(mock_image_ctx, true);
expect_flush_notifies(mock_image_ctx);
MockJournal *mock_journal = new MockJournal();
InSequence seq;
expect_cancel_op_requests(mock_image_ctx, 0);
expect_block_writes(mock_image_ctx, 0);
+ expect_invalidate_cache(mock_image_ctx, false, 0);
expect_flush_notifies(mock_image_ctx);
expect_unlock(mock_image_ctx, -EINVAL);
MOCK_METHOD1(flush_copyup, void(Context *));
MOCK_METHOD1(flush_cache, void(Context *));
- MOCK_METHOD1(invalidate_cache, void(Context *));
+ MOCK_METHOD2(invalidate_cache, void(bool, Context *));
MOCK_METHOD1(shut_down_cache, void(Context *));
+ MOCK_METHOD0(is_cache_empty, bool());
MOCK_CONST_METHOD1(test_features, bool(uint64_t test_features));
MOCK_CONST_METHOD2(test_features, bool(uint64_t test_features,
}
+ // updated for the new two-argument invalidate_cache(purge, on_finish):
+ // WithArg<1> completes the Context now passed as the second argument
void expect_invalidate_cache(MockImageCtx &mock_image_ctx, int r) {
- EXPECT_CALL(mock_image_ctx, invalidate_cache(_))
- .WillOnce(CompleteContext(r, NULL))
+ EXPECT_CALL(mock_image_ctx, invalidate_cache(false, _))
+ .WillOnce(WithArg<1>(CompleteContext(r, NULL)));
expect_op_work_queue(mock_image_ctx);
}
+ // snapshot-rollback path now purges on error (purge_on_error=true),
+ // so the expectation pins the first argument to true
void expect_invalidate_cache(MockOperationImageCtx &mock_image_ctx, int r) {
if (mock_image_ctx.object_cacher != nullptr) {
- EXPECT_CALL(mock_image_ctx, invalidate_cache(_))
- .WillOnce(CompleteContext(r, NULL))
+ EXPECT_CALL(mock_image_ctx, invalidate_cache(true, _))
+ .WillOnce(WithArg<1>(CompleteContext(r, NULL)));
}
}