template <typename I>
inline bool is_copy_on_read(I *ictx, librados::snap_t snap_id) {
- assert(ictx->snap_lock.is_locked());
+ RWLock::RLocker snap_locker(ictx->snap_lock);
  return (ictx->clone_copy_on_read &&
          !ictx->read_only && snap_id == CEPH_NOSNAP &&
          (ictx->exclusive_lock == nullptr ||
           ictx->exclusive_lock->is_lock_owner()));
}
m_trace.copy_name(trace_name + std::string(" ") + oid);
m_trace.event("start");
}
-
- Striper::extent_to_file(m_ictx->cct, &m_ictx->layout, m_object_no,
- 0, m_ictx->layout.object_size, m_parent_extents);
-
- RWLock::RLocker snap_locker(m_ictx->snap_lock);
- RWLock::RLocker parent_locker(m_ictx->parent_lock);
- compute_parent_extents();
}
template <typename I>
}
template <typename I>
-bool ObjectRequest<I>::compute_parent_extents() {
+bool ObjectRequest<I>::compute_parent_extents(Extents *parent_extents) {
assert(m_ictx->snap_lock.is_locked());
assert(m_ictx->parent_lock.is_locked());
+ m_has_parent = false;
+ parent_extents->clear();
+
uint64_t parent_overlap;
int r = m_ictx->get_parent_overlap(m_snap_id, &parent_overlap);
if (r < 0) {
    // NOTE: it's possible for a snapshot to be deleted while we are
    // still reading from it
lderr(m_ictx->cct) << "failed to retrieve parent overlap: "
<< cpp_strerror(r) << dendl;
- m_has_parent = false;
- m_parent_extents.clear();
+ return false;
+ } else if (parent_overlap == 0) {
return false;
}
- uint64_t object_overlap = m_ictx->prune_parent_extents(
- m_parent_extents, parent_overlap);
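+  // map the full object back onto the parent image to compute the overlap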
+ Striper::extent_to_file(m_ictx->cct, &m_ictx->layout, m_object_no, 0,
+ m_ictx->layout.object_size, *parent_extents);
+ uint64_t object_overlap = m_ictx->prune_parent_extents(*parent_extents,
+ parent_overlap);
if (object_overlap > 0) {
ldout(m_ictx->cct, 20) << "overlap " << parent_overlap << " "
- << "extents " << m_parent_extents << dendl;
- m_has_parent = !m_parent_extents.empty();
+ << "extents " << *parent_extents << dendl;
+ m_has_parent = !parent_extents->empty();
return true;
}
return false;
}
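+// queues the request completion on the image's op work queue so finish()
+// runs outside the caller's context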
+template <typename I>
+void ObjectRequest<I>::async_finish(int r) {
+ ldout(m_ictx->cct, 20) << "r=" << r << dendl;
+ m_ictx->op_work_queue->queue(util::create_context_callback<
+ ObjectRequest<I>, &ObjectRequest<I>::finish>(this), r);
+}
+
+template <typename I>
+void ObjectRequest<I>::finish(int r) {
+ ldout(m_ictx->cct, 20) << "r=" << r << dendl;
+ m_completion->complete(r);
+ delete this;
+}
+
/** read **/
template <typename I>
Context *completion)
: ObjectRequest<I>(ictx, oid, objectno, offset, len, snap_id, false, "read",
parent_trace, completion),
- m_tried_parent(false), m_op_flags(op_flags), m_state(LIBRBD_AIO_READ_FLAT) {
- guard_read();
+ m_op_flags(op_flags) {
}
template <typename I>
-void ObjectReadRequest<I>::guard_read()
-{
+void ObjectReadRequest<I>::send() {
I *image_ctx = this->m_ictx;
- RWLock::RLocker snap_locker(image_ctx->snap_lock);
- RWLock::RLocker parent_locker(image_ctx->parent_lock);
+ ldout(image_ctx->cct, 20) << dendl;
- if (this->has_parent()) {
- ldout(image_ctx->cct, 20) << "guarding read" << dendl;
- m_state = LIBRBD_AIO_READ_GUARD;
+ if (image_ctx->object_cacher != nullptr) {
+ read_cache();
+ } else {
+ read_object();
}
}
template <typename I>
-bool ObjectReadRequest<I>::should_complete(int r)
-{
+void ObjectReadRequest<I>::read_cache() {
I *image_ctx = this->m_ictx;
- ldout(image_ctx->cct, 20) << this->m_oid << " "
- << this->m_object_off << "~" << this->m_object_len
- << " r = " << r << dendl;
+ ldout(image_ctx->cct, 20) << dendl;
- bool finished = true;
+ // must use async callback to avoid cache_lock cycle
+ auto cache_ctx = util::create_async_context_callback(
+ *image_ctx, util::create_context_callback<
+ ObjectReadRequest<I>, &ObjectReadRequest<I>::handle_read_cache>(this));
+ image_ctx->aio_read_from_cache(
+ this->m_oid, this->m_object_no, &m_read_data, this->m_object_len,
+ this->m_object_off, cache_ctx, m_op_flags,
+ (this->m_trace.valid() ? &this->m_trace : nullptr));
+}
- switch (m_state) {
- case LIBRBD_AIO_READ_GUARD:
- ldout(image_ctx->cct, 20) << "READ_CHECK_GUARD" << dendl;
-
- // This is the step to read from parent
- if (!m_tried_parent && r == -ENOENT) {
- {
- RWLock::RLocker snap_locker(image_ctx->snap_lock);
- RWLock::RLocker parent_locker(image_ctx->parent_lock);
- if (image_ctx->parent == NULL) {
- ldout(image_ctx->cct, 20) << "parent is gone; do nothing" << dendl;
- break;
- }
-
- // calculate reverse mapping onto the image
- vector<pair<uint64_t,uint64_t> > parent_extents;
- Striper::extent_to_file(image_ctx->cct, &image_ctx->layout,
- this->m_object_no, this->m_object_off,
- this->m_object_len, parent_extents);
-
- uint64_t parent_overlap = 0;
- uint64_t object_overlap = 0;
- r = image_ctx->get_parent_overlap(this->m_snap_id, &parent_overlap);
- if (r == 0) {
- object_overlap = image_ctx->prune_parent_extents(parent_extents,
- parent_overlap);
- }
-
- if (object_overlap > 0) {
- m_tried_parent = true;
- if (is_copy_on_read(image_ctx, this->m_snap_id)) {
- m_state = LIBRBD_AIO_READ_COPYUP;
- }
-
- read_from_parent(std::move(parent_extents));
- finished = false;
- }
- }
- }
- break;
- case LIBRBD_AIO_READ_COPYUP:
- ldout(image_ctx->cct, 20) << "READ_COPYUP" << dendl;
- // This is the extra step for copy-on-read: kick off an asynchronous copyup.
- // It is different from copy-on-write as asynchronous copyup will finish
- // by itself so state won't go back to LIBRBD_AIO_READ_GUARD.
-
- assert(m_tried_parent);
- if (r > 0) {
- // If read entire object from parent success and CoR is possible, kick
- // off a asynchronous copyup. This approach minimizes the latency
- // impact.
- send_copyup();
- }
- break;
- case LIBRBD_AIO_READ_FLAT:
- ldout(image_ctx->cct, 20) << "READ_FLAT" << dendl;
- // The read content should be deposit in m_read_data
- break;
- default:
- lderr(image_ctx->cct) << "invalid request state: " << m_state << dendl;
- ceph_abort();
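+// cache read result: -ENOENT falls through to the parent image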
+template <typename I>
+void ObjectReadRequest<I>::handle_read_cache(int r) {
+ I *image_ctx = this->m_ictx;
+ ldout(image_ctx->cct, 20) << "r=" << r << dendl;
+
+ if (r == -ENOENT) {
+ read_parent();
+ return;
+ } else if (r < 0) {
+ lderr(image_ctx->cct) << "failed to read from cache: "
+ << cpp_strerror(r) << dendl;
+ this->finish(r);
+ return;
}
- return finished;
+ this->finish(0);
}
template <typename I>
-void ObjectReadRequest<I>::send() {
+void ObjectReadRequest<I>::read_object() {
I *image_ctx = this->m_ictx;
- ldout(image_ctx->cct, 20) << this->m_oid << " " << this->m_object_off
- << "~" << this->m_object_len
- << dendl;
-
{
RWLock::RLocker snap_locker(image_ctx->snap_lock);
-
- // send read request to parent if the object doesn't exist locally
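+    // the object map indicates the object is absent; defer to the parent
+    // read via the op work queue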
if (image_ctx->object_map != nullptr &&
!image_ctx->object_map->object_may_exist(this->m_object_no)) {
- image_ctx->op_work_queue->queue(util::create_context_callback<
- ObjectRequest<I> >(this), -ENOENT);
+ image_ctx->op_work_queue->queue(new FunctionContext([this](int r) {
+ read_parent();
+ }), 0);
return;
}
}
+ ldout(image_ctx->cct, 20) << dendl;
+
librados::ObjectReadOperation op;
- int flags = image_ctx->get_read_flags(this->m_snap_id);
if (this->m_object_len >= image_ctx->sparse_read_threshold_bytes) {
op.sparse_read(this->m_object_off, this->m_object_len, &m_ext_map,
&m_read_data, nullptr);
}
op.set_op_flags2(m_op_flags);
- librados::AioCompletion *rados_completion =
- util::create_rados_callback(this);
+ librados::AioCompletion *rados_completion = util::create_rados_callback<
+ ObjectReadRequest<I>, &ObjectReadRequest<I>::handle_read_object>(this);
+ int flags = image_ctx->get_read_flags(this->m_snap_id);
int r = image_ctx->data_ctx.aio_operate(
this->m_oid, rados_completion, &op, flags, nullptr,
(this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
}
template <typename I>
-void ObjectReadRequest<I>::send_copyup()
-{
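+// direct RADOS read result: -ENOENT falls through to the parent image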
+void ObjectReadRequest<I>::handle_read_object(int r) {
I *image_ctx = this->m_ictx;
- ldout(image_ctx->cct, 20) << this->m_oid << " " << this->m_object_off
- << "~" << this->m_object_len << dendl;
+ ldout(image_ctx->cct, 20) << "r=" << r << dendl;
+
+ if (r == -ENOENT) {
+ read_parent();
+ return;
+ } else if (r < 0) {
+ lderr(image_ctx->cct) << "failed to read from object: "
+ << cpp_strerror(r) << dendl;
+ this->finish(r);
+ return;
+ }
+ this->finish(0);
+}
+
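+// satisfy the read from the parent image when the object is absent locally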
+template <typename I>
+void ObjectReadRequest<I>::read_parent() {
+ I *image_ctx = this->m_ictx;
+ uint64_t object_overlap = 0;
+ Extents parent_extents;
{
RWLock::RLocker snap_locker(image_ctx->snap_lock);
RWLock::RLocker parent_locker(image_ctx->parent_lock);
- if (!this->compute_parent_extents() ||
- (image_ctx->exclusive_lock != nullptr &&
- !image_ctx->exclusive_lock->is_lock_owner())) {
- return;
+
+ // calculate reverse mapping onto the image
+ Striper::extent_to_file(image_ctx->cct, &image_ctx->layout,
+ this->m_object_no, this->m_object_off,
+ this->m_object_len, parent_extents);
+
+ uint64_t parent_overlap = 0;
+ int r = image_ctx->get_parent_overlap(this->m_snap_id, &parent_overlap);
+ if (r == 0) {
+ object_overlap = image_ctx->prune_parent_extents(parent_extents,
+ parent_overlap);
}
}
+ if (object_overlap == 0) {
+ this->finish(-ENOENT);
+ return;
+ }
+
+ ldout(image_ctx->cct, 20) << dendl;
+
+ AioCompletion *parent_completion = AioCompletion::create_and_start<
+ ObjectReadRequest<I>, &ObjectReadRequest<I>::handle_read_parent>(
+ this, util::get_image_ctx(image_ctx->parent), AIO_TYPE_READ);
+ ImageRequest<I>::aio_read(image_ctx->parent, parent_completion,
+ std::move(parent_extents), ReadResult{&m_read_data},
+ 0, this->m_trace);
+}
+
+template <typename I>
+void ObjectReadRequest<I>::handle_read_parent(int r) {
+ I *image_ctx = this->m_ictx;
+ ldout(image_ctx->cct, 20) << "r=" << r << dendl;
+
+ if (r == -ENOENT) {
+ this->finish(r);
+ return;
+ } else if (r < 0) {
+ lderr(image_ctx->cct) << "failed to read parent extents: "
+ << cpp_strerror(r) << dendl;
+ this->finish(r);
+ return;
+ }
+
+ copyup();
+}
+
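+// with copy-on-read enabled, schedule an async copyup of the full object so
+// subsequent reads can be served from the child image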
+template <typename I>
+void ObjectReadRequest<I>::copyup() {
+ I *image_ctx = this->m_ictx;
+ if (!is_copy_on_read(image_ctx, this->m_snap_id)) {
+ this->finish(0);
+ return;
+ }
+
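+  // acquired in owner/snap/parent order and released manually on each exit path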
+ image_ctx->owner_lock.get_read();
+ image_ctx->snap_lock.get_read();
+ image_ctx->parent_lock.get_read();
+ Extents parent_extents;
+ if (!this->compute_parent_extents(&parent_extents) ||
+ (image_ctx->exclusive_lock != nullptr &&
+ !image_ctx->exclusive_lock->is_lock_owner())) {
+ image_ctx->parent_lock.put_read();
+ image_ctx->snap_lock.put_read();
+ image_ctx->owner_lock.put_read();
+ this->finish(0);
+ return;
+ }
+
+ ldout(image_ctx->cct, 20) << dendl;
+
Mutex::Locker copyup_locker(image_ctx->copyup_list_lock);
auto it = image_ctx->copyup_list.find(this->m_object_no);
if (it == image_ctx->copyup_list.end()) {
// create and kick off a CopyupRequest
auto new_req = CopyupRequest<I>::create(
- image_ctx, this->m_oid, this->m_object_no,
- std::move(this->m_parent_extents), this->m_trace);
- this->m_parent_extents.clear();
+ image_ctx, this->m_oid, this->m_object_no, std::move(parent_extents),
+ this->m_trace);
image_ctx->copyup_list[this->m_object_no] = new_req;
new_req->send();
}
+
+ image_ctx->parent_lock.put_read();
+ image_ctx->snap_lock.put_read();
+ image_ctx->owner_lock.put_read();
+ this->finish(0);
}
template <typename I>
-void ObjectReadRequest<I>::read_from_parent(Extents&& parent_extents)
-{
- I *image_ctx = this->m_ictx;
- AioCompletion *parent_completion = AioCompletion::create_and_start<
- ObjectRequest<I> >(this, util::get_image_ctx(image_ctx), AIO_TYPE_READ);
-
- ldout(image_ctx->cct, 20) << "parent completion " << parent_completion
- << " extents " << parent_extents << dendl;
- ImageRequest<I>::aio_read(image_ctx->parent, parent_completion,
- std::move(parent_extents),
- ReadResult{&m_read_data}, 0, this->m_trace);
+bool ObjectReadRequest<I>::should_complete(int r) {
+  // TODO remove once fully converted
+  assert(false);
+  return false;
}
/** write **/
m_state(LIBRBD_AIO_WRITE_FLAT), m_snap_seq(snapc.seq.val)
{
m_snaps.insert(m_snaps.end(), snapc.snaps.begin(), snapc.snaps.end());
+
+ RWLock::RLocker snap_locker(ictx->snap_lock);
+ RWLock::RLocker parent_locker(ictx->parent_lock);
+ this->compute_parent_extents(&this->m_parent_extents);
}
template <typename I>
{
RWLock::RLocker snap_locker(image_ctx->snap_lock);
RWLock::RLocker parent_locker(image_ctx->parent_lock);
- has_parent = this->compute_parent_extents();
+ has_parent = this->compute_parent_extents(&this->m_parent_extents);
}
// If parent still exists, overlap might also have changed.
if (has_parent) {
#include "test/librbd/mock/cache/MockImageCache.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
+#include "include/rbd/librbd.hpp"
#include "librbd/io/CopyupRequest.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/ObjectRequest.h"
template <>
struct CopyupRequest<librbd::MockImageCtx> {
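+  // most recently constructed mock instance; returned by create() so tests
+  // can set expectations on it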
+ static CopyupRequest* s_instance;
static CopyupRequest* create(librbd::MockImageCtx *ictx,
const std::string &oid, uint64_t objectno,
Extents &&image_extents,
const ZTracer::Trace &parent_trace) {
- return nullptr;
+ return s_instance;
}
MOCK_METHOD0(send, void());
+
+ CopyupRequest() {
+ s_instance = this;
+ }
};
template <>
struct ImageRequest<librbd::MockImageCtx> {
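+  // most recently constructed mock instance; aio_read() calls are forwarded to it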
+ static ImageRequest *s_instance;
static void aio_read(librbd::MockImageCtx *ictx, AioCompletion *c,
Extents &&image_extents, ReadResult &&read_result,
int op_flags, const ZTracer::Trace &parent_trace) {
+ s_instance->aio_read(c, image_extents);
+ }
+
+ MOCK_METHOD2(aio_read, void(AioCompletion *, const Extents&));
+
+ ImageRequest() {
+ s_instance = this;
}
};
+CopyupRequest<librbd::MockImageCtx>*
+  CopyupRequest<librbd::MockImageCtx>::s_instance = nullptr;
+ImageRequest<librbd::MockImageCtx>*
+  ImageRequest<librbd::MockImageCtx>::s_instance = nullptr;
+
} // namespace io
} // namespace librbd
using ::testing::Invoke;
using ::testing::Return;
using ::testing::WithArg;
+using ::testing::WithArgs;
struct TestMockIoObjectRequest : public TestMockFixture {
typedef ObjectRequest<librbd::MockImageCtx> MockObjectRequest;
typedef ObjectReadRequest<librbd::MockImageCtx> MockObjectReadRequest;
+ typedef CopyupRequest<librbd::MockImageCtx> MockCopyupRequest;
+ typedef ImageRequest<librbd::MockImageCtx> MockImageRequest;
void expect_object_may_exist(MockTestImageCtx &mock_image_ctx,
uint64_t object_no, bool exists) {
}
void expect_prune_parent_extents(MockTestImageCtx &mock_image_ctx,
- const MockObjectRequest::Extents& extents,
+ const Extents& extents,
uint64_t overlap, uint64_t object_overlap) {
EXPECT_CALL(mock_image_ctx, prune_parent_extents(_, overlap))
- .WillOnce(WithArg<0>(Invoke([extents, object_overlap](MockObjectRequest::Extents& e) {
+ .WillOnce(WithArg<0>(Invoke([extents, object_overlap](Extents& e) {
e = extents;
return object_overlap;
})));
void expect_read(MockTestImageCtx &mock_image_ctx,
const std::string& oid, uint64_t off, uint64_t len,
- int r) {
+ const std::string& data, int r) {
+ bufferlist bl;
+ bl.append(data);
+
auto& expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx),
read(oid, len, off, _));
if (r < 0) {
expect.WillOnce(Return(r));
} else {
- expect.WillOnce(DoDefault());
+ expect.WillOnce(WithArg<3>(Invoke([bl](bufferlist *out_bl) {
+ out_bl->append(bl);
+ return bl.length();
+ })));
}
}
void expect_sparse_read(MockTestImageCtx &mock_image_ctx,
const std::string& oid, uint64_t off, uint64_t len,
- int r) {
+ const std::string& data, int r) {
+ bufferlist bl;
+ bl.append(data);
+
auto& expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.data_ctx),
sparse_read(oid, off, len, _, _));
if (r < 0) {
expect.WillOnce(Return(r));
} else {
- expect.WillOnce(DoDefault());
+ expect.WillOnce(WithArg<4>(Invoke([bl](bufferlist *out_bl) {
+ out_bl->append(bl);
+ return bl.length();
+ })));
}
}
+
+ void expect_cache_read(MockTestImageCtx &mock_image_ctx,
+ const std::string& oid, uint64_t object_no,
+ uint64_t off, uint64_t len, const std::string& data,
+ int r) {
+ bufferlist bl;
+ bl.append(data);
+
+ EXPECT_CALL(mock_image_ctx, aio_read_from_cache({oid}, object_no, _, len,
+ off, _, _, _))
+ .WillOnce(WithArgs<2, 5>(Invoke([bl, r](bufferlist *out_bl,
+ Context *on_finish) {
+ out_bl->append(bl);
+ on_finish->complete(r);
+ })));
+ }
+
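+  // complete the mocked parent ImageRequest::aio_read with the supplied result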
+ void expect_aio_read(MockImageRequest& mock_image_request,
+ Extents&& extents, int r) {
+ EXPECT_CALL(mock_image_request, aio_read(_, extents))
+ .WillOnce(WithArg<0>(Invoke([r](AioCompletion* aio_comp) {
+ aio_comp->set_request_count(1);
+ aio_comp->add_request();
+ aio_comp->complete_request(r);
+ })));
+ }
+
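+  // expect the copy-on-read path to create and send a CopyupRequest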
+ void expect_copyup(MockCopyupRequest& mock_copyup_request, int r) {
+ EXPECT_CALL(mock_copyup_request, send())
+ .WillOnce(Invoke([&mock_copyup_request, r]() {
+ }));
+ }
};
TEST_F(TestMockIoObjectRequest, Read) {
ictx->sparse_read_threshold_bytes = 8096;
MockTestImageCtx mock_image_ctx(*ictx);
+ mock_image_ctx.object_cacher = nullptr;
+
MockObjectMap mock_object_map;
if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
mock_image_ctx.object_map = &mock_object_map;
}
InSequence seq;
- expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 0, 0);
- expect_prune_parent_extents(mock_image_ctx, {}, 0, 0);
expect_object_may_exist(mock_image_ctx, 0, true);
expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
- expect_read(mock_image_ctx, "object0", 0, 4096, 0);
+ expect_read(mock_image_ctx, "object0", 0, 4096, std::string(4096, '1'), 0);
C_SaferCond ctx;
auto req = MockObjectReadRequest::create(
&mock_image_ctx, "object0", 0, 0, 4096, CEPH_NOSNAP, 0, {}, &ctx);
req->send();
- ASSERT_EQ(-ENOENT, ctx.wait());
+ ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockIoObjectRequest, SparseReadThreshold) {
ictx->sparse_read_threshold_bytes = ictx->get_object_size();
MockTestImageCtx mock_image_ctx(*ictx);
+ mock_image_ctx.object_cacher = nullptr;
+
MockObjectMap mock_object_map;
if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
mock_image_ctx.object_map = &mock_object_map;
}
InSequence seq;
- expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 0, 0);
- expect_prune_parent_extents(mock_image_ctx, {}, 0, 0);
expect_object_may_exist(mock_image_ctx, 0, true);
expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
expect_sparse_read(mock_image_ctx, "object0", 0,
- ictx->sparse_read_threshold_bytes, 0);
+ ictx->sparse_read_threshold_bytes,
+ std::string(ictx->sparse_read_threshold_bytes, '1'), 0);
C_SaferCond ctx;
auto req = MockObjectReadRequest::create(
&mock_image_ctx, "object0", 0, 0, ictx->sparse_read_threshold_bytes,
CEPH_NOSNAP, 0, {}, &ctx);
req->send();
- ASSERT_EQ(-ENOENT, ctx.wait());
+ ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockIoObjectRequest, ReadError) {
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+ ictx->sparse_read_threshold_bytes = 8096;
+
+ MockTestImageCtx mock_image_ctx(*ictx);
+ mock_image_ctx.object_cacher = nullptr;
+
+ MockObjectMap mock_object_map;
+ if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ mock_image_ctx.object_map = &mock_object_map;
+ }
+
+ InSequence seq;
+ expect_object_may_exist(mock_image_ctx, 0, true);
+ expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
+ expect_read(mock_image_ctx, "object0", 0, 4096, "", -EPERM);
+
+ C_SaferCond ctx;
+ auto req = MockObjectReadRequest::create(
+ &mock_image_ctx, "object0", 0, 0, 4096, CEPH_NOSNAP, 0, {}, &ctx);
+ req->send();
+ ASSERT_EQ(-EPERM, ctx.wait());
+}
+
+TEST_F(TestMockIoObjectRequest, CacheRead) {
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+ ictx->sparse_read_threshold_bytes = 8096;
+
+ MockTestImageCtx mock_image_ctx(*ictx);
+ MockObjectMap mock_object_map;
+ if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ mock_image_ctx.object_map = &mock_object_map;
+ }
+
+ expect_op_work_queue(mock_image_ctx);
+
+ InSequence seq;
+ expect_cache_read(mock_image_ctx, "object0", 0, 0, 4096,
+ std::string(4096, '1'), 0);
+
+ C_SaferCond ctx;
+ auto req = MockObjectReadRequest::create(
+ &mock_image_ctx, "object0", 0, 0, 4096, CEPH_NOSNAP, 0, {}, &ctx);
+ req->send();
+ ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockIoObjectRequest, CacheReadError) {
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+ ictx->sparse_read_threshold_bytes = 8096;
+
+ MockTestImageCtx mock_image_ctx(*ictx);
+ MockObjectMap mock_object_map;
+ if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ mock_image_ctx.object_map = &mock_object_map;
+ }
+
+ expect_op_work_queue(mock_image_ctx);
+
+ InSequence seq;
+ expect_cache_read(mock_image_ctx, "object0", 0, 0, 4096,
+ "", -EPERM);
+
+ C_SaferCond ctx;
+ auto req = MockObjectReadRequest::create(
+ &mock_image_ctx, "object0", 0, 0, 4096, CEPH_NOSNAP, 0, {}, &ctx);
+ req->send();
+ ASSERT_EQ(-EPERM, ctx.wait());
+}
+
+TEST_F(TestMockIoObjectRequest, ParentRead) {
+ REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
+
+ librbd::Image image;
+ librbd::RBD rbd;
+ ASSERT_EQ(0, rbd.open(m_ioctx, image, m_image_name.c_str(), NULL));
+ ASSERT_EQ(0, image.snap_create("one"));
+ ASSERT_EQ(0, image.snap_protect("one"));
+ image.close();
+
+ std::string clone_name = get_temp_image_name();
+ int order = 0;
+ ASSERT_EQ(0, rbd.clone(m_ioctx, m_image_name.c_str(), "one", m_ioctx,
+ clone_name.c_str(), RBD_FEATURE_LAYERING, &order));
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(clone_name, &ictx));
+ ictx->sparse_read_threshold_bytes = 8096;
+ ictx->clone_copy_on_read = false;
+
+ MockTestImageCtx mock_image_ctx(*ictx);
+ mock_image_ctx.parent = &mock_image_ctx;
+ mock_image_ctx.object_cacher = nullptr;
+
+ MockObjectMap mock_object_map;
+ if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ mock_image_ctx.object_map = &mock_object_map;
+ }
+
+ InSequence seq;
+ expect_object_may_exist(mock_image_ctx, 0, true);
+ expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
+ expect_read(mock_image_ctx, "object0", 0, 4096, "", -ENOENT);
+
+ MockImageRequest mock_image_request;
+ expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 4096, 0);
+ expect_prune_parent_extents(mock_image_ctx, {{0, 4096}}, 4096, 4096);
+ expect_aio_read(mock_image_request, {{0, 4096}}, 0);
+
+ C_SaferCond ctx;
+ auto req = MockObjectReadRequest::create(
+ &mock_image_ctx, "object0", 0, 0, 4096, CEPH_NOSNAP, 0, {}, &ctx);
+ req->send();
+ ASSERT_EQ(0, ctx.wait());
+}
+
+TEST_F(TestMockIoObjectRequest, ParentReadError) {
+ REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
+
+ librbd::Image image;
+ librbd::RBD rbd;
+ ASSERT_EQ(0, rbd.open(m_ioctx, image, m_image_name.c_str(), NULL));
+ ASSERT_EQ(0, image.snap_create("one"));
+ ASSERT_EQ(0, image.snap_protect("one"));
+ image.close();
+
+ std::string clone_name = get_temp_image_name();
+ int order = 0;
+ ASSERT_EQ(0, rbd.clone(m_ioctx, m_image_name.c_str(), "one", m_ioctx,
+ clone_name.c_str(), RBD_FEATURE_LAYERING, &order));
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(clone_name, &ictx));
+ ictx->sparse_read_threshold_bytes = 8096;
+ ictx->clone_copy_on_read = false;
+
+ MockTestImageCtx mock_image_ctx(*ictx);
+ mock_image_ctx.parent = &mock_image_ctx;
+ mock_image_ctx.object_cacher = nullptr;
+
+ MockObjectMap mock_object_map;
+ if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ mock_image_ctx.object_map = &mock_object_map;
+ }
+
+ InSequence seq;
+ expect_object_may_exist(mock_image_ctx, 0, true);
+ expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
+ expect_read(mock_image_ctx, "object0", 0, 4096, "", -ENOENT);
+
+ MockImageRequest mock_image_request;
+ expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 4096, 0);
+ expect_prune_parent_extents(mock_image_ctx, {{0, 4096}}, 4096, 4096);
+ expect_aio_read(mock_image_request, {{0, 4096}}, -EPERM);
+
+ C_SaferCond ctx;
+ auto req = MockObjectReadRequest::create(
+ &mock_image_ctx, "object0", 0, 0, 4096, CEPH_NOSNAP, 0, {}, &ctx);
+ req->send();
+ ASSERT_EQ(-EPERM, ctx.wait());
+}
+
+TEST_F(TestMockIoObjectRequest, CopyOnRead) {
+ REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
+
+ librbd::Image image;
+ librbd::RBD rbd;
+ ASSERT_EQ(0, rbd.open(m_ioctx, image, m_image_name.c_str(), NULL));
+ ASSERT_EQ(0, image.snap_create("one"));
+ ASSERT_EQ(0, image.snap_protect("one"));
+ image.close();
+
+ std::string clone_name = get_temp_image_name();
+ int order = 0;
+ ASSERT_EQ(0, rbd.clone(m_ioctx, m_image_name.c_str(), "one", m_ioctx,
+ clone_name.c_str(), RBD_FEATURE_LAYERING, &order));
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(clone_name, &ictx));
+ ictx->sparse_read_threshold_bytes = 8096;
+ ictx->clone_copy_on_read = true;
+
+ MockTestImageCtx mock_image_ctx(*ictx);
+ mock_image_ctx.parent = &mock_image_ctx;
+ mock_image_ctx.object_cacher = nullptr;
+
+ MockObjectMap mock_object_map;
+ if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ mock_image_ctx.object_map = &mock_object_map;
+ }
+
+ InSequence seq;
+ expect_object_may_exist(mock_image_ctx, 0, true);
+ expect_get_read_flags(mock_image_ctx, CEPH_NOSNAP, 0);
+ expect_read(mock_image_ctx, "object0", 0, 4096, "", -ENOENT);
+
+ MockImageRequest mock_image_request;
+ expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 4096, 0);
+ expect_prune_parent_extents(mock_image_ctx, {{0, 4096}}, 4096, 4096);
+ expect_aio_read(mock_image_request, {{0, 4096}}, 0);
+
+ MockCopyupRequest mock_copyup_request;
+ expect_get_parent_overlap(mock_image_ctx, CEPH_NOSNAP, 4096, 0);
+ expect_prune_parent_extents(mock_image_ctx, {{0, 4096}}, 4096, 4096);
+ expect_copyup(mock_copyup_request, 0);
+
+ C_SaferCond ctx;
+ auto req = MockObjectReadRequest::create(
+ &mock_image_ctx, "object0", 0, 0, 4096, CEPH_NOSNAP, 0, {}, &ctx);
+ req->send();
+ ASSERT_EQ(0, ctx.wait());
}
} // namespace io