This fixes cases where the CRYPTO HEADER area is larger than the DATA area.
In particular, it was effectively impossible to flatten unformatted
clones of such images.
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
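
A minimal, self-contained sketch of the failure mode (hypothetical names,
not the librbd API): an I/O extent has to be clipped against the size of
the area it targets, so clipping crypto header I/O against the data area
rejects it as soon as the header area outgrows the data area.

  #include <algorithm>
  #include <cstdint>
  #include <optional>

  enum class Area { DATA, CRYPTO_HEADER };

  struct FakeImage {
    uint64_t data_size;           // e.g. 5000 bytes of user-visible data
    uint64_t crypto_header_size;  // e.g. 16 MiB LUKS2 header

    uint64_t area_size(Area area) const {
      return area == Area::DATA ? data_size : crypto_header_size;
    }
  };

  // Clipped length, or std::nullopt if the offset is already past the end
  // of the targeted area (the invalid-request case in clip_io()).
  std::optional<uint64_t> clip(const FakeImage& image, Area area,
                               uint64_t off, uint64_t len) {
    uint64_t size = image.area_size(area);
    if (off >= size) {
      return std::nullopt;
    }
    return std::min(len, size - off);
  }

  int main() {
    FakeImage image{5000, 16 << 20};
    // A copy-up of the parent's crypto header (e.g. during flatten) touches
    // offsets far beyond the 5000-byte data area: it is only valid when
    // clipped against the CRYPTO_HEADER area.
    bool ok = clip(image, Area::CRYPTO_HEADER, 4 << 20, 1 << 20).has_value();
    bool rejected = !clip(image, Area::DATA, 4 << 20, 1 << 20).has_value();
    return (ok && rejected) ? 0 : 1;
  }

With this change, clip_io() and clip_request() take the ImageArea the
request targets and clip against that area's size instead of always using
the DATA area.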
}
ictx->image_lock.lock_shared();
- r = clip_io(ictx, off, &len);
+ r = clip_io(ictx, off, &len, io::ImageArea::DATA);
ictx->image_lock.unlock_shared();
if (r < 0) {
return r;
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
- int r = clip_io(util::get_image_ctx(&image_ctx), off, &len);
+ int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
+ io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
- int r = clip_io(util::get_image_ctx(&image_ctx), off, &len);
+ int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
+ io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
<< "len = " << len << ", data_len " << bl.length() << dendl;
image_ctx.image_lock.lock_shared();
- int r = clip_io(util::get_image_ctx(&image_ctx), off, &len);
+ int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
+ io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
- int r = clip_io(util::get_image_ctx(&image_ctx), off, &len);
+ int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
+ io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
<< off << ", " << "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
- int r = clip_io(util::get_image_ctx(&image_ctx), off, &len);
+ int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
+ io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
uint64_t mylen = len;
ictx->image_lock.lock_shared();
- r = clip_io(ictx, off, &mylen);
+ r = clip_io(ictx, off, &mylen, io::ImageArea::DATA);
ictx->image_lock.unlock_shared();
if (r < 0)
return r;
return total_read;
}
- // validate extent against image size; clip to image size if necessary
- int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len)
- {
+ // validate extent against area size; clip to area size if necessary
+ int clip_io(ImageCtx* ictx, uint64_t off, uint64_t* len, io::ImageArea area) {
ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
if (ictx->snap_id != CEPH_NOSNAP &&
if (*len == 0)
return 0;
- // TODO: pass area
- uint64_t area_size = ictx->get_area_size(io::ImageArea::DATA);
+ uint64_t area_size = ictx->get_area_size(area);
// can't start past end
if (off >= area_size)
namespace librbd {
struct ImageCtx;
- namespace io { struct AioCompletion; }
+ namespace io {
+ struct AioCompletion;
+ enum class ImageArea;
+ }
class NoOpProgressContext : public ProgressContext
{
void image_info(const ImageCtx *ictx, image_info_t& info, size_t info_size);
uint64_t oid_to_object_no(const std::string& oid,
const std::string& object_prefix);
- int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len);
+ int clip_io(ImageCtx* ictx, uint64_t off, uint64_t* len, io::ImageArea area);
void init_rbd_header(struct rbd_obj_header_ondisk& ondisk,
uint64_t size, int order, uint64_t bid);
}
bool clip_request() const {
+ auto area = (image_dispatch_spec->image_dispatch_flags &
+ IMAGE_DISPATCH_FLAG_CRYPTO_HEADER ? ImageArea::CRYPTO_HEADER :
+ ImageArea::DATA);
int r = util::clip_request(image_dispatcher->m_image_ctx,
- &image_dispatch_spec->image_extents);
+ &image_dispatch_spec->image_extents, area);
if (r < 0) {
image_dispatch_spec->fail(r);
return true;
}
template <typename I>
-int clip_request(I *image_ctx, Extents *image_extents) {
+int clip_request(I* image_ctx, Extents* image_extents, ImageArea area) {
std::shared_lock image_locker{image_ctx->image_lock};
for (auto &image_extent : *image_extents) {
auto clip_len = image_extent.second;
int r = clip_io(librbd::util::get_image_ctx(image_ctx),
- image_extent.first, &clip_len);
+ image_extent.first, &clip_len, area);
if (r < 0) {
return r;
}
librbd::ImageCtx *image_ctx, uint64_t object_no, ReadExtents* extents,
librados::snap_t snap_id, const ZTracer::Trace &trace, Context* on_finish);
template int librbd::io::util::clip_request(
- librbd::ImageCtx *image_ctx, Extents *image_extents);
+ librbd::ImageCtx* image_ctx, Extents* image_extents, ImageArea area);
template bool librbd::io::util::trigger_copyup(
librbd::ImageCtx *image_ctx, uint64_t object_no, IOContext io_context,
Context* on_finish);
const ZTracer::Trace &trace, Context* on_finish);
template <typename ImageCtxT = librbd::ImageCtx>
-int clip_request(ImageCtxT *image_ctx, Extents *image_extents);
+int clip_request(ImageCtxT* image_ctx, Extents* image_extents, ImageArea area);
inline uint64_t get_extents_length(const Extents &extents) {
uint64_t total_bytes = 0;
}
}
+TEST_F(TestLibRBD, EncryptedFlattenSmallData)
+{
+ REQUIRE_FEATURE(RBD_FEATURE_LAYERING);
+ REQUIRE(!is_feature_enabled(RBD_FEATURE_STRIPINGV2));
+ REQUIRE(!is_feature_enabled(RBD_FEATURE_JOURNALING));
+
+ librados::IoCtx ioctx;
+ ASSERT_EQ(0, _rados.ioctx_create(m_pool_name.c_str(), ioctx));
+
+ librbd::RBD rbd;
+ std::string parent_name = get_temp_image_name();
+ std::string clone_name = get_temp_image_name();
+ uint64_t data_size = 5000;
+ uint64_t luks2_meta_size = 16 << 20;
+ std::string passphrase = "some passphrase";
+
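+  // Create a LUKS2-formatted parent whose crypto header area (16 MiB) is far
+  // larger than its 5000-byte data area, write some data, then snapshot,
+  // protect and clone it.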
+ {
+ int order = 22;
+ ASSERT_EQ(0, create_image_pp(rbd, ioctx, parent_name.c_str(),
+ luks2_meta_size + data_size, &order));
+ librbd::Image parent;
+ ASSERT_EQ(0, rbd.open(ioctx, parent, parent_name.c_str(), nullptr));
+
+ librbd::encryption_luks2_format_options_t opts = {
+ RBD_ENCRYPTION_ALGORITHM_AES256, passphrase};
+ ASSERT_EQ(0, parent.encryption_format(RBD_ENCRYPTION_FORMAT_LUKS2, &opts,
+ sizeof(opts)));
+
+ ceph::bufferlist bl;
+ bl.append(std::string(data_size, 'a'));
+ ASSERT_EQ(data_size, parent.write(0, data_size, bl));
+
+ ASSERT_EQ(0, parent.snap_create("snap"));
+ ASSERT_EQ(0, parent.snap_protect("snap"));
+ uint64_t features;
+ ASSERT_EQ(0, parent.features(&features));
+ ASSERT_EQ(0, rbd.clone(ioctx, parent_name.c_str(), "snap", ioctx,
+ clone_name.c_str(), features, &order));
+ }
+
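+  // Load encryption on the unformatted clone and verify the parent's data is
+  // readable both before and after flattening it.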
+ {
+ librbd::Image clone;
+ ASSERT_EQ(0, rbd.open(ioctx, clone, clone_name.c_str(), nullptr));
+
+ librbd::encryption_luks_format_options_t opts = {passphrase};
+ ASSERT_EQ(0, clone.encryption_load(RBD_ENCRYPTION_FORMAT_LUKS, &opts,
+ sizeof(opts)));
+ uint64_t size;
+ ASSERT_EQ(0, clone.size(&size));
+ ASSERT_EQ(data_size, size);
+ uint64_t overlap;
+ ASSERT_EQ(0, clone.overlap(&overlap));
+ ASSERT_EQ(data_size, overlap);
+
+ ceph::bufferlist expected_bl;
+ expected_bl.append(std::string(data_size, 'a'));
+
+ ceph::bufferlist read_bl1;
+ ASSERT_EQ(data_size, clone.read(0, data_size, read_bl1));
+ ASSERT_TRUE(expected_bl.contents_equal(read_bl1));
+
+ ASSERT_EQ(0, clone.flatten());
+
+ ceph::bufferlist read_bl2;
+ ASSERT_EQ(data_size, clone.read(0, data_size, read_bl2));
+ ASSERT_TRUE(expected_bl.contents_equal(read_bl2));
+ }
+
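+  // Re-open the flattened clone: the parent overlap is gone, the data intact.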
+ {
+ librbd::Image clone;
+ ASSERT_EQ(0, rbd.open(ioctx, clone, clone_name.c_str(), nullptr));
+
+ librbd::encryption_luks_format_options_t opts = {passphrase};
+ ASSERT_EQ(0, clone.encryption_load(RBD_ENCRYPTION_FORMAT_LUKS, &opts,
+ sizeof(opts)));
+ uint64_t size;
+ ASSERT_EQ(0, clone.size(&size));
+ ASSERT_EQ(data_size, size);
+ uint64_t overlap;
+ ASSERT_EQ(0, clone.overlap(&overlap));
+ ASSERT_EQ(0, overlap);
+
+ ceph::bufferlist expected_bl;
+ expected_bl.append(std::string(data_size, 'a'));
+
+ ceph::bufferlist read_bl;
+ ASSERT_EQ(data_size, clone.read(0, data_size, read_bl));
+ ASSERT_TRUE(expected_bl.contents_equal(read_bl));
+ }
+}
+
#endif
TEST_F(TestLibRBD, TestIOWithIOHint)