}
void AbstractAioObjectWrite::send_pre() {
- bool write = false;
{
RWLock::RLocker snap_lock(m_ictx->snap_lock);
if (m_ictx->object_map == nullptr) {
m_object_exist = true;
- write = true;
} else {
// should have been flushed prior to releasing lock
assert(m_ictx->exclusive_lock->is_lock_owner());
pre_object_map_update(&new_state);
RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
- if (m_ictx->object_map->update_required(m_object_no, new_state)) {
- ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
- << m_object_off << "~" << m_object_len
- << dendl;
- m_state = LIBRBD_AIO_WRITE_PRE;
-
- Context *ctx = util::create_context_callback<AioObjectRequest>(this);
- bool updated = m_ictx->object_map->aio_update(m_object_no, new_state,
- {}, ctx);
- assert(updated);
- } else {
- write = true;
+ ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
+ << m_object_off << "~" << m_object_len
+ << dendl;
+ m_state = LIBRBD_AIO_WRITE_PRE;
+
+ if (m_ictx->object_map->aio_update<AioObjectRequest>(
+ CEPH_NOSNAP, m_object_no, new_state, {}, this)) {
+ return;
}
}
}
- // avoid possible recursive lock attempts
- if (write) {
- // no object map update required
- send_write();
- }
+ // no object map update required
+ send_write();
}
bool AbstractAioObjectWrite::send_post() {
assert(m_ictx->exclusive_lock->is_lock_owner());
RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
- if (!m_ictx->object_map->update_required(m_object_no, OBJECT_NONEXISTENT)) {
- return true;
- }
-
ldout(m_ictx->cct, 20) << "send_post " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len << dendl;
m_state = LIBRBD_AIO_WRITE_POST;
- Context *ctx = util::create_context_callback<AioObjectRequest>(this);
- bool updated = m_ictx->object_map->aio_update(m_object_no,
- OBJECT_NONEXISTENT,
- OBJECT_PENDING, ctx);
- assert(updated);
- return false;
+ if (m_ictx->object_map->aio_update<AioObjectRequest>(
+ CEPH_NOSNAP, m_object_no, OBJECT_NONEXISTENT, OBJECT_PENDING, this)) {
+ return false;
+ }
+
+ return true;
}
void AbstractAioObjectWrite::send_write() {
RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
assert(m_image_ctx.exclusive_lock->is_lock_owner());
assert(m_image_ctx.object_map != nullptr);
- bool sent = m_image_ctx.object_map->aio_update(m_object_no, OBJECT_EXISTS,
- boost::optional<uint8_t>(),
- this);
+ bool sent = m_image_ctx.object_map->aio_update<Context>(
+ CEPH_NOSNAP, m_object_no, OBJECT_EXISTS, {}, this);
return (sent ? 0 : 1);
}
return 1;
}
- m_image_ctx.object_map->aio_update(snap_id, m_object_no, m_object_no + 1,
- state, boost::optional<uint8_t>(), this);
+ bool sent = m_image_ctx.object_map->aio_update<Context>(
+ snap_id, m_object_no, state, {}, this);
+ assert(sent);
return 0;
}
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
+
#include "librbd/ObjectMap.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
req->send();
}
-bool ObjectMap::aio_update(uint64_t object_no, uint8_t new_state,
- const boost::optional<uint8_t> &current_state,
- Context *on_finish)
-{
- return aio_update(object_no, object_no + 1, new_state, current_state,
- on_finish);
-}
-
-bool ObjectMap::aio_update(uint64_t start_object_no, uint64_t end_object_no,
- uint8_t new_state,
+void ObjectMap::aio_update(uint64_t snap_id, uint64_t start_object_no,
+ uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> &current_state,
- Context *on_finish)
-{
+ Context *on_finish) {
assert(m_image_ctx.snap_lock.is_locked());
assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
- assert(m_image_ctx.image_watcher != NULL);
+ assert(m_image_ctx.image_watcher != nullptr);
assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
- assert(m_image_ctx.object_map_lock.is_wlocked());
+ assert(snap_id != CEPH_NOSNAP || m_image_ctx.object_map_lock.is_wlocked());
assert(start_object_no < end_object_no);
CephContext *cct = m_image_ctx.cct;
<< (current_state ?
stringify(static_cast<uint32_t>(*current_state)) : "")
<< "->" << static_cast<uint32_t>(new_state) << dendl;
- if (end_object_no > m_object_map.size()) {
- ldout(cct, 20) << "skipping update of invalid object map" << dendl;
- return false;
- }
+ if (snap_id == CEPH_NOSNAP) {
+ if (end_object_no > m_object_map.size()) {
+ ldout(cct, 20) << "skipping update of invalid object map" << dendl;
+ m_image_ctx.op_work_queue->queue(on_finish, 0);
+ return;
+ }
- for (uint64_t object_no = start_object_no; object_no < end_object_no;
- ++object_no) {
- uint8_t state = m_object_map[object_no];
- if ((!current_state || state == *current_state ||
- (*current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN)) &&
- state != new_state) {
- aio_update(m_snap_id, start_object_no, end_object_no, new_state,
- current_state, on_finish);
- return true;
+ uint64_t object_no;
+ for (object_no = start_object_no; object_no < end_object_no; ++object_no) {
+ if (update_required(object_no, new_state)) {
+ break;
+ }
+ }
+ if (object_no == end_object_no) {
+ ldout(cct, 20) << "object map update not required" << dendl;
+ m_image_ctx.op_work_queue->queue(on_finish, 0);
+ return;
}
}
- return false;
-}
-void ObjectMap::aio_update(uint64_t snap_id, uint64_t start_object_no,
- uint64_t end_object_no, uint8_t new_state,
- const boost::optional<uint8_t> &current_state,
- Context *on_finish) {
object_map::UpdateRequest *req = new object_map::UpdateRequest(
m_image_ctx, &m_object_map, snap_id, start_object_no, end_object_no,
new_state, current_state, on_finish);
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
+
#ifndef CEPH_LIBRBD_OBJECT_MAP_H
#define CEPH_LIBRBD_OBJECT_MAP_H
#include "include/fs_types.h"
#include "include/rbd/object_map_types.h"
#include "common/bit_vector.hpp"
+#include "librbd/Utils.h"
#include <boost/optional.hpp>
class Context;
void close(Context *on_finish);
bool object_may_exist(uint64_t object_no) const;
- bool update_required(uint64_t object_no, uint8_t new_state);
void aio_save(Context *on_finish);
void aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish);
- bool aio_update(uint64_t object_no, uint8_t new_state,
- const boost::optional<uint8_t> &current_state,
- Context *on_finish);
- bool aio_update(uint64_t start_object_no, uint64_t end_object_no,
- uint8_t new_state,
- const boost::optional<uint8_t> &current_state,
- Context *on_finish);
- void aio_update(uint64_t snap_id, uint64_t start_object_no,
+ template <typename T, void(T::*MF)(int) = &T::complete>
+ bool aio_update(uint64_t snap_id, uint64_t start_object_no, uint8_t new_state,
+ const boost::optional<uint8_t> &current_state,
+ T *callback_object) {
+ return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
+ new_state, current_state, callback_object);
+ }
+
+ template <typename T, void(T::*MF)(int) = &T::complete>
+ bool aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> &current_state,
- Context *on_finish);
+ T *callback_object) {
+ assert(start_object_no < end_object_no);
+ if (snap_id == CEPH_NOSNAP) {
+ uint64_t object_no;
+ for (object_no = start_object_no; object_no < end_object_no;
+ ++object_no) {
+ if (update_required(object_no, new_state)) {
+ break;
+ }
+ }
+
+ if (object_no == end_object_no) {
+ return false;
+ }
+ }
+
+ aio_update(snap_id, start_object_no, end_object_no, new_state,
+ current_state,
+ util::create_context_callback<T, MF>(callback_object));
+ return true;
+ }
void rollback(uint64_t snap_id, Context *on_finish);
void snapshot_add(uint64_t snap_id, Context *on_finish);
ceph::BitVector<2> m_object_map;
uint64_t m_snap_id;
+ void aio_update(uint64_t snap_id, uint64_t start_object_no,
+ uint64_t end_object_no, uint8_t new_state,
+ const boost::optional<uint8_t> &current_state,
+ Context *on_finish);
+ bool update_required(uint64_t object_no, uint8_t new_state);
+
};
} // namespace librbd
m_copyup_start = m_delete_start;
m_delete_start = m_copyup_end;
- bool copyup_objects = false;
{
RWLock::RLocker snap_locker(image_ctx.snap_lock);
- if (image_ctx.object_map == nullptr) {
- copyup_objects = true;
- } else {
+ if (image_ctx.object_map != nullptr) {
ldout(image_ctx.cct, 5) << this << " send_pre_copyup: "
<< " copyup_start=" << m_copyup_start
<< " copyup_end=" << m_copyup_end << dendl;
assert(image_ctx.exclusive_lock->is_lock_owner());
- Context *ctx = this->create_callback_context();
RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
- if (!image_ctx.object_map->aio_update(m_copyup_start, m_copyup_end,
- OBJECT_PENDING, OBJECT_EXISTS, ctx)) {
- delete ctx;
- copyup_objects = true;
+ if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+ CEPH_NOSNAP, m_copyup_start, m_copyup_end, OBJECT_PENDING,
+ OBJECT_EXISTS, this)) {
+ return;
}
}
}
- if (copyup_objects) {
- send_copyup_objects();
- }
+ send_copyup_objects();
}
template <typename I>
return;
}
- bool remove_objects = false;
{
RWLock::RLocker snap_locker(image_ctx.snap_lock);
- if (image_ctx.object_map == nullptr) {
- remove_objects = true;
- } else {
+ if (image_ctx.object_map != nullptr) {
ldout(image_ctx.cct, 5) << this << " send_pre_remove: "
<< " delete_start=" << m_delete_start
<< " num_objects=" << m_num_objects << dendl;
assert(image_ctx.exclusive_lock->is_lock_owner());
// flag the objects as pending deletion
- Context *ctx = this->create_callback_context();
RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
- if (!image_ctx.object_map->aio_update(m_delete_start, m_num_objects,
- OBJECT_PENDING, OBJECT_EXISTS,
- ctx)) {
- delete ctx;
- remove_objects = true;
+ if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+ CEPH_NOSNAP, m_delete_start, m_num_objects, OBJECT_PENDING,
+ OBJECT_EXISTS, this)) {
+ return;
}
}
}
- // avoid possible recursive lock attempts
- if (remove_objects) {
- // no object map update required
- send_remove_objects();
- }
+ // no object map update required
+ send_remove_objects();
}
template<typename I>
I &image_ctx = this->m_image_ctx;
assert(image_ctx.owner_lock.is_locked());
- bool pre_remove_objects = false;
{
RWLock::RLocker snap_locker(image_ctx.snap_lock);
- if (image_ctx.object_map == nullptr) {
- pre_remove_objects = true;
- } else {
+ if (image_ctx.object_map != nullptr) {
ldout(image_ctx.cct, 5) << this << " send_post_copyup:"
<< " copyup_start=" << m_copyup_start
<< " copyup_end=" << m_copyup_end << dendl;
assert(image_ctx.exclusive_lock->is_lock_owner());
- Context *ctx = this->create_callback_context();
RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
- if (!image_ctx.object_map->aio_update(m_copyup_start, m_copyup_end,
- OBJECT_NONEXISTENT, OBJECT_PENDING, ctx)) {
- delete ctx;
- pre_remove_objects = true;
+ if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+ CEPH_NOSNAP, m_copyup_start, m_copyup_end, OBJECT_NONEXISTENT,
+ OBJECT_PENDING, this)) {
+ return;
}
}
}
- if (pre_remove_objects) {
- send_pre_remove();
- }
+ send_pre_remove();
}
template <typename I>
I &image_ctx = this->m_image_ctx;
assert(image_ctx.owner_lock.is_locked());
- bool clean_boundary = false;
{
RWLock::RLocker snap_locker(image_ctx.snap_lock);
- if (image_ctx.object_map == nullptr) {
- clean_boundary = true;
- } else {
+ if (image_ctx.object_map != nullptr) {
ldout(image_ctx.cct, 5) << this << " send_post_remove: "
<< " delete_start=" << m_delete_start
<< " num_objects=" << m_num_objects << dendl;
assert(image_ctx.exclusive_lock->is_lock_owner());
// flag the pending objects as removed
- Context *ctx = this->create_callback_context();
RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
- if (!image_ctx.object_map->aio_update(m_delete_start, m_num_objects,
- OBJECT_NONEXISTENT,
- OBJECT_PENDING, ctx)) {
- delete ctx;
- clean_boundary = true;
+ if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+ CEPH_NOSNAP, m_delete_start, m_num_objects, OBJECT_NONEXISTENT,
+ OBJECT_PENDING, this)) {
+ return;
}
}
}
- // avoid possible recursive lock attempts
- if (clean_boundary) {
- // no object map update required
- send_clean_boundary();
- }
+ // no object map update required
+ send_clean_boundary();
}
template <typename I>
#define CEPH_TEST_LIBRBD_MOCK_OBJECT_MAP_H
#include "common/RWLock.h"
+#include "librbd/Utils.h"
#include "gmock/gmock.h"
namespace librbd {
MOCK_METHOD3(aio_resize, void(uint64_t new_size, uint8_t default_object_state,
Context *on_finish));
- MOCK_METHOD6(aio_update, void(uint64_t snap_id, uint64_t start_object_no,
+
+ template <typename T, void(T::*MF)(int)>
+ bool aio_update(uint64_t snap_id, uint64_t start_object_no, uint8_t new_state,
+ const boost::optional<uint8_t> &current_state,
+ T *callback_object) {
+ return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
+ new_state, current_state, callback_object);
+ }
+
+ template <typename T, void(T::*MF)(int)>
+ bool aio_update(uint64_t snap_id, uint64_t start_object_no,
+ uint64_t end_object_no, uint8_t new_state,
+ const boost::optional<uint8_t> &current_state,
+ T *callback_object) {
+ return aio_update(snap_id, start_object_no, end_object_no, new_state,
+ current_state,
+ util::create_context_callback<T, MF>(callback_object));
+ }
+ MOCK_METHOD6(aio_update, bool(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> &current_state,
Context *on_finish));
if (mock_image_ctx.image_ctx->object_map != nullptr) {
auto &expect = EXPECT_CALL(mock_object_map, aio_update(snap_id, 0, 1, state, _, _));
if (r < 0) {
- expect.WillOnce(WithArg<5>(Invoke([this, r](Context *ctx) {
- m_threads->work_queue->queue(ctx, r);
- })));
+ expect.WillOnce(DoAll(WithArg<5>(Invoke([this, r](Context *ctx) {
+ m_threads->work_queue->queue(ctx, r);
+ })),
+ Return(true)));
} else {
- expect.WillOnce(WithArg<5>(Invoke([&mock_image_ctx, snap_id, state, r](Context *ctx) {
- assert(mock_image_ctx.image_ctx->snap_lock.is_locked());
- assert(mock_image_ctx.image_ctx->object_map_lock.is_wlocked());
- mock_image_ctx.image_ctx->object_map->aio_update(snap_id, 0, 1,
- state,
- boost::none, ctx);
- })));
+ expect.WillOnce(DoAll(WithArg<5>(Invoke([&mock_image_ctx, snap_id, state, r](Context *ctx) {
+ assert(mock_image_ctx.image_ctx->snap_lock.is_locked());
+ assert(mock_image_ctx.image_ctx->object_map_lock.is_wlocked());
+ mock_image_ctx.image_ctx->object_map->aio_update<Context>(
+ snap_id, 0, 1, state, boost::none, ctx);
+ })),
+ Return(true)));
}
}
}
<< dendl;
RWLock::WLocker object_map_locker(m_local_image_ctx->object_map_lock);
- Context *ctx = create_context_callback<
+ bool sent = m_local_image_ctx->object_map->template aio_update<
ObjectCopyRequest<I>, &ObjectCopyRequest<I>::handle_update_object_map>(
+ snap_object_state.first, m_object_number, snap_object_state.second, {},
this);
-
- m_local_image_ctx->object_map->aio_update(snap_object_state.first,
- m_object_number,
- m_object_number + 1,
- snap_object_state.second,
- boost::none, ctx);
+ assert(sent);
m_local_image_ctx->snap_lock.put_read();
}