m_state = LIBRBD_AIO_WRITE_PRE;
FunctionContext *ctx = new FunctionContext(
boost::bind(&AioRequest::complete, this, _1));
- m_ictx->object_map->aio_update(m_object_no, new_state,
- current_state, ctx);
+ if (!m_ictx->object_map->aio_update(m_object_no, new_state,
+ current_state, ctx)) {
+ // no object map update required -- aio_update did not take ownership
+ // of ctx, so free the unused callback here to avoid leaking it
+ delete ctx;
+ return false;
+ }
}
}
m_state = LIBRBD_AIO_WRITE_POST;
FunctionContext *ctx = new FunctionContext(
boost::bind(&AioRequest::complete, this, _1));
- m_ictx->object_map->aio_update(m_object_no, OBJECT_NONEXISTENT,
- OBJECT_PENDING, ctx);
+ if (!m_ictx->object_map->aio_update(m_object_no, OBJECT_NONEXISTENT,
+ OBJECT_PENDING, ctx)) {
+ // no object map update required -- free the unused completion callback
+ delete ctx;
+ return true;
+ }
return false;
}
switch (m_state) {
case STATE_TRIM_IMAGE:
ldout(cct, 5) << "TRIM_IMAGE" << dendl;
- send_grow_object_map();
+ send_update_header();
break;
case STATE_GROW_OBJECT_MAP:
}
}
+ // avoid possible recursive lock attempts
if (!object_map_enabled) {
send_update_header();
} else if (lost_exclusive_lock) {
- // only complete when not holding locks
complete(-ERESTART);
}
}
}
}
+ // avoid possible recursive lock attempts
if (lost_exclusive_lock) {
- // only complete when not holding locks
complete(-ERESTART);
}
return false;
}
}
+ // avoid possible recursive lock attempts
if (lost_exclusive_lock) {
- // only complete when not holding locks
complete(-ERESTART);
}
}
}
void AsyncTrimRequest::send_pre_remove() {
+ bool remove_objects = false;
bool lost_exclusive_lock = false;
{
RWLock::RLocker l(m_image_ctx.owner_lock);
RWLock::RLocker l2(m_image_ctx.md_lock);
if (m_image_ctx.object_map == NULL) {
- send_remove_objects();
- return;
- }
-
- ldout(m_image_ctx.cct, 5) << this << " send_pre_remove: "
- << " delete_start=" << m_delete_start
- << " num_objects=" << m_num_objects << dendl;
- m_state = STATE_PRE_REMOVE;
-
- if (!m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
- lost_exclusive_lock = true;
+ remove_objects = true;
} else {
- // flag the objects as pending deletion
- m_image_ctx.object_map->aio_update(
- m_delete_start, m_num_objects, OBJECT_PENDING, OBJECT_EXISTS,
- create_callback_context());
+ ldout(m_image_ctx.cct, 5) << this << " send_pre_remove: "
+ << " delete_start=" << m_delete_start
+ << " num_objects=" << m_num_objects << dendl;
+ m_state = STATE_PRE_REMOVE;
+
+ if (!m_image_ctx.image_watcher->is_lock_owner()) {
+ ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
+ lost_exclusive_lock = true;
+ } else {
+ // flag the objects as pending deletion
+ Context *ctx = create_callback_context();
+ if (!m_image_ctx.object_map->aio_update(m_delete_start, m_num_objects,
+ OBJECT_PENDING, OBJECT_EXISTS,
+ ctx)) {
+ // update not dispatched: reclaim the callback to avoid a leak
+ delete ctx;
+ remove_objects = true;
+ }
+ }
}
}
- if (lost_exclusive_lock) {
+ // avoid possible recursive lock attempts
+ if (remove_objects) {
+ // no object map update required
+ send_remove_objects();
+ } else if (lost_exclusive_lock) {
complete(-ERESTART);
}
}
bool AsyncTrimRequest::send_post_remove() {
+ bool clean_boundary = false;
bool lost_exclusive_lock = false;
{
RWLock::RLocker l(m_image_ctx.owner_lock);
RWLock::RLocker l2(m_image_ctx.md_lock);
if (m_image_ctx.object_map == NULL) {
- return send_clean_boundary();
- }
-
- ldout(m_image_ctx.cct, 5) << this << " send_post_remove: "
- << " delete_start=" << m_delete_start
- << " num_objects=" << m_num_objects << dendl;
- m_state = STATE_POST_REMOVE;
-
- if (!m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
+ clean_boundary = true;
} else {
- // flag the pending objects as removed
- m_image_ctx.object_map->aio_update(
- m_delete_start, m_num_objects, OBJECT_NONEXISTENT, OBJECT_PENDING,
- create_callback_context());
+ ldout(m_image_ctx.cct, 5) << this << " send_post_remove: "
+ << " delete_start=" << m_delete_start
+ << " num_objects=" << m_num_objects << dendl;
+ m_state = STATE_POST_REMOVE;
+
+ if (!m_image_ctx.image_watcher->is_lock_owner()) {
+ ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
+ // record the lost lock so we restart below (matches send_pre_remove;
+ // without this assignment the -ERESTART branch is unreachable)
+ lost_exclusive_lock = true;
+ } else {
+ // flag the pending objects as removed
+ Context *ctx = create_callback_context();
+ if (!m_image_ctx.object_map->aio_update(m_delete_start, m_num_objects,
+ OBJECT_NONEXISTENT,
+ OBJECT_PENDING,
+ ctx)) {
+ // update not dispatched: reclaim the callback to avoid a leak
+ delete ctx;
+ clean_boundary = true;
+ }
+ }
}
}
- if (lost_exclusive_lock) {
+ // avoid possible recursive lock attempts
+ if (clean_boundary) {
+ // no object map update required
+ return send_clean_boundary();
+ } else if (lost_exclusive_lock) {
complete(-ERESTART);
}
return false;
}
+ // avoid possible recursive lock attempts
if (lost_exclusive_lock) {
complete(-ERESTART);
} else if (completion != NULL) {
}
bool CopyupRequest::send_object_map() {
- bool object_map_enabled = true;
+ bool copyup = false;
{
RWLock::RLocker l(m_ictx->owner_lock);
RWLock::RLocker l2(m_ictx->md_lock);
if (m_ictx->object_map == NULL) {
- object_map_enabled = false;
+ copyup = true;
} else if (!m_ictx->image_watcher->is_lock_owner()) {
ldout(m_ictx->cct, 20) << "exclusive lock not held for copy-on-read"
<< dendl;
return true;
} else {
m_state = STATE_OBJECT_MAP;
- m_ictx->object_map->aio_update(m_object_no, OBJECT_EXISTS,
- boost::optional<uint8_t>(),
- create_callback_context());
+ Context *ctx = create_callback_context();
+ if (!m_ictx->object_map->aio_update(m_object_no, OBJECT_EXISTS,
+ boost::optional<uint8_t>(),
+ ctx)) {
+ // update not dispatched: reclaim the callback to avoid a leak
+ delete ctx;
+ copyup = true;
+ }
}
}
- if (!object_map_enabled) {
+ // avoid possible recursive lock attempts
+ if (copyup) {
+ // no object map update required
send_copyup();
return true;
- }
+ }
return false;
}
snap_name = in_snap_name;
snap_exists = true;
data_ctx.snap_set_read(snap_id);
+
+ if (object_map != NULL) {
+ object_map->refresh();
+ }
return 0;
}
return -ENOENT;
snap_name = "";
snap_exists = true;
data_ctx.snap_set_read(snap_id);
+
+ if (object_map != NULL) {
+ object_map->refresh();
+ }
}
snap_t ImageCtx::get_snap_id(string in_snap_name) const
#include "librbd/internal.h"
#include "common/dout.h"
#include "common/errno.h"
+#include "include/stringify.h"
#include "cls/lock/cls_lock_client.h"
#define dout_subsys ceph_subsys_rbd
bool broke_lock = false;
CephContext *cct = m_image_ctx.cct;
while (true) {
- ldout(cct, 10) << "locking object map" << dendl;
+ ldout(cct, 10) << &m_image_ctx << " locking object map" << dendl;
r = rados::cls::lock::lock(&m_image_ctx.md_ctx,
object_map_name(m_image_ctx.id),
RBD_LOCK_NAME, LOCK_EXCLUSIVE, "", "", "",
return 0;
}
+ ldout(m_image_ctx.cct, 10) << &m_image_ctx << " unlocking object map"
+ << dendl;
int r = rados::cls::lock::unlock(&m_image_ctx.md_ctx,
object_map_name(m_image_ctx.id),
RBD_LOCK_NAME, "");
RWLock::RLocker l(m_image_ctx.object_map_lock);
assert(object_no < object_map.size());
- return (object_map[object_no] == OBJECT_EXISTS ||
- object_map[object_no] == OBJECT_PENDING);
+
+ bool exists = (object_map[object_no] == OBJECT_EXISTS ||
+ object_map[object_no] == OBJECT_PENDING);
+ ldout(m_image_ctx.cct, 20) << &m_image_ctx << " object_may_exist: "
+ << "object_no=" << object_no << " r=" << exists
+ << dendl;
+ return exists;
}
int ObjectMap::refresh()
if ((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) == 0) {
return 0;
}
-
- RWLock::WLocker l(m_image_ctx.object_map_lock);
+
CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << &m_image_ctx << " refreshing object map" << dendl;
+
+ RWLock::WLocker l(m_image_ctx.object_map_lock);
int r = cls_client::object_map_load(&m_image_ctx.data_ctx,
object_map_name(m_image_ctx.id),
&object_map);
req->send();
}
-void ObjectMap::aio_update(uint64_t object_no, uint8_t new_state,
+// Single-object convenience overload: returns true if an asynchronous
+// update was dispatched (on_finish is handed to the update request),
+// false if no update was required -- in that case on_finish is NOT
+// completed and ownership stays with the caller.
+bool ObjectMap::aio_update(uint64_t object_no, uint8_t new_state,
const boost::optional<uint8_t> &current_state,
Context *on_finish)
{
- aio_update(object_no, object_no + 1, new_state, current_state, on_finish);
+ return aio_update(object_no, object_no + 1, new_state, current_state,
+ on_finish);
}
-void ObjectMap::aio_update(uint64_t start_object_no, uint64_t end_object_no,
+// Returns true if an UpdateRequest was sent for [start_object_no,
+// end_object_no) -- the request takes ownership of on_finish. Returns
+// false when every object already matches new_state (or fails the
+// current_state filter) or the range lies beyond the loaded map; the
+// caller then still owns on_finish (it is no longer completed here, so
+// completion cannot recurse while object_map_lock is held).
+bool ObjectMap::aio_update(uint64_t start_object_no, uint64_t end_object_no,
uint8_t new_state,
const boost::optional<uint8_t> &current_state,
Context *on_finish)
assert(m_image_ctx.owner_lock.is_locked());
assert(m_image_ctx.image_watcher->is_lock_owner());
- bool update_required = false;
- {
- RWLock::WLocker l(m_image_ctx.object_map_lock);
- assert(start_object_no < end_object_no);
+ RWLock::WLocker l(m_image_ctx.object_map_lock);
+ assert(start_object_no < end_object_no);
- CephContext *cct = m_image_ctx.cct;
- if (end_object_no > object_map.size()) {
- ldout(cct, 20) << "skipping update of invalid object map" << dendl;
- return;
- }
+ CephContext *cct = m_image_ctx.cct;
+ if (end_object_no > object_map.size()) {
+ // range is outside the in-memory map: treated as "no update required"
+ ldout(cct, 20) << "skipping update of invalid object map" << dendl;
+ return false;
+ }
- for (uint64_t object_no = start_object_no; object_no < end_object_no;
- ++object_no) {
- if ((!current_state || object_map[object_no] == *current_state) &&
- object_map[object_no] != new_state) {
- update_required = true;
- break;
- }
- }
-
- if (update_required) {
+ for (uint64_t object_no = start_object_no; object_no < end_object_no;
+ ++object_no) {
+ if ((!current_state || object_map[object_no] == *current_state) &&
+ object_map[object_no] != new_state) {
+ // first object needing a change: update the whole range at once
UpdateRequest *req = new UpdateRequest(m_image_ctx, start_object_no,
end_object_no, new_state,
current_state, on_finish);
req->send();
+ return true;
}
}
-
- if (!update_required) {
- on_finish->complete(0);
- }
+ return false;
}
void ObjectMap::invalidate() {
CephContext *cct = m_image_ctx.cct;
- lderr(cct) << this << " invalidating object map" << dendl;
+ lderr(cct) << &m_image_ctx << " invalidating object map" << dendl;
m_image_ctx.flags |= RBD_FLAG_OBJECT_MAP_INVALID;
librados::ObjectWriteOperation op;
bool ObjectMap::Request::should_complete(int r) {
CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << this << " should_complete: r=" << r << dendl;
+ ldout(cct, 20) << &m_image_ctx << " should_complete: r=" << r << dendl;
switch (m_state)
{
if (r < 0) {
lderr(cct) << "failed to invalidate object map: " << cpp_strerror(r)
<< dendl;
- return true;
}
- break;
+ return true;
default:
lderr(cct) << "invalid state: " << m_state << dendl;
CephContext *cct = m_image_ctx.cct;
RWLock::WLocker l(m_image_ctx.md_lock);
- lderr(cct) << this << " invalidating object map" << dendl;
+ lderr(cct) << &m_image_ctx << " invalidating object map" << dendl;
m_state = STATE_INVALIDATE;
m_image_ctx.flags |= RBD_FLAG_OBJECT_MAP_INVALID;
RWLock::WLocker l(m_image_ctx.object_map_lock);
m_num_objs = Striper::get_num_objects(m_image_ctx.layout, m_new_size);
- ldout(cct, 5) << this << " resizing on-disk object map: " << m_num_objs << dendl;
+ ldout(cct, 5) << &m_image_ctx << " resizing on-disk object map: "
+ << m_num_objs << dendl;
librados::ObjectWriteOperation op;
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, LOCK_EXCLUSIVE, "", "");
void ObjectMap::ResizeRequest::finish(ObjectMap *object_map) {
CephContext *cct = m_image_ctx.cct;
- ldout(cct, 5) << this << " resizing in-memory object map: " << m_num_objs << dendl;
+ ldout(cct, 5) << &m_image_ctx << " resizing in-memory object map: "
+ << m_num_objs << dendl;
size_t orig_object_map_size = object_map->object_map.size();
object_map->object_map.resize(m_num_objs);
for (uint64_t i = orig_object_map_size; i < object_map->object_map.size(); ++i) {
void ObjectMap::UpdateRequest::send() {
CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << this << " updating on-disk object map: ["
+ ldout(cct, 20) << &m_image_ctx << " updating on-disk object map: ["
<< m_start_object_no << "," << m_end_object_no << ") = "
- << static_cast<uint32_t>(m_new_state) << dendl;
+ << (m_current_state ? stringify(*m_current_state) : "")
+ << "->" << static_cast<uint32_t>(m_new_state)
+ << dendl;
librados::ObjectWriteOperation op;
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, LOCK_EXCLUSIVE, "", "");
void ObjectMap::UpdateRequest::finish(ObjectMap *object_map) {
CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << this << " updating in-memory object map" << dendl;
+ ldout(cct, 20) << &m_image_ctx << " updating in-memory object map" << dendl;
for (uint64_t object_no = m_start_object_no;
object_no < MIN(m_end_object_no, object_map->object_map.size());
++object_no) {
void aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish);
- void aio_update(uint64_t object_no, uint8_t new_state,
- const boost::optional<uint8_t> ¤t_state,
- Context *on_finish);
- void aio_update(uint64_t start_object_no, uint64_t end_object_no,
+ bool aio_update(uint64_t object_no, uint8_t new_state,
+ const boost::optional<uint8_t> ¤t_state,
+ Context *on_finish);
+ bool aio_update(uint64_t start_object_no, uint64_t end_object_no,
uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
Context *on_finish);
if (r < 0) {
return r;
}
-
- if (ictx->object_map != NULL) {
- ictx->object_map->refresh();
- }
refresh_parent(ictx);
return 0;
}
def test_resize_io(self):
parent_data = self.image.read(IMG_SIZE / 2, 256)
+ self.image.resize(0)
self.clone.resize(IMG_SIZE / 2 + 128)
child_data = self.clone.read(IMG_SIZE / 2, 128)
eq(child_data, parent_data[:128])