void AioRead::send_copyup()
{
-  RWLock::RLocker snap_locker(m_ictx->snap_lock);
-  RWLock::RLocker parent_locker(m_ictx->parent_lock);
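+  // compute the parent overlap while snap_lock and parent_lock are held,
+  // then release both before copyup_list_lock is acquired below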
+  {
+    RWLock::RLocker snap_locker(m_ictx->snap_lock);
+    RWLock::RLocker parent_locker(m_ictx->parent_lock);
+    if (!compute_parent_extents()) {
+      return;
+    }
+  }
+
  Mutex::Locker copyup_locker(m_ictx->copyup_list_lock);
  map<uint64_t, CopyupRequest*>::iterator it =
    m_ictx->copyup_list.find(m_object_no);
  if (it == m_ictx->copyup_list.end()) {
-    if (compute_parent_extents()) {
-      // create and kick off a CopyupRequest
-      CopyupRequest *new_req = new CopyupRequest(m_ictx, m_oid,
-                                                 m_object_no,
-                                                 m_parent_extents);
-      m_ictx->copyup_list[m_object_no] = new_req;
-      new_req->queue_send();
-    }
+    // create and kick off a CopyupRequest
+    CopyupRequest *new_req = new CopyupRequest(m_ictx, m_oid, m_object_no,
+                                               m_parent_extents);
+    m_ictx->copyup_list[m_object_no] = new_req;
+    new_req->queue_send();
  }
}
    ldout(m_ictx->cct, 20) << "WRITE_CHECK_GUARD" << dendl;
    if (r == -ENOENT) {
-      RWLock::RLocker l(m_ictx->snap_lock);
-      RWLock::RLocker l2(m_ictx->parent_lock);
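+      // snap_lock and parent_lock must be dropped before calling
+      // send_copyup(), which re-acquires them itself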
+      bool has_parent;
+      {
+        RWLock::RLocker snap_locker(m_ictx->snap_lock);
+        RWLock::RLocker parent_locker(m_ictx->parent_lock);
+        has_parent = compute_parent_extents();
+      }
      // If parent still exists, overlap might also have changed.
-      if (compute_parent_extents()) {
+      if (has_parent) {
        send_copyup();
      } else {
        // parent may have disappeared -- send original write again
bool CopyupRequest::send_object_map() {
  {
-    RWLock::RLocker l(m_ictx->owner_lock);
+    RWLock::RLocker owner_locker(m_ictx->owner_lock);
+    RWLock::RLocker snap_locker(m_ictx->snap_lock);
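+    // snap_lock protects the features and snapshot list consulted below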
    if (m_ictx->object_map.enabled()) {
      if (!m_ictx->image_watcher->is_lock_owner()) {
-        ldout(m_ictx->cct, 20) << "exclusive lock not held for copyup request"
-                               << dendl;
+        ldout(m_ictx->cct, 20) << "exclusive lock not held for copyup request"
+                               << dendl;
        assert(m_pending_requests.empty());
        return true;
      }
-      RWLock::RLocker snap_locker(m_ictx->snap_lock);
      RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
      if (m_ictx->object_map[m_object_no] != OBJECT_EXISTS ||
          !m_ictx->snaps.empty()) {
void ImageCtx::shutdown_cache() {
  flush_async_operations();
+
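+  // invalidate_cache() expects the caller to hold owner_lock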
+  RWLock::RLocker owner_locker(owner_lock);
  invalidate_cache(true);
  object_cacher->stop();
}
}
void ImageCtx::flush_async_operations(Context *on_finish) {
-  bool complete = false;
-  {
-    Mutex::Locker l(async_ops_lock);
-    if (async_ops.empty()) {
-      complete = true;
-    } else {
-      ldout(cct, 20) << "flush async operations: " << on_finish << " "
-                     << "count=" << async_ops.size() << dendl;
-      async_ops.front()->add_flush_context(on_finish);
-    }
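+  // on_finish is deferred to the op work queue rather than being completed
+  // in-line while async_ops_lock is held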
+  Mutex::Locker l(async_ops_lock);
+  if (async_ops.empty()) {
+    op_work_queue->queue(on_finish, 0);
+    return;
  }
-  if (complete) {
-    on_finish->complete(0);
-  }
+
+  ldout(cct, 20) << "flush async operations: " << on_finish << " "
+                 << "count=" << async_ops.size() << dendl;
+  async_ops.front()->add_flush_context(on_finish);
}
void ImageCtx::cancel_async_requests() {
#include "librbd/ImageWatcher.h"
#include "librbd/AioCompletion.h"
#include "librbd/ImageCtx.h"
+#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/TaskFinisher.h"
#include "cls/lock/cls_lock_client.h"
  ldout(m_image_ctx.cct, 10) << "acquired exclusive lock" << dendl;
  m_lock_owner_state = LOCK_OWNER_STATE_LOCKED;
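+  // retrieve the client id before taking m_owner_client_id_lock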
+  ClientId owner_client_id = get_client_id();
  {
    Mutex::Locker l(m_owner_client_id_lock);
-    m_owner_client_id = get_client_id();
+    m_owner_client_id = owner_client_id;
  }
  if (m_image_ctx.object_map.enabled()) {
      return r;
    }
  } else {
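+    // snap_remove_helper() asserts that owner_lock is held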
+    RWLock::RLocker owner_lock(ictx->owner_lock);
    r = snap_remove_helper(ictx, NULL, snap_name);
    if (r < 0) {
      return r;
int snap_remove_helper(ImageCtx *ictx, Context *ctx, const char *snap_name)
{
+  assert(ictx->owner_lock.is_locked());
  {
-    RWLock::RLocker snap_locker(ictx->snap_lock);
    if ((ictx->features & RBD_FEATURE_FAST_DIFF) != 0) {
-      assert(ictx->owner_lock.is_locked());
      assert(!ictx->image_watcher->is_lock_supported() ||
             ictx->image_watcher->is_lock_owner());
    }
  }
  assert(watchers.size() == 1);
-  ictx->md_lock.get_read();
  trim_image(ictx, 0, prog_ctx);
-  ictx->md_lock.put_read();
  ictx->parent_lock.get_read();
  // struct assignment
  if (r < 0)
    return r;
-  RWLock::RLocker l(ictx->owner_lock);
+  RWLock::RLocker owner_locker(ictx->owner_lock);
  snap_t snap_id;
  uint64_t new_size;
  {
-    RWLock::WLocker l2(ictx->md_lock);
    {
      // need to drop snap_lock before invalidating cache
-      RWLock::RLocker l3(ictx->snap_lock);
+      RWLock::RLocker snap_locker(ictx->snap_lock);
      if (!ictx->snap_exists) {
        return -ENOENT;
      }
    // need to flush any pending writes before resizing and rolling back -
    // writes might create new snapshots. Rolling back will replace
    // the current version, so we have to invalidate that too.
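+    // md_lock must be write-held before flushing and invalidating the cache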
+    RWLock::WLocker md_locker(ictx->md_lock);
    ictx->flush_async_operations();
    r = ictx->invalidate_cache();
    if (r < 0) {
  CephContext *cct = ictx->cct;
  ldout(cct, 20) << "async_rebuild_object_map " << ictx << dendl;
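+  // fail fast on read-only images and images without the object map feature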
+  if (ictx->read_only) {
+    return -EROFS;
+  }
+  if (!ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+    return -EINVAL;
+  }
+
  int r = ictx_check(ictx);
  if (r < 0) {
    return r;
  }
-  {
-    RWLock::RLocker snap_locker(ictx->snap_lock);
-    if (ictx->read_only) {
-      return -EROFS;
-    }
-    if (!ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
-      return -EINVAL;
-    }
-  }
-
  RebuildObjectMapRequest *req = new RebuildObjectMapRequest(*ictx, ctx,
                                                             prog_ctx);
  req->send();