template <typename I>
ObjectMap<I>::ObjectMap(I &image_ctx, uint64_t snap_id)
: m_image_ctx(image_ctx), m_snap_id(snap_id),
+ m_lock(util::unique_lock_name("librbd::ObjectMap::lock", this)),
m_update_guard(new UpdateGuard(m_image_ctx.cct)) {
}
return (object_count <= cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT);
}
-template <typename I>
-ceph::BitVector<2u>::Reference ObjectMap<I>::operator[](uint64_t object_no)
-{
- ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
- ceph_assert(object_no < m_object_map.size());
- return m_object_map[object_no];
-}
-
template <typename I>
uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
{
- ceph_assert(m_image_ctx.object_map_lock.is_locked());
+ RWLock::RLocker locker(m_lock);
ceph_assert(object_no < m_object_map.size());
return m_object_map[object_no];
}
return true;
}
- RWLock::RLocker l(m_image_ctx.object_map_lock);
uint8_t state = (*this)[object_no];
bool exists = (state != OBJECT_NONEXISTENT);
ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r=" << exists
return true;
}
- RWLock::RLocker l(m_image_ctx.object_map_lock);
uint8_t state = (*this)[object_no];
bool nonexistent = (state != OBJECT_EXISTS && state != OBJECT_EXISTS_CLEAN);
ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r="
template <typename I>
bool ObjectMap<I>::update_required(const ceph::BitVector<2>::Iterator& it,
uint8_t new_state) {
- ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
+ ceph_assert(m_lock.is_locked());
uint8_t state = *it;
if ((state == new_state) ||
(new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
template <typename I>
void ObjectMap<I>::open(Context *on_finish) {
auto req = object_map::RefreshRequest<I>::create(
- m_image_ctx, &m_object_map, m_snap_id, on_finish);
+ m_image_ctx, &m_lock, &m_object_map, m_snap_id, on_finish);
req->send();
}
ceph_assert(m_image_ctx.image_lock.is_locked());
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
- RWLock::RLocker object_map_locker(m_image_ctx.object_map_lock);
+ RWLock::WLocker locker(m_lock);
m_object_map = target_object_map;
return true;
}
template <typename I>
void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
ceph_assert(m_image_ctx.image_lock.is_locked());
- ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
+ RWLock::WLocker locker(m_lock);
object_map::SnapshotRollbackRequest *req =
new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, on_finish);
req->send();
ceph_assert(snap_id != CEPH_NOSNAP);
object_map::SnapshotCreateRequest *req =
- new object_map::SnapshotCreateRequest(m_image_ctx, &m_object_map, snap_id,
- on_finish);
+ new object_map::SnapshotCreateRequest(m_image_ctx, &m_lock, &m_object_map,
+ snap_id, on_finish);
req->send();
}
ceph_assert(snap_id != CEPH_NOSNAP);
object_map::SnapshotRemoveRequest *req =
- new object_map::SnapshotRemoveRequest(m_image_ctx, &m_object_map, snap_id,
- on_finish);
+ new object_map::SnapshotRemoveRequest(m_image_ctx, &m_lock, &m_object_map,
+ snap_id, on_finish);
req->send();
}
ceph_assert(m_image_ctx.image_lock.is_locked());
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
- RWLock::RLocker object_map_locker(m_image_ctx.object_map_lock);
+ RWLock::RLocker locker(m_lock);
librados::ObjectWriteOperation op;
if (m_snap_id == CEPH_NOSNAP) {
m_image_ctx.exclusive_lock->is_lock_owner());
object_map::ResizeRequest *req = new object_map::ResizeRequest(
- m_image_ctx, &m_object_map, m_snap_id, new_size, default_object_state,
- on_finish);
+ m_image_ctx, &m_lock, &m_object_map, m_snap_id, new_size,
+ default_object_state, on_finish);
req->send();
}
ldout(cct, 20) << dendl;
ceph_assert(m_image_ctx.image_lock.is_locked());
- ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
+ ceph_assert(m_lock.is_wlocked());
BlockGuardCell *cell;
int r = m_update_guard->detain({op.start_object_no, op.end_object_no},
{
RWLock::RLocker image_locker(m_image_ctx.image_lock);
- RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+ RWLock::WLocker locker(m_lock);
for (auto &op : block_ops) {
detained_aio_update(std::move(op));
}
ceph_assert(m_image_ctx.image_watcher != nullptr);
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
- ceph_assert(snap_id != CEPH_NOSNAP ||
- m_image_ctx.object_map_lock.is_wlocked());
ceph_assert(start_object_no < end_object_no);
CephContext *cct = m_image_ctx.cct;
stringify(static_cast<uint32_t>(*current_state)) : "")
<< "->" << static_cast<uint32_t>(new_state) << dendl;
if (snap_id == CEPH_NOSNAP) {
+ ceph_assert(m_lock.is_wlocked());
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
ldout(cct, 20) << "skipping update of invalid object map" << dendl;
}
auto req = object_map::UpdateRequest<I>::create(
- m_image_ctx, &m_object_map, snap_id, start_object_no, end_object_no,
- new_state, current_state, parent_trace, ignore_enoent, on_finish);
+ m_image_ctx, &m_lock, &m_object_map, snap_id, start_object_no,
+ end_object_no, new_state, current_state, parent_trace, ignore_enoent,
+ on_finish);
req->send();
}
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/object_map_types.h"
#include "common/bit_vector.hpp"
+#include "common/RWLock.h"
#include "librbd/Utils.h"
#include <boost/optional.hpp>
class Context;
-class RWLock;
namespace ZTracer { struct Trace; }
namespace librbd {
static bool is_compatible(const file_layout_t& layout, uint64_t size);
- ceph::BitVector<2u>::Reference operator[](uint64_t object_no);
uint8_t operator[](uint64_t object_no) const;
inline uint64_t size() const {
+ RWLock::RLocker locker(m_lock);
return m_object_map.size();
}
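+ // conditionally set a single object's in-memory state under m_lock; the
+ // update is skipped if current_state is provided and does not match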
+ inline void set_state(uint64_t object_no, uint8_t new_state,
+ const boost::optional<uint8_t> &current_state) {
+ RWLock::WLocker locker(m_lock);
+ ceph_assert(object_no < m_object_map.size());
+ if (current_state && m_object_map[object_no] != *current_state) {
+ return;
+ }
+ m_object_map[object_no] = new_state;
+ }
+
void open(Context *on_finish);
void close(Context *on_finish);
bool set_object_map(ceph::BitVector<2> &target_object_map);
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
ceph_assert(start_object_no < end_object_no);
+ RWLock::WLocker locker(m_lock);
+
if (snap_id == CEPH_NOSNAP) {
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
typedef BlockGuard<UpdateOperation> UpdateGuard;
ImageCtxT &m_image_ctx;
- ceph::BitVector<2> m_object_map;
uint64_t m_snap_id;
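+ // protects m_object_map; when held together with image_lock, image_lock
+ // is acquired first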
+ RWLock m_lock;
+ ceph::BitVector<2> m_object_map;
+
UpdateGuard *m_update_guard = nullptr;
void detained_aio_update(UpdateOperation &&update_operation);
namespace object_map {
template <typename I>
-RefreshRequest<I>::RefreshRequest(I &image_ctx, ceph::BitVector<2> *object_map,
+RefreshRequest<I>::RefreshRequest(I &image_ctx, RWLock* object_map_lock,
+ ceph::BitVector<2> *object_map,
uint64_t snap_id, Context *on_finish)
- : m_image_ctx(image_ctx), m_object_map(object_map), m_snap_id(snap_id),
- m_on_finish(on_finish), m_object_count(0),
- m_truncate_on_disk_object_map(false) {
+ : m_image_ctx(image_ctx), m_object_map_lock(object_map_lock),
+ m_object_map(object_map), m_snap_id(snap_id), m_on_finish(on_finish),
+ m_object_count(0), m_truncate_on_disk_object_map(false) {
}
template <typename I>
}
ceph_assert(m_on_disk_object_map.size() >= num_objs);
+ RWLock::WLocker object_map_locker(*m_object_map_lock);
*m_object_map = m_on_disk_object_map;
}
*ret_val = -EFBIG;
}
+ RWLock::WLocker object_map_locker(*m_object_map_lock);
m_object_map->clear();
return m_on_finish;
}
#include "common/bit_vector.hpp"
class Context;
+class RWLock;
namespace librbd {
template <typename ImageCtxT = ImageCtx>
class RefreshRequest {
public:
- static RefreshRequest *create(ImageCtxT &image_ctx,
+ static RefreshRequest *create(ImageCtxT &image_ctx, RWLock* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, Context *on_finish) {
- return new RefreshRequest(image_ctx, object_map, snap_id, on_finish);
+ return new RefreshRequest(image_ctx, object_map_lock, object_map, snap_id,
+ on_finish);
}
- RefreshRequest(ImageCtxT &image_ctx, ceph::BitVector<2> *object_map,
- uint64_t snap_id, Context *on_finish);
+ RefreshRequest(ImageCtxT &image_ctx, RWLock* object_map_lock,
+ ceph::BitVector<2> *object_map, uint64_t snap_id,
+ Context *on_finish);
void send();
*/
ImageCtxT &m_image_ctx;
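+ // guards *m_object_map while the refreshed map is applied or cleared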
+ RWLock* m_object_map_lock;
ceph::BitVector<2> *m_object_map;
uint64_t m_snap_id;
Context *m_on_finish;
void ResizeRequest::send() {
CephContext *cct = m_image_ctx.cct;
- RWLock::WLocker l(m_image_ctx.object_map_lock);
+ RWLock::WLocker l(*m_object_map_lock);
m_num_objs = Striper::get_num_objects(m_image_ctx.layout, m_new_size);
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
}
void ResizeRequest::finish_request() {
- RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
-
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " resizing in-memory object map: "
<< m_num_objs << dendl;
+ RWLock::WLocker object_map_locker(*m_object_map_lock);
resize(m_object_map, m_num_objs, m_default_object_state);
}
#include "common/bit_vector.hpp"
class Context;
+class RWLock;
namespace librbd {
class ResizeRequest : public Request {
public:
- ResizeRequest(ImageCtx &image_ctx, ceph::BitVector<2> *object_map,
- uint64_t snap_id, uint64_t new_size,
- uint8_t default_object_state, Context *on_finish)
- : Request(image_ctx, snap_id, on_finish), m_object_map(object_map),
+ ResizeRequest(ImageCtx &image_ctx, RWLock *object_map_lock,
+ ceph::BitVector<2> *object_map, uint64_t snap_id,
+ uint64_t new_size, uint8_t default_object_state,
+ Context *on_finish)
+ : Request(image_ctx, snap_id, on_finish),
+ m_object_map_lock(object_map_lock), m_object_map(object_map),
m_num_objs(0), m_new_size(new_size),
m_default_object_state(default_object_state)
{
void finish_request() override;
private:
+ RWLock* m_object_map_lock;
ceph::BitVector<2> *m_object_map;
uint64_t m_num_objs;
uint64_t m_new_size;
}
void SnapshotCreateRequest::update_object_map() {
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
- RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+ RWLock::WLocker object_map_locker(*m_object_map_lock);
auto it = m_object_map.begin();
auto end_it = m_object_map.end();
#include "librbd/object_map/Request.h"
class Context;
+class RWLock;
namespace librbd {
STATE_ADD_SNAPSHOT
};
- SnapshotCreateRequest(ImageCtx &image_ctx, ceph::BitVector<2> *object_map,
- uint64_t snap_id, Context *on_finish)
+ SnapshotCreateRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+ ceph::BitVector<2> *object_map, uint64_t snap_id,
+ Context *on_finish)
: Request(image_ctx, snap_id, on_finish),
- m_object_map(*object_map), m_ret_val(0) {
+ m_object_map_lock(object_map_lock), m_object_map(*object_map),
+ m_ret_val(0) {
}
void send() override;
bool should_complete(int r) override;
private:
- State m_state = STATE_READ_MAP;
+ RWLock* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
+ State m_state = STATE_READ_MAP;
bufferlist m_read_bl;
int m_ret_val;
void SnapshotRemoveRequest::update_object_map() {
assert(m_image_ctx.image_lock.is_locked());
- RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+ RWLock::WLocker object_map_locker(*m_object_map_lock);
if (m_next_snap_id == m_image_ctx.snap_id && m_next_snap_id == CEPH_NOSNAP) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << dendl;
#include "common/bit_vector.hpp"
#include "librbd/AsyncRequest.h"
+class RWLock;
+
namespace librbd {
namespace object_map {
* otherwise, the state machine proceeds to remove the object map.
*/
- SnapshotRemoveRequest(ImageCtx &image_ctx, ceph::BitVector<2> *object_map,
- uint64_t snap_id, Context *on_finish)
- : AsyncRequest(image_ctx, on_finish), m_object_map(*object_map),
+ SnapshotRemoveRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+ ceph::BitVector<2> *object_map, uint64_t snap_id,
+ Context *on_finish)
+ : AsyncRequest(image_ctx, on_finish),
+ m_object_map_lock(object_map_lock), m_object_map(*object_map),
m_snap_id(snap_id), m_next_snap_id(CEPH_NOSNAP) {
}
}
private:
+ RWLock* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
uint64_t m_snap_id;
uint64_t m_next_snap_id;
template <typename I>
void UpdateRequest<I>::update_object_map() {
ceph_assert(m_image_ctx.image_lock.is_locked());
- ceph_assert(m_image_ctx.object_map_lock.is_locked());
+ ceph_assert(m_object_map_lock->is_locked());
CephContext *cct = m_image_ctx.cct;
// break very large requests into manageable batches
{
RWLock::RLocker image_locker(m_image_ctx.image_lock);
- RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+ RWLock::WLocker object_map_locker(*m_object_map_lock);
update_in_memory_object_map();
if (m_update_end_object_no < m_end_object_no) {
template <typename I>
void UpdateRequest<I>::update_in_memory_object_map() {
ceph_assert(m_image_ctx.image_lock.is_locked());
- ceph_assert(m_image_ctx.object_map_lock.is_locked());
+ ceph_assert(m_object_map_lock->is_locked());
// rebuilding the object map might update on-disk only
if (m_snap_id == m_image_ctx.snap_id) {
#include <boost/optional.hpp>
class Context;
+class RWLock;
namespace librbd {
class UpdateRequest : public Request {
public:
static UpdateRequest *create(ImageCtx &image_ctx,
+ RWLock* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> &current_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish) {
- return new UpdateRequest(image_ctx, object_map, snap_id, start_object_no,
- end_object_no, new_state, current_state,
- parent_trace, ignore_enoent, on_finish);
+ return new UpdateRequest(image_ctx, object_map_lock, object_map, snap_id,
+ start_object_no, end_object_no, new_state,
+ current_state, parent_trace, ignore_enoent,
+ on_finish);
}
- UpdateRequest(ImageCtx &image_ctx, ceph::BitVector<2> *object_map,
- uint64_t snap_id, uint64_t start_object_no,
- uint64_t end_object_no, uint8_t new_state,
+ UpdateRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+ ceph::BitVector<2> *object_map, uint64_t snap_id,
+ uint64_t start_object_no, uint64_t end_object_no,
+ uint8_t new_state,
const boost::optional<uint8_t> &current_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
Context *on_finish)
- : Request(image_ctx, snap_id, on_finish), m_object_map(*object_map),
+ : Request(image_ctx, snap_id, on_finish),
+ m_object_map_lock(object_map_lock), m_object_map(*object_map),
m_start_object_no(start_object_no), m_end_object_no(end_object_no),
m_update_start_object_no(start_object_no), m_new_state(new_state),
m_current_state(current_state),
* @endverbatim
*/
+ RWLock* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
uint64_t m_start_object_no;
uint64_t m_end_object_no;
CephContext *cct = image_ctx.cct;
uint64_t snap_id = image_ctx.snap_id;
- uint8_t state = (*image_ctx.object_map)[object_no];
- if (state == OBJECT_EXISTS && new_state == OBJECT_NONEXISTENT &&
+ current_state = (*image_ctx.object_map)[object_no];
+ if (current_state == OBJECT_EXISTS && new_state == OBJECT_NONEXISTENT &&
snap_id == CEPH_NOSNAP) {
// might be writing object to OSD concurrently
- new_state = state;
+ new_state = current_state;
}
- if (new_state != state) {
+ if (new_state != current_state) {
ldout(cct, 15) << image_ctx.get_object_name(object_no)
- << " rebuild updating object map "
- << static_cast<uint32_t>(state) << "->"
- << static_cast<uint32_t>(new_state) << dendl;
- (*image_ctx.object_map)[object_no] = new_state;
+ << " rebuild updating object map "
+ << static_cast<uint32_t>(current_state) << "->"
+ << static_cast<uint32_t>(new_state) << dendl;
+ image_ctx.object_map->set_state(object_no, new_state, current_state);
}
return false;
}
} else {
expect.WillOnce(DoAll(WithArg<7>(Invoke([&mock_image_ctx, snap_id, state](Context *ctx) {
ceph_assert(mock_image_ctx.image_ctx->image_lock.is_locked());
- ceph_assert(mock_image_ctx.image_ctx->object_map_lock.is_wlocked());
mock_image_ctx.image_ctx->object_map->aio_update<Context>(
snap_id, 0, 1, state, boost::none, {}, false, ctx);
})),
init_object_map(mock_image_ctx, &on_disk_object_map);
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
MockLockRequest mock_lock_request;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- CEPH_NOSNAP, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, CEPH_NOSNAP, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, CEPH_NOSNAP,
init_object_map(mock_image_ctx, &on_disk_object_map);
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
init_object_map(mock_image_ctx, &on_disk_object_map);
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
init_object_map(mock_image_ctx, &on_disk_object_map);
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
init_object_map(mock_image_ctx, &on_disk_object_map);
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
ceph::BitVector<2> small_object_map;
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
ceph::BitVector<2> small_object_map;
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
large_object_map.resize(on_disk_object_map.size() * 2);
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
ceph::BitVector<2> small_object_map;
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
init_object_map(mock_image_ctx, &on_disk_object_map);
C_SaferCond ctx;
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, &object_map,
- TEST_SNAP_ID, &ctx);
+ MockRefreshRequest *req = new MockRefreshRequest(
+ mock_image_ctx, &object_map_lock, &object_map, TEST_SNAP_ID, &ctx);
InSequence seq;
expect_get_image_size(mock_image_ctx, TEST_SNAP_ID,
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new ResizeRequest(
- *ictx, &object_map, CEPH_NOSNAP, object_map.size(), OBJECT_EXISTS,
- &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, object_map.size(),
+ OBJECT_EXISTS, &cond_ctx);
req->send();
ASSERT_EQ(0, cond_ctx.wait());
expect_resize(ictx, CEPH_NOSNAP, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new ResizeRequest(
- *ictx, &object_map, CEPH_NOSNAP, object_map.size(), OBJECT_EXISTS,
- &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, object_map.size(),
+ OBJECT_EXISTS, &cond_ctx);
req->send();
ASSERT_EQ(0, cond_ctx.wait());
uint64_t snap_id = ictx->snap_id;
expect_resize(ictx, snap_id, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new ResizeRequest(
- *ictx, &object_map, snap_id, object_map.size(), OBJECT_EXISTS,
- &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, object_map.size(),
+ OBJECT_EXISTS, &cond_ctx);
req->send();
ASSERT_EQ(0, cond_ctx.wait());
expect_resize(ictx, CEPH_NOSNAP, -EINVAL);
expect_invalidate(ictx);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new ResizeRequest(
- *ictx, &object_map, CEPH_NOSNAP, object_map.size(), OBJECT_EXISTS,
- &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, object_map.size(),
+ OBJECT_EXISTS, &cond_ctx);
req->send();
ASSERT_EQ(0, cond_ctx.wait());
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
uint64_t snap_id = 1;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotCreateRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
request->send();
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
uint64_t snap_id = 1;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotCreateRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
request->send();
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
uint64_t snap_id = 1;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotCreateRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
request->send();
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
uint64_t snap_id = 1;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotCreateRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
request->send();
ASSERT_EQ(0, open_image(m_image_name, &ictx));
ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1024);
for (uint64_t i = 0; i < object_map.size(); ++i) {
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotCreateRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
request->send();
}
expect_remove_map(ictx, snap_id, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
expect_load_map(ictx, snap_id, -ENOENT);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
expect_invalidate(ictx);
expect_remove_map(ictx, snap_id, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
expect_remove_snapshot(ictx, -ENOENT);
expect_remove_map(ictx, snap_id, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
expect_invalidate(ictx);
expect_remove_map(ictx, snap_id, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
}
expect_remove_map(ictx, snap_id, -ENOENT);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
}
expect_remove_map(ictx, snap_id, -EINVAL);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
C_SaferCond cond_ctx;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
librbd::NoOpProgressContext prog_ctx;
uint64_t size = 4294967296; // 4GB = 1024 * 4MB
ASSERT_EQ(0, resize(ictx, size));
-
- // update image objectmap for snap inherit
+
+ // update image objectmap for snap inherit
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1024);
for (uint64_t i = 512; i < object_map.size(); ++i) {
object_map[i] = i % 2 == 0 ? OBJECT_EXISTS : OBJECT_NONEXISTENT;
}
-
+
C_SaferCond cond_ctx1;
{
librbd::ObjectMap om(*ictx, ictx->snap_id);
C_SaferCond cond_ctx2;
uint64_t snap_id = ictx->snap_info.rbegin()->first;
AsyncRequest<> *request = new SnapshotRemoveRequest(
- *ictx, &object_map, snap_id, &cond_ctx2);
+ *ictx, &object_map_lock, &object_map, snap_id, &cond_ctx2);
{
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::WLocker image_locker(ictx->image_lock);
ASSERT_EQ(0, ictx->operations->resize(4 << ictx->order, true, no_progress));
ASSERT_EQ(0, acquire_exclusive_lock(*ictx));
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(4);
for (uint64_t i = 0; i < object_map.size(); ++i) {
C_SaferCond cond_ctx;
AsyncRequest<> *req = new UpdateRequest<>(
- *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
- OBJECT_EXISTS, {}, false, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(),
+ OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
+ RWLock::WLocker object_map_locker(object_map_lock);
req->send();
}
ASSERT_EQ(0, cond_ctx.wait());
expect_update(ictx, CEPH_NOSNAP, 0, 1, OBJECT_NONEXISTENT, OBJECT_EXISTS, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new UpdateRequest<>(
- *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
- OBJECT_EXISTS, {}, false, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(),
+ OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
+ RWLock::WLocker object_map_locker(object_map_lock);
req->send();
}
ASSERT_EQ(0, cond_ctx.wait());
uint64_t snap_id = ictx->snap_id;
expect_update(ictx, snap_id, 0, 1, OBJECT_NONEXISTENT, OBJECT_EXISTS, 0);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new UpdateRequest<>(
- *ictx, &object_map, snap_id, 0, object_map.size(), OBJECT_NONEXISTENT,
- OBJECT_EXISTS, {}, false, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, 0, object_map.size(),
+ OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
+ RWLock::WLocker object_map_locker(object_map_lock);
req->send();
}
ASSERT_EQ(0, cond_ctx.wait());
-EINVAL);
expect_invalidate(ictx);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new UpdateRequest<>(
- *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
- OBJECT_EXISTS, {}, false, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(),
+ OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
+ RWLock::WLocker object_map_locker(object_map_lock);
req->send();
}
ASSERT_EQ(0, cond_ctx.wait());
boost::optional<uint8_t>(), 0);
expect_unlock_exclusive_lock(*ictx);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new UpdateRequest<>(
- *ictx, &object_map, snap_id, 0, object_map.size(), OBJECT_EXISTS_CLEAN,
- boost::optional<uint8_t>(), {}, false, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, snap_id, 0, object_map.size(),
+ OBJECT_EXISTS_CLEAN, boost::optional<uint8_t>(), {}, false, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
+ RWLock::WLocker object_map_locker(object_map_lock);
req->send();
}
ASSERT_EQ(0, cond_ctx.wait());
OBJECT_EXISTS, 0);
expect_unlock_exclusive_lock(*ictx);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(712312);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new UpdateRequest<>(
- *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
- OBJECT_EXISTS, {}, false, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(),
+ OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, false, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
+ RWLock::WLocker object_map_locker(object_map_lock);
req->send();
}
ASSERT_EQ(0, cond_ctx.wait());
expect_update(ictx, CEPH_NOSNAP, 0, 1, OBJECT_NONEXISTENT, OBJECT_EXISTS,
-ENOENT);
+ RWLock object_map_lock("lock");
ceph::BitVector<2> object_map;
object_map.resize(1);
C_SaferCond cond_ctx;
AsyncRequest<> *req = new UpdateRequest<>(
- *ictx, &object_map, CEPH_NOSNAP, 0, object_map.size(), OBJECT_NONEXISTENT,
- OBJECT_EXISTS, {}, true, &cond_ctx);
+ *ictx, &object_map_lock, &object_map, CEPH_NOSNAP, 0, object_map.size(),
+ OBJECT_NONEXISTENT, OBJECT_EXISTS, {}, true, &cond_ctx);
{
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
+ RWLock::WLocker object_map_locker(object_map_lock);
req->send();
}
ASSERT_EQ(0, cond_ctx.wait());
RWLock::RLocker owner_locker(ictx->owner_lock);
RWLock::RLocker image_locker(ictx->image_lock);
- RWLock::WLocker object_map_locker(ictx->object_map_lock);
ASSERT_TRUE(ictx->object_map != nullptr);
if (!ictx->object_map->aio_update<
C_SaferCond ctx;
object_map.open(&ctx);
ASSERT_EQ(0, ctx.wait());
-
- RWLock::WLocker object_map_locker(ictx2->object_map_lock);
ASSERT_EQ(state, object_map[0]);
}
}
Context *on_finish = nullptr;
ceph::BitVector<2u> *object_map = nullptr;
static RefreshRequest *s_instance;
- static RefreshRequest *create(MockTestImageCtx &image_ctx,
+ static RefreshRequest *create(MockTestImageCtx &image_ctx, RWLock*,
ceph::BitVector<2u> *object_map,
uint64_t snap_id, Context *on_finish) {
ceph_assert(s_instance != nullptr);
struct UpdateRequest<MockTestImageCtx> {
Context *on_finish = nullptr;
static UpdateRequest *s_instance;
- static UpdateRequest *create(MockTestImageCtx &image_ctx,
+ static UpdateRequest *create(MockTestImageCtx &image_ctx, RWLock*,
ceph::BitVector<2u> *object_map,
uint64_t snap_id,
uint64_t start_object_no, uint64_t end_object_no,
C_SaferCond update_ctx2;
{
RWLock::RLocker image_locker(mock_image_ctx.image_lock);
- RWLock::WLocker object_map_locker(mock_image_ctx.object_map_lock);
mock_object_map.aio_update(CEPH_NOSNAP, 0, 1, {}, {}, false, &update_ctx1);
mock_object_map.aio_update(CEPH_NOSNAP, 1, 1, {}, {}, false, &update_ctx2);
}
C_SaferCond update_ctx4;
{
RWLock::RLocker image_locker(mock_image_ctx.image_lock);
- RWLock::WLocker object_map_locker(mock_image_ctx.object_map_lock);
mock_object_map.aio_update(CEPH_NOSNAP, 1, 4, 1, {}, {}, false,
&update_ctx1);
mock_object_map.aio_update(CEPH_NOSNAP, 1, 3, 1, {}, {}, false,