const char *this_major = udev_device_get_property_value(dev, "MAJOR");
const char *this_minor = udev_device_get_property_value(dev, "MINOR");
- assert(!minor ^ have_minor_attr());
+ ceph_assert(!minor ^ have_minor_attr());
if (strcmp(this_major, major) == 0 &&
(!minor || strcmp(this_minor, minor) == 0)) {
string name = get_kernel_rbd_name(udev_device_get_sysname(bus_dev));
- assert(strcmp(udev_device_get_devnode(dev), name.c_str()) == 0);
+ ceph_assert(strcmp(udev_device_get_devnode(dev), name.c_str()) == 0);
*pname = name;
udev_device_unref(dev);
}
/* make sure there is only one match */
- assert(!udev_list_entry_get_next(l));
+ ceph_assert(!udev_list_entry_get_next(l));
dev = udev_device_new_from_syspath(udev, udev_list_entry_get_name(l));
if (!dev) {
template <typename T>
void AsyncObjectThrottle<T>::start_ops(uint64_t max_concurrent) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
bool complete;
{
Mutex::Locker l(m_lock);
AsyncRequest<T>::AsyncRequest(T &image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish), m_canceled(false),
m_xlist_item(this) {
- assert(m_on_finish != NULL);
+ ceph_assert(m_on_finish != NULL);
start_request();
}
decltype(m_image_ctx.async_requests_waiters) waiters;
{
Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock);
- assert(m_xlist_item.remove_myself());
+ ceph_assert(m_xlist_item.remove_myself());
if (m_image_ctx.async_requests.empty()) {
waiters = std::move(m_image_ctx.async_requests_waiters);
void release(BlockGuardCell *cell, BlockOperations *block_operations) {
Mutex::Locker locker(m_lock);
- assert(cell != nullptr);
+ ceph_assert(cell != nullptr);
auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
*cell);
ldout(m_cct, 20) << "block_start="
template <typename I>
DeepCopyRequest<I>::~DeepCopyRequest() {
- assert(m_snapshot_copy_request == nullptr);
- assert(m_image_copy_request == nullptr);
+ ceph_assert(m_snapshot_copy_request == nullptr);
+ ceph_assert(m_image_copy_request == nullptr);
}
template <typename I>
return;
}
- assert(m_dst_image_ctx->object_map != nullptr);
+ ceph_assert(m_dst_image_ctx->object_map != nullptr);
ldout(m_cct, 20) << dendl;
handle_copy_object_map(r);
finish_op_ctx->complete(0);
});
- assert(m_snap_seqs->count(m_snap_id_end) > 0);
+ ceph_assert(m_snap_seqs->count(m_snap_id_end) > 0);
librados::snap_t copy_snap_id = (*m_snap_seqs)[m_snap_id_end];
m_dst_image_ctx->object_map->rollback(copy_snap_id, ctx);
m_dst_image_ctx->snap_lock.put_read();
void DeepCopyRequest<I>::handle_copy_object_map(int r) {
ldout(m_cct, 20) << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
send_refresh_object_map();
}
void DeepCopyRequest<I>::handle_refresh_object_map(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
{
RWLock::WLocker snap_locker(m_dst_image_ctx->snap_lock);
std::swap(m_dst_image_ctx->object_map, m_object_map);
void ExclusiveLock<I>::unblock_requests() {
Mutex::Locker locker(ML<I>::m_lock);
- assert(m_request_blocked_count > 0);
+ ceph_assert(m_request_blocked_count > 0);
m_request_blocked_count--;
if (m_request_blocked_count == 0) {
m_request_blocked_ret_val = 0;
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
ldout(m_image_ctx.cct, 10) << dendl;
{
}
ldout(m_image_ctx.cct, 10) << dendl;
- assert(ML<I>::is_action_acquire_lock());
+ ceph_assert(ML<I>::is_action_acquire_lock());
m_acquire_lock_peer_ret_val = r;
ML<I>::execute_next_action();
template <typename I>
Context *ExclusiveLock<I>::start_op() {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
Mutex::Locker locker(ML<I>::m_lock);
if (!accept_ops(ML<I>::m_lock)) {
return;
} else if (r < 0) {
ML<I>::m_lock.Lock();
- assert(ML<I>::is_state_acquiring());
+ ceph_assert(ML<I>::is_state_acquiring());
// PostAcquire state machine will not run, so we need complete prepare
m_image_ctx.state->handle_prepare_lock_complete();
Mutex::Locker locker(ML<I>::m_lock);
- assert(r == 0);
+ ceph_assert(r == 0);
// lock is owned at this point
ML<I>::set_state_post_acquiring();
Context *on_finish = nullptr;
{
Mutex::Locker locker(ML<I>::m_lock);
- assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());
+ ceph_assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());
- assert (m_pre_post_callback != nullptr);
+ ceph_assert(m_pre_post_callback != nullptr);
std::swap(m_pre_post_callback, on_finish);
if (!shutting_down) {
{
Mutex::Locker locker(ML<I>::m_lock);
- assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
+ ceph_assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
}
if (r >= 0) {
}
ImageCtx::~ImageCtx() {
- assert(image_watcher == NULL);
- assert(exclusive_lock == NULL);
- assert(object_map == NULL);
- assert(journal == NULL);
- assert(asok_hook == NULL);
+ ceph_assert(image_watcher == NULL);
+ ceph_assert(exclusive_lock == NULL);
+ ceph_assert(object_map == NULL);
+ ceph_assert(journal == NULL);
+ ceph_assert(asok_hook == NULL);
if (perfcounter) {
perf_stop();
}
void ImageCtx::init() {
- assert(!header_oid.empty());
- assert(old_format || !id.empty());
+ ceph_assert(!header_oid.empty());
+ ceph_assert(old_format || !id.empty());
asok_hook = new LibrbdAdminSocketHook(this);
trace_endpoint.copy_name(pname);
perf_start(pname);
- assert(image_watcher == NULL);
+ ceph_assert(image_watcher == NULL);
image_watcher = new ImageWatcher<>(*this);
}
}
void ImageCtx::perf_stop() {
- assert(perfcounter);
+ ceph_assert(perfcounter);
cct->get_perfcounters_collection()->remove(perfcounter);
delete perfcounter;
}
}
int ImageCtx::snap_set(uint64_t in_snap_id) {
- assert(snap_lock.is_wlocked());
+ ceph_assert(snap_lock.is_wlocked());
auto it = snap_info.find(in_snap_id);
if (in_snap_id != CEPH_NOSNAP && it != snap_info.end()) {
snap_id = in_snap_id;
void ImageCtx::snap_unset()
{
- assert(snap_lock.is_wlocked());
+ ceph_assert(snap_lock.is_wlocked());
snap_id = CEPH_NOSNAP;
snap_namespace = {};
snap_name = "";
snap_t ImageCtx::get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace,
const string& in_snap_name) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
auto it = snap_ids.find({in_snap_namespace, in_snap_name});
if (it != snap_ids.end()) {
return it->second;
const SnapInfo* ImageCtx::get_snap_info(snap_t in_snap_id) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
map<snap_t, SnapInfo>::const_iterator it =
snap_info.find(in_snap_id);
if (it != snap_info.end())
int ImageCtx::get_snap_name(snap_t in_snap_id,
string *out_snap_name) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_name = info->name;
int ImageCtx::get_snap_namespace(snap_t in_snap_id,
cls::rbd::SnapshotNamespace *out_snap_namespace) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_namespace = info->snap_namespace;
uint64_t ImageCtx::get_current_size() const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
return size;
}
void ImageCtx::set_access_timestamp(utime_t at)
{
- assert(timestamp_lock.is_wlocked());
+ ceph_assert(timestamp_lock.is_wlocked());
access_timestamp = at;
}
void ImageCtx::set_modify_timestamp(utime_t mt)
{
- assert(timestamp_lock.is_locked());
+ ceph_assert(timestamp_lock.is_locked());
modify_timestamp = mt;
}
int ImageCtx::is_snap_protected(snap_t in_snap_id,
bool *is_protected) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_protected =
int ImageCtx::is_snap_unprotected(snap_t in_snap_id,
bool *is_unprotected) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_unprotected =
const ParentInfo &parent, uint8_t protection_status,
uint64_t flags, utime_t timestamp)
{
- assert(snap_lock.is_wlocked());
+ ceph_assert(snap_lock.is_wlocked());
snaps.push_back(id);
SnapInfo info(in_snap_name, in_snap_namespace,
in_size, parent, protection_status, flags, timestamp);
string in_snap_name,
snap_t id)
{
- assert(snap_lock.is_wlocked());
+ ceph_assert(snap_lock.is_wlocked());
snaps.erase(std::remove(snaps.begin(), snaps.end(), id), snaps.end());
snap_info.erase(id);
snap_ids.erase({in_snap_namespace, in_snap_name});
uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
if (in_snap_id == CEPH_NOSNAP) {
if (!resize_reqs.empty() &&
resize_reqs.front()->shrinking()) {
}
uint64_t ImageCtx::get_object_count(snap_t in_snap_id) const {
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
uint64_t image_size = get_image_size(in_snap_id);
return Striper::get_num_objects(layout, image_size);
}
bool ImageCtx::test_features(uint64_t in_features,
const RWLock &in_snap_lock) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
return ((features & in_features) == in_features);
}
bool ImageCtx::test_op_features(uint64_t in_op_features,
const RWLock &in_snap_lock) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
return ((op_features & in_op_features) == in_op_features);
}
int ImageCtx::get_flags(librados::snap_t _snap_id, uint64_t *_flags) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
if (_snap_id == CEPH_NOSNAP) {
*_flags = flags;
return 0;
int ImageCtx::test_flags(uint64_t flags, const RWLock &in_snap_lock,
bool *flags_set) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
uint64_t snap_flags;
int r = get_flags(snap_id, &snap_flags);
if (r < 0) {
int ImageCtx::update_flags(snap_t in_snap_id, uint64_t flag, bool enabled)
{
- assert(snap_lock.is_wlocked());
+ ceph_assert(snap_lock.is_wlocked());
uint64_t *_flags;
if (in_snap_id == CEPH_NOSNAP) {
_flags = &flags;
const ParentInfo* ImageCtx::get_parent_info(snap_t in_snap_id) const
{
- assert(snap_lock.is_locked());
- assert(parent_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
+ ceph_assert(parent_lock.is_locked());
if (in_snap_id == CEPH_NOSNAP)
return &parent_md;
const SnapInfo *info = get_snap_info(in_snap_id);
int ImageCtx::get_parent_overlap(snap_t in_snap_id, uint64_t *overlap) const
{
- assert(snap_lock.is_locked());
+ ceph_assert(snap_lock.is_locked());
const ParentInfo *info = get_parent_info(in_snap_id);
if (info) {
*overlap = info->overlap;
}
void ImageCtx::register_watch(Context *on_finish) {
- assert(image_watcher != NULL);
+ ceph_assert(image_watcher != NULL);
image_watcher->register_watch(on_finish);
}
}
exclusive_lock::Policy *ImageCtx::get_exclusive_lock_policy() const {
- assert(owner_lock.is_locked());
- assert(exclusive_lock_policy != nullptr);
+ ceph_assert(owner_lock.is_locked());
+ ceph_assert(exclusive_lock_policy != nullptr);
return exclusive_lock_policy;
}
void ImageCtx::set_exclusive_lock_policy(exclusive_lock::Policy *policy) {
- assert(owner_lock.is_wlocked());
- assert(policy != nullptr);
+ ceph_assert(owner_lock.is_wlocked());
+ ceph_assert(policy != nullptr);
delete exclusive_lock_policy;
exclusive_lock_policy = policy;
}
journal::Policy *ImageCtx::get_journal_policy() const {
- assert(snap_lock.is_locked());
- assert(journal_policy != nullptr);
+ ceph_assert(snap_lock.is_locked());
+ ceph_assert(journal_policy != nullptr);
return journal_policy;
}
void ImageCtx::set_journal_policy(journal::Policy *policy) {
- assert(snap_lock.is_wlocked());
- assert(policy != nullptr);
+ ceph_assert(snap_lock.is_wlocked());
+ ceph_assert(policy != nullptr);
delete journal_policy;
journal_policy = policy;
}
}
~ImageUpdateWatchers() {
- assert(m_watchers.empty());
- assert(m_in_flight.empty());
- assert(m_pending_unregister.empty());
- assert(m_on_shut_down_finish == nullptr);
+ ceph_assert(m_watchers.empty());
+ ceph_assert(m_in_flight.empty());
+ ceph_assert(m_pending_unregister.empty());
+ ceph_assert(m_on_shut_down_finish == nullptr);
destroy_work_queue();
}
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
{
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down_finish == nullptr);
+ ceph_assert(m_on_shut_down_finish == nullptr);
m_watchers.clear();
if (!m_in_flight.empty()) {
m_on_shut_down_finish = on_finish;
ldout(m_cct, 20) << __func__ << ": watcher=" << watcher << dendl;
Mutex::Locker locker(m_lock);
- assert(m_on_shut_down_finish == nullptr);
+ ceph_assert(m_on_shut_down_finish == nullptr);
create_work_queue();
r = -ENOENT;
} else {
if (m_in_flight.find(handle) != m_in_flight.end()) {
- assert(m_pending_unregister.find(handle) == m_pending_unregister.end());
+ ceph_assert(m_pending_unregister.find(handle) == m_pending_unregister.end());
m_pending_unregister[handle] = on_finish;
on_finish = nullptr;
}
}
void send_notify(uint64_t handle, UpdateWatchCtx *watcher) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << ", watcher=" << watcher << dendl;
Mutex::Locker locker(m_lock);
auto in_flight_it = m_in_flight.find(handle);
- assert(in_flight_it != m_in_flight.end());
+ ceph_assert(in_flight_it != m_in_flight.end());
m_in_flight.erase(in_flight_it);
// If there is no more in flight notifications for this watcher
}
if (m_in_flight.empty()) {
- assert(m_pending_unregister.empty());
+ ceph_assert(m_pending_unregister.empty());
if (m_on_shut_down_finish != nullptr) {
std::swap(m_on_shut_down_finish, on_shut_down_finish);
}
template <typename I>
ImageState<I>::~ImageState() {
- assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
+ ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
delete m_update_watchers;
}
ldout(cct, 20) << __func__ << dendl;
m_lock.Lock();
- assert(m_state == STATE_UNINITIALIZED);
+ ceph_assert(m_state == STATE_UNINITIALIZED);
m_open_flags = flags;
Action action(ACTION_TYPE_OPEN);
ldout(cct, 20) << __func__ << dendl;
m_lock.Lock();
- assert(!is_closed());
+ ceph_assert(!is_closed());
Action action(ACTION_TYPE_CLOSE);
action.refresh_seq = m_refresh_seq;
template <typename I>
const typename ImageState<I>::Action *
ImageState<I>::find_pending_refresh() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
auto it = std::find_if(m_actions_contexts.rbegin(),
m_actions_contexts.rend(),
template <typename I>
bool ImageState<I>::is_closed() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return ((m_state == STATE_CLOSED) ||
(!m_actions_contexts.empty() &&
template <typename I>
void ImageState<I>::append_context(const Action &action, Context *context) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
ActionContexts *action_contexts = nullptr;
for (auto &action_ctxs : m_actions_contexts) {
template <typename I>
void ImageState<I>::execute_next_action_unlock() {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_actions_contexts.empty());
switch (m_actions_contexts.front().first.action_type) {
case ACTION_TYPE_OPEN:
send_open_unlock();
template <typename I>
void ImageState<I>::execute_action_unlock(const Action &action,
Context *on_finish) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
append_context(action, on_finish);
if (!is_transition_state()) {
template <typename I>
void ImageState<I>::complete_action_unlock(State next_state, int r) {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
template <typename I>
void ImageState<I>::send_open_unlock() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
template <typename I>
void ImageState<I>::send_close_unlock() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
template <typename I>
void ImageState<I>::send_refresh_unlock() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_REFRESHING;
- assert(!m_actions_contexts.empty());
+ ceph_assert(!m_actions_contexts.empty());
auto &action_context = m_actions_contexts.front().first;
- assert(action_context.action_type == ACTION_TYPE_REFRESH);
+ ceph_assert(action_context.action_type == ACTION_TYPE_REFRESH);
Context *ctx = create_async_context_callback(
*m_image_ctx, create_context_callback<
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
m_lock.Lock();
- assert(!m_actions_contexts.empty());
+ ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
- assert(action_contexts.first.action_type == ACTION_TYPE_REFRESH);
- assert(m_last_refresh <= action_contexts.first.refresh_seq);
+ ceph_assert(action_contexts.first.action_type == ACTION_TYPE_REFRESH);
+ ceph_assert(m_last_refresh <= action_contexts.first.refresh_seq);
if (r == -ERESTART) {
ldout(cct, 5) << "incomplete refresh: not updating sequence" << dendl;
template <typename I>
void ImageState<I>::send_set_snap_unlock() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_state = STATE_SETTING_SNAP;
- assert(!m_actions_contexts.empty());
+ ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
- assert(action_contexts.first.action_type == ACTION_TYPE_SET_SNAP);
+ ceph_assert(action_contexts.first.action_type == ACTION_TYPE_SET_SNAP);
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": "
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_state = STATE_PREPARING_LOCK;
- assert(!m_actions_contexts.empty());
+ ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
- assert(action_contexts.first.action_type == ACTION_TYPE_LOCK);
+ ceph_assert(action_contexts.first.action_type == ACTION_TYPE_LOCK);
Context *on_ready = action_contexts.first.on_ready;
m_lock.Unlock();
void ImageWatcher<I>::notify_flatten(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
bool allow_shrink,
ProgressContext &prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
void ImageWatcher<I>::notify_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
notify_lock_owner(SnapCreatePayload(snap_namespace, snap_name), on_finish);
void ImageWatcher<I>::notify_snap_rename(const snapid_t &src_snap_id,
const std::string &dst_snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
notify_lock_owner(SnapRenamePayload(src_snap_id, dst_snap_name), on_finish);
void ImageWatcher<I>::notify_snap_remove(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
notify_lock_owner(SnapRemovePayload(snap_namespace, snap_name), on_finish);
void ImageWatcher<I>::notify_snap_protect(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
notify_lock_owner(SnapProtectPayload(snap_namespace, snap_name), on_finish);
void ImageWatcher<I>::notify_snap_unprotect(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
notify_lock_owner(SnapUnprotectPayload(snap_namespace, snap_name), on_finish);
void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
template <typename I>
void ImageWatcher<I>::notify_rename(const std::string &image_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
notify_lock_owner(RenamePayload(image_name), on_finish);
template <typename I>
void ImageWatcher<I>::notify_update_features(uint64_t features, bool enabled,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
notify_lock_owner(UpdateFeaturesPayload(features, enabled), on_finish);
void ImageWatcher<I>::notify_migrate(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
template <typename I>
void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
- assert(m_owner_client_id_lock.is_locked());
+ ceph_assert(m_owner_client_id_lock.is_locked());
m_owner_client_id = client_id;
ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
<< m_owner_client_id << dendl;
template <typename I>
void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
if (m_image_ctx.exclusive_lock == nullptr) {
// exclusive lock dynamically disabled via image refresh
return;
}
- assert(m_image_ctx.exclusive_lock &&
+ ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
RWLock::RLocker watch_locker(this->m_watch_lock);
template <typename I>
void ImageWatcher<I>::notify_lock_owner(const Payload& payload,
Context *on_finish) {
- assert(on_finish != nullptr);
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(on_finish != nullptr);
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
bufferlist bl;
encode(NotifyMessage(payload), bl);
const Payload& payload,
ProgressContext& prog_ctx,
Context *on_finish) {
- assert(on_finish != nullptr);
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(on_finish != nullptr);
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
<< dendl;
bool accept_request = m_image_ctx.exclusive_lock->accept_requests(&r);
if (accept_request) {
- assert(r == 0);
+ ceph_assert(r == 0);
Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
if (!m_owner_client_id.is_valid()) {
return true;
delete m_work_queue;
}
- assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
- assert(m_journaler == NULL);
- assert(m_journal_replay == NULL);
- assert(m_wait_for_state_contexts.empty());
+ ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
+ ceph_assert(m_journaler == NULL);
+ ceph_assert(m_journal_replay == NULL);
+ ceph_assert(m_wait_for_state_contexts.empty());
}
template <typename I>
bool Journal<I>::is_journal_supported(I &image_ctx) {
- assert(image_ctx.snap_lock.is_locked());
+ ceph_assert(image_ctx.snap_lock.is_locked());
return ((image_ctx.features & RBD_FEATURE_JOURNALING) &&
!image_ctx.read_only && image_ctx.snap_id == CEPH_NOSNAP);
}
template <typename I>
bool Journal<I>::is_journal_replaying(const Mutex &) const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return (m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_REPLAY ||
m_state == STATE_FLUSHING_RESTART ||
template <typename I>
bool Journal<I>::is_journal_appending() const {
- assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
Mutex::Locker locker(m_lock);
return (m_state == STATE_READY &&
!m_image_ctx.get_journal_policy()->append_disabled());
journal::ObjectDispatch<I>::create(&m_image_ctx, this));
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_UNINITIALIZED);
+ ceph_assert(m_state == STATE_UNINITIALIZED);
wait_for_steady_state(on_finish);
create_journaler();
}
m_listener_notify = false;
m_listener_cond.Signal();
- assert(m_state != STATE_UNINITIALIZED);
+ ceph_assert(m_state != STATE_UNINITIALIZED);
if (m_state == STATE_CLOSED) {
on_finish->complete(m_error_result);
return;
template <typename I>
bool Journal<I>::is_tag_owner(const Mutex &) const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return (m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
}
predecessor.mirror_uuid = LOCAL_MIRROR_UUID;
{
Mutex::Locker locker(m_lock);
- assert(m_journaler != nullptr && is_tag_owner(m_lock));
+ ceph_assert(m_journaler != nullptr && is_tag_owner(m_lock));
cls::journal::Client client;
int r = m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client);
// since we are primary, populate the predecessor with our known commit
// position
- assert(m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
+ ceph_assert(m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
if (!client.commit_position.object_positions.empty()) {
auto position = client.commit_position.object_positions.front();
predecessor.commit_valid = true;
<< dendl;
Mutex::Locker locker(m_lock);
- assert(m_journaler != nullptr);
+ ceph_assert(m_journaler != nullptr);
journal::TagData tag_data;
tag_data.mirror_uuid = mirror_uuid;
ldout(cct, 20) << this << " " << __func__ << dendl;
Mutex::Locker locker(m_lock);
- assert(m_journaler != nullptr);
+ ceph_assert(m_journaler != nullptr);
m_journaler->flush_commit_position(on_finish);
}
uint64_t Journal<I>::append_write_event(uint64_t offset, size_t length,
const bufferlist &bl,
bool flush_entry) {
- assert(m_max_append_size > journal::AioWriteEvent::get_fixed_size());
+ ceph_assert(m_max_append_size > journal::AioWriteEvent::get_fixed_size());
uint64_t max_write_data_size =
m_max_append_size - journal::AioWriteEvent::get_fixed_size();
const Bufferlists &bufferlists,
uint64_t offset, size_t length,
bool flush_entry, int filter_ret_val) {
- assert(!bufferlists.empty());
+ ceph_assert(!bufferlists.empty());
uint64_t tid;
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_READY);
+ ceph_assert(m_state == STATE_READY);
tid = ++m_event_tid;
- assert(tid != 0);
+ ceph_assert(tid != 0);
}
Futures futures;
for (auto &bl : bufferlists) {
- assert(bl.length() <= m_max_append_size);
+ ceph_assert(bl.length() <= m_max_append_size);
futures.push_back(m_journaler->append(m_tag_tid, bl));
}
template <typename I>
void Journal<I>::commit_io_event_extent(uint64_t tid, uint64_t offset,
uint64_t length, int r) {
- assert(length > 0);
+ ceph_assert(length > 0);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
void Journal<I>::append_op_event(uint64_t op_tid,
journal::EventEntry &&event_entry,
Context *on_safe) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
bufferlist bl;
event_entry.timestamp = ceph_clock_now();
Future future;
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_READY);
+ ceph_assert(m_state == STATE_READY);
future = m_journaler->append(m_tag_tid, bl);
// delay committing op event to ensure consistent replay
- assert(m_op_futures.count(op_tid) == 0);
+ ceph_assert(m_op_futures.count(op_tid) == 0);
m_op_futures[op_tid] = future;
}
Future op_finish_future;
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_READY);
+ ceph_assert(m_state == STATE_READY);
// ready to commit op event
auto it = m_op_futures.find(op_tid);
- assert(it != m_op_futures.end());
+ ceph_assert(it != m_op_futures.end());
op_start_future = it->second;
m_op_futures.erase(it);
{
Mutex::Locker locker(m_lock);
- assert(m_journal_replay != nullptr);
+ ceph_assert(m_journal_replay != nullptr);
m_journal_replay->replay_op_ready(op_tid, on_resume);
}
}
template <typename I>
typename Journal<I>::Future Journal<I>::wait_event(Mutex &lock, uint64_t tid,
Context *on_safe) {
- assert(m_event_lock.is_locked());
+ ceph_assert(m_event_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
typename Events::iterator it = m_events.find(tid);
- assert(it != m_events.end());
+ ceph_assert(it != m_events.end());
Event &event = it->second;
if (event.safe) {
ldout(cct, 20) << this << " " << __func__ << dendl;
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_READY);
- assert(m_journal_replay == nullptr);
+ ceph_assert(m_state == STATE_READY);
+ ceph_assert(m_journal_replay == nullptr);
on_start = util::create_async_context_callback(m_image_ctx, on_start);
on_start = new FunctionContext(
ldout(cct, 20) << this << " " << __func__ << dendl;
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_READY);
- assert(m_journal_replay == nullptr);
+ ceph_assert(m_state == STATE_READY);
+ ceph_assert(m_journal_replay == nullptr);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
ldout(cct, 20) << this << " " << __func__ << dendl;
Mutex::Locker locker(m_lock);
- assert(m_journal_replay != nullptr);
- assert(m_state == STATE_REPLAYING);
+ ceph_assert(m_journal_replay != nullptr);
+ ceph_assert(m_state == STATE_REPLAYING);
delete m_journal_replay;
m_journal_replay = nullptr;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- assert(m_lock.is_locked());
- assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY);
- assert(m_journaler == NULL);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY);
+ ceph_assert(m_journaler == NULL);
transition_state(STATE_INITIALIZING, 0);
::journal::Settings settings;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
delete m_journal_replay;
m_journal_replay = NULL;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
- assert(m_lock.is_locked());
- assert(m_state == STATE_FLUSHING_RESTART ||
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
delete m_journal_replay;
template <typename I>
void Journal<I>::complete_event(typename Events::iterator it, int r) {
- assert(m_event_lock.is_locked());
- assert(m_state == STATE_READY);
+ ceph_assert(m_event_lock.is_locked());
+ ceph_assert(m_state == STATE_READY);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << it->first << " "
if (r < 0) {
// event recorded to journal but failed to update disk, we cannot
// commit this IO event. this event must be replayed.
- assert(event.safe);
+ ceph_assert(event.safe);
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit IO to disk, replay required: "
<< cpp_strerror(r) << dendl;
template <typename I>
void Journal<I>::start_append() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_journaler->start_append(m_image_ctx.journal_object_flush_interval,
m_image_ctx.journal_object_flush_bytes,
m_image_ctx.journal_object_flush_age);
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_INITIALIZING);
+ ceph_assert(m_state == STATE_INITIALIZING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
}
// only one entry should be in-flight at a time
- assert(!m_processing_entry);
+ ceph_assert(!m_processing_entry);
m_processing_entry = true;
}
State state;
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_FLUSHING_RESTART ||
+ ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
state = m_state;
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
{
Mutex::Locker locker(m_lock);
- assert(m_processing_entry);
+ ceph_assert(m_processing_entry);
m_processing_entry = false;
}
handle_replay_ready();
CephContext *cct = m_image_ctx.cct;
m_lock.Lock();
- assert(m_state == STATE_REPLAYING ||
+ ceph_assert(m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
<< "shut down replay" << dendl;
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_FLUSHING_RESTART);
+ ceph_assert(m_state == STATE_FLUSHING_RESTART);
}
m_journal_replay->shut_down(true, ctx);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- assert(r == 0);
- assert(m_state == STATE_FLUSHING_RESTART);
+ ceph_assert(r == 0);
+ ceph_assert(m_state == STATE_FLUSHING_RESTART);
if (m_close_pending) {
destroy_journaler(r);
return;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- assert(m_state == STATE_FLUSHING_REPLAY || m_state == STATE_FLUSHING_RESTART);
+ ceph_assert(m_state == STATE_FLUSHING_REPLAY ||
+             m_state == STATE_FLUSHING_RESTART);
if (m_close_pending) {
destroy_journaler(0);
return;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_STOPPING);
+ ceph_assert(m_state == STATE_STOPPING);
destroy_journaler(r);
}
delete m_journaler;
m_journaler = nullptr;
- assert(m_state == STATE_CLOSING || m_state == STATE_RESTARTING_REPLAY);
+ ceph_assert(m_state == STATE_CLOSING || m_state == STATE_RESTARTING_REPLAY);
if (m_state == STATE_RESTARTING_REPLAY) {
create_journaler();
return;
<< "tid=" << tid << dendl;
// journal will be flushed before closing
- assert(m_state == STATE_READY || m_state == STATE_STOPPING);
+ ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit IO event: " << cpp_strerror(r) << dendl;
{
Mutex::Locker event_locker(m_event_lock);
typename Events::iterator it = m_events.find(tid);
- assert(it != m_events.end());
+ ceph_assert(it != m_events.end());
Event &event = it->second;
on_safe_contexts.swap(event.on_safe_contexts);
<< "tid=" << tid << dendl;
// journal will be flushed before closing
- assert(m_state == STATE_READY || m_state == STATE_STOPPING);
+ ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit op event: " << cpp_strerror(r) << dendl;
template <typename I>
void Journal<I>::stop_recording() {
- assert(m_lock.is_locked());
- assert(m_journaler != NULL);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_journaler != NULL);
- assert(m_state == STATE_READY);
+ ceph_assert(m_state == STATE_READY);
transition_state(STATE_STOPPING, 0);
m_journaler->stop_append(util::create_async_context_callback(
void Journal<I>::transition_state(State state, int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": new state=" << state << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_state = state;
if (m_error_result == 0 && r < 0) {
template <typename I>
bool Journal<I>::is_steady_state() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
switch (m_state) {
case STATE_READY:
case STATE_CLOSED:
template <typename I>
void Journal<I>::wait_for_steady_state(Context *on_state) {
- assert(m_lock.is_locked());
- assert(!is_steady_state());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!is_steady_state());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": on_state=" << on_state
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- assert(m_lock.is_locked());
- assert(do_resync != nullptr);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(do_resync != nullptr);
cls::journal::Client client;
int r = m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client);
uint64_t allocate_op_tid() {
uint64_t op_tid = ++m_op_tid;
- assert(op_tid != 0);
+ ceph_assert(op_tid != 0);
return op_tid;
}
std::string_view format,
bufferlist& out) {
Commands::const_iterator i = commands.find(command);
- assert(i != commands.end());
+ ceph_assert(i != commands.end());
stringstream ss;
bool r = i->second->call(&ss);
out.append(ss);
ldout(m_cct, 20) << "C_OrderedWrite completing " << m_result << dendl;
{
Mutex::Locker l(m_wb_handler->m_lock);
- assert(!m_result->done);
+ ceph_assert(!m_result->done);
m_result->done = true;
m_result->ret = r;
m_wb_handler->complete_writes(m_result->oid);
void finish(int r) override {
// all IO operations are flushed prior to closing the journal
- assert(image_ctx->journal != nullptr);
+ ceph_assert(image_ctx->journal != nullptr);
image_ctx->journal->commit_io_event_extent(journal_tid, offset, length,
r);
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
// all IO operations are flushed prior to closing the journal
- assert(original_journal_tid != 0 && m_ictx->journal != NULL);
+ ceph_assert(original_journal_tid != 0 && m_ictx->journal != NULL);
Extents file_extents;
Striper::extent_to_file(m_ictx->cct, &m_ictx->layout, object_no, off,
void LibrbdWriteback::complete_writes(const std::string& oid)
{
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
std::queue<write_result_d*>& results = m_writes[oid];
ldout(m_ictx->cct, 20) << "complete_writes() oid " << oid << dendl;
std::list<write_result_d*> finished;
template <typename I>
ManagedLock<I>::~ManagedLock() {
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
+ ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
m_state == STATE_UNINITIALIZED);
if (m_state == STATE_UNINITIALIZED) {
// never initialized -- ensure any in-flight ops are complete
m_async_op_tracker.wait_for_ops(&ctx);
ctx.wait();
}
- assert(m_async_op_tracker.empty());
+ ceph_assert(m_async_op_tracker.empty());
}
template <typename I>
template <typename I>
bool ManagedLock<I>::is_lock_owner(Mutex &lock) const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
bool lock_owner;
ldout(m_cct, 10) << dendl;
Mutex::Locker locker(m_lock);
- assert(!is_state_shutdown());
+ ceph_assert(!is_state_shutdown());
if (m_state == STATE_WAITING_FOR_REGISTER) {
// abort stalled acquire lock state
ldout(m_cct, 10) << "woke up waiting acquire" << dendl;
Action active_action = get_active_action();
- assert(active_action == ACTION_TRY_LOCK ||
+ ceph_assert(active_action == ACTION_TRY_LOCK ||
active_action == ACTION_ACQUIRE_LOCK);
complete_active_action(STATE_UNLOCKED, -ESHUTDOWN);
}
// restart the acquire lock process now that watch is valid
ldout(m_cct, 10) << "woke up waiting acquire" << dendl;
Action active_action = get_active_action();
- assert(active_action == ACTION_TRY_LOCK ||
+ ceph_assert(active_action == ACTION_TRY_LOCK ||
active_action == ACTION_ACQUIRE_LOCK);
execute_next_action();
} else if (!is_state_shutdown() &&
template <typename I>
void ManagedLock<I>::append_context(Action action, Context *ctx) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
for (auto &action_ctxs : m_actions_contexts) {
if (action == action_ctxs.first) {
template <typename I>
void ManagedLock<I>::execute_action(Action action, Context *ctx) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
append_context(action, ctx);
if (!is_transition_state()) {
template <typename I>
void ManagedLock<I>::execute_next_action() {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_actions_contexts.empty());
switch (get_active_action()) {
case ACTION_ACQUIRE_LOCK:
case ACTION_TRY_LOCK:
template <typename I>
typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_actions_contexts.empty());
return m_actions_contexts.front().first;
}
template <typename I>
void ManagedLock<I>::complete_active_action(State next_state, int r) {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
template <typename I>
bool ManagedLock<I>::is_state_shutdown() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return ((m_state == STATE_SHUTDOWN) ||
(!m_actions_contexts.empty() &&
template <typename I>
void ManagedLock<I>::send_acquire_lock() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_state == STATE_LOCKED) {
complete_active_action(STATE_LOCKED, 0);
return;
template <typename I>
void ManagedLock<I>::handle_no_op_reacquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
- assert(r >= 0);
+ ceph_assert(r >= 0);
complete_active_action(STATE_LOCKED, 0);
}
m_work_queue, m_oid, m_cookie,
new FunctionContext([this, r](int ret) {
Mutex::Locker locker(m_lock);
- assert(ret == 0);
+ ceph_assert(ret == 0);
complete_active_action(STATE_UNLOCKED, r);
}));
m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req));
template <typename I>
void ManagedLock<I>::send_reacquire_lock() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_state != STATE_LOCKED) {
complete_active_action(m_state, 0);
ldout(m_cct, 10) << "r=" << r << dendl;
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_REACQUIRING);
+ ceph_assert(m_state == STATE_REACQUIRING);
if (r < 0) {
if (r == -EOPNOTSUPP) {
// be updated on older OSDs
execute_action(ACTION_RELEASE_LOCK, nullptr);
- assert(!m_actions_contexts.empty());
+ ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
// reacquire completes when the request lock completes
template <typename I>
void ManagedLock<I>::send_release_lock() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_state == STATE_UNLOCKED) {
complete_active_action(STATE_UNLOCKED, 0);
return;
{
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_PRE_RELEASING);
+ ceph_assert(m_state == STATE_PRE_RELEASING);
m_state = STATE_RELEASING;
}
ldout(m_cct, 10) << "r=" << r << dendl;
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_RELEASING);
+ ceph_assert(m_state == STATE_RELEASING);
if (r >= 0) {
m_cookie = "";
template <typename I>
void ManagedLock<I>::send_shutdown() {
ldout(m_cct, 10) << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_state == STATE_UNLOCKED) {
m_state = STATE_SHUTTING_DOWN;
m_work_queue->queue(new FunctionContext([this](int r) {
return;
}
- assert(m_state == STATE_LOCKED);
+ ceph_assert(m_state == STATE_LOCKED);
m_state = STATE_PRE_SHUTTING_DOWN;
m_lock.Unlock();
Mutex::Locker locker(m_lock);
cookie = m_cookie;
- assert(m_state == STATE_PRE_SHUTTING_DOWN);
+ ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
m_state = STATE_SHUTTING_DOWN;
}
ActionContexts action_contexts;
{
Mutex::Locker locker(m_lock);
- assert(m_lock.is_locked());
- assert(m_actions_contexts.size() == 1);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_actions_contexts.size() == 1);
action_contexts = std::move(m_actions_contexts.front());
m_actions_contexts.pop_front();
mutable Mutex m_lock;
inline void set_state_uninitialized() {
- assert(m_lock.is_locked());
- assert(m_state == STATE_UNLOCKED);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_UNLOCKED);
m_state = STATE_UNINITIALIZED;
}
inline void set_state_initializing() {
- assert(m_lock.is_locked());
- assert(m_state == STATE_UNINITIALIZED);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_UNINITIALIZED);
m_state = STATE_INITIALIZING;
}
inline void set_state_unlocked() {
- assert(m_lock.is_locked());
- assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING);
m_state = STATE_UNLOCKED;
}
inline void set_state_waiting_for_lock() {
- assert(m_lock.is_locked());
- assert(m_state == STATE_ACQUIRING);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_WAITING_FOR_LOCK;
}
inline void set_state_post_acquiring() {
- assert(m_lock.is_locked());
- assert(m_state == STATE_ACQUIRING);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_POST_ACQUIRING;
}
bool is_state_shutdown() const;
inline bool is_state_acquiring() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return m_state == STATE_ACQUIRING;
}
inline bool is_state_post_acquiring() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return m_state == STATE_POST_ACQUIRING;
}
inline bool is_state_releasing() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return m_state == STATE_RELEASING;
}
inline bool is_state_pre_releasing() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return m_state == STATE_PRE_RELEASING;
}
inline bool is_state_locked() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return m_state == STATE_LOCKED;
}
inline bool is_state_waiting_for_lock() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return m_state == STATE_WAITING_FOR_LOCK;
}
inline bool is_action_acquire_lock() const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return get_active_action() == ACTION_ACQUIRE_LOCK;
}
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS,
nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS,
nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template <typename I>
ceph::BitVector<2u>::Reference ObjectMap<I>::operator[](uint64_t object_no)
{
- assert(m_image_ctx.object_map_lock.is_wlocked());
- assert(object_no < m_object_map.size());
+ ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
+ ceph_assert(object_no < m_object_map.size());
return m_object_map[object_no];
}
template <typename I>
uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
{
- assert(m_image_ctx.object_map_lock.is_locked());
- assert(object_no < m_object_map.size());
+ ceph_assert(m_image_ctx.object_map_lock.is_locked());
+ ceph_assert(object_no < m_object_map.size());
return m_object_map[object_no];
}
template <typename I>
bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
{
- assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
// Fall back to default logic if object map is disabled or invalid
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
template <typename I>
bool ObjectMap<I>::update_required(const ceph::BitVector<2>::Iterator& it,
uint8_t new_state) {
- assert(m_image_ctx.object_map_lock.is_wlocked());
+ ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
uint8_t state = *it;
if ((state == new_state) ||
(new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
template <typename I>
bool ObjectMap<I>::set_object_map(ceph::BitVector<2> &target_object_map) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.snap_lock));
RWLock::RLocker object_map_locker(m_image_ctx.object_map_lock);
m_object_map = target_object_map;
template <typename I>
void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.object_map_lock.is_wlocked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
object_map::SnapshotRollbackRequest *req =
new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, on_finish);
template <typename I>
void ObjectMap<I>::snapshot_add(uint64_t snap_id, Context *on_finish) {
- assert(m_image_ctx.snap_lock.is_locked());
- assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
- assert(snap_id != CEPH_NOSNAP);
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
+ ceph_assert(snap_id != CEPH_NOSNAP);
object_map::SnapshotCreateRequest *req =
new object_map::SnapshotCreateRequest(m_image_ctx, &m_object_map, snap_id,
template <typename I>
void ObjectMap<I>::snapshot_remove(uint64_t snap_id, Context *on_finish) {
- assert(m_image_ctx.snap_lock.is_wlocked());
- assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
- assert(snap_id != CEPH_NOSNAP);
+ ceph_assert(m_image_ctx.snap_lock.is_wlocked());
+ ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
+ ceph_assert(snap_id != CEPH_NOSNAP);
object_map::SnapshotRemoveRequest *req =
new object_map::SnapshotRemoveRequest(m_image_ctx, &m_object_map, snap_id,
template <typename I>
void ObjectMap<I>::aio_save(Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.snap_lock));
RWLock::RLocker object_map_locker(m_image_ctx.object_map_lock);
librados::AioCompletion *comp = util::create_rados_callback(on_finish);
int r = m_image_ctx.md_ctx.aio_operate(oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template <typename I>
void ObjectMap<I>::aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.snap_lock));
- assert(m_image_ctx.image_watcher != NULL);
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.image_watcher != NULL);
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
object_map::ResizeRequest *req = new object_map::ResizeRequest(
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.object_map_lock.is_wlocked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.object_map_lock.is_wlocked());
BlockGuardCell *cell;
int r = m_update_guard->detain({op.start_object_no, op.end_object_no},
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace,
Context *on_finish) {
- assert(m_image_ctx.snap_lock.is_locked());
- assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
- assert(m_image_ctx.image_watcher != nullptr);
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
+ ceph_assert(m_image_ctx.image_watcher != nullptr);
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
- assert(snap_id != CEPH_NOSNAP || m_image_ctx.object_map_lock.is_wlocked());
- assert(start_object_no < end_object_no);
+ ceph_assert(snap_id != CEPH_NOSNAP ||
+             m_image_ctx.object_map_lock.is_wlocked());
+ ceph_assert(start_object_no < end_object_no);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "start=" << start_object_no << ", "
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, T *callback_object) {
- assert(start_object_no < end_object_no);
+ ceph_assert(start_object_no < end_object_no);
if (snap_id == CEPH_NOSNAP) {
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
}
void send_remote_request() {
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
}
void send_local_request() {
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
template <typename I>
void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
}
::SnapContext snapc = m_image_ctx.snapc;
- assert(m_image_ctx.parent != NULL);
+ ceph_assert(m_image_ctx.parent != NULL);
uint64_t overlap;
int r = m_image_ctx.get_parent_overlap(CEPH_NOSNAP, &overlap);
- assert(r == 0);
- assert(overlap <= m_image_ctx.size);
+ ceph_assert(r == 0);
+ ceph_assert(overlap <= m_image_ctx.size);
uint64_t overlap_objects = Striper::get_num_objects(m_image_ctx.layout,
overlap);
template <typename I>
void Operations<I>::execute_rebuild_object_map(ProgressContext &prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
void Operations<I>::object_map_iterate(ProgressContext &prog_ctx,
operation::ObjectIterateWork<I> handle_mismatch,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
template <typename I>
void Operations<I>::execute_rename(const std::string &dest_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
void Operations<I>::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
Context *on_finish,
uint64_t journal_op_tid) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
Context *on_finish,
uint64_t journal_op_tid,
bool skip_object_map) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
const std::string &snap_name,
ProgressContext& prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
{
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
}
void Operations<I>::execute_snap_rename(const uint64_t src_snap_id,
const std::string &dest_snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
void Operations<I>::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
void Operations<I>::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
template <typename I>
void Operations<I>::execute_snap_set_limit(const uint64_t limit,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit
void Operations<I>::execute_update_features(uint64_t features, bool enabled,
Context *on_finish,
uint64_t journal_op_tid) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
void Operations<I>::execute_metadata_set(const std::string &key,
const std::string &value,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
template <typename I>
void Operations<I>::execute_metadata_remove(const std::string &key,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
template <typename I>
void Operations<I>::execute_migrate(ProgressContext &prog_ctx,
Context *on_finish) {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
template <typename I>
int Operations<I>::prepare_image_update(bool request_lock) {
- assert(m_image_ctx.owner_lock.is_locked() &&
+ ceph_assert(m_image_ctx.owner_lock.is_locked() &&
!m_image_ctx.owner_lock.is_wlocked());
if (m_image_ctx.image_watcher == nullptr) {
return -EROFS;
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
if (it->second.second != NULL) {
- assert(m_safe_timer->cancel_event(it->second.second));
+ ceph_assert(m_safe_timer->cancel_event(it->second.second));
delete it->second.first;
} else {
// task already scheduled on the finisher
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template <typename I>
void wait(I &image_ctx, Context *on_finish) {
- assert(m_on_finish == nullptr);
+ ceph_assert(m_on_finish == nullptr);
on_finish = create_async_context_callback(image_ctx, on_finish);
if (m_refs == 0) {
librados::AioCompletion *aio_comp = create_rados_callback(this);
r = rados.aio_watch_flush(aio_comp);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
return;
}
void Watcher::C_NotifyAck::finish(int r) {
ldout(cct, 10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
watcher->acknowledge_notify(notify_id, handle, out);
}
Watcher::~Watcher() {
RWLock::RLocker l(m_watch_lock);
- assert(is_unregistered(m_watch_lock));
+ ceph_assert(is_unregistered(m_watch_lock));
}
void Watcher::register_watch(Context *on_finish) {
ldout(m_cct, 10) << dendl;
RWLock::RLocker watch_locker(m_watch_lock);
- assert(is_unregistered(m_watch_lock));
+ ceph_assert(is_unregistered(m_watch_lock));
m_watch_state = WATCH_STATE_REGISTERING;
librados::AioCompletion *aio_comp = create_rados_callback(
new C_RegisterWatch(this, on_finish));
int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_watch_handle, &m_watch_ctx);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
Context *unregister_watch_ctx = nullptr;
{
RWLock::WLocker watch_locker(m_watch_lock);
- assert(m_watch_state == WATCH_STATE_REGISTERING);
+ ceph_assert(m_watch_state == WATCH_STATE_REGISTERING);
m_watch_state = WATCH_STATE_IDLE;
if (r < 0) {
ldout(m_cct, 10) << "delaying unregister until register completed"
<< dendl;
- assert(m_unregister_watch_ctx == nullptr);
+ ceph_assert(m_unregister_watch_ctx == nullptr);
m_unregister_watch_ctx = new FunctionContext([this, on_finish](int r) {
unregister_watch(on_finish);
});
librados::AioCompletion *aio_comp = create_rados_callback(
new C_UnwatchAndFlush(m_ioctx, on_finish));
int r = m_ioctx.aio_unwatch(m_watch_handle, aio_comp);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
m_watch_handle = 0;
return;
void Watcher::unblock_notifies() {
RWLock::WLocker locker(m_watch_lock);
- assert(m_blocked_count > 0);
+ ceph_assert(m_blocked_count > 0);
--m_blocked_count;
ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
}
void Watcher::set_oid(const string& oid) {
RWLock::WLocker watch_locker(m_watch_lock);
- assert(is_unregistered(m_watch_lock));
+ ceph_assert(is_unregistered(m_watch_lock));
m_oid = oid;
}
Context *unregister_watch_ctx = nullptr;
{
RWLock::WLocker watch_locker(m_watch_lock);
- assert(m_watch_state == WATCH_STATE_REWATCHING);
+ ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
m_watch_state = WATCH_STATE_IDLE;
Context *unregister_watch_ctx = nullptr;
{
RWLock::WLocker watch_locker(m_watch_lock);
- assert(m_watch_state == WATCH_STATE_REWATCHING);
+ ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
ldout(m_cct, 10) << "image is closing, skip rewatch" << dendl;
Context *unregister_watch_ctx = nullptr;
{
RWLock::WLocker watch_locker(m_watch_lock);
- assert(m_watch_state == WATCH_STATE_REWATCHING);
+ ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
m_watch_state = WATCH_STATE_IDLE;
op.list_snaps(&m_snap_set, &m_snap_ret);
int r = m_head_ctx.aio_operate(m_oid, rados_completion, &op, NULL);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
}
opos += r->second;
}
- assert(opos == q->offset + q->length);
+ ceph_assert(opos == q->offset + q->length);
}
}
template <typename I>
int DiffIterate<I>::diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id,
BitVector<2>* object_diff_state) {
- assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
CephContext* cct = m_image_ctx.cct;
bool diff_from_start = (from_snap_id == 0);
if (current_snap_id != CEPH_NOSNAP) {
std::map<librados::snap_t, SnapInfo>::const_iterator snap_it =
m_image_ctx.snap_info.find(current_snap_id);
- assert(snap_it != m_image_ctx.snap_info.end());
+ ceph_assert(snap_it != m_image_ctx.snap_info.end());
current_size = snap_it->second.size;
++snap_it;
template <typename I>
snap_t get_group_snap_id(I* ictx,
const cls::rbd::SnapshotNamespace& in_snap_namespace) {
- assert(ictx->snap_lock.is_locked());
+ ceph_assert(ictx->snap_lock.is_locked());
auto it = ictx->snap_ids.lower_bound({in_snap_namespace, ""});
if (it != ictx->snap_ids.end() && it->first.first == in_snap_namespace) {
return it->second;
// retrieve clone v2 children attached to this snapshot
IoCtx parent_io_ctx;
r = rados.ioctx_create2(parent_spec.pool_id, parent_io_ctx);
- assert(r == 0);
+ ceph_assert(r == 0);
// TODO support clone v2 parent namespaces
parent_io_ctx.set_namespace(ictx->md_ctx.get_namespace());
m_prog_ctx(prog_ctx), m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
m_lock(util::unique_lock_name("librbd::api::MigrationProgressContext",
this)) {
- assert(m_prog_ctx != nullptr);
+ ceph_assert(m_prog_ctx != nullptr);
}
~MigrationProgressContext() {
void set_state_description() {
ldout(m_cct, 20) << "state_description=" << m_state_description << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
librados::ObjectWriteOperation op;
cls_client::migration_set_state(&op, m_state, m_state_description);
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_set_state_description>(this);
int r = m_io_ctx.aio_operate(m_header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
m_in_flight_state_updates++;
ldout(m_cct, 10) << "removing dst image" << dendl;
- assert(dst_image_ctx->ignore_migrating);
+ ceph_assert(dst_image_ctx->ignore_migrating);
ThreadPool *thread_pool;
ContextWQ *op_work_queue;
return -ENOENT;
}
- assert(!image_ctx->id.empty());
+ ceph_assert(!image_ctx->id.empty());
ldout(m_cct, 10) << dendl;
}
}
- assert(m_src_image_ctx->ignore_migrating);
+ ceph_assert(m_src_image_ctx->ignore_migrating);
ThreadPool *thread_pool;
ContextWQ *op_work_queue;
}
void finish(int r) override {
- assert(dispatcher->m_cache_lock.is_locked());
+ ceph_assert(dispatcher->m_cache_lock.is_locked());
auto cct = dispatcher->m_image_ctx->cct;
if (r == -EBLACKLISTED) {
template <typename I>
void ImageCopyRequest<I>::send_next_object_copy() {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_canceled && m_ret_val == 0) {
ldout(m_cct, 10) << "image copy canceled" << dendl;
bool complete;
{
Mutex::Locker locker(m_lock);
- assert(m_current_ops > 0);
+ ceph_assert(m_current_ops > 0);
--m_current_ops;
if (r < 0 && r != -ENOENT) {
m_lock.Unlock();
m_prog_ctx->update_progress(progress_object_no, m_end_object_no);
m_lock.Lock();
- assert(m_updating_progress);
+ ceph_assert(m_updating_progress);
m_updating_progress = false;
}
}
m_dst_image_ctx(dst_image_ctx), m_cct(dst_image_ctx->cct),
m_snap_map(snap_map), m_dst_object_number(dst_object_number),
m_flatten(flatten), m_on_finish(on_finish) {
- assert(!m_snap_map.empty());
+ ceph_assert(!m_snap_map.empty());
m_src_io_ctx.dup(m_src_image_ctx->data_ctx);
m_dst_io_ctx.dup(m_dst_image_ctx->data_ctx);
template <typename I>
void ObjectCopyRequest<I>::send_list_snaps() {
- assert(!m_src_objects.empty());
+ ceph_assert(!m_src_objects.empty());
m_src_ono = *m_src_objects.begin();
m_src_oid = m_src_image_ctx->get_object_name(m_src_ono);
m_src_io_ctx.snap_set_read(CEPH_SNAPDIR);
int r = m_src_io_ctx.aio_operate(m_src_oid, rados_completion, &op,
nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
// all snapshots have been read
merge_write_ops();
- assert(!m_src_objects.empty());
+ ceph_assert(!m_src_objects.empty());
m_src_objects.erase(m_src_objects.begin());
if (!m_src_objects.empty()) {
ldout(m_cct, 20) << "read " << m_src_oid << dendl;
int r = m_src_io_ctx.aio_operate(m_src_oid, comp, &op, nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
return;
}
- assert(!m_read_snaps.empty());
+ ceph_assert(!m_read_snaps.empty());
m_read_snaps.erase(m_read_snaps.begin());
send_read_object();
ldout(m_cct, 20) << dendl;
- assert(m_src_parent_image_ctx != nullptr);
+ ceph_assert(m_src_parent_image_ctx != nullptr);
auto ctx = create_context_callback<
ObjectCopyRequest<I>, &ObjectCopyRequest<I>::handle_read_from_parent>(this);
}
if (!m_read_ops.empty()) {
- assert(m_read_ops.size() == 1);
+ ceph_assert(m_read_ops.size() == 1);
auto src_snap_seq = m_read_ops.begin()->first.first;
auto ©_ops = m_read_ops.begin()->second;
uint64_t offset = 0;
template <typename I>
void ObjectCopyRequest<I>::send_write_object() {
- assert(!m_write_ops.empty());
+ ceph_assert(!m_write_ops.empty());
auto& copy_ops = m_write_ops.begin()->second;
// retrieve the destination snap context for the op
librados::snap_t src_snap_seq = m_write_ops.begin()->first;
if (src_snap_seq != 0) {
auto snap_map_it = m_snap_map.find(src_snap_seq);
- assert(snap_map_it != m_snap_map.end());
+ ceph_assert(snap_map_it != m_snap_map.end());
auto dst_snap_id = snap_map_it->second.front();
auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_id);
- assert(dst_may_exist_it != m_dst_object_may_exist.end());
+ ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
if (!dst_may_exist_it->second && !copy_ops.empty()) {
// if the object cannot exist, the only valid op is to remove it
- assert(copy_ops.size() == 1U);
- assert(copy_ops.begin()->type == COPY_OP_TYPE_REMOVE);
+ ceph_assert(copy_ops.size() == 1U);
+ ceph_assert(copy_ops.begin()->type == COPY_OP_TYPE_REMOVE);
}
// write snapshot context should be before actual snapshot
if (snap_map_it != m_snap_map.begin()) {
--snap_map_it;
- assert(!snap_map_it->second.empty());
+ ceph_assert(!snap_map_it->second.empty());
dst_snap_seq = snap_map_it->second.front();
dst_snap_ids = snap_map_it->second;
}
librados::AioCompletion *comp = create_rados_callback(ctx);
int r = m_dst_io_ctx.aio_operate(m_dst_oid, comp, &op, dst_snap_seq,
dst_snap_ids, nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
auto &dst_object_state = *m_dst_object_state.begin();
auto it = m_snap_map.find(dst_object_state.first);
- assert(it != m_snap_map.end());
+ ceph_assert(it != m_snap_map.end());
auto dst_snap_id = it->second.front();
auto object_state = dst_object_state.second;
m_dst_object_state.erase(m_dst_object_state.begin());
m_dst_image_ctx->snap_lock.put_read();
m_dst_image_ctx->owner_lock.put_read();
if (!sent) {
- assert(dst_snap_id == CEPH_NOSNAP);
+ ceph_assert(dst_snap_id == CEPH_NOSNAP);
ctx->complete(0);
}
}
void ObjectCopyRequest<I>::handle_update_object_map(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
if (!m_dst_object_state.empty()) {
send_update_object_map();
return;
template <typename I>
Context *ObjectCopyRequest<I>::start_lock_op(RWLock &owner_lock) {
- assert(m_dst_image_ctx->owner_lock.is_locked());
+ ceph_assert(m_dst_image_ctx->owner_lock.is_locked());
if (m_dst_image_ctx->exclusive_lock == nullptr) {
return new FunctionContext([](int r) {});
}
std::vector<std::pair<uint64_t, uint64_t>> image_extents;
Striper::extent_to_file(m_cct, &m_src_image_ctx->layout, objectno, offset, 1,
image_extents);
- assert(image_extents.size() == 1);
+ ceph_assert(image_extents.size() == 1);
auto dst_object_offset = image_extents.begin()->first;
std::map<object_t, std::vector<ObjectExtent>> dst_object_extents;
Striper::file_to_extents(m_cct, m_dst_image_ctx->format_string,
&m_dst_image_ctx->layout, dst_object_offset, 1, 0,
dst_object_extents);
- assert(dst_object_extents.size() == 1);
- assert(dst_object_extents.begin()->second.size() == 1);
+ ceph_assert(dst_object_extents.size() == 1);
+ ceph_assert(dst_object_extents.begin()->second.size() == 1);
auto &e = *dst_object_extents.begin()->second.begin();
- assert(e.objectno == m_dst_object_number);
+ ceph_assert(e.objectno == m_dst_object_number);
return e.offset;
}
m_src_objects.insert(s.objectno);
total += s.length;
while (s.length > 0) {
- assert(s.length >= stripe_unit);
+ ceph_assert(s.length >= stripe_unit);
auto dst_object_offset = src_to_dst_object_offset(s.objectno, s.offset);
m_src_object_extents[dst_object_offset] = {s.objectno, s.offset,
stripe_unit};
}
}
- assert(total == m_dst_image_ctx->layout.object_size);
+ ceph_assert(total == m_dst_image_ctx->layout.object_size);
ldout(m_cct, 20) << m_src_object_extents.size() << " src extents" << dendl;
}
librados::snap_t start_src_snap_id = 0;
for (auto &pair : m_snap_map) {
- assert(!pair.second.empty());
+ ceph_assert(!pair.second.empty());
librados::snap_t end_src_snap_id = pair.first;
librados::snap_t end_dst_snap_id = pair.second.front();
// reads should be issued against the newest (existing) snapshot within
// the associated snapshot object clone. writes should be issued
// against the oldest snapshot in the snap_map.
- assert(clone_end_snap_id >= end_src_snap_id);
+ ceph_assert(clone_end_snap_id >= end_src_snap_id);
if (clone_end_snap_id > src_copy_point_snap_id) {
// do not read past the copy point snapshot
clone_end_snap_id = src_copy_point_snap_id;
<< ", dst_object_offset=" << dst_object_offset
<< ", read: " << read_interval << dendl;
- assert(exists || read_interval.empty());
+ ceph_assert(exists || read_interval.empty());
for (auto it = read_interval.begin(); it != read_interval.end();
it++) {
- assert(it.get_start() >= e.offset);
+ ceph_assert(it.get_start() >= e.offset);
auto offset = it.get_start() - e.offset;
ldout(m_cct, 20) << "read/write op: " << it.get_start() << "~"
<< it.get_len() << " dst: "
<< "~" << e.length << " overlap " << parent_overlap
<< " parent extents " << image_extents << dendl;
- assert(image_extents.size() == 1);
+ ceph_assert(image_extents.size() == 1);
auto src_image_offset = image_extents.begin()->first;
auto length = image_extents.begin()->second;
<< zero_len << dendl;
m_dst_zero_interval[src_snap_seq].insert(dst_offset, zero_len);
} else {
- assert(dst_offset == copy_op.dst_offset + copy_op.length);
+ ceph_assert(dst_offset == copy_op.dst_offset + copy_op.length);
}
m_write_ops[src_snap_seq].emplace_back(std::move(copy_op));
}
auto &zero_interval = it.second;
auto snap_map_it = m_snap_map.find(src_snap_seq);
- assert(snap_map_it != m_snap_map.end());
+ ceph_assert(snap_map_it != m_snap_map.end());
auto dst_snap_seq = snap_map_it->second.front();
auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_seq);
- assert(dst_may_exist_it != m_dst_object_may_exist.end());
+ ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end());
if (!dst_may_exist_it->second && prev_end_size > 0) {
ldout(m_cct, 5) << "object DNE for snap_id: " << dst_snap_seq << dendl;
m_write_ops[src_snap_seq].emplace_back(COPY_OP_TYPE_REMOVE, 0, 0, 0);
for (auto e : image_extents) {
prev_end_size += e.second;
}
- assert(prev_end_size <= m_dst_image_ctx->layout.object_size);
+ ceph_assert(prev_end_size <= m_dst_image_ctx->layout.object_size);
}
}
}
: m_image_ctx(image_ctx), m_size(size), m_parent_spec(spec),
m_parent_overlap(parent_overlap), m_on_finish(on_finish),
m_cct(image_ctx->cct) {
- assert(m_parent_overlap <= m_size);
+ ceph_assert(m_parent_overlap <= m_size);
}
template <typename I>
});
librados::AioCompletion *comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
});
librados::AioCompletion *comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
});
librados::AioCompletion *comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::snap_t> &pair) {
return pair.second == snap_id;
});
- assert(snap_it != image_ctx->snap_ids.end());
+ ceph_assert(snap_it != image_ctx->snap_ids.end());
return snap_it->first.second;
}
return;
}
- assert(m_prev_snap_id != CEPH_NOSNAP);
+ ceph_assert(m_prev_snap_id != CEPH_NOSNAP);
auto snap_it = m_dst_image_ctx->snap_ids.find(
{cls::rbd::UserSnapshotNamespace(), m_snap_name});
- assert(snap_it != m_dst_image_ctx->snap_ids.end());
+ ceph_assert(snap_it != m_dst_image_ctx->snap_ids.end());
librados::snap_t dst_snap_id = snap_it->second;
ldout(m_cct, 20) << "mapping source snap id " << m_prev_snap_id << " to "
// if destination snapshot is not protected, protect it
auto snap_seq_it = m_snap_seqs.find(src_snap_id);
- assert(snap_seq_it != m_snap_seqs.end());
+ ceph_assert(snap_seq_it != m_snap_seqs.end());
m_dst_image_ctx->snap_lock.get_read();
bool dst_protected;
void SnapshotCopyRequest<I>::handle_resize_object_map(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
finish(0);
}
template <typename I>
Context *SnapshotCopyRequest<I>::start_lock_op(RWLock &owner_lock) {
- assert(m_dst_image_ctx->owner_lock.is_locked());
+ ceph_assert(m_dst_image_ctx->owner_lock.is_locked());
if (m_dst_image_ctx->exclusive_lock == nullptr) {
return new FunctionContext([](int r) {});
}
});
librados::AioCompletion *comp = create_rados_callback(ctx);
int r = m_dst_image_ctx->md_ctx.aio_operate(object_map_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
namespace exclusive_lock {
int AutomaticPolicy::lock_requested(bool force) {
- assert(m_image_ctx->owner_lock.is_locked());
- assert(m_image_ctx->exclusive_lock != nullptr);
+ ceph_assert(m_image_ctx->owner_lock.is_locked());
+ ceph_assert(m_image_ctx->exclusive_lock != nullptr);
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
<< dendl;
ldout(cct, 10) << "r=" << r << dendl;
// object map should never result in an error
- assert(r == 0);
+ ceph_assert(r == 0);
revert();
finish();
}
void PostAcquireRequest<I>::apply() {
{
RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
- assert(m_image_ctx.object_map == nullptr);
+ ceph_assert(m_image_ctx.object_map == nullptr);
m_image_ctx.object_map = m_object_map;
- assert(m_image_ctx.journal == nullptr);
+ ceph_assert(m_image_ctx.journal == nullptr);
m_image_ctx.journal = m_journal;
}
delete m_object_map;
delete m_journal;
- assert(m_error_result < 0);
+ ceph_assert(m_error_result < 0);
}
template <typename I>
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
finish();
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
send_block_writes();
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
send_close_journal();
}
ldout(cct, 10) << "r=" << r << dendl;
// object map shouldn't return errors
- assert(r == 0);
+ ceph_assert(r == 0);
delete m_object_map;
send_unlock();
namespace exclusive_lock {
int StandardPolicy::lock_requested(bool force) {
- assert(m_image_ctx->owner_lock.is_locked());
- assert(m_image_ctx->exclusive_lock != nullptr);
+ ceph_assert(m_image_ctx->owner_lock.is_locked());
+ ceph_assert(m_image_ctx->exclusive_lock != nullptr);
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
<< dendl;
template <typename I>
void CloneRequest<I>::open_parent() {
ldout(m_cct, 20) << dendl;
- assert(m_parent_snap_name.empty() ^ (m_parent_snap_id == CEPH_NOSNAP));
+ ceph_assert(m_parent_snap_name.empty() ^ (m_parent_snap_id == CEPH_NOSNAP));
if (m_parent_snap_id != CEPH_NOSNAP) {
m_parent_image_ctx = I::create("", m_parent_image_id, m_parent_snap_id,
int r = m_ioctx.aio_operate(util::old_header_name(m_name), comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_set_parent>(this);
int r = m_imctx->md_ctx.aio_operate(m_imctx->header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
auto aio_comp = create_rados_callback<
CloneRequest<I>, &CloneRequest<I>::handle_v2_set_op_feature>(this);
int r = m_ioctx.aio_operate(m_imctx->header_oid, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
auto aio_comp = create_rados_callback<
CloneRequest<I>, &CloneRequest<I>::handle_v2_child_attach>(this);
int r = m_parent_image_ctx->md_ctx.aio_operate(m_parent_image_ctx->header_oid, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_v1_add_child>(this);
int r = m_ioctx.aio_operate(RBD_CHILDREN, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_metadata_set>(this);
int r = m_ioctx.aio_operate(m_imctx->header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
void CloneRequest<I>::close_child() {
ldout(m_cct, 20) << dendl;
- assert(m_imctx != nullptr);
+ ceph_assert(m_imctx != nullptr);
using klass = CloneRequest<I>;
Context *ctx = create_async_context_callback(
template <typename I>
void CloneRequest<I>::close_parent() {
ldout(m_cct, 20) << dendl;
- assert(m_parent_image_ctx != nullptr);
+ ceph_assert(m_parent_image_ctx != nullptr);
Context *ctx = create_async_context_callback(
*m_parent_image_ctx, create_context_callback<
CloseRequest<I>::CloseRequest(I *image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish), m_error_result(0),
m_exclusive_lock(nullptr) {
- assert(image_ctx != nullptr);
+ ceph_assert(image_ctx != nullptr);
}
template <typename I>
{
RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
- assert(m_image_ctx->exclusive_lock == nullptr);
+ ceph_assert(m_image_ctx->exclusive_lock == nullptr);
// object map and journal closed during exclusive lock shutdown
RWLock::RLocker snap_locker(m_image_ctx->snap_lock);
- assert(m_image_ctx->journal == nullptr);
- assert(m_image_ctx->object_map == nullptr);
+ ceph_assert(m_image_ctx->journal == nullptr);
+ ceph_assert(m_image_ctx->object_map == nullptr);
}
delete m_exclusive_lock;
m_outbl.clear();
int r = m_data_io_ctx.aio_operate(RBD_INFO, comp, &op, &m_outbl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_add_image_to_directory>(this);
int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_create_id_object>(this);
int r = m_io_ctx.aio_operate(m_id_obj, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_outbl.clear();
int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op, &m_outbl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template<typename I>
void CreateRequest<I>::create_image() {
ldout(m_cct, 20) << dendl;
- assert(m_data_pool.empty() || m_data_pool_id != -1);
+ ceph_assert(m_data_pool.empty() || m_data_pool_id != -1);
ostringstream oss;
oss << RBD_DATA_PREFIX;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_create_image>(this);
int r = m_io_ctx.aio_operate(m_header_obj, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_set_stripe_unit_count>(this);
int r = m_io_ctx.aio_operate(m_header_obj, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_object_map_resize>(this);
int r = m_io_ctx.aio_operate(m_objmap_name, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
create_rados_callback<klass, &klass::handle_fetch_mirror_mode>(this);
m_outbl.clear();
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_outbl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_remove_object_map>(this);
int r = m_io_ctx.aio_remove(m_objmap_name, comp);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_remove_header_object>(this);
int r = m_io_ctx.aio_remove(m_header_obj, comp);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_remove_id_object>(this);
int r = m_io_ctx.aio_remove(m_id_obj, comp);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_remove_from_dir>(this);
int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template <typename I>
DetachChildRequest<I>::~DetachChildRequest() {
- assert(m_parent_image_ctx == nullptr);
+ ceph_assert(m_parent_image_ctx == nullptr);
}
template <typename I>
librados::Rados rados(m_image_ctx.md_ctx);
int r = rados.ioctx_create2(m_parent_spec.pool_id, m_parent_io_ctx);
- assert(r == 0);
+ ceph_assert(r == 0);
// TODO support clone v2 parent namespaces
m_parent_io_ctx.set_namespace(m_image_ctx.md_ctx.get_namespace());
DetachChildRequest<I>,
&DetachChildRequest<I>::handle_clone_v2_child_detach>(this);
r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
&DetachChildRequest<I>::handle_clone_v2_get_snapshot>(this);
int r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
DetachChildRequest<I>,
&DetachChildRequest<I>::handle_clone_v1_remove_child>(this);
int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
rados_completion, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
create_rados_callback<klass, &klass::handle_get_mirror_image>(this);
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, rados_completion,
&op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
bool RefreshParentRequest<I>::is_refresh_required(
I &child_image_ctx, const ParentInfo &parent_md,
const MigrationInfo &migration_info) {
- assert(child_image_ctx.snap_lock.is_locked());
- assert(child_image_ctx.parent_lock.is_locked());
+ ceph_assert(child_image_ctx.snap_lock.is_locked());
+ ceph_assert(child_image_ctx.parent_lock.is_locked());
return (is_open_required(child_image_ctx, parent_md, migration_info) ||
is_close_required(child_image_ctx, parent_md, migration_info));
}
template <typename I>
void RefreshParentRequest<I>::apply() {
- assert(m_child_image_ctx.snap_lock.is_wlocked());
- assert(m_child_image_ctx.parent_lock.is_wlocked());
+ ceph_assert(m_child_image_ctx.snap_lock.is_wlocked());
+ ceph_assert(m_child_image_ctx.parent_lock.is_wlocked());
std::swap(m_child_image_ctx.parent, m_parent_image_ctx);
std::swap(m_child_image_ctx.migration_parent, m_migration_parent_image_ctx);
}
template <typename I>
void RefreshParentRequest<I>::send_open_parent() {
- assert(m_parent_md.spec.pool_id >= 0);
+ ceph_assert(m_parent_md.spec.pool_id >= 0);
CephContext *cct = m_child_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
template <typename I>
void RefreshParentRequest<I>::send_open_migration_parent() {
- assert(m_parent_image_ctx != nullptr);
- assert(!m_migration_info.empty());
+ ceph_assert(m_parent_image_ctx != nullptr);
+ ceph_assert(!m_migration_info.empty());
CephContext *cct = m_child_image_ctx.cct;
ParentSpec parent_spec;
template <typename I>
void RefreshParentRequest<I>::send_close_parent() {
- assert(m_parent_image_ctx != nullptr);
+ ceph_assert(m_parent_image_ctx != nullptr);
CephContext *cct = m_child_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
template <typename I>
RefreshRequest<I>::~RefreshRequest() {
// these require state machine to close
- assert(m_exclusive_lock == nullptr);
- assert(m_object_map == nullptr);
- assert(m_journal == nullptr);
- assert(m_refresh_parent == nullptr);
- assert(!m_blocked_writes);
+ ceph_assert(m_exclusive_lock == nullptr);
+ ceph_assert(m_object_map == nullptr);
+ ceph_assert(m_journal == nullptr);
+ ceph_assert(m_refresh_parent == nullptr);
+ ceph_assert(!m_blocked_writes);
}
template <typename I>
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
m_out_bl.clear();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(m_refresh_parent != nullptr);
+ ceph_assert(m_refresh_parent != nullptr);
delete m_refresh_parent;
m_refresh_parent = nullptr;
{
RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
- assert(m_image_ctx.exclusive_lock == nullptr);
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr);
}
- assert(m_exclusive_lock != nullptr);
+ ceph_assert(m_exclusive_lock != nullptr);
delete m_exclusive_lock;
m_exclusive_lock = nullptr;
<< dendl;
}
- assert(m_journal != nullptr);
+ ceph_assert(m_journal != nullptr);
delete m_journal;
m_journal = nullptr;
- assert(m_blocked_writes);
+ ceph_assert(m_blocked_writes);
m_blocked_writes = false;
m_image_ctx.io_work_queue->unblock_writes();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(*result == 0);
- assert(m_object_map != nullptr);
+ ceph_assert(*result == 0);
+ ceph_assert(m_object_map != nullptr);
delete m_object_map;
m_object_map = nullptr;
m_image_ctx.snap_lock)) {
// disabling exclusive lock will automatically handle closing
// object map and journaling
- assert(m_exclusive_lock == nullptr);
+ ceph_assert(m_exclusive_lock == nullptr);
m_exclusive_lock = m_image_ctx.exclusive_lock;
} else {
if (m_exclusive_lock != nullptr) {
- assert(m_image_ctx.exclusive_lock == nullptr);
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr);
std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
}
if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST ||
(m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING)) {
- assert(m_migration_spec.header_type == cls::rbd::MIGRATION_HEADER_TYPE_SRC ||
+ ceph_assert(m_migration_spec.header_type == cls::rbd::MIGRATION_HEADER_TYPE_SRC ||
m_migration_spec.pool_id == -1 ||
m_migration_spec.state == cls::rbd::MIGRATION_STATE_EXECUTED);
return;
}
- assert(m_image_ctx->exclusive_lock == nullptr);
+ ceph_assert(m_image_ctx->exclusive_lock == nullptr);
validate_image_removal();
}
m_out_bl.clear();
int r = m_image_ctx->md_ctx.aio_operate(m_header_oid, rados_completion, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
return;
}
- assert(!m_snap_infos.empty());
+ ceph_assert(!m_snap_infos.empty());
m_snap_infos.erase(m_snap_infos.begin());
remove_snapshot();
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_remove_header>(this);
int r = m_ioctx.aio_remove(m_header_oid, rados_completion);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_remove_header_v2>(this);
int r = m_ioctx.aio_remove(m_header_oid, rados_completion);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
int r = ObjectMap<>::aio_remove(m_ioctx,
m_image_id,
rados_completion);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_mirror_image_remove>(this);
int r = m_ioctx.aio_operate(RBD_MIRRORING, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
create_rados_callback<klass, &klass::handle_dir_get_image_id>(this);
m_out_bl.clear();
int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
create_rados_callback<klass, &klass::handle_dir_get_image_name>(this);
m_out_bl.clear();
int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_remove_id_object>(this);
int r = m_ioctx.aio_remove(util::id_obj_name(m_image_name), rados_completion);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_dir_remove_image>(this);
int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *comp =
create_rados_callback(gather_ctx->new_sub());
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
gather_ctx->activate();
template <typename I>
SetSnapRequest<I>::~SetSnapRequest() {
- assert(!m_writes_blocked);
+ ceph_assert(!m_writes_blocked);
delete m_refresh_parent;
delete m_object_map;
delete m_exclusive_lock;
{
RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
if (m_image_ctx.exclusive_lock != nullptr) {
- assert(m_image_ctx.snap_id == CEPH_NOSNAP);
+ ceph_assert(m_image_ctx.snap_id == CEPH_NOSNAP);
send_complete();
return;
}
RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
RWLock::WLocker parent_locker(m_image_ctx.parent_lock);
if (m_snap_id != CEPH_NOSNAP) {
- assert(m_image_ctx.exclusive_lock == nullptr);
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr);
int r = m_image_ctx.snap_set(m_snap_id);
if (r < 0) {
return r;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
m_notifier.notify(m_bl, &m_notify_response, create_context_callback<
NotifyLockOwner, &NotifyLockOwner::handle_notify>(this));
}
void trim_image(ImageCtx *ictx, uint64_t newsize, ProgressContext& prog_ctx)
{
- assert(ictx->owner_lock.is_locked());
- assert(ictx->exclusive_lock == nullptr ||
+ ceph_assert(ictx->owner_lock.is_locked());
+ ceph_assert(ictx->exclusive_lock == nullptr ||
ictx->exclusive_lock->is_lock_owner());
C_SaferCond ctx;
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end()) {
- assert((*opts_)->find(optname) == (*opts_)->end());
+ ceph_assert((*opts_)->find(optname) == (*opts_)->end());
return -EINVAL;
}
}
}
pctx.update_progress(++i, size);
- assert(i <= size);
+ ceph_assert(i <= size);
}
return 0;
ImageOptions opts;
int r = opts.set(RBD_IMAGE_OPTION_ORDER, order_);
- assert(r == 0);
+ ceph_assert(r == 0);
r = create(io_ctx, imgname, "", size, opts, "", "", false);
int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_);
- assert(r1 == 0);
+ ceph_assert(r1 == 0);
*order = order_;
return r;
int r;
r = opts.set(RBD_IMAGE_OPTION_FORMAT, format);
- assert(r == 0);
+ ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_FEATURES, features);
- assert(r == 0);
+ ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_ORDER, order_);
- assert(r == 0);
+ ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
- assert(r == 0);
+ ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
- assert(r == 0);
+ ceph_assert(r == 0);
r = create(io_ctx, imgname, "", size, opts, "", "", false);
int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_);
- assert(r1 == 0);
+ ceph_assert(r1 == 0);
*order = order_;
return r;
}
int r1 = opts.set(RBD_IMAGE_OPTION_ORDER, order);
- assert(r1 == 0);
+ ceph_assert(r1 == 0);
return r;
}
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid)
{
- assert((p_id == nullptr) ^ (p_name == nullptr));
+ ceph_assert((p_id == nullptr) ^ (p_name == nullptr));
CephContext *cct = (CephContext *)p_ioctx.cct();
if (p_snap_name == nullptr) {
int snap_get_timestamp(ImageCtx *ictx, uint64_t snap_id, struct timespec *timestamp)
{
std::map<librados::snap_t, SnapInfo>::iterator snap_it = ictx->snap_info.find(snap_id);
- assert(snap_it != ictx->snap_info.end());
+ ceph_assert(snap_it != ictx->snap_info.end());
utime_t time = snap_it->second.timestamp;
time.to_timespec(timestamp);
return 0;
m_throttle->end_op(r);
return;
}
- assert(m_bl->length() == (size_t)r);
+ ceph_assert(m_bl->length() == (size_t)r);
if (m_bl->is_zero()) {
delete m_bl;
}
}
delete m_bl;
- assert(gather_ctx->get_sub_created_count() > 0);
+ ceph_assert(gather_ctx->get_sub_created_count() > 0);
gather_ctx->activate();
}
// validate extent against image size; clip to image size if necessary
int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len)
{
- assert(ictx->snap_lock.is_locked());
+ ceph_assert(ictx->snap_lock.is_locked());
uint64_t image_size = ictx->get_image_size(ictx->snap_id);
bool snap_exists = ictx->snap_exists;
void AioCompletion::finalize(ssize_t rval)
{
- assert(lock.is_locked());
- assert(ictx != nullptr);
+ ceph_assert(lock.is_locked());
+ ceph_assert(ictx != nullptr);
CephContext *cct = ictx->cct;
ldout(cct, 20) << "r=" << rval << dendl;
}
void AioCompletion::complete() {
- assert(lock.is_locked());
- assert(ictx != nullptr);
+ ceph_assert(lock.is_locked());
+ ceph_assert(ictx != nullptr);
CephContext *cct = ictx->cct;
tracepoint(librbd, aio_complete_enter, this, rval);
void AioCompletion::start_op(bool ignore_type) {
Mutex::Locker locker(lock);
- assert(ictx != nullptr);
- assert(!async_op.started());
+ ceph_assert(ictx != nullptr);
+ ceph_assert(!async_op.started());
if (state == AIO_STATE_PENDING &&
(ignore_type || aio_type != AIO_TYPE_FLUSH)) {
async_op.start_op(*ictx);
void AioCompletion::fail(int r)
{
lock.Lock();
- assert(ictx != nullptr);
+ ceph_assert(ictx != nullptr);
CephContext *cct = ictx->cct;
lderr(cct) << cpp_strerror(r) << dendl;
- assert(pending_count == 0);
+ ceph_assert(pending_count == 0);
rval = r;
complete();
put_unlock();
void AioCompletion::set_request_count(uint32_t count) {
lock.Lock();
- assert(ictx != nullptr);
+ ceph_assert(ictx != nullptr);
CephContext *cct = ictx->cct;
ldout(cct, 20) << "pending=" << count << dendl;
- assert(pending_count == 0);
+ ceph_assert(pending_count == 0);
pending_count = count;
lock.Unlock();
void AioCompletion::complete_request(ssize_t r)
{
lock.Lock();
- assert(ictx != nullptr);
+ ceph_assert(ictx != nullptr);
CephContext *cct = ictx->cct;
if (rval >= 0) {
else if (r > 0)
rval += r;
}
- assert(pending_count);
+ ceph_assert(pending_count);
int count = --pending_count;
ldout(cct, 20) << "cb=" << complete_cb << ", "
void set_request_count(uint32_t num);
void add_request() {
lock.Lock();
- assert(pending_count > 0);
+ ceph_assert(pending_count > 0);
lock.Unlock();
get();
}
void get() {
lock.Lock();
- assert(ref > 0);
+ ceph_assert(ref > 0);
ref++;
lock.Unlock();
}
void release() {
lock.Lock();
- assert(!released);
+ ceph_assert(!released);
released = true;
put_unlock();
}
put_unlock();
}
void put_unlock() {
- assert(ref > 0);
+ ceph_assert(ref > 0);
int n = --ref;
lock.Unlock();
if (!n) {
}
void unblock() {
Mutex::Locker l(lock);
- assert(blockers > 0);
+ ceph_assert(blockers > 0);
--blockers;
if (pending_count == 0 && blockers == 0) {
finalize(rval);
} // anonymous namespace
void AsyncOperation::start_op(ImageCtx &image_ctx) {
- assert(m_image_ctx == NULL);
+ ceph_assert(m_image_ctx == NULL);
m_image_ctx = &image_ctx;
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl;
Mutex::Locker l(m_image_ctx->async_ops_lock);
xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
++iter;
- assert(m_xlist_item.remove_myself());
+ ceph_assert(m_xlist_item.remove_myself());
// linked list stored newest -> oldest ops
if (!iter.end() && !m_flush_contexts.empty()) {
}
void AsyncOperation::add_flush_context(Context *on_finish) {
- assert(m_image_ctx->async_ops_lock.is_locked());
+ ceph_assert(m_image_ctx->async_ops_lock.is_locked());
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": "
<< "flush=" << on_finish << dendl;
m_flush_contexts.push_back(on_finish);
~AsyncOperation()
{
- assert(!m_xlist_item.is_on_list());
+ ceph_assert(!m_xlist_item.is_on_list());
}
inline bool started() const {
if (snap_id == CEPH_NOSNAP) {
RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
- assert(m_image_ctx.exclusive_lock->is_lock_owner());
- assert(m_image_ctx.object_map != nullptr);
+ ceph_assert(m_image_ctx.exclusive_lock->is_lock_owner());
+ ceph_assert(m_image_ctx.object_map != nullptr);
bool sent = m_image_ctx.object_map->aio_update<Context>(
CEPH_NOSNAP, m_object_no, OBJECT_EXISTS, {}, m_trace, this);
return (sent ? 0 : 1);
bool sent = m_image_ctx.object_map->aio_update<Context>(
snap_id, m_object_no, state, {}, m_trace, this);
- assert(sent);
+ ceph_assert(sent);
return 0;
}
template <typename I>
CopyupRequest<I>::~CopyupRequest() {
- assert(m_pending_requests.empty());
+ ceph_assert(m_pending_requests.empty());
m_async_op.finish_op();
}
r = m_data_ctx.aio_operate(
m_oid, comp, ©up_op, 0, snaps,
(m_trace.valid() ? m_trace.get_info() : nullptr));
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
r = m_ictx->data_ctx.aio_operate(
m_oid, comp, &write_op, snapc.seq, snaps,
(m_trace.valid() ? m_trace.get_info() : nullptr));
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
return false;
}
auto it = m_ictx->migration_info.snap_map.find(CEPH_NOSNAP);
- assert(it != m_ictx->migration_info.snap_map.end());
+ ceph_assert(it != m_ictx->migration_info.snap_map.end());
return it->second[0] != CEPH_NOSNAP;
}
case STATE_OBJECT_MAP_HEAD:
ldout(cct, 20) << "OBJECT_MAP_HEAD" << dendl;
- assert(*r == 0);
+ ceph_assert(*r == 0);
return send_object_map();
case STATE_OBJECT_MAP:
ldout(cct, 20) << "OBJECT_MAP" << dendl;
- assert(*r == 0);
+ ceph_assert(*r == 0);
if (!is_copyup_required()) {
ldout(cct, 20) << "skipping copyup" << dendl;
return true;
case STATE_COPYUP:
{
Mutex::Locker locker(m_lock);
- assert(m_pending_copyups > 0);
+ ceph_assert(m_pending_copyups > 0);
pending_copyups = --m_pending_copyups;
}
ldout(cct, 20) << "COPYUP (" << pending_copyups << " pending)"
template <typename I>
void CopyupRequest<I>::remove_from_list(Mutex &lock) {
- assert(m_ictx->copyup_list_lock.is_locked());
+ ceph_assert(m_ictx->copyup_list_lock.is_locked());
auto it = m_ictx->copyup_list.find(m_object_no);
- assert(it != m_ictx->copyup_list.end());
+ ceph_assert(it != m_ictx->copyup_list.end());
m_ictx->copyup_list.erase(it);
}
RWLock::RLocker snap_locker(m_ictx->snap_lock);
if (m_ictx->object_map != nullptr) {
bool copy_on_read = m_pending_requests.empty();
- assert(m_ictx->exclusive_lock->is_lock_owner());
+ ceph_assert(m_ictx->exclusive_lock->is_lock_owner());
RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
librados::AioCompletion *comp = librbd::util::create_rados_callback(this);
int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template <typename I>
void ImageRequest<I>::send() {
I &image_ctx = this->m_image_ctx;
- assert(m_aio_comp->is_initialized(get_aio_type()));
- assert(m_aio_comp->is_started() ^ (get_aio_type() == AIO_TYPE_FLUSH));
+ ceph_assert(m_aio_comp->is_initialized(get_aio_type()));
+ ceph_assert(m_aio_comp->is_started() ^ (get_aio_type() == AIO_TYPE_FLUSH));
CephContext *cct = image_ctx.cct;
AioCompletion *aio_comp = this->m_aio_comp;
template <typename I>
void ImageReadRequest<I>::send_image_cache_request() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.image_cache != nullptr);
+ ceph_assert(image_ctx.image_cache != nullptr);
AioCompletion *aio_comp = this->m_aio_comp;
aio_comp->set_request_count(1);
if (journaling) {
// in-flight ops are flushed prior to closing the journal
- assert(image_ctx.journal != NULL);
+ ceph_assert(image_ctx.journal != NULL);
journal_tid = append_journal_event(m_synchronous);
}
uint64_t tid = 0;
uint64_t buffer_offset = 0;
- assert(!this->m_image_extents.empty());
+ ceph_assert(!this->m_image_extents.empty());
for (auto &extent : this->m_image_extents) {
bufferlist sub_bl;
sub_bl.substr_of(m_bl, buffer_offset, extent.second);
template <typename I>
void ImageWriteRequest<I>::send_image_cache_request() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.image_cache != nullptr);
+ ceph_assert(image_ctx.image_cache != nullptr);
AioCompletion *aio_comp = this->m_aio_comp;
aio_comp->set_request_count(1);
I &image_ctx = this->m_image_ctx;
uint64_t tid = 0;
- assert(!this->m_image_extents.empty());
+ ceph_assert(!this->m_image_extents.empty());
for (auto &extent : this->m_image_extents) {
journal::EventEntry event_entry(
journal::AioDiscardEvent(extent.first,
template <typename I>
void ImageDiscardRequest<I>::send_image_cache_request() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.image_cache != nullptr);
+ ceph_assert(image_ctx.image_cache != nullptr);
AioCompletion *aio_comp = this->m_aio_comp;
aio_comp->set_request_count(this->m_image_extents.size());
template <typename I>
void ImageFlushRequest<I>::send_image_cache_request() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.image_cache != nullptr);
+ ceph_assert(image_ctx.image_cache != nullptr);
AioCompletion *aio_comp = this->m_aio_comp;
aio_comp->set_request_count(1);
I &image_ctx = this->m_image_ctx;
uint64_t tid = 0;
- assert(!this->m_image_extents.empty());
+ ceph_assert(!this->m_image_extents.empty());
for (auto &extent : this->m_image_extents) {
journal::EventEntry event_entry(journal::AioWriteSameEvent(extent.first,
extent.second,
template <typename I>
void ImageWriteSameRequest<I>::send_image_cache_request() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.image_cache != nullptr);
+ ceph_assert(image_ctx.image_cache != nullptr);
AioCompletion *aio_comp = this->m_aio_comp;
aio_comp->set_request_count(this->m_image_extents.size());
I &image_ctx = this->m_image_ctx;
uint64_t tid = 0;
- assert(this->m_image_extents.size() == 1);
+ ceph_assert(this->m_image_extents.size() == 1);
auto &extent = this->m_image_extents.front();
journal::EventEntry event_entry(
journal::AioCompareAndWriteEvent(extent.first, extent.second, m_cmp_bl,
template <typename I>
void ImageCompareAndWriteRequest<I>::send_image_cache_request() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.image_cache != nullptr);
+ ceph_assert(image_ctx.image_cache != nullptr);
AioCompletion *aio_comp = this->m_aio_comp;
aio_comp->set_request_count(1);
template <typename I>
void ImageRequestWQ<I>::shut_down(Context *on_shutdown) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
{
RWLock::WLocker locker(m_lock);
- assert(!m_shutdown);
+ ceph_assert(!m_shutdown);
m_shutdown = true;
CephContext *cct = m_image_ctx.cct;
template <typename I>
void ImageRequestWQ<I>::block_writes(Context *on_blocked) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
{
Contexts waiter_contexts;
{
RWLock::WLocker locker(m_lock);
- assert(m_write_blockers > 0);
+ ceph_assert(m_write_blockers > 0);
--m_write_blockers;
ldout(cct, 5) << &m_image_ctx << ", " << "num="
template <typename I>
void ImageRequestWQ<I>::wait_on_writes_unblocked(Context *on_unblocked) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
{
break;
}
}
- assert(throttle != nullptr);
+ ceph_assert(throttle != nullptr);
throttle->set_max(limit);
throttle->set_average(limit);
if (limit)
CephContext *cct = m_image_ctx.cct;
ldout(cct, 15) << "r=" << r << ", " << "req=" << item << dendl;
- assert(m_io_throttled.load() > 0);
+ ceph_assert(m_io_throttled.load() > 0);
item->set_throttled(flag);
if (item->were_all_throttled()) {
this->requeue(item);
auto item = reinterpret_cast<ImageDispatchSpec<I> *>(
ThreadPool::PointerWQ<ImageDispatchSpec<I> >::_void_dequeue());
- assert(peek_item == item);
+ ceph_assert(peek_item == item);
if (lock_required) {
this->get_pool_lock().Unlock();
void ImageRequestWQ<I>::finish_queued_io(ImageDispatchSpec<I> *req) {
RWLock::RLocker locker(m_lock);
if (req->is_write_op()) {
- assert(m_queued_writes > 0);
+ ceph_assert(m_queued_writes > 0);
m_queued_writes--;
} else {
- assert(m_queued_reads > 0);
+ ceph_assert(m_queued_reads > 0);
m_queued_reads--;
}
}
bool writes_blocked = false;
{
RWLock::RLocker locker(m_lock);
- assert(m_in_flight_writes > 0);
+ ceph_assert(m_in_flight_writes > 0);
if (--m_in_flight_writes == 0 &&
!m_write_blocker_contexts.empty()) {
writes_blocked = true;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << "completing shut down" << dendl;
- assert(on_shutdown != nullptr);
+ ceph_assert(on_shutdown != nullptr);
flush_image(m_image_ctx, on_shutdown);
}
template <typename I>
bool ImageRequestWQ<I>::is_lock_required(bool write_op) const {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
return ((write_op && m_require_lock_on_write) ||
(!write_op && m_require_lock_on_read));
}
template <typename I>
void ImageRequestWQ<I>::queue(ImageDispatchSpec<I> *req) {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "ictx=" << &m_image_ctx << ", "
this->requeue(req);
}
- assert(m_io_blockers.load() > 0);
+ ceph_assert(m_io_blockers.load() > 0);
--m_io_blockers;
this->signal();
}
this->requeue(req);
}
- assert(m_io_blockers.load() > 0);
+ ceph_assert(m_io_blockers.load() > 0);
--m_io_blockers;
this->signal();
}
finish(r);
break;
case DISPATCH_RESULT_INVALID:
- assert(false);
+ ceph_assert(false);
break;
}
}
}
void ObjectDispatchSpec::fail(int r) {
- assert(r < 0);
+ ceph_assert(r < 0);
dispatcher_ctx.complete(r);
}
template <typename I>
ObjectDispatcher<I>::~ObjectDispatcher() {
- assert(m_object_dispatches.empty());
+ ceph_assert(m_object_dispatches.empty());
}
template <typename I>
ldout(cct, 5) << "object_dispatch_layer=" << type << dendl;
RWLock::WLocker locker(m_lock);
- assert(type < OBJECT_DISPATCH_LAYER_LAST);
+ ceph_assert(type < OBJECT_DISPATCH_LAYER_LAST);
auto result = m_object_dispatches.insert(
{type, {object_dispatch, new AsyncOpTracker()}});
- assert(result.second);
+ ceph_assert(result.second);
}
template <typename I>
ObjectDispatchLayer object_dispatch_layer, Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << "object_dispatch_layer=" << object_dispatch_layer << dendl;
- assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST);
+ ceph_assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST);
ObjectDispatchMeta object_dispatch_meta;
{
RWLock::WLocker locker(m_lock);
auto it = m_object_dispatches.find(object_dispatch_layer);
- assert(it != m_object_dispatches.end());
+ ceph_assert(it != m_object_dispatches.end());
object_dispatch_meta = it->second;
m_object_dispatches.erase(it);
ldout(cct, 20) << "object_dispatch_spec=" << object_dispatch_spec << dendl;
auto object_dispatch_layer = object_dispatch_spec->object_dispatch_layer;
- assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST);
+ ceph_assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST);
// apply the IO request to all layers -- this method will be re-invoked
// by the dispatch layer if continuing / restarting the IO
template <typename I>
bool ObjectRequest<I>::compute_parent_extents(Extents *parent_extents,
bool read_request) {
- assert(m_ictx->snap_lock.is_locked());
- assert(m_ictx->parent_lock.is_locked());
+ ceph_assert(m_ictx->snap_lock.is_locked());
+ ceph_assert(m_ictx->parent_lock.is_locked());
m_has_parent = false;
parent_extents->clear();
int r = image_ctx->data_ctx.aio_operate(
this->m_oid, rados_completion, &op, flags, nullptr,
(this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
m_object_may_exist = true;
} else {
// should have been flushed prior to releasing lock
- assert(image_ctx->exclusive_lock->is_lock_owner());
+ ceph_assert(image_ctx->exclusive_lock->is_lock_owner());
m_object_may_exist = image_ctx->object_map->object_may_exist(
this->m_object_no);
}
I *image_ctx = this->m_ictx;
ldout(image_ctx->cct, 20) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
write_object();
}
add_write_hint(&write);
add_write_ops(&write);
- assert(write.size() != 0);
+ ceph_assert(write.size() != 0);
librados::AioCompletion *rados_completion = util::create_rados_callback<
AbstractObjectWriteRequest<I>,
int r = image_ctx->data_ctx.aio_operate(
this->m_oid, rados_completion, &write, m_snap_seq, m_snaps,
(this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
I *image_ctx = this->m_ictx;
ldout(image_ctx->cct, 20) << dendl;
- assert(!m_copyup_in_progress);
+ ceph_assert(!m_copyup_in_progress);
m_copyup_in_progress = true;
image_ctx->copyup_list_lock.Lock();
I *image_ctx = this->m_ictx;
ldout(image_ctx->cct, 20) << "r=" << r << dendl;
- assert(m_copyup_in_progress);
+ ceph_assert(m_copyup_in_progress);
m_copyup_in_progress = false;
if (r < 0) {
ldout(image_ctx->cct, 20) << dendl;
// should have been flushed prior to releasing lock
- assert(image_ctx->exclusive_lock->is_lock_owner());
+ ceph_assert(image_ctx->exclusive_lock->is_lock_owner());
image_ctx->object_map_lock.get_write();
if (image_ctx->object_map->template aio_update<
AbstractObjectWriteRequest<I>,
I *image_ctx = this->m_ictx;
ldout(image_ctx->cct, 20) << "r=" << r << dendl;
- assert(r == 0);
+ ceph_assert(r == 0);
this->finish(0);
}
Striper::extent_to_file(image_ctx->cct, &image_ctx->layout,
this->m_object_no, offset, this->m_object_len,
image_extents);
- assert(image_extents.size() == 1);
+ ceph_assert(image_extents.size() == 1);
if (m_mismatch_offset) {
*m_mismatch_offset = image_extents[0].first;
case DISCARD_ACTION_ZERO:
return "zero";
}
- assert(false);
+ ceph_assert(false);
return nullptr;
}
wr->zero(this->m_object_off, this->m_object_len);
break;
default:
- assert(false);
+ ceph_assert(false);
break;
}
}
}
void operator()(Linear &linear) const {
- assert(length <= linear.buf_len);
+ ceph_assert(length <= linear.buf_len);
linear.buf_len = length;
}
it.copy(len, static_cast<char *>(vector.iov[idx].iov_base));
offset += len;
}
- assert(offset == bl.length());
+ ceph_assert(offset == bl.length());
}
void operator()(Bufferlist &bufferlist) const {
for (auto &image_extent : image_extents) {
length += image_extent.second;
}
- assert(length == bl.length());
+ ceph_assert(length == bl.length());
aio_completion->lock.Lock();
aio_completion->read_result.m_destriper.add_partial_result(
template <typename I>
DemoteRequest<I>::~DemoteRequest() {
- assert(m_journaler == nullptr);
+ ceph_assert(m_journaler == nullptr);
}
template <typename I>
template <typename I>
Replay<I>::~Replay() {
- assert(m_in_flight_aio_flush == 0);
- assert(m_in_flight_aio_modify == 0);
- assert(m_aio_modify_unsafe_contexts.empty());
- assert(m_aio_modify_safe_contexts.empty());
- assert(m_op_events.empty());
- assert(m_in_flight_op_events == 0);
+ ceph_assert(m_in_flight_aio_flush == 0);
+ ceph_assert(m_in_flight_aio_modify == 0);
+ ceph_assert(m_aio_modify_unsafe_contexts.empty());
+ ceph_assert(m_aio_modify_safe_contexts.empty());
+ ceph_assert(m_op_events.empty());
+ ceph_assert(m_in_flight_op_events == 0);
}
template <typename I>
// safely commit any remaining AIO modify operations
if ((m_in_flight_aio_flush + m_in_flight_aio_modify) != 0) {
flush_comp = create_aio_flush_completion(nullptr);
- assert(flush_comp != nullptr);
+ ceph_assert(flush_comp != nullptr);
}
for (auto &op_event_pair : m_op_events) {
}
}
- assert(!m_shut_down);
+ ceph_assert(!m_shut_down);
m_shut_down = true;
- assert(m_flush_ctx == nullptr);
+ ceph_assert(m_flush_ctx == nullptr);
if (m_in_flight_op_events > 0 || flush_comp != nullptr) {
std::swap(m_flush_ctx, on_finish);
}
Mutex::Locker locker(m_lock);
auto op_it = m_op_events.find(op_tid);
- assert(op_it != m_op_events.end());
+ ceph_assert(op_it != m_op_events.end());
OpEvent &op_event = op_it->second;
- assert(op_event.op_in_progress &&
+ ceph_assert(op_event.op_in_progress &&
op_event.on_op_finish_event == nullptr &&
op_event.on_finish_ready == nullptr &&
op_event.on_finish_safe == nullptr);
}
OpEvent &op_event = op_it->second;
- assert(op_event.on_finish_safe == nullptr);
+ ceph_assert(op_event.on_finish_safe == nullptr);
op_event.on_finish_ready = on_ready;
op_event.on_finish_safe = on_safe;
op_in_progress = op_event.op_in_progress;
m_aio_modify_safe_contexts.insert(on_safe);
} else {
// IO is safely stored on disk
- assert(m_in_flight_aio_modify > 0);
+ ceph_assert(m_in_flight_aio_modify > 0);
--m_in_flight_aio_modify;
if (m_on_aio_ready != nullptr) {
Context *on_flush = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(m_in_flight_aio_flush > 0);
- assert(m_in_flight_aio_modify >= on_safe_ctxs.size());
+ ceph_assert(m_in_flight_aio_flush > 0);
+ ceph_assert(m_in_flight_aio_modify >= on_safe_ctxs.size());
--m_in_flight_aio_flush;
m_in_flight_aio_modify -= on_safe_ctxs.size();
return nullptr;
}
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
if (m_op_events.count(op_tid) != 0) {
lderr(cct) << ": duplicate op tid detected: " << op_tid << dendl;
{
Mutex::Locker locker(m_lock);
auto op_it = m_op_events.find(op_tid);
- assert(op_it != m_op_events.end());
+ ceph_assert(op_it != m_op_events.end());
op_event = std::move(op_it->second);
m_op_events.erase(op_it);
if (m_shut_down) {
- assert(m_flush_ctx != nullptr);
+ ceph_assert(m_flush_ctx != nullptr);
shutting_down = true;
}
}
- assert(op_event.on_start_ready == nullptr || (r < 0 && r != -ERESTART));
+ ceph_assert(op_event.on_start_ready == nullptr || (r < 0 && r != -ERESTART));
if (op_event.on_start_ready != nullptr) {
// blocking op event failed before it became ready
- assert(op_event.on_finish_ready == nullptr &&
+ ceph_assert(op_event.on_finish_ready == nullptr &&
op_event.on_finish_safe == nullptr);
op_event.on_start_ready->complete(0);
} else {
// event kicked off by OpFinishEvent
- assert((op_event.on_finish_ready != nullptr &&
+ ceph_assert((op_event.on_finish_ready != nullptr &&
op_event.on_finish_safe != nullptr) || shutting_down);
}
Context *on_flush = nullptr;
{
Mutex::Locker locker(m_lock);
- assert(m_in_flight_op_events > 0);
+ ceph_assert(m_in_flight_op_events > 0);
--m_in_flight_op_events;
if (m_in_flight_op_events == 0 &&
(m_in_flight_aio_flush + m_in_flight_aio_modify) == 0) {
std::set<int> &&filters) {
Mutex::Locker locker(m_lock);
CephContext *cct = m_image_ctx.cct;
- assert(m_on_aio_ready == nullptr);
+ ceph_assert(m_on_aio_ready == nullptr);
if (m_shut_down) {
ldout(cct, 5) << ": ignoring event after shut down" << dendl;
if (m_in_flight_aio_modify == IN_FLIGHT_IO_HIGH_WATER_MARK) {
ldout(cct, 10) << ": hit AIO replay high-water mark: pausing replay"
<< dendl;
- assert(m_on_aio_ready == nullptr);
+ ceph_assert(m_on_aio_ready == nullptr);
std::swap(m_on_aio_ready, on_ready);
}
template <typename I>
io::AioCompletion *Replay<I>::create_aio_flush_completion(Context *on_safe) {
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
if (m_shut_down) {
template<typename I>
void StandardPolicy<I>::allocate_tag_on_lock(Context *on_finish) {
- assert(m_image_ctx->journal != nullptr);
+ ceph_assert(m_image_ctx->journal != nullptr);
if (!m_image_ctx->journal->is_tag_owner()) {
lderr(m_image_ctx->cct) << "local image not promoted" << dendl;
size_t i = 0;
for (auto &it : cpp_images) {
- assert(i < max);
+ ceph_assert(i < max);
const std::string &image_id = it.first;
image_ids[i] = strdup(image_id.c_str());
mirror_image_status_cpp_to_c(it.second, &images[i]);
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_lock>(this);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
create_rados_callback<klass, &klass::handle_get_watchers>(this);
m_out_bl.clear();
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_break_lock>(this);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
create_rados_callback<klass, &klass::handle_get_lockers>(this);
m_out_bl.clear();
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = create_rados_callback<
ReacquireRequest, &ReacquireRequest::handle_set_cookie>(this);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_unlock>(this);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
}
std::string encode_lock_cookie(uint64_t watch_handle) {
- assert(watch_handle != 0);
+ ceph_assert(watch_handle != 0);
std::ostringstream ss;
ss << WATCHER_LOCK_COOKIE_PREFIX << " " << watch_handle;
return ss.str();
create_rados_callback<klass, &klass::handle_get_mirror_image>(this);
m_out_bl.clear();
int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
create_rados_callback<klass, &klass::handle_set_mirror_image>(this);
m_out_bl.clear();
int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
ldout(cct, 10) << this << " " << __func__ << dendl;
// Not primary -- shouldn't have the journal open
- assert(m_image_ctx->journal == nullptr);
+ ceph_assert(m_image_ctx->journal == nullptr);
using klass = DisableRequest<I>;
Context *ctx = util::create_context_callback<
Mutex::Locker locker(m_lock);
- assert(m_current_ops.empty());
+ ceph_assert(m_current_ops.empty());
for (auto client : m_clients) {
journal::ClientData client_data;
ldout(cct, 10) << this << " " << __func__ << ": client_id=" << client_id
<< ", snap_name=" << snap_name << dendl;
- assert(m_lock.is_locked());
+ ceph_assert(m_lock.is_locked());
m_current_ops[client_id]++;
Mutex::Locker locker(m_lock);
- assert(m_current_ops[client_id] > 0);
+ ceph_assert(m_current_ops[client_id] > 0);
m_current_ops[client_id]--;
if (*result < 0 && *result != -ENOENT) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- assert(m_lock.is_locked());
- assert(m_current_ops[client_id] == 0);
+ ceph_assert(m_lock.is_locked());
+ ceph_assert(m_current_ops[client_id] == 0);
Context *ctx = create_context_callback(
&DisableRequest<I>::handle_unregister_client, client_id);
librados::AioCompletion *comp = create_rados_callback(ctx);
int r = m_image_ctx->md_ctx.aio_operate(header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
Mutex::Locker locker(m_lock);
- assert(m_current_ops[client_id] == 0);
+ ceph_assert(m_current_ops[client_id] == 0);
m_current_ops.erase(client_id);
if (*result < 0 && *result != -ENOENT) {
create_rados_callback<klass, &klass::handle_remove_mirror_image>(this);
m_out_bl.clear();
int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
create_rados_callback<klass, &klass::handle_get_mirror_image>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
create_rados_callback<klass, &klass::handle_set_mirror_image>(this);
m_out_bl.clear();
int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp = create_rados_callback<
GetInfoRequest<I>, &GetInfoRequest<I>::handle_get_mirror_image>(this);
int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp = create_rados_callback<
GetStatusRequest<I>, &GetStatusRequest<I>::handle_get_status>(this);
int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
std::string oid(ObjectMap<>::object_map_name(m_image_ctx->id, snap_id));
librados::AioCompletion *comp = create_rados_callback(gather_ctx->new_sub());
int r = m_image_ctx->md_ctx.aio_operate(oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
gather_ctx->activate();
template <typename I>
void InvalidateRequest<I>::send() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
- assert(image_ctx.snap_lock.is_wlocked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.snap_lock.is_wlocked());
uint64_t snap_flags;
int r = image_ctx.get_flags(m_snap_id, &snap_flags);
this->create_callback_completion();
r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion,
&op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_lock>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_get_lock_info>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_break_locks>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
num_objs = Striper::get_num_objects(
m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id));
}
- assert(m_on_disk_object_map.size() >= num_objs);
+ ceph_assert(m_on_disk_object_map.size() >= num_objs);
*m_object_map = m_on_disk_object_map;
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- assert(*ret_val == 0);
+ ceph_assert(*ret_val == 0);
send_load();
return nullptr;
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_load>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
- assert(*ret_val == 0);
+ ceph_assert(*ret_val == 0);
apply();
return m_on_finish;
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
- assert(*ret_val == 0);
+ ceph_assert(*ret_val == 0);
send_resize();
return nullptr;
}
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_resize>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl;
- assert(*ret_val == 0);
+ ceph_assert(*ret_val == 0);
*ret_val = -EFBIG;
m_object_map->clear();
}
Mutex::Locker locker(m_lock);
- assert(m_ref_counter == 0);
+ ceph_assert(m_ref_counter == 0);
for (auto snap_id : snap_ids) {
m_ref_counter++;
create_rados_callback<klass, &klass::handle_remove_object_map>(this);
int r = m_image_ctx->md_ctx.aio_remove(oid, comp);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
}
{
Mutex::Locker locker(m_lock);
- assert(m_ref_counter > 0);
+ ceph_assert(m_ref_counter > 0);
m_ref_counter--;
if (*result < 0 && *result != -ENOENT) {
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
}
void SnapshotCreateRequest::send_read_map() {
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.get_snap_info(m_snap_id) != NULL);
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.get_snap_info(m_snap_id) != NULL);
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op,
&m_read_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
return false;
}
} // anonymous namespace
void SnapshotRemoveRequest::send() {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.snap_lock.is_wlocked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_wlocked());
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
compute_next_snap_id();
uint64_t flags;
int r = m_image_ctx.get_flags(m_snap_id, &flags);
- assert(r == 0);
+ ceph_assert(r == 0);
if ((flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0) {
send_invalidate_next_map();
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
void SnapshotRemoveRequest::send_invalidate_next_map() {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.snap_lock.is_wlocked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_wlocked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
void SnapshotRemoveRequest::compute_next_snap_id() {
- assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
m_next_snap_id = CEPH_NOSNAP;
std::map<librados::snap_t, SnapInfo>::const_iterator it =
m_image_ctx.snap_info.find(m_snap_id);
- assert(it != m_image_ctx.snap_info.end());
+ ceph_assert(it != m_image_ctx.snap_info.end());
++it;
if (it != m_image_ctx.snap_info.end()) {
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op,
&m_read_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
Context *on_finish)
: Request(image_ctx, CEPH_NOSNAP, on_finish),
m_snap_id(snap_id), m_ret_val(0) {
- assert(snap_id != CEPH_NOSNAP);
+ ceph_assert(snap_id != CEPH_NOSNAP);
}
void send() override;
librados::AioCompletion *rados_completion =
create_rados_callback<klass, &klass::handle_unlock>(this);
int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void UpdateRequest<I>::update_object_map() {
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.object_map_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.object_map_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
// break very large requests into manageable batches
int r = m_image_ctx.md_ctx.aio_operate(
oid, rados_completion, &op, 0, snaps,
(m_trace.valid() ? m_trace.get_info() : nullptr));
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void UpdateRequest<I>::update_in_memory_object_map() {
- assert(m_image_ctx.snap_lock.is_locked());
- assert(m_image_ctx.object_map_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.object_map_lock.is_locked());
// rebuilding the object map might update on-disk only
if (m_snap_id == m_image_ctx.snap_id) {
void DisableFeaturesRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
<< dendl;
create_rados_callback<klass, &klass::handle_get_mirror_mode>(this);
m_out_bl.clear();
int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
create_rados_callback<klass, &klass::handle_get_mirror_image>(this);
m_out_bl.clear();
int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
<< dendl;
}
- assert(m_journal != nullptr);
+ ceph_assert(m_journal != nullptr);
delete m_journal;
m_journal = nullptr;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_set_features>(this);
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
void EnableFeaturesRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
<< dendl;
create_rados_callback<klass, &klass::handle_get_mirror_mode>(this);
m_out_bl.clear();
int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_set_features>(this);
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
int send() override {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
if (image_ctx.exclusive_lock != nullptr &&
template <typename I>
void FlattenRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " send" << dendl;
template <typename I>
bool FlattenRequest<I>::send_detach_child() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
// if there are no snaps, remove from the children object as well
template <typename I>
bool FlattenRequest<I>::send_update_header() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " send_update_header" << dendl;
m_state = STATE_UPDATE_HEADER;
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
{
librados::AioCompletion *rados_completion = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
return false;
}
template <typename I>
void MetadataRemoveRequest<I>::send_metadata_remove() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
librados::AioCompletion *comp = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template <typename I>
void MetadataSetRequest<I>::send_metadata_set() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
librados::AioCompletion *comp = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
int send() override {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
if (image_ctx.exclusive_lock != nullptr &&
void start_async_op() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << dendl;
- assert(m_async_op == nullptr);
+ ceph_assert(m_async_op == nullptr);
m_async_op = new io::AsyncOperation();
m_async_op->start_op(image_ctx);
void migrate_object() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
auto ctx = create_context_callback<
req->send();
} else {
- assert(image_ctx.parent != nullptr);
+ ceph_assert(image_ctx.parent != nullptr);
auto req = deep_copy::ObjectCopyRequest<I>::create(
image_ctx.parent, image_ctx.migration_parent, &image_ctx,
template <typename I>
void MigrateRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << dendl;
void MigrateRequest<I>::migrate_objects() {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
uint64_t overlap_objects = get_num_overlap_objects();
return true;
}
journal::Event create_event(uint64_t op_tid) const override {
- assert(0);
+ ceph_assert(0);
return journal::UnknownEvent();
}
void send_list_snaps() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
ldout(image_ctx.cct, 5) << m_oid
<< " C_VerifyObjectCallback::send_list_snaps"
<< dendl;
librados::AioCompletion *comp = util::create_rados_callback(this);
int r = m_io_ctx.aio_operate(m_oid, comp, &op, NULL);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
uint64_t next_valid_snap_id(uint64_t snap_id) {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.snap_lock.is_locked());
+ ceph_assert(image_ctx.snap_lock.is_locked());
std::map<librados::snap_t, SnapInfo>::iterator it =
image_ctx.snap_info.lower_bound(snap_id);
RWLock::RLocker owner_locker(image_ctx.owner_lock);
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
RWLock::RLocker snap_locker(image_ctx.snap_lock);
- assert(image_ctx.object_map != nullptr);
+ ceph_assert(image_ctx.object_map != nullptr);
RWLock::WLocker l(image_ctx.object_map_lock);
uint8_t state = (*image_ctx.object_map)[m_object_no];
if (state != new_state) {
int r = 0;
- assert(m_handle_mismatch);
+ ceph_assert(m_handle_mismatch);
r = m_handle_mismatch(image_ctx, m_object_no, state, new_state);
if (r) {
lderr(cct) << "object map error: object "
template <typename I>
void ObjectMapIterateRequest<I>::send_verify_objects() {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
uint64_t snap_id;
template <typename I>
uint64_t ObjectMapIterateRequest<I>::get_image_size() const {
- assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
if (m_image_ctx.snap_id == CEPH_NOSNAP) {
if (!m_image_ctx.resize_reqs.empty()) {
return m_image_ctx.resize_reqs.front()->get_image_size();
true,
this->create_callback_context());
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
req->send();
}
template <typename I>
void RebuildObjectMapRequest<I>::send_resize_object_map() {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
m_image_ctx.snap_lock.get_read();
- assert(m_image_ctx.object_map != nullptr);
+ ceph_assert(m_image_ctx.object_map != nullptr);
uint64_t size = get_image_size();
uint64_t num_objects = Striper::get_num_objects(m_image_ctx.layout, size);
m_state = STATE_RESIZE_OBJECT_MAP;
// should have been canceled prior to releasing lock
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
m_image_ctx.object_map->aio_resize(size, OBJECT_NONEXISTENT,
RWLock::RLocker l(m_image_ctx.owner_lock);
// should have been canceled prior to releasing lock
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
ldout(cct, 5) << this << " send_trim_image" << dendl;
m_state = STATE_TRIM_IMAGE;
uint64_t orig_size;
{
RWLock::RLocker l(m_image_ctx.snap_lock);
- assert(m_image_ctx.object_map != nullptr);
+ ceph_assert(m_image_ctx.object_map != nullptr);
new_size = get_image_size();
orig_size = m_image_ctx.get_object_size() *
template <typename I>
void RebuildObjectMapRequest<I>::send_verify_objects() {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
m_state = STATE_VERIFY_OBJECTS;
template <typename I>
void RebuildObjectMapRequest<I>::send_save_object_map() {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " send_save_object_map" << dendl;
m_state = STATE_SAVE_OBJECT_MAP;
// should have been canceled prior to releasing lock
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
- assert(m_image_ctx.object_map != nullptr);
+ ceph_assert(m_image_ctx.object_map != nullptr);
m_image_ctx.object_map->aio_save(this->create_callback_context());
}
template <typename I>
void RebuildObjectMapRequest<I>::send_update_header() {
- assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(m_image_ctx.owner_lock.is_locked());
// should have been canceled prior to releasing lock
- assert(m_image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
ldout(m_image_ctx.cct, 5) << this << " send_update_header" << dendl;
librados::AioCompletion *comp = this->create_callback_completion();
int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
template <typename I>
uint64_t RebuildObjectMapRequest<I>::get_image_size() const {
- assert(m_image_ctx.snap_lock.is_locked());
+ ceph_assert(m_image_ctx.snap_lock.is_locked());
if (m_image_ctx.snap_id == CEPH_NOSNAP) {
if (!m_image_ctx.resize_reqs.empty()) {
return m_image_ctx.resize_reqs.front()->get_image_size();
librados::AioCompletion *rados_completion = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(m_source_oid, rados_completion, &op,
&m_header_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(m_dest_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(RBD_DIRECTORY, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
librados::AioCompletion *rados_completion = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(m_source_oid, rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void Request<I>::send() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
// automatically create the event if we don't need to worry
// about affecting concurrent IO ops
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
- assert(!m_appended_op_event || m_committed_op_event);
+ ceph_assert(!m_appended_op_event || m_committed_op_event);
AsyncRequest<I>::finish(r);
}
bool Request<I>::append_op_event() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
RWLock::RLocker snap_locker(image_ctx.snap_lock);
if (image_ctx.journal != nullptr &&
image_ctx.journal->is_journal_appending()) {
return false;
}
- assert(m_op_tid != 0);
- assert(!m_committed_op_event);
+ ceph_assert(m_op_tid != 0);
+ ceph_assert(!m_committed_op_event);
m_committed_op_event = true;
if (image_ctx.journal != nullptr &&
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
// ops will be canceled / completed before closing journal
- assert(image_ctx.journal->is_journal_ready());
+ ceph_assert(image_ctx.journal->is_journal_ready());
image_ctx.journal->commit_op_event(m_op_tid, r,
new C_CommitOpEvent(this, r));
return true;
template <typename I>
void Request<I>::replay_op_ready(Context *on_safe) {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
- assert(image_ctx.snap_lock.is_locked());
- assert(m_op_tid != 0);
+ ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.snap_lock.is_locked());
+ ceph_assert(m_op_tid != 0);
m_appended_op_event = true;
image_ctx.journal->replay_op_ready(
template <typename I>
void Request<I>::append_op_event(Context *on_safe) {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
- assert(image_ctx.snap_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.snap_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
this->finish(r);
delete this;
} else {
- assert(!can_affect_io());
+ ceph_assert(!can_affect_io());
// haven't started the request state machine yet
RWLock::RLocker owner_locker(image_ctx.owner_lock);
bool append_op_event(T *request) {
ImageCtxT &image_ctx = this->m_image_ctx;
- assert(can_affect_io());
+ ceph_assert(can_affect_io());
RWLock::RLocker owner_locker(image_ctx.owner_lock);
RWLock::RLocker snap_locker(image_ctx.snap_lock);
if (image_ctx.journal != nullptr) {
ResizeRequest *next_req = NULL;
{
RWLock::WLocker snap_locker(image_ctx.snap_lock);
- assert(m_xlist_item.remove_myself());
+ ceph_assert(m_xlist_item.remove_myself());
if (!image_ctx.resize_reqs.empty()) {
next_req = image_ctx.resize_reqs.front();
}
template <typename I>
void ResizeRequest<I>::send() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
{
RWLock::WLocker snap_locker(image_ctx.snap_lock);
}
}
- assert(image_ctx.resize_reqs.front() == this);
+ ceph_assert(image_ctx.resize_reqs.front() == this);
m_original_size = image_ctx.size;
compute_parent_overlap();
}
template <typename I>
void ResizeRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
if (this->is_canceled()) {
this->async_complete(-ERESTART);
ldout(cct, 5) << this << " " << __func__ << dendl;
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
image_ctx.object_map->aio_resize(
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(*result == 0);
+ ceph_assert(*result == 0);
send_post_block_writes();
return nullptr;
}
<< "new_size=" << m_new_size << dendl;
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
image_ctx.object_map->aio_resize(
ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
update_size_and_overlap();
- assert(*result == 0);
+ ceph_assert(*result == 0);
return this->create_context_finisher(0);
}
// should have been canceled prior to releasing lock
RWLock::RLocker owner_locker(image_ctx.owner_lock);
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
librados::ObjectWriteOperation op;
ResizeRequest<I>, &ResizeRequest<I>::handle_update_header>(this);
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
template <typename I>
void SnapshotCreateRequest<I>::send_suspend_aio() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
RWLock::RLocker parent_locker(image_ctx.parent_lock);
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
// save current size / parent info for creating snapshot record in ImageCtx
&SnapshotCreateRequest<I>::handle_create_snap>(this);
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(*result == 0);
+ ceph_assert(*result == 0);
image_ctx.io_work_queue->unblock_writes();
return this->create_context_finisher(0);
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
- assert(m_snap_id != CEPH_NOSNAP);
+ ceph_assert(m_snap_id != CEPH_NOSNAP);
librados::AioCompletion *rados_completion = create_rados_callback<
SnapshotCreateRequest<I>,
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(m_ret_val < 0);
+ ceph_assert(m_ret_val < 0);
*result = m_ret_val;
image_ctx.io_work_queue->unblock_writes();
ldout(cct, 5) << this << " " << __func__ << dendl;
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
// immediately add a reference to the new snapshot
if (!image_ctx.migration_info.empty()) {
auto it = image_ctx.migration_info.snap_map.find(CEPH_NOSNAP);
- assert(it != image_ctx.migration_info.snap_map.end());
- assert(!it->second.empty());
+ ceph_assert(it != image_ctx.migration_info.snap_map.end());
+ ceph_assert(!it->second.empty());
if (it->second[0] == CEPH_NOSNAP) {
ldout(cct, 5) << this << " " << __func__
<< ": updating migration snap_map" << dendl;
template <typename I>
void SnapshotLimitRequest<I>::send_limit_snaps() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion,
&op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
}
template <typename I>
void SnapshotProtectRequest<I>::send_protect_snap() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
this->create_callback_completion();
r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion,
&op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
return 0;
}
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
{
RWLock::RLocker snap_locker(image_ctx.snap_lock);
RWLock::RLocker object_map_locker(image_ctx.object_map_lock);
SnapshotRemoveRequest<I>,
&SnapshotRemoveRequest<I>::handle_trash_snap>(this);
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
&SnapshotRemoveRequest<I>::handle_get_snap>(this);
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op,
&m_out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
SnapshotRemoveRequest<I>,
&SnapshotRemoveRequest<I>::handle_remove_snap>(this);
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
template <typename I>
int SnapshotRemoveRequest<I>::scan_for_parents(ParentSpec &pspec) {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.snap_lock.is_locked());
- assert(image_ctx.parent_lock.is_locked());
+ ceph_assert(image_ctx.snap_lock.is_locked());
+ ceph_assert(image_ctx.parent_lock.is_locked());
if (pspec.pool_id != -1) {
map<uint64_t, SnapInfo>::iterator it;
template <typename I>
journal::Event SnapshotRenameRequest<I>::create_event(uint64_t op_tid) const {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.snap_lock.is_locked());
+ ceph_assert(image_ctx.snap_lock.is_locked());
std::string src_snap_name;
auto snap_info_it = image_ctx.snap_info.find(m_snap_id);
template <typename I>
void SnapshotRenameRequest<I>::send_rename_snap() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
RWLock::RLocker md_locker(image_ctx.md_lock);
RWLock::RLocker snap_locker(image_ctx.snap_lock);
librados::AioCompletion *rados_completion = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid,
rados_completion, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
}
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(*result == 0);
+ ceph_assert(*result == 0);
send_rollback_object_map();
return nullptr;
}
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(*result == 0);
+ ceph_assert(*result == 0);
send_rollback_objects();
return nullptr;
}
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl;
- assert(*result == 0);
+ ceph_assert(*result == 0);
return send_invalidate_cache();
}
int send() override {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << this << " scanning pool '" << m_pool.second << "'"
util::create_rados_callback(this);
r = m_pool_ioctx.aio_operate(RBD_CHILDREN, rados_completion, &op,
&m_children_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
return 0;
}
template <typename I>
void SnapshotUnprotectRequest<I>::send_unprotect_snap_start() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
void SnapshotUnprotectRequest<I>::send_scan_pool_children() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
void SnapshotUnprotectRequest<I>::send_unprotect_snap_finish() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
librados::AioCompletion *comp = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
template <typename I>
void SnapshotUnprotectRequest<I>::send_unprotect_snap_rollback() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
librados::AioCompletion *comp = this->create_callback_completion();
int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
librados::AioCompletion *comp = this->create_callback_completion();
r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
// TODO legacy code threw a notification post UNPROTECTING update -- required?
int send() override {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
string oid = image_ctx.get_object_name(m_object_no);
int send() override {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
{
librados::AioCompletion *rados_completion =
util::create_rados_callback(this);
int r = image_ctx.data_ctx.aio_remove(oid, rados_completion);
- assert(r == 0);
+ ceph_assert(r == 0);
rados_completion->release();
return 0;
}
template<typename I>
void TrimRequest<I>::send_pre_trim() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
if (m_delete_start >= m_num_objects) {
send_clean_boundary();
<< " num_objects=" << m_num_objects << dendl;
m_state = STATE_PRE_TRIM;
- assert(image_ctx.exclusive_lock->is_lock_owner());
+ ceph_assert(image_ctx.exclusive_lock->is_lock_owner());
RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
template<typename I>
void TrimRequest<I>::send_copyup_objects() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
::SnapContext snapc;
bool has_snapshots;
snapc = image_ctx.snapc;
has_snapshots = !image_ctx.snaps.empty();
int r = image_ctx.get_parent_overlap(CEPH_NOSNAP, &parent_overlap);
- assert(r == 0);
+ ceph_assert(r == 0);
}
// copyup is only required for portion of image that overlaps parent
template <typename I>
void TrimRequest<I>::send_remove_objects() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
ldout(image_ctx.cct, 5) << this << " send_remove_objects: "
<< " delete_start=" << m_delete_start
template<typename I>
void TrimRequest<I>::send_post_trim() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
{
RWLock::RLocker snap_locker(image_ctx.snap_lock);
<< " num_objects=" << m_num_objects << dendl;
m_state = STATE_POST_TRIM;
- assert(image_ctx.exclusive_lock->is_lock_owner());
+ ceph_assert(image_ctx.exclusive_lock->is_lock_owner());
RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
template <typename I>
void TrimRequest<I>::send_clean_boundary() {
I &image_ctx = this->m_image_ctx;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
if (m_delete_off <= m_new_size) {
send_finish(0);
}
// should have been canceled prior to releasing lock
- assert(image_ctx.exclusive_lock == nullptr ||
+ ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
uint64_t delete_len = m_delete_off - m_new_size;
ldout(image_ctx.cct, 5) << this << " send_clean_boundary: "
auto aio_comp = create_rados_callback<
MoveRequest<I>, &MoveRequest<I>::handle_trash_add>(this);
int r = m_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
MoveRequest<I>, &MoveRequest<I>::handle_remove_id>(this);
int r = m_io_ctx.aio_remove(util::id_obj_name(m_trash_image_spec.name),
aio_comp);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
auto aio_comp = create_rados_callback<
MoveRequest<I>, &MoveRequest<I>::handle_directory_remove>(this);
int r = m_io_ctx.aio_operate(RBD_DIRECTORY, aio_comp, &op);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
Notifier::~Notifier() {
Mutex::Locker aio_notify_locker(m_aio_notify_lock);
- assert(m_pending_aio_notifies == 0);
+ ceph_assert(m_pending_aio_notifies == 0);
}
void Notifier::flush(Context *on_finish) {
C_AioNotify *ctx = new C_AioNotify(this, response, on_finish);
librados::AioCompletion *comp = util::create_rados_callback(ctx);
int r = m_ioctx.aio_notify(m_oid, comp, bl, NOTIFY_TIMEOUT, &ctx->out_bl);
- assert(r == 0);
+ ceph_assert(r == 0);
comp->release();
}
ldout(m_cct, 20) << "r=" << r << dendl;
Mutex::Locker aio_notify_locker(m_aio_notify_lock);
- assert(m_pending_aio_notifies > 0);
+ ceph_assert(m_pending_aio_notifies > 0);
--m_pending_aio_notifies;
ldout(m_cct, 20) << "pending=" << m_pending_aio_notifies << dendl;
}
void RewatchRequest::unwatch() {
- assert(m_watch_lock.is_wlocked());
- assert(*m_watch_handle != 0);
+ ceph_assert(m_watch_lock.is_wlocked());
+ ceph_assert(*m_watch_handle != 0);
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
ldout(cct, 10) << dendl;
librados::AioCompletion *aio_comp = create_rados_callback<
RewatchRequest, &RewatchRequest::handle_unwatch>(this);
int r = m_ioctx.aio_unwatch(watch_handle, aio_comp);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}
librados::AioCompletion *aio_comp = create_rados_callback<
RewatchRequest, &RewatchRequest::handle_rewatch>(this);
int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_rewatch_handle, m_watch_ctx);
- assert(r == 0);
+ ceph_assert(r == 0);
aio_comp->release();
}