From e0afb5ac114d3742181136b93aa50f38cb0a32fe Mon Sep 17 00:00:00 2001 From: "Adam C. Emerson" Date: Thu, 23 Aug 2018 11:23:21 -0400 Subject: [PATCH] rbd: Use ceph_assert for asserts. Signed-off-by: Adam C. Emerson --- src/krbd.cc | 6 +- src/librbd/AsyncObjectThrottle.cc | 2 +- src/librbd/AsyncRequest.cc | 4 +- src/librbd/BlockGuard.h | 2 +- src/librbd/DeepCopyRequest.cc | 12 +- src/librbd/ExclusiveLock.cc | 16 +-- src/librbd/ImageCtx.cc | 82 +++++------ src/librbd/ImageState.cc | 70 ++++----- src/librbd/ImageWatcher.cc | 60 ++++---- src/librbd/Journal.cc | 136 +++++++++--------- src/librbd/Journal.h | 2 +- src/librbd/LibrbdAdminSocketHook.cc | 2 +- src/librbd/LibrbdWriteback.cc | 8 +- src/librbd/ManagedLock.cc | 58 ++++---- src/librbd/ManagedLock.h | 34 ++--- src/librbd/MirroringWatcher.cc | 4 +- src/librbd/ObjectMap.cc | 68 ++++----- src/librbd/ObjectMap.h | 2 +- src/librbd/Operations.cc | 68 ++++----- src/librbd/TaskFinisher.h | 2 +- src/librbd/TrashWatcher.cc | 4 +- src/librbd/Utils.h | 2 +- src/librbd/Watcher.cc | 26 ++-- src/librbd/api/DiffIterate.cc | 8 +- src/librbd/api/Group.cc | 2 +- src/librbd/api/Image.cc | 2 +- src/librbd/api/Migration.cc | 12 +- .../cache/ObjectCacherObjectDispatch.cc | 2 +- src/librbd/deep_copy/ImageCopyRequest.cc | 6 +- src/librbd/deep_copy/ObjectCopyRequest.cc | 68 ++++----- src/librbd/deep_copy/SetHeadRequest.cc | 8 +- src/librbd/deep_copy/SnapshotCopyRequest.cc | 12 +- src/librbd/deep_copy/SnapshotCreateRequest.cc | 2 +- src/librbd/exclusive_lock/AutomaticPolicy.cc | 4 +- .../exclusive_lock/PostAcquireRequest.cc | 8 +- .../exclusive_lock/PreAcquireRequest.cc | 2 +- .../exclusive_lock/PreReleaseRequest.cc | 6 +- src/librbd/exclusive_lock/StandardPolicy.cc | 4 +- src/librbd/image/CloneRequest.cc | 18 +-- src/librbd/image/CloseRequest.cc | 8 +- src/librbd/image/CreateRequest.cc | 26 ++-- src/librbd/image/DetachChildRequest.cc | 10 +- src/librbd/image/ListWatchersRequest.cc | 6 +- src/librbd/image/RefreshParentRequest.cc | 16 +-- 
src/librbd/image/RefreshRequest.cc | 50 +++---- src/librbd/image/RemoveRequest.cc | 22 +-- src/librbd/image/SetFlagsRequest.cc | 2 +- src/librbd/image/SetSnapRequest.cc | 6 +- src/librbd/image_watcher/NotifyLockOwner.cc | 2 +- src/librbd/internal.cc | 36 ++--- src/librbd/io/AioCompletion.cc | 24 ++-- src/librbd/io/AioCompletion.h | 10 +- src/librbd/io/AsyncOperation.cc | 6 +- src/librbd/io/AsyncOperation.h | 2 +- src/librbd/io/CopyupRequest.cc | 26 ++-- src/librbd/io/ImageRequest.cc | 28 ++-- src/librbd/io/ImageRequestWQ.cc | 32 ++--- src/librbd/io/ObjectDispatchSpec.cc | 4 +- src/librbd/io/ObjectDispatcher.cc | 12 +- src/librbd/io/ObjectRequest.cc | 24 ++-- src/librbd/io/ObjectRequest.h | 4 +- src/librbd/io/ReadResult.cc | 6 +- src/librbd/journal/DemoteRequest.cc | 2 +- src/librbd/journal/Replay.cc | 50 +++---- src/librbd/journal/StandardPolicy.cc | 2 +- src/librbd/librbd.cc | 2 +- src/librbd/managed_lock/AcquireRequest.cc | 2 +- src/librbd/managed_lock/BreakRequest.cc | 4 +- src/librbd/managed_lock/GetLockerRequest.cc | 2 +- src/librbd/managed_lock/ReacquireRequest.cc | 2 +- src/librbd/managed_lock/ReleaseRequest.cc | 2 +- src/librbd/managed_lock/Utils.cc | 2 +- src/librbd/mirror/DisableRequest.cc | 22 +-- src/librbd/mirror/EnableRequest.cc | 4 +- src/librbd/mirror/GetInfoRequest.cc | 2 +- src/librbd/mirror/GetStatusRequest.cc | 2 +- src/librbd/object_map/CreateRequest.cc | 2 +- src/librbd/object_map/InvalidateRequest.cc | 6 +- src/librbd/object_map/LockRequest.cc | 6 +- src/librbd/object_map/RefreshRequest.cc | 14 +- src/librbd/object_map/RemoveRequest.cc | 6 +- src/librbd/object_map/ResizeRequest.cc | 2 +- .../object_map/SnapshotCreateRequest.cc | 10 +- .../object_map/SnapshotRemoveRequest.cc | 20 +-- .../object_map/SnapshotRollbackRequest.cc | 4 +- .../object_map/SnapshotRollbackRequest.h | 2 +- src/librbd/object_map/UnlockRequest.cc | 2 +- src/librbd/object_map/UpdateRequest.cc | 10 +- .../operation/DisableFeaturesRequest.cc | 10 +- 
src/librbd/operation/EnableFeaturesRequest.cc | 6 +- src/librbd/operation/FlattenRequest.cc | 14 +- src/librbd/operation/MetadataRemoveRequest.cc | 4 +- src/librbd/operation/MetadataSetRequest.cc | 4 +- src/librbd/operation/MigrateRequest.cc | 14 +- src/librbd/operation/MigrateRequest.h | 2 +- src/librbd/operation/ObjectMapIterate.cc | 18 +-- .../operation/RebuildObjectMapRequest.cc | 26 ++-- src/librbd/operation/RenameRequest.cc | 8 +- src/librbd/operation/Request.cc | 24 ++-- src/librbd/operation/Request.h | 2 +- src/librbd/operation/ResizeRequest.cc | 20 +-- src/librbd/operation/SnapshotCreateRequest.cc | 18 +-- src/librbd/operation/SnapshotLimitRequest.cc | 4 +- .../operation/SnapshotProtectRequest.cc | 4 +- src/librbd/operation/SnapshotRemoveRequest.cc | 12 +- src/librbd/operation/SnapshotRenameRequest.cc | 6 +- .../operation/SnapshotRollbackRequest.cc | 6 +- .../operation/SnapshotUnprotectRequest.cc | 18 +-- src/librbd/operation/TrimRequest.cc | 28 ++-- src/librbd/trash/MoveRequest.cc | 6 +- src/librbd/watcher/Notifier.cc | 6 +- src/librbd/watcher/RewatchRequest.cc | 8 +- 112 files changed, 824 insertions(+), 824 deletions(-) diff --git a/src/krbd.cc b/src/krbd.cc index e9bf701d0fb..41ab9e60434 100644 --- a/src/krbd.cc +++ b/src/krbd.cc @@ -227,13 +227,13 @@ static int wait_for_udev_add(struct udev_monitor *mon, const char *pool, const char *this_major = udev_device_get_property_value(dev, "MAJOR"); const char *this_minor = udev_device_get_property_value(dev, "MINOR"); - assert(!minor ^ have_minor_attr()); + ceph_assert(!minor ^ have_minor_attr()); if (strcmp(this_major, major) == 0 && (!minor || strcmp(this_minor, minor) == 0)) { string name = get_kernel_rbd_name(udev_device_get_sysname(bus_dev)); - assert(strcmp(udev_device_get_devnode(dev), name.c_str()) == 0); + ceph_assert(strcmp(udev_device_get_devnode(dev), name.c_str()) == 0); *pname = name; udev_device_unref(dev); @@ -363,7 +363,7 @@ static int devno_to_krbd_id(struct udev *udev, dev_t devno, string 
*pid) } /* make sure there is only one match */ - assert(!udev_list_entry_get_next(l)); + ceph_assert(!udev_list_entry_get_next(l)); dev = udev_device_new_from_syspath(udev, udev_list_entry_get_name(l)); if (!dev) { diff --git a/src/librbd/AsyncObjectThrottle.cc b/src/librbd/AsyncObjectThrottle.cc index 1f09091e8f5..4a5ae561903 100644 --- a/src/librbd/AsyncObjectThrottle.cc +++ b/src/librbd/AsyncObjectThrottle.cc @@ -25,7 +25,7 @@ AsyncObjectThrottle::AsyncObjectThrottle( template void AsyncObjectThrottle::start_ops(uint64_t max_concurrent) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); bool complete; { Mutex::Locker l(m_lock); diff --git a/src/librbd/AsyncRequest.cc b/src/librbd/AsyncRequest.cc index a1e4202997d..8a76a226474 100644 --- a/src/librbd/AsyncRequest.cc +++ b/src/librbd/AsyncRequest.cc @@ -12,7 +12,7 @@ template AsyncRequest::AsyncRequest(T &image_ctx, Context *on_finish) : m_image_ctx(image_ctx), m_on_finish(on_finish), m_canceled(false), m_xlist_item(this) { - assert(m_on_finish != NULL); + ceph_assert(m_on_finish != NULL); start_request(); } @@ -52,7 +52,7 @@ void AsyncRequest::finish_request() { decltype(m_image_ctx.async_requests_waiters) waiters; { Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock); - assert(m_xlist_item.remove_myself()); + ceph_assert(m_xlist_item.remove_myself()); if (m_image_ctx.async_requests.empty()) { waiters = std::move(m_image_ctx.async_requests_waiters); diff --git a/src/librbd/BlockGuard.h b/src/librbd/BlockGuard.h index 4c371580c1f..46c91b44b66 100644 --- a/src/librbd/BlockGuard.h +++ b/src/librbd/BlockGuard.h @@ -106,7 +106,7 @@ public: void release(BlockGuardCell *cell, BlockOperations *block_operations) { Mutex::Locker locker(m_lock); - assert(cell != nullptr); + ceph_assert(cell != nullptr); auto &detained_block_extent = reinterpret_cast( *cell); ldout(m_cct, 20) << "block_start=" diff --git a/src/librbd/DeepCopyRequest.cc b/src/librbd/DeepCopyRequest.cc 
index 56b177553f1..0813b580c92 100644 --- a/src/librbd/DeepCopyRequest.cc +++ b/src/librbd/DeepCopyRequest.cc @@ -44,8 +44,8 @@ DeepCopyRequest::DeepCopyRequest(I *src_image_ctx, I *dst_image_ctx, template DeepCopyRequest::~DeepCopyRequest() { - assert(m_snapshot_copy_request == nullptr); - assert(m_image_copy_request == nullptr); + ceph_assert(m_snapshot_copy_request == nullptr); + ceph_assert(m_image_copy_request == nullptr); } template @@ -196,7 +196,7 @@ void DeepCopyRequest::send_copy_object_map() { return; } - assert(m_dst_image_ctx->object_map != nullptr); + ceph_assert(m_dst_image_ctx->object_map != nullptr); ldout(m_cct, 20) << dendl; @@ -218,7 +218,7 @@ void DeepCopyRequest::send_copy_object_map() { handle_copy_object_map(r); finish_op_ctx->complete(0); }); - assert(m_snap_seqs->count(m_snap_id_end) > 0); + ceph_assert(m_snap_seqs->count(m_snap_id_end) > 0); librados::snap_t copy_snap_id = (*m_snap_seqs)[m_snap_id_end]; m_dst_image_ctx->object_map->rollback(copy_snap_id, ctx); m_dst_image_ctx->snap_lock.put_read(); @@ -229,7 +229,7 @@ template void DeepCopyRequest::handle_copy_object_map(int r) { ldout(m_cct, 20) << dendl; - assert(r == 0); + ceph_assert(r == 0); send_refresh_object_map(); } @@ -262,7 +262,7 @@ template void DeepCopyRequest::handle_refresh_object_map(int r) { ldout(m_cct, 20) << "r=" << r << dendl; - assert(r == 0); + ceph_assert(r == 0); { RWLock::WLocker snap_locker(m_dst_image_ctx->snap_lock); std::swap(m_dst_image_ctx->object_map, m_object_map); diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc index 5821548f17b..588ab6085e3 100644 --- a/src/librbd/ExclusiveLock.cc +++ b/src/librbd/ExclusiveLock.cc @@ -81,7 +81,7 @@ template void ExclusiveLock::unblock_requests() { Mutex::Locker locker(ML::m_lock); - assert(m_request_blocked_count > 0); + ceph_assert(m_request_blocked_count > 0); m_request_blocked_count--; if (m_request_blocked_count == 0) { m_request_blocked_ret_val = 0; @@ -92,7 +92,7 @@ void 
ExclusiveLock::unblock_requests() { template void ExclusiveLock::init(uint64_t features, Context *on_init) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); ldout(m_image_ctx.cct, 10) << dendl; { @@ -122,7 +122,7 @@ void ExclusiveLock::handle_peer_notification(int r) { } ldout(m_image_ctx.cct, 10) << dendl; - assert(ML::is_action_acquire_lock()); + ceph_assert(ML::is_action_acquire_lock()); m_acquire_lock_peer_ret_val = r; ML::execute_next_action(); @@ -130,7 +130,7 @@ void ExclusiveLock::handle_peer_notification(int r) { template Context *ExclusiveLock::start_op() { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); Mutex::Locker locker(ML::m_lock); if (!accept_ops(ML::m_lock)) { @@ -208,7 +208,7 @@ void ExclusiveLock::post_acquire_lock_handler(int r, Context *on_finish) { return; } else if (r < 0) { ML::m_lock.Lock(); - assert(ML::is_state_acquiring()); + ceph_assert(ML::is_state_acquiring()); // PostAcquire state machine will not run, so we need complete prepare m_image_ctx.state->handle_prepare_lock_complete(); @@ -254,7 +254,7 @@ void ExclusiveLock::handle_post_acquiring_lock(int r) { Mutex::Locker locker(ML::m_lock); - assert(r == 0); + ceph_assert(r == 0); // lock is owned at this point ML::set_state_post_acquiring(); @@ -267,7 +267,7 @@ void ExclusiveLock::handle_post_acquired_lock(int r) { Context *on_finish = nullptr; { Mutex::Locker locker(ML::m_lock); - assert(ML::is_state_acquiring() || ML::is_state_post_acquiring()); + ceph_assert(ML::is_state_acquiring() || ML::is_state_post_acquiring()); assert (m_pre_post_callback != nullptr); std::swap(m_pre_post_callback, on_finish); @@ -305,7 +305,7 @@ void ExclusiveLock::post_release_lock_handler(bool shutting_down, int r, if (!shutting_down) { { Mutex::Locker locker(ML::m_lock); - assert(ML::is_state_pre_releasing() || ML::is_state_releasing()); + ceph_assert(ML::is_state_pre_releasing() || ML::is_state_releasing()); 
} if (r >= 0) { diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc index 1552829d2e0..2c99f2cc68c 100644 --- a/src/librbd/ImageCtx.cc +++ b/src/librbd/ImageCtx.cc @@ -158,11 +158,11 @@ public: } ImageCtx::~ImageCtx() { - assert(image_watcher == NULL); - assert(exclusive_lock == NULL); - assert(object_map == NULL); - assert(journal == NULL); - assert(asok_hook == NULL); + ceph_assert(image_watcher == NULL); + ceph_assert(exclusive_lock == NULL); + ceph_assert(object_map == NULL); + ceph_assert(journal == NULL); + ceph_assert(asok_hook == NULL); if (perfcounter) { perf_stop(); @@ -183,8 +183,8 @@ public: } void ImageCtx::init() { - assert(!header_oid.empty()); - assert(old_format || !id.empty()); + ceph_assert(!header_oid.empty()); + ceph_assert(old_format || !id.empty()); asok_hook = new LibrbdAdminSocketHook(this); @@ -198,7 +198,7 @@ public: trace_endpoint.copy_name(pname); perf_start(pname); - assert(image_watcher == NULL); + ceph_assert(image_watcher == NULL); image_watcher = new ImageWatcher<>(*this); } @@ -298,7 +298,7 @@ public: } void ImageCtx::perf_stop() { - assert(perfcounter); + ceph_assert(perfcounter); cct->get_perfcounters_collection()->remove(perfcounter); delete perfcounter; } @@ -320,7 +320,7 @@ public: } int ImageCtx::snap_set(uint64_t in_snap_id) { - assert(snap_lock.is_wlocked()); + ceph_assert(snap_lock.is_wlocked()); auto it = snap_info.find(in_snap_id); if (in_snap_id != CEPH_NOSNAP && it != snap_info.end()) { snap_id = in_snap_id; @@ -335,7 +335,7 @@ public: void ImageCtx::snap_unset() { - assert(snap_lock.is_wlocked()); + ceph_assert(snap_lock.is_wlocked()); snap_id = CEPH_NOSNAP; snap_namespace = {}; snap_name = ""; @@ -346,7 +346,7 @@ public: snap_t ImageCtx::get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace, const string& in_snap_name) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); auto it = snap_ids.find({in_snap_namespace, in_snap_name}); if (it != snap_ids.end()) { return 
it->second; @@ -356,7 +356,7 @@ public: const SnapInfo* ImageCtx::get_snap_info(snap_t in_snap_id) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); map::const_iterator it = snap_info.find(in_snap_id); if (it != snap_info.end()) @@ -367,7 +367,7 @@ public: int ImageCtx::get_snap_name(snap_t in_snap_id, string *out_snap_name) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *out_snap_name = info->name; @@ -379,7 +379,7 @@ public: int ImageCtx::get_snap_namespace(snap_t in_snap_id, cls::rbd::SnapshotNamespace *out_snap_namespace) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *out_snap_namespace = info->snap_namespace; @@ -401,7 +401,7 @@ public: uint64_t ImageCtx::get_current_size() const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); return size; } @@ -448,20 +448,20 @@ public: void ImageCtx::set_access_timestamp(utime_t at) { - assert(timestamp_lock.is_wlocked()); + ceph_assert(timestamp_lock.is_wlocked()); access_timestamp = at; } void ImageCtx::set_modify_timestamp(utime_t mt) { - assert(timestamp_lock.is_locked()); + ceph_assert(timestamp_lock.is_locked()); modify_timestamp = mt; } int ImageCtx::is_snap_protected(snap_t in_snap_id, bool *is_protected) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *is_protected = @@ -474,7 +474,7 @@ public: int ImageCtx::is_snap_unprotected(snap_t in_snap_id, bool *is_unprotected) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *is_unprotected = @@ -490,7 +490,7 @@ public: const ParentInfo &parent, uint8_t protection_status, uint64_t flags, utime_t timestamp) { - assert(snap_lock.is_wlocked()); + 
ceph_assert(snap_lock.is_wlocked()); snaps.push_back(id); SnapInfo info(in_snap_name, in_snap_namespace, in_size, parent, protection_status, flags, timestamp); @@ -502,7 +502,7 @@ public: string in_snap_name, snap_t id) { - assert(snap_lock.is_wlocked()); + ceph_assert(snap_lock.is_wlocked()); snaps.erase(std::remove(snaps.begin(), snaps.end(), id), snaps.end()); snap_info.erase(id); snap_ids.erase({in_snap_namespace, in_snap_name}); @@ -510,7 +510,7 @@ public: uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); if (in_snap_id == CEPH_NOSNAP) { if (!resize_reqs.empty() && resize_reqs.front()->shrinking()) { @@ -527,7 +527,7 @@ public: } uint64_t ImageCtx::get_object_count(snap_t in_snap_id) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); uint64_t image_size = get_image_size(in_snap_id); return Striper::get_num_objects(layout, image_size); } @@ -541,7 +541,7 @@ public: bool ImageCtx::test_features(uint64_t in_features, const RWLock &in_snap_lock) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); return ((features & in_features) == in_features); } @@ -554,13 +554,13 @@ public: bool ImageCtx::test_op_features(uint64_t in_op_features, const RWLock &in_snap_lock) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); return ((op_features & in_op_features) == in_op_features); } int ImageCtx::get_flags(librados::snap_t _snap_id, uint64_t *_flags) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); if (_snap_id == CEPH_NOSNAP) { *_flags = flags; return 0; @@ -582,7 +582,7 @@ public: int ImageCtx::test_flags(uint64_t flags, const RWLock &in_snap_lock, bool *flags_set) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); uint64_t snap_flags; int r = get_flags(snap_id, &snap_flags); if (r < 0) { @@ -594,7 +594,7 @@ public: int ImageCtx::update_flags(snap_t 
in_snap_id, uint64_t flag, bool enabled) { - assert(snap_lock.is_wlocked()); + ceph_assert(snap_lock.is_wlocked()); uint64_t *_flags; if (in_snap_id == CEPH_NOSNAP) { _flags = &flags; @@ -616,8 +616,8 @@ public: const ParentInfo* ImageCtx::get_parent_info(snap_t in_snap_id) const { - assert(snap_lock.is_locked()); - assert(parent_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); + ceph_assert(parent_lock.is_locked()); if (in_snap_id == CEPH_NOSNAP) return &parent_md; const SnapInfo *info = get_snap_info(in_snap_id); @@ -652,7 +652,7 @@ public: int ImageCtx::get_parent_overlap(snap_t in_snap_id, uint64_t *overlap) const { - assert(snap_lock.is_locked()); + ceph_assert(snap_lock.is_locked()); const ParentInfo *info = get_parent_info(in_snap_id); if (info) { *overlap = info->overlap; @@ -662,7 +662,7 @@ public: } void ImageCtx::register_watch(Context *on_finish) { - assert(image_watcher != NULL); + ceph_assert(image_watcher != NULL); image_watcher->register_watch(on_finish); } @@ -927,27 +927,27 @@ public: } exclusive_lock::Policy *ImageCtx::get_exclusive_lock_policy() const { - assert(owner_lock.is_locked()); - assert(exclusive_lock_policy != nullptr); + ceph_assert(owner_lock.is_locked()); + ceph_assert(exclusive_lock_policy != nullptr); return exclusive_lock_policy; } void ImageCtx::set_exclusive_lock_policy(exclusive_lock::Policy *policy) { - assert(owner_lock.is_wlocked()); - assert(policy != nullptr); + ceph_assert(owner_lock.is_wlocked()); + ceph_assert(policy != nullptr); delete exclusive_lock_policy; exclusive_lock_policy = policy; } journal::Policy *ImageCtx::get_journal_policy() const { - assert(snap_lock.is_locked()); - assert(journal_policy != nullptr); + ceph_assert(snap_lock.is_locked()); + ceph_assert(journal_policy != nullptr); return journal_policy; } void ImageCtx::set_journal_policy(journal::Policy *policy) { - assert(snap_lock.is_wlocked()); - assert(policy != nullptr); + ceph_assert(snap_lock.is_wlocked()); + ceph_assert(policy != 
nullptr); delete journal_policy; journal_policy = policy; } diff --git a/src/librbd/ImageState.cc b/src/librbd/ImageState.cc index 12f7700bae0..fd1aa357475 100644 --- a/src/librbd/ImageState.cc +++ b/src/librbd/ImageState.cc @@ -31,10 +31,10 @@ public: } ~ImageUpdateWatchers() { - assert(m_watchers.empty()); - assert(m_in_flight.empty()); - assert(m_pending_unregister.empty()); - assert(m_on_shut_down_finish == nullptr); + ceph_assert(m_watchers.empty()); + ceph_assert(m_in_flight.empty()); + ceph_assert(m_pending_unregister.empty()); + ceph_assert(m_on_shut_down_finish == nullptr); destroy_work_queue(); } @@ -63,7 +63,7 @@ public: ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl; { Mutex::Locker locker(m_lock); - assert(m_on_shut_down_finish == nullptr); + ceph_assert(m_on_shut_down_finish == nullptr); m_watchers.clear(); if (!m_in_flight.empty()) { m_on_shut_down_finish = on_finish; @@ -79,7 +79,7 @@ public: ldout(m_cct, 20) << __func__ << ": watcher=" << watcher << dendl; Mutex::Locker locker(m_lock); - assert(m_on_shut_down_finish == nullptr); + ceph_assert(m_on_shut_down_finish == nullptr); create_work_queue(); @@ -98,7 +98,7 @@ public: r = -ENOENT; } else { if (m_in_flight.find(handle) != m_in_flight.end()) { - assert(m_pending_unregister.find(handle) == m_pending_unregister.end()); + ceph_assert(m_pending_unregister.find(handle) == m_pending_unregister.end()); m_pending_unregister[handle] = on_finish; on_finish = nullptr; } @@ -123,7 +123,7 @@ public: } void send_notify(uint64_t handle, UpdateWatchCtx *watcher) { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle=" << handle << ", watcher=" << watcher << dendl; @@ -152,7 +152,7 @@ public: Mutex::Locker locker(m_lock); auto in_flight_it = m_in_flight.find(handle); - assert(in_flight_it != m_in_flight.end()); + ceph_assert(in_flight_it != m_in_flight.end()); m_in_flight.erase(in_flight_it); // If there is no more 
in flight notifications for this watcher @@ -166,7 +166,7 @@ public: } if (m_in_flight.empty()) { - assert(m_pending_unregister.empty()); + ceph_assert(m_pending_unregister.empty()); if (m_on_shut_down_finish != nullptr) { std::swap(m_on_shut_down_finish, on_shut_down_finish); } @@ -239,7 +239,7 @@ ImageState::ImageState(I *image_ctx) template ImageState::~ImageState() { - assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED); + ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED); delete m_update_watchers; } @@ -261,7 +261,7 @@ void ImageState::open(uint64_t flags, Context *on_finish) { ldout(cct, 20) << __func__ << dendl; m_lock.Lock(); - assert(m_state == STATE_UNINITIALIZED); + ceph_assert(m_state == STATE_UNINITIALIZED); m_open_flags = flags; Action action(ACTION_TYPE_OPEN); @@ -286,7 +286,7 @@ void ImageState::close(Context *on_finish) { ldout(cct, 20) << __func__ << dendl; m_lock.Lock(); - assert(!is_closed()); + ceph_assert(!is_closed()); Action action(ACTION_TYPE_CLOSE); action.refresh_seq = m_refresh_seq; @@ -366,7 +366,7 @@ int ImageState::refresh_if_required() { template const typename ImageState::Action * ImageState::find_pending_refresh() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); auto it = std::find_if(m_actions_contexts.rbegin(), m_actions_contexts.rend(), @@ -479,7 +479,7 @@ bool ImageState::is_transition_state() const { template bool ImageState::is_closed() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return ((m_state == STATE_CLOSED) || (!m_actions_contexts.empty() && @@ -488,7 +488,7 @@ bool ImageState::is_closed() const { template void ImageState::append_context(const Action &action, Context *context) { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); ActionContexts *action_contexts = nullptr; for (auto &action_ctxs : m_actions_contexts) { @@ -510,8 +510,8 @@ void ImageState::append_context(const Action &action, Context *context) { template 
void ImageState::execute_next_action_unlock() { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); + ceph_assert(m_lock.is_locked()); + ceph_assert(!m_actions_contexts.empty()); switch (m_actions_contexts.front().first.action_type) { case ACTION_TYPE_OPEN: send_open_unlock(); @@ -535,7 +535,7 @@ void ImageState::execute_next_action_unlock() { template void ImageState::execute_action_unlock(const Action &action, Context *on_finish) { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); append_context(action, on_finish); if (!is_transition_state()) { @@ -547,8 +547,8 @@ void ImageState::execute_action_unlock(const Action &action, template void ImageState::complete_action_unlock(State next_state, int r) { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); + ceph_assert(m_lock.is_locked()); + ceph_assert(!m_actions_contexts.empty()); ActionContexts action_contexts(std::move(m_actions_contexts.front())); m_actions_contexts.pop_front(); @@ -572,7 +572,7 @@ void ImageState::complete_action_unlock(State next_state, int r) { template void ImageState::send_open_unlock() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -603,7 +603,7 @@ void ImageState::handle_open(int r) { template void ImageState::send_close_unlock() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -634,14 +634,14 @@ void ImageState::handle_close(int r) { template void ImageState::send_refresh_unlock() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; m_state = STATE_REFRESHING; - assert(!m_actions_contexts.empty()); + ceph_assert(!m_actions_contexts.empty()); auto &action_context = m_actions_contexts.front().first; - 
assert(action_context.action_type == ACTION_TYPE_REFRESH); + ceph_assert(action_context.action_type == ACTION_TYPE_REFRESH); Context *ctx = create_async_context_callback( *m_image_ctx, create_context_callback< @@ -659,11 +659,11 @@ void ImageState::handle_refresh(int r) { ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; m_lock.Lock(); - assert(!m_actions_contexts.empty()); + ceph_assert(!m_actions_contexts.empty()); ActionContexts &action_contexts(m_actions_contexts.front()); - assert(action_contexts.first.action_type == ACTION_TYPE_REFRESH); - assert(m_last_refresh <= action_contexts.first.refresh_seq); + ceph_assert(action_contexts.first.action_type == ACTION_TYPE_REFRESH); + ceph_assert(m_last_refresh <= action_contexts.first.refresh_seq); if (r == -ERESTART) { ldout(cct, 5) << "incomplete refresh: not updating sequence" << dendl; @@ -677,13 +677,13 @@ void ImageState::handle_refresh(int r) { template void ImageState::send_set_snap_unlock() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); m_state = STATE_SETTING_SNAP; - assert(!m_actions_contexts.empty()); + ceph_assert(!m_actions_contexts.empty()); ActionContexts &action_contexts(m_actions_contexts.front()); - assert(action_contexts.first.action_type == ACTION_TYPE_SET_SNAP); + ceph_assert(action_contexts.first.action_type == ACTION_TYPE_SET_SNAP); CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": " @@ -717,12 +717,12 @@ void ImageState::send_prepare_lock_unlock() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); m_state = STATE_PREPARING_LOCK; - assert(!m_actions_contexts.empty()); + ceph_assert(!m_actions_contexts.empty()); ActionContexts &action_contexts(m_actions_contexts.front()); - assert(action_contexts.first.action_type == ACTION_TYPE_LOCK); + ceph_assert(action_contexts.first.action_type == ACTION_TYPE_LOCK); Context *on_ready = 
action_contexts.first.on_ready; m_lock.Unlock(); diff --git a/src/librbd/ImageWatcher.cc b/src/librbd/ImageWatcher.cc index 946c781cb75..8575cd05af9 100644 --- a/src/librbd/ImageWatcher.cc +++ b/src/librbd/ImageWatcher.cc @@ -165,8 +165,8 @@ template void ImageWatcher::notify_flatten(uint64_t request_id, ProgressContext &prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); AsyncRequestId async_request_id(get_client_id(), request_id); @@ -180,8 +180,8 @@ void ImageWatcher::notify_resize(uint64_t request_id, uint64_t size, bool allow_shrink, ProgressContext &prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); AsyncRequestId async_request_id(get_client_id(), request_id); @@ -195,8 +195,8 @@ template void ImageWatcher::notify_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string &snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); notify_lock_owner(SnapCreatePayload(snap_namespace, snap_name), on_finish); @@ -206,8 +206,8 @@ template void ImageWatcher::notify_snap_rename(const snapid_t &src_snap_id, const std::string &dst_snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); notify_lock_owner(SnapRenamePayload(src_snap_id, 
dst_snap_name), on_finish); @@ -217,8 +217,8 @@ template void ImageWatcher::notify_snap_remove(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string &snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); notify_lock_owner(SnapRemovePayload(snap_namespace, snap_name), on_finish); @@ -228,8 +228,8 @@ template void ImageWatcher::notify_snap_protect(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string &snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); notify_lock_owner(SnapProtectPayload(snap_namespace, snap_name), on_finish); @@ -239,8 +239,8 @@ template void ImageWatcher::notify_snap_unprotect(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string &snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); notify_lock_owner(SnapUnprotectPayload(snap_namespace, snap_name), on_finish); @@ -250,8 +250,8 @@ template void ImageWatcher::notify_rebuild_object_map(uint64_t request_id, ProgressContext &prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); AsyncRequestId async_request_id(get_client_id(), request_id); @@ -264,8 +264,8 @@ void ImageWatcher::notify_rebuild_object_map(uint64_t request_id, template void 
ImageWatcher::notify_rename(const std::string &image_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); notify_lock_owner(RenamePayload(image_name), on_finish); @@ -274,8 +274,8 @@ void ImageWatcher::notify_rename(const std::string &image_name, template void ImageWatcher::notify_update_features(uint64_t features, bool enabled, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); notify_lock_owner(UpdateFeaturesPayload(features, enabled), on_finish); @@ -285,8 +285,8 @@ template void ImageWatcher::notify_migrate(uint64_t request_id, ProgressContext &prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); AsyncRequestId async_request_id(get_client_id(), request_id); @@ -332,7 +332,7 @@ void ImageWatcher::cancel_async_requests() { template void ImageWatcher::set_owner_client_id(const ClientId& client_id) { - assert(m_owner_client_id_lock.is_locked()); + ceph_assert(m_owner_client_id_lock.is_locked()); m_owner_client_id = client_id; ldout(m_image_ctx.cct, 10) << this << " current lock owner: " << m_owner_client_id << dendl; @@ -371,13 +371,13 @@ void ImageWatcher::notify_released_lock() { template void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); if (m_image_ctx.exclusive_lock == nullptr) { // exclusive lock dynamically disabled via image refresh return; } 
- assert(m_image_ctx.exclusive_lock && + ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); RWLock::RLocker watch_locker(this->m_watch_lock); @@ -454,8 +454,8 @@ void ImageWatcher::handle_request_lock(int r) { template void ImageWatcher::notify_lock_owner(const Payload& payload, Context *on_finish) { - assert(on_finish != nullptr); - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(on_finish != nullptr); + ceph_assert(m_image_ctx.owner_lock.is_locked()); bufferlist bl; encode(NotifyMessage(payload), bl); @@ -506,8 +506,8 @@ void ImageWatcher::notify_async_request(const AsyncRequestId &async_request_i const Payload& payload, ProgressContext& prog_ctx, Context *on_finish) { - assert(on_finish != nullptr); - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(on_finish != nullptr); + ceph_assert(m_image_ctx.owner_lock.is_locked()); ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id << dendl; @@ -649,7 +649,7 @@ bool ImageWatcher::handle_payload(const RequestLockPayload &payload, bool accept_request = m_image_ctx.exclusive_lock->accept_requests(&r); if (accept_request) { - assert(r == 0); + ceph_assert(r == 0); Mutex::Locker owner_client_id_locker(m_owner_client_id_lock); if (!m_owner_client_id.is_valid()) { return true; diff --git a/src/librbd/Journal.cc b/src/librbd/Journal.cc index b52d47492c7..d29ecfb2e25 100644 --- a/src/librbd/Journal.cc +++ b/src/librbd/Journal.cc @@ -352,15 +352,15 @@ Journal::~Journal() { delete m_work_queue; } - assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED); - assert(m_journaler == NULL); - assert(m_journal_replay == NULL); - assert(m_wait_for_state_contexts.empty()); + ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED); + ceph_assert(m_journaler == NULL); + ceph_assert(m_journal_replay == NULL); + ceph_assert(m_wait_for_state_contexts.empty()); } template bool Journal::is_journal_supported(I &image_ctx) { - 
assert(image_ctx.snap_lock.is_locked()); + ceph_assert(image_ctx.snap_lock.is_locked()); return ((image_ctx.features & RBD_FEATURE_JOURNALING) && !image_ctx.read_only && image_ctx.snap_id == CEPH_NOSNAP); } @@ -532,7 +532,7 @@ bool Journal::is_journal_replaying() const { template bool Journal::is_journal_replaying(const Mutex &) const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return (m_state == STATE_REPLAYING || m_state == STATE_FLUSHING_REPLAY || m_state == STATE_FLUSHING_RESTART || @@ -541,7 +541,7 @@ bool Journal::is_journal_replaying(const Mutex &) const { template bool Journal::is_journal_appending() const { - assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); Mutex::Locker locker(m_lock); return (m_state == STATE_READY && !m_image_ctx.get_journal_policy()->append_disabled()); @@ -571,7 +571,7 @@ void Journal::open(Context *on_finish) { journal::ObjectDispatch::create(&m_image_ctx, this)); Mutex::Locker locker(m_lock); - assert(m_state == STATE_UNINITIALIZED); + ceph_assert(m_state == STATE_UNINITIALIZED); wait_for_steady_state(on_finish); create_journaler(); } @@ -607,7 +607,7 @@ void Journal::close(Context *on_finish) { m_listener_notify = false; m_listener_cond.Signal(); - assert(m_state != STATE_UNINITIALIZED); + ceph_assert(m_state != STATE_UNINITIALIZED); if (m_state == STATE_CLOSED) { on_finish->complete(m_error_result); return; @@ -629,7 +629,7 @@ bool Journal::is_tag_owner() const { template bool Journal::is_tag_owner(const Mutex &) const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return (m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID); } @@ -654,7 +654,7 @@ void Journal::allocate_local_tag(Context *on_finish) { predecessor.mirror_uuid = LOCAL_MIRROR_UUID; { Mutex::Locker locker(m_lock); - assert(m_journaler != nullptr && is_tag_owner(m_lock)); + ceph_assert(m_journaler != nullptr && is_tag_owner(m_lock)); cls::journal::Client client; int r = 
m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client); @@ -667,7 +667,7 @@ void Journal::allocate_local_tag(Context *on_finish) { // since we are primary, populate the predecessor with our known commit // position - assert(m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID); + ceph_assert(m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID); if (!client.commit_position.object_positions.empty()) { auto position = client.commit_position.object_positions.front(); predecessor.commit_valid = true; @@ -688,7 +688,7 @@ void Journal::allocate_tag(const std::string &mirror_uuid, << dendl; Mutex::Locker locker(m_lock); - assert(m_journaler != nullptr); + ceph_assert(m_journaler != nullptr); journal::TagData tag_data; tag_data.mirror_uuid = mirror_uuid; @@ -709,7 +709,7 @@ void Journal::flush_commit_position(Context *on_finish) { ldout(cct, 20) << this << " " << __func__ << dendl; Mutex::Locker locker(m_lock); - assert(m_journaler != nullptr); + ceph_assert(m_journaler != nullptr); m_journaler->flush_commit_position(on_finish); } @@ -717,7 +717,7 @@ template uint64_t Journal::append_write_event(uint64_t offset, size_t length, const bufferlist &bl, bool flush_entry) { - assert(m_max_append_size > journal::AioWriteEvent::get_fixed_size()); + ceph_assert(m_max_append_size > journal::AioWriteEvent::get_fixed_size()); uint64_t max_write_data_size = m_max_append_size - journal::AioWriteEvent::get_fixed_size(); @@ -762,20 +762,20 @@ uint64_t Journal::append_io_events(journal::EventType event_type, const Bufferlists &bufferlists, uint64_t offset, size_t length, bool flush_entry, int filter_ret_val) { - assert(!bufferlists.empty()); + ceph_assert(!bufferlists.empty()); uint64_t tid; { Mutex::Locker locker(m_lock); - assert(m_state == STATE_READY); + ceph_assert(m_state == STATE_READY); tid = ++m_event_tid; - assert(tid != 0); + ceph_assert(tid != 0); } Futures futures; for (auto &bl : bufferlists) { - assert(bl.length() <= m_max_append_size); + ceph_assert(bl.length() <= m_max_append_size); 
futures.push_back(m_journaler->append(m_tag_tid, bl)); } @@ -819,7 +819,7 @@ void Journal::commit_io_event(uint64_t tid, int r) { template void Journal::commit_io_event_extent(uint64_t tid, uint64_t offset, uint64_t length, int r) { - assert(length > 0); + ceph_assert(length > 0); CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", " @@ -857,7 +857,7 @@ template void Journal::append_op_event(uint64_t op_tid, journal::EventEntry &&event_entry, Context *on_safe) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); bufferlist bl; event_entry.timestamp = ceph_clock_now(); @@ -866,12 +866,12 @@ void Journal::append_op_event(uint64_t op_tid, Future future; { Mutex::Locker locker(m_lock); - assert(m_state == STATE_READY); + ceph_assert(m_state == STATE_READY); future = m_journaler->append(m_tag_tid, bl); // delay committing op event to ensure consistent replay - assert(m_op_futures.count(op_tid) == 0); + ceph_assert(m_op_futures.count(op_tid) == 0); m_op_futures[op_tid] = future; } @@ -904,11 +904,11 @@ void Journal::commit_op_event(uint64_t op_tid, int r, Context *on_safe) { Future op_finish_future; { Mutex::Locker locker(m_lock); - assert(m_state == STATE_READY); + ceph_assert(m_state == STATE_READY); // ready to commit op event auto it = m_op_futures.find(op_tid); - assert(it != m_op_futures.end()); + ceph_assert(it != m_op_futures.end()); op_start_future = it->second; m_op_futures.erase(it); @@ -927,7 +927,7 @@ void Journal::replay_op_ready(uint64_t op_tid, Context *on_resume) { { Mutex::Locker locker(m_lock); - assert(m_journal_replay != nullptr); + ceph_assert(m_journal_replay != nullptr); m_journal_replay->replay_op_ready(op_tid, on_resume); } } @@ -962,11 +962,11 @@ void Journal::wait_event(uint64_t tid, Context *on_safe) { template typename Journal::Future Journal::wait_event(Mutex &lock, uint64_t tid, Context *on_safe) { - assert(m_event_lock.is_locked()); + 
ceph_assert(m_event_lock.is_locked()); CephContext *cct = m_image_ctx.cct; typename Events::iterator it = m_events.find(tid); - assert(it != m_events.end()); + ceph_assert(it != m_events.end()); Event &event = it->second; if (event.safe) { @@ -989,8 +989,8 @@ void Journal::start_external_replay(journal::Replay **journal_replay, ldout(cct, 20) << this << " " << __func__ << dendl; Mutex::Locker locker(m_lock); - assert(m_state == STATE_READY); - assert(m_journal_replay == nullptr); + ceph_assert(m_state == STATE_READY); + ceph_assert(m_journal_replay == nullptr); on_start = util::create_async_context_callback(m_image_ctx, on_start); on_start = new FunctionContext( @@ -1011,8 +1011,8 @@ void Journal::handle_start_external_replay(int r, ldout(cct, 20) << this << " " << __func__ << dendl; Mutex::Locker locker(m_lock); - assert(m_state == STATE_READY); - assert(m_journal_replay == nullptr); + ceph_assert(m_state == STATE_READY); + ceph_assert(m_journal_replay == nullptr); if (r < 0) { lderr(cct) << this << " " << __func__ << ": " @@ -1037,8 +1037,8 @@ void Journal::stop_external_replay() { ldout(cct, 20) << this << " " << __func__ << dendl; Mutex::Locker locker(m_lock); - assert(m_journal_replay != nullptr); - assert(m_state == STATE_REPLAYING); + ceph_assert(m_journal_replay != nullptr); + ceph_assert(m_state == STATE_REPLAYING); delete m_journal_replay; m_journal_replay = nullptr; @@ -1056,9 +1056,9 @@ void Journal::create_journaler() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - assert(m_lock.is_locked()); - assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY); - assert(m_journaler == NULL); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY); + ceph_assert(m_journaler == NULL); transition_state(STATE_INITIALIZING, 0); ::journal::Settings settings; @@ -1089,7 +1089,7 @@ void Journal::destroy_journaler(int r) { CephContext *cct = 
m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); delete m_journal_replay; m_journal_replay = NULL; @@ -1114,8 +1114,8 @@ void Journal::recreate_journaler(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; - assert(m_lock.is_locked()); - assert(m_state == STATE_FLUSHING_RESTART || + ceph_assert(m_lock.is_locked()); + ceph_assert(m_state == STATE_FLUSHING_RESTART || m_state == STATE_FLUSHING_REPLAY); delete m_journal_replay; @@ -1131,8 +1131,8 @@ void Journal::recreate_journaler(int r) { template void Journal::complete_event(typename Events::iterator it, int r) { - assert(m_event_lock.is_locked()); - assert(m_state == STATE_READY); + ceph_assert(m_event_lock.is_locked()); + ceph_assert(m_state == STATE_READY); CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": tid=" << it->first << " " @@ -1146,7 +1146,7 @@ void Journal::complete_event(typename Events::iterator it, int r) { if (r < 0) { // event recorded to journal but failed to update disk, we cannot // commit this IO event. this event must be replayed. 
- assert(event.safe); + ceph_assert(event.safe); lderr(cct) << this << " " << __func__ << ": " << "failed to commit IO to disk, replay required: " << cpp_strerror(r) << dendl; @@ -1165,7 +1165,7 @@ void Journal::complete_event(typename Events::iterator it, int r) { template void Journal::start_append() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); m_journaler->start_append(m_image_ctx.journal_object_flush_interval, m_image_ctx.journal_object_flush_bytes, m_image_ctx.journal_object_flush_age); @@ -1178,7 +1178,7 @@ void Journal::handle_open(int r) { ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; Mutex::Locker locker(m_lock); - assert(m_state == STATE_INITIALIZING); + ceph_assert(m_state == STATE_INITIALIZING); if (r < 0) { lderr(cct) << this << " " << __func__ << ": " @@ -1215,7 +1215,7 @@ void Journal::handle_replay_ready() { } // only one entry should be in-flight at a time - assert(!m_processing_entry); + ceph_assert(!m_processing_entry); m_processing_entry = true; } @@ -1265,7 +1265,7 @@ void Journal::handle_replay_complete(int r) { State state; { Mutex::Locker locker(m_lock); - assert(m_state == STATE_FLUSHING_RESTART || + ceph_assert(m_state == STATE_FLUSHING_RESTART || m_state == STATE_FLUSHING_REPLAY); state = m_state; } @@ -1294,10 +1294,10 @@ void Journal::handle_replay_process_ready(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - assert(r == 0); + ceph_assert(r == 0); { Mutex::Locker locker(m_lock); - assert(m_processing_entry); + ceph_assert(m_processing_entry); m_processing_entry = false; } handle_replay_ready(); @@ -1308,7 +1308,7 @@ void Journal::handle_replay_process_safe(ReplayEntry replay_entry, int r) { CephContext *cct = m_image_ctx.cct; m_lock.Lock(); - assert(m_state == STATE_REPLAYING || + ceph_assert(m_state == STATE_REPLAYING || m_state == STATE_FLUSHING_RESTART || m_state == STATE_FLUSHING_REPLAY); @@ -1337,7 +1337,7 @@ void 
Journal::handle_replay_process_safe(ReplayEntry replay_entry, int r) { << "shut down replay" << dendl; { Mutex::Locker locker(m_lock); - assert(m_state == STATE_FLUSHING_RESTART); + ceph_assert(m_state == STATE_FLUSHING_RESTART); } m_journal_replay->shut_down(true, ctx); @@ -1364,8 +1364,8 @@ void Journal::handle_flushing_restart(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - assert(r == 0); - assert(m_state == STATE_FLUSHING_RESTART); + ceph_assert(r == 0); + ceph_assert(m_state == STATE_FLUSHING_RESTART); if (m_close_pending) { destroy_journaler(r); return; @@ -1381,7 +1381,7 @@ void Journal::handle_flushing_replay() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - assert(m_state == STATE_FLUSHING_REPLAY || m_state == STATE_FLUSHING_RESTART); + ceph_assert(m_state == STATE_FLUSHING_REPLAY || m_state == STATE_FLUSHING_RESTART); if (m_close_pending) { destroy_journaler(0); return; @@ -1404,7 +1404,7 @@ void Journal::handle_recording_stopped(int r) { ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; Mutex::Locker locker(m_lock); - assert(m_state == STATE_STOPPING); + ceph_assert(m_state == STATE_STOPPING); destroy_journaler(r); } @@ -1424,7 +1424,7 @@ void Journal::handle_journal_destroyed(int r) { delete m_journaler; m_journaler = nullptr; - assert(m_state == STATE_CLOSING || m_state == STATE_RESTARTING_REPLAY); + ceph_assert(m_state == STATE_CLOSING || m_state == STATE_RESTARTING_REPLAY); if (m_state == STATE_RESTARTING_REPLAY) { create_journaler(); return; @@ -1440,7 +1440,7 @@ void Journal::handle_io_event_safe(int r, uint64_t tid) { << "tid=" << tid << dendl; // journal will be flushed before closing - assert(m_state == STATE_READY || m_state == STATE_STOPPING); + ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING); if (r < 0) { lderr(cct) << this << " " << __func__ << ": " << "failed to commit IO event: " << cpp_strerror(r) << dendl; @@ 
-1450,7 +1450,7 @@ void Journal::handle_io_event_safe(int r, uint64_t tid) { { Mutex::Locker event_locker(m_event_lock); typename Events::iterator it = m_events.find(tid); - assert(it != m_events.end()); + ceph_assert(it != m_events.end()); Event &event = it->second; on_safe_contexts.swap(event.on_safe_contexts); @@ -1491,7 +1491,7 @@ void Journal::handle_op_event_safe(int r, uint64_t tid, << "tid=" << tid << dendl; // journal will be flushed before closing - assert(m_state == STATE_READY || m_state == STATE_STOPPING); + ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING); if (r < 0) { lderr(cct) << this << " " << __func__ << ": " << "failed to commit op event: " << cpp_strerror(r) << dendl; @@ -1506,10 +1506,10 @@ void Journal::handle_op_event_safe(int r, uint64_t tid, template void Journal::stop_recording() { - assert(m_lock.is_locked()); - assert(m_journaler != NULL); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_journaler != NULL); - assert(m_state == STATE_READY); + ceph_assert(m_state == STATE_READY); transition_state(STATE_STOPPING, 0); m_journaler->stop_append(util::create_async_context_callback( @@ -1521,7 +1521,7 @@ template void Journal::transition_state(State state, int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": new state=" << state << dendl; - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); m_state = state; if (m_error_result == 0 && r < 0) { @@ -1538,7 +1538,7 @@ void Journal::transition_state(State state, int r) { template bool Journal::is_steady_state() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); switch (m_state) { case STATE_READY: case STATE_CLOSED: @@ -1558,8 +1558,8 @@ bool Journal::is_steady_state() const { template void Journal::wait_for_steady_state(Context *on_state) { - assert(m_lock.is_locked()); - assert(!is_steady_state()); + ceph_assert(m_lock.is_locked()); + ceph_assert(!is_steady_state()); CephContext *cct = m_image_ctx.cct; 
ldout(cct, 20) << this << " " << __func__ << ": on_state=" << on_state @@ -1578,8 +1578,8 @@ int Journal::check_resync_requested(bool *do_resync) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - assert(m_lock.is_locked()); - assert(do_resync != nullptr); + ceph_assert(m_lock.is_locked()); + ceph_assert(do_resync != nullptr); cls::journal::Client client; int r = m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client); diff --git a/src/librbd/Journal.h b/src/librbd/Journal.h index 609e718bec2..0e85514fbe7 100644 --- a/src/librbd/Journal.h +++ b/src/librbd/Journal.h @@ -152,7 +152,7 @@ public: uint64_t allocate_op_tid() { uint64_t op_tid = ++m_op_tid; - assert(op_tid != 0); + ceph_assert(op_tid != 0); return op_tid; } diff --git a/src/librbd/LibrbdAdminSocketHook.cc b/src/librbd/LibrbdAdminSocketHook.cc index 0b0c2bf0bf0..15b8fb97029 100644 --- a/src/librbd/LibrbdAdminSocketHook.cc +++ b/src/librbd/LibrbdAdminSocketHook.cc @@ -93,7 +93,7 @@ bool LibrbdAdminSocketHook::call(std::string_view command, std::string_view format, bufferlist& out) { Commands::const_iterator i = commands.find(command); - assert(i != commands.end()); + ceph_assert(i != commands.end()); stringstream ss; bool r = i->second->call(&ss); out.append(ss); diff --git a/src/librbd/LibrbdWriteback.cc b/src/librbd/LibrbdWriteback.cc index 082cc22c7d0..da44fbf82d4 100644 --- a/src/librbd/LibrbdWriteback.cc +++ b/src/librbd/LibrbdWriteback.cc @@ -80,7 +80,7 @@ namespace librbd { ldout(m_cct, 20) << "C_OrderedWrite completing " << m_result << dendl; { Mutex::Locker l(m_wb_handler->m_lock); - assert(!m_result->done); + ceph_assert(!m_result->done); m_result->done = true; m_result->ret = r; m_wb_handler->complete_writes(m_result->oid); @@ -109,7 +109,7 @@ namespace librbd { void finish(int r) override { // all IO operations are flushed prior to closing the journal - assert(image_ctx->journal != nullptr); + ceph_assert(image_ctx->journal != nullptr); 
image_ctx->journal->commit_io_event_extent(journal_tid, offset, length, r); @@ -231,7 +231,7 @@ namespace librbd { uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix); // all IO operations are flushed prior to closing the journal - assert(original_journal_tid != 0 && m_ictx->journal != NULL); + ceph_assert(original_journal_tid != 0 && m_ictx->journal != NULL); Extents file_extents; Striper::extent_to_file(m_ictx->cct, &m_ictx->layout, object_no, off, @@ -254,7 +254,7 @@ namespace librbd { void LibrbdWriteback::complete_writes(const std::string& oid) { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); std::queue& results = m_writes[oid]; ldout(m_ictx->cct, 20) << "complete_writes() oid " << oid << dendl; std::list finished; diff --git a/src/librbd/ManagedLock.cc b/src/librbd/ManagedLock.cc index 5be8e0177ef..3c7431a623d 100644 --- a/src/librbd/ManagedLock.cc +++ b/src/librbd/ManagedLock.cc @@ -80,7 +80,7 @@ ManagedLock::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue, template ManagedLock::~ManagedLock() { Mutex::Locker locker(m_lock); - assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED || + ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED || m_state == STATE_UNINITIALIZED); if (m_state == STATE_UNINITIALIZED) { // never initialized -- ensure any in-flight ops are complete @@ -89,7 +89,7 @@ ManagedLock::~ManagedLock() { m_async_op_tracker.wait_for_ops(&ctx); ctx.wait(); } - assert(m_async_op_tracker.empty()); + ceph_assert(m_async_op_tracker.empty()); } template @@ -102,7 +102,7 @@ bool ManagedLock::is_lock_owner() const { template bool ManagedLock::is_lock_owner(Mutex &lock) const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); bool lock_owner; @@ -128,13 +128,13 @@ void ManagedLock::shut_down(Context *on_shut_down) { ldout(m_cct, 10) << dendl; Mutex::Locker locker(m_lock); - assert(!is_state_shutdown()); + ceph_assert(!is_state_shutdown()); if (m_state == 
STATE_WAITING_FOR_REGISTER) { // abort stalled acquire lock state ldout(m_cct, 10) << "woke up waiting acquire" << dendl; Action active_action = get_active_action(); - assert(active_action == ACTION_TRY_LOCK || + ceph_assert(active_action == ACTION_TRY_LOCK || active_action == ACTION_ACQUIRE_LOCK); complete_active_action(STATE_UNLOCKED, -ESHUTDOWN); } @@ -208,7 +208,7 @@ void ManagedLock::reacquire_lock(Context *on_reacquired) { // restart the acquire lock process now that watch is valid ldout(m_cct, 10) << "woke up waiting acquire" << dendl; Action active_action = get_active_action(); - assert(active_action == ACTION_TRY_LOCK || + ceph_assert(active_action == ACTION_TRY_LOCK || active_action == ACTION_ACQUIRE_LOCK); execute_next_action(); } else if (!is_state_shutdown() && @@ -370,7 +370,7 @@ bool ManagedLock::is_transition_state() const { template void ManagedLock::append_context(Action action, Context *ctx) { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); for (auto &action_ctxs : m_actions_contexts) { if (action == action_ctxs.first) { @@ -390,7 +390,7 @@ void ManagedLock::append_context(Action action, Context *ctx) { template void ManagedLock::execute_action(Action action, Context *ctx) { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); append_context(action, ctx); if (!is_transition_state()) { @@ -400,8 +400,8 @@ void ManagedLock::execute_action(Action action, Context *ctx) { template void ManagedLock::execute_next_action() { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); + ceph_assert(m_lock.is_locked()); + ceph_assert(!m_actions_contexts.empty()); switch (get_active_action()) { case ACTION_ACQUIRE_LOCK: case ACTION_TRY_LOCK: @@ -424,15 +424,15 @@ void ManagedLock::execute_next_action() { template typename ManagedLock::Action ManagedLock::get_active_action() const { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); + ceph_assert(m_lock.is_locked()); + 
ceph_assert(!m_actions_contexts.empty()); return m_actions_contexts.front().first; } template void ManagedLock::complete_active_action(State next_state, int r) { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); + ceph_assert(m_lock.is_locked()); + ceph_assert(!m_actions_contexts.empty()); ActionContexts action_contexts(std::move(m_actions_contexts.front())); m_actions_contexts.pop_front(); @@ -451,7 +451,7 @@ void ManagedLock::complete_active_action(State next_state, int r) { template bool ManagedLock::is_state_shutdown() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return ((m_state == STATE_SHUTDOWN) || (!m_actions_contexts.empty() && @@ -460,7 +460,7 @@ bool ManagedLock::is_state_shutdown() const { template void ManagedLock::send_acquire_lock() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); if (m_state == STATE_LOCKED) { complete_active_action(STATE_LOCKED, 0); return; @@ -525,7 +525,7 @@ void ManagedLock::handle_acquire_lock(int r) { template void ManagedLock::handle_no_op_reacquire_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; - assert(r >= 0); + ceph_assert(r >= 0); complete_active_action(STATE_LOCKED, 0); } @@ -553,7 +553,7 @@ void ManagedLock::revert_to_unlock_state(int r) { m_work_queue, m_oid, m_cookie, new FunctionContext([this, r](int ret) { Mutex::Locker locker(m_lock); - assert(ret == 0); + ceph_assert(ret == 0); complete_active_action(STATE_UNLOCKED, r); })); m_work_queue->queue(new C_SendLockRequest>(req)); @@ -561,7 +561,7 @@ void ManagedLock::revert_to_unlock_state(int r) { template void ManagedLock::send_reacquire_lock() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); if (m_state != STATE_LOCKED) { complete_active_action(m_state, 0); @@ -607,7 +607,7 @@ void ManagedLock::handle_reacquire_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; Mutex::Locker locker(m_lock); - assert(m_state == STATE_REACQUIRING); + ceph_assert(m_state == STATE_REACQUIRING); 
if (r < 0) { if (r == -EOPNOTSUPP) { @@ -622,7 +622,7 @@ void ManagedLock::handle_reacquire_lock(int r) { // be updated on older OSDs execute_action(ACTION_RELEASE_LOCK, nullptr); - assert(!m_actions_contexts.empty()); + ceph_assert(!m_actions_contexts.empty()); ActionContexts &action_contexts(m_actions_contexts.front()); // reacquire completes when the request lock completes @@ -651,7 +651,7 @@ void ManagedLock::handle_reacquire_lock(int r) { template void ManagedLock::send_release_lock() { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); if (m_state == STATE_UNLOCKED) { complete_active_action(STATE_UNLOCKED, 0); return; @@ -672,7 +672,7 @@ void ManagedLock::handle_pre_release_lock(int r) { { Mutex::Locker locker(m_lock); - assert(m_state == STATE_PRE_RELEASING); + ceph_assert(m_state == STATE_PRE_RELEASING); m_state = STATE_RELEASING; } @@ -694,7 +694,7 @@ void ManagedLock::handle_release_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; Mutex::Locker locker(m_lock); - assert(m_state == STATE_RELEASING); + ceph_assert(m_state == STATE_RELEASING); if (r >= 0) { m_cookie = ""; @@ -719,7 +719,7 @@ void ManagedLock::handle_post_release_lock(int r) { template void ManagedLock::send_shutdown() { ldout(m_cct, 10) << dendl; - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); if (m_state == STATE_UNLOCKED) { m_state = STATE_SHUTTING_DOWN; m_work_queue->queue(new FunctionContext([this](int r) { @@ -729,7 +729,7 @@ void ManagedLock::send_shutdown() { return; } - assert(m_state == STATE_LOCKED); + ceph_assert(m_state == STATE_LOCKED); m_state = STATE_PRE_SHUTTING_DOWN; m_lock.Unlock(); @@ -765,7 +765,7 @@ void ManagedLock::handle_shutdown_pre_release(int r) { Mutex::Locker locker(m_lock); cookie = m_cookie; - assert(m_state == STATE_PRE_SHUTTING_DOWN); + ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN); m_state = STATE_SHUTTING_DOWN; } @@ -811,8 +811,8 @@ void ManagedLock::complete_shutdown(int r) { ActionContexts action_contexts; { 
Mutex::Locker locker(m_lock); - assert(m_lock.is_locked()); - assert(m_actions_contexts.size() == 1); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_actions_contexts.size() == 1); action_contexts = std::move(m_actions_contexts.front()); m_actions_contexts.pop_front(); diff --git a/src/librbd/ManagedLock.h b/src/librbd/ManagedLock.h index fdb6d43d78c..bbd932b744c 100644 --- a/src/librbd/ManagedLock.h +++ b/src/librbd/ManagedLock.h @@ -71,59 +71,59 @@ protected: mutable Mutex m_lock; inline void set_state_uninitialized() { - assert(m_lock.is_locked()); - assert(m_state == STATE_UNLOCKED); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_state == STATE_UNLOCKED); m_state = STATE_UNINITIALIZED; } inline void set_state_initializing() { - assert(m_lock.is_locked()); - assert(m_state == STATE_UNINITIALIZED); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_state == STATE_UNINITIALIZED); m_state = STATE_INITIALIZING; } inline void set_state_unlocked() { - assert(m_lock.is_locked()); - assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING); m_state = STATE_UNLOCKED; } inline void set_state_waiting_for_lock() { - assert(m_lock.is_locked()); - assert(m_state == STATE_ACQUIRING); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_state == STATE_ACQUIRING); m_state = STATE_WAITING_FOR_LOCK; } inline void set_state_post_acquiring() { - assert(m_lock.is_locked()); - assert(m_state == STATE_ACQUIRING); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_state == STATE_ACQUIRING); m_state = STATE_POST_ACQUIRING; } bool is_state_shutdown() const; inline bool is_state_acquiring() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return m_state == STATE_ACQUIRING; } inline bool is_state_post_acquiring() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return m_state == STATE_POST_ACQUIRING; } inline bool 
is_state_releasing() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return m_state == STATE_RELEASING; } inline bool is_state_pre_releasing() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return m_state == STATE_PRE_RELEASING; } inline bool is_state_locked() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return m_state == STATE_LOCKED; } inline bool is_state_waiting_for_lock() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return m_state == STATE_WAITING_FOR_LOCK; } inline bool is_action_acquire_lock() const { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); return get_active_action() == ACTION_ACQUIRE_LOCK; } diff --git a/src/librbd/MirroringWatcher.cc b/src/librbd/MirroringWatcher.cc index eadfb55cd27..757f56912ff 100644 --- a/src/librbd/MirroringWatcher.cc +++ b/src/librbd/MirroringWatcher.cc @@ -52,7 +52,7 @@ void MirroringWatcher::notify_mode_updated(librados::IoCtx &io_ctx, librados::AioCompletion *comp = create_rados_callback(on_finish); int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS, nullptr); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -82,7 +82,7 @@ void MirroringWatcher::notify_image_updated( librados::AioCompletion *comp = create_rados_callback(on_finish); int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS, nullptr); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/ObjectMap.cc b/src/librbd/ObjectMap.cc index 0a94f5a86e1..66f96768c94 100644 --- a/src/librbd/ObjectMap.cc +++ b/src/librbd/ObjectMap.cc @@ -71,23 +71,23 @@ bool ObjectMap::is_compatible(const file_layout_t& layout, uint64_t size) { template ceph::BitVector<2u>::Reference ObjectMap::operator[](uint64_t object_no) { - assert(m_image_ctx.object_map_lock.is_wlocked()); - assert(object_no < m_object_map.size()); + ceph_assert(m_image_ctx.object_map_lock.is_wlocked()); + 
ceph_assert(object_no < m_object_map.size()); return m_object_map[object_no]; } template uint8_t ObjectMap::operator[](uint64_t object_no) const { - assert(m_image_ctx.object_map_lock.is_locked()); - assert(object_no < m_object_map.size()); + ceph_assert(m_image_ctx.object_map_lock.is_locked()); + ceph_assert(object_no < m_object_map.size()); return m_object_map[object_no]; } template bool ObjectMap::object_may_exist(uint64_t object_no) const { - assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); // Fall back to default logic if object map is disabled or invalid if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, @@ -113,7 +113,7 @@ bool ObjectMap::object_may_exist(uint64_t object_no) const template bool ObjectMap::update_required(const ceph::BitVector<2>::Iterator& it, uint8_t new_state) { - assert(m_image_ctx.object_map_lock.is_wlocked()); + ceph_assert(m_image_ctx.object_map_lock.is_wlocked()); uint8_t state = *it; if ((state == new_state) || (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) || @@ -143,9 +143,9 @@ void ObjectMap::close(Context *on_finish) { template bool ObjectMap::set_object_map(ceph::BitVector<2> &target_object_map) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.snap_lock)); RWLock::RLocker object_map_locker(m_image_ctx.object_map_lock); m_object_map = target_object_map; @@ -154,8 +154,8 @@ bool ObjectMap::set_object_map(ceph::BitVector<2> &target_object_map) { template void ObjectMap::rollback(uint64_t snap_id, Context *on_finish) { - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.object_map_lock.is_wlocked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + 
ceph_assert(m_image_ctx.object_map_lock.is_wlocked()); object_map::SnapshotRollbackRequest *req = new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, on_finish); @@ -164,9 +164,9 @@ void ObjectMap::rollback(uint64_t snap_id, Context *on_finish) { template void ObjectMap::snapshot_add(uint64_t snap_id, Context *on_finish) { - assert(m_image_ctx.snap_lock.is_locked()); - assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); - assert(snap_id != CEPH_NOSNAP); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); + ceph_assert(snap_id != CEPH_NOSNAP); object_map::SnapshotCreateRequest *req = new object_map::SnapshotCreateRequest(m_image_ctx, &m_object_map, snap_id, @@ -176,9 +176,9 @@ void ObjectMap::snapshot_add(uint64_t snap_id, Context *on_finish) { template void ObjectMap::snapshot_remove(uint64_t snap_id, Context *on_finish) { - assert(m_image_ctx.snap_lock.is_wlocked()); - assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); - assert(snap_id != CEPH_NOSNAP); + ceph_assert(m_image_ctx.snap_lock.is_wlocked()); + ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); + ceph_assert(snap_id != CEPH_NOSNAP); object_map::SnapshotRemoveRequest *req = new object_map::SnapshotRemoveRequest(m_image_ctx, &m_object_map, snap_id, @@ -188,9 +188,9 @@ void ObjectMap::snapshot_remove(uint64_t snap_id, Context *on_finish) { template void ObjectMap::aio_save(Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.snap_lock)); RWLock::RLocker object_map_locker(m_image_ctx.object_map_lock); @@ -204,19 +204,19 @@ void ObjectMap::aio_save(Context *on_finish) { librados::AioCompletion *comp = 
util::create_rados_callback(on_finish); int r = m_image_ctx.md_ctx.aio_operate(oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } template void ObjectMap::aio_resize(uint64_t new_size, uint8_t default_object_state, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.snap_lock)); - assert(m_image_ctx.image_watcher != NULL); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.image_watcher != NULL); + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); object_map::ResizeRequest *req = new object_map::ResizeRequest( @@ -230,8 +230,8 @@ void ObjectMap::detained_aio_update(UpdateOperation &&op) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << dendl; - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.object_map_lock.is_wlocked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.object_map_lock.is_wlocked()); BlockGuardCell *cell; int r = m_update_guard->detain({op.start_object_no, op.end_object_no}, @@ -287,13 +287,13 @@ void ObjectMap::aio_update(uint64_t snap_id, uint64_t start_object_no, const boost::optional &current_state, const ZTracer::Trace &parent_trace, Context *on_finish) { - assert(m_image_ctx.snap_lock.is_locked()); - assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); - assert(m_image_ctx.image_watcher != nullptr); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); + ceph_assert(m_image_ctx.image_watcher != nullptr); + ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner()); - assert(snap_id != CEPH_NOSNAP || m_image_ctx.object_map_lock.is_wlocked()); - assert(start_object_no < end_object_no); + ceph_assert(snap_id != CEPH_NOSNAP || m_image_ctx.object_map_lock.is_wlocked()); + ceph_assert(start_object_no < end_object_no); CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << "start=" << start_object_no << ", " diff --git a/src/librbd/ObjectMap.h b/src/librbd/ObjectMap.h index dab91c04cf5..f00311bbb05 100644 --- a/src/librbd/ObjectMap.h +++ b/src/librbd/ObjectMap.h @@ -67,7 +67,7 @@ public: uint64_t end_object_no, uint8_t new_state, const boost::optional &current_state, const ZTracer::Trace &parent_trace, T *callback_object) { - assert(start_object_no < end_object_no); + ceph_assert(start_object_no < end_object_no); if (snap_id == CEPH_NOSNAP) { end_object_no = std::min(end_object_no, m_object_map.size()); if (start_object_no >= end_object_no) { diff --git a/src/librbd/Operations.cc b/src/librbd/Operations.cc index 531c1c0247d..8a0b00b2ab5 100644 --- a/src/librbd/Operations.cc +++ b/src/librbd/Operations.cc @@ -245,7 +245,7 @@ struct C_InvokeAsyncRequest : public Context { } void send_remote_request() { - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 20) << __func__ << dendl; @@ -280,7 +280,7 @@ struct C_InvokeAsyncRequest : public Context { } void send_local_request() { - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 20) << __func__ << dendl; @@ -370,8 +370,8 @@ int Operations::flatten(ProgressContext &prog_ctx) { template void Operations::execute_flatten(ProgressContext &prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner()); CephContext *cct = m_image_ctx.cct; @@ -402,12 +402,12 @@ void Operations::execute_flatten(ProgressContext &prog_ctx, } ::SnapContext snapc = m_image_ctx.snapc; - assert(m_image_ctx.parent != NULL); + ceph_assert(m_image_ctx.parent != NULL); uint64_t overlap; int r = m_image_ctx.get_parent_overlap(CEPH_NOSNAP, &overlap); - assert(r == 0); - assert(overlap <= m_image_ctx.size); + ceph_assert(r == 0); + ceph_assert(overlap <= m_image_ctx.size); uint64_t overlap_objects = Striper::get_num_objects(m_image_ctx.layout, overlap); @@ -449,8 +449,8 @@ int Operations::rebuild_object_map(ProgressContext &prog_ctx) { template void Operations::execute_rebuild_object_map(ProgressContext &prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); CephContext *cct = m_image_ctx.cct; @@ -496,8 +496,8 @@ template void Operations::object_map_iterate(ProgressContext &prog_ctx, operation::ObjectIterateWork handle_mismatch, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) { @@ -564,9 +564,9 @@ int Operations::rename(const char *dstname) { template void Operations::execute_rename(const std::string &dest_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); } @@ -648,8 +648,8 @@ 
template void Operations::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx, Context *on_finish, uint64_t journal_op_tid) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); CephContext *cct = m_image_ctx.cct; @@ -739,8 +739,8 @@ void Operations::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_ Context *on_finish, uint64_t journal_op_tid, bool skip_object_map) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); CephContext *cct = m_image_ctx.cct; @@ -822,7 +822,7 @@ void Operations::execute_snap_rollback(const cls::rbd::SnapshotNamespace& sna const std::string &snap_name, ProgressContext& prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name << dendl; @@ -919,10 +919,10 @@ template void Operations::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace, const std::string &snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); { if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) { - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); } } @@ -1024,9 +1024,9 @@ template void Operations::execute_snap_rename(const uint64_t src_snap_id, const std::string &dest_snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + 
ceph_assert(m_image_ctx.owner_lock.is_locked()); if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); } @@ -1121,9 +1121,9 @@ template void Operations::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace, const std::string &snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); } @@ -1216,9 +1216,9 @@ template void Operations::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace, const std::string &snap_name, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); } @@ -1284,7 +1284,7 @@ int Operations::snap_set_limit(uint64_t limit) { template void Operations::execute_snap_set_limit(const uint64_t limit, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit @@ -1398,8 +1398,8 @@ template void Operations::execute_update_features(uint64_t features, bool enabled, Context *on_finish, uint64_t journal_op_tid) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); CephContext *cct 
= m_image_ctx.cct; @@ -1474,7 +1474,7 @@ template void Operations::execute_metadata_set(const std::string &key, const std::string &value, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value=" @@ -1540,7 +1540,7 @@ int Operations::metadata_remove(const std::string &key) { template void Operations::execute_metadata_remove(const std::string &key, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl; @@ -1597,8 +1597,8 @@ int Operations::migrate(ProgressContext &prog_ctx) { template void Operations::execute_migrate(ProgressContext &prog_ctx, Context *on_finish) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); CephContext *cct = m_image_ctx.cct; @@ -1637,7 +1637,7 @@ void Operations::execute_migrate(ProgressContext &prog_ctx, template int Operations::prepare_image_update(bool request_lock) { - assert(m_image_ctx.owner_lock.is_locked() && + ceph_assert(m_image_ctx.owner_lock.is_locked() && !m_image_ctx.owner_lock.is_wlocked()); if (m_image_ctx.image_watcher == nullptr) { return -EROFS; diff --git a/src/librbd/TaskFinisher.h b/src/librbd/TaskFinisher.h index 8b9c94b56ad..410b8ee88e4 100644 --- a/src/librbd/TaskFinisher.h +++ b/src/librbd/TaskFinisher.h @@ -98,7 +98,7 @@ public: typename TaskContexts::iterator it = m_task_contexts.find(task); if (it != m_task_contexts.end()) { if (it->second.second != NULL) { - assert(m_safe_timer->cancel_event(it->second.second)); + ceph_assert(m_safe_timer->cancel_event(it->second.second)); delete 
it->second.first; } else { // task already scheduled on the finisher diff --git a/src/librbd/TrashWatcher.cc b/src/librbd/TrashWatcher.cc index a4f7ee11848..74e12e485a2 100644 --- a/src/librbd/TrashWatcher.cc +++ b/src/librbd/TrashWatcher.cc @@ -42,7 +42,7 @@ void TrashWatcher::notify_image_added( librados::AioCompletion *comp = create_rados_callback(on_finish); int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -58,7 +58,7 @@ void TrashWatcher::notify_image_removed(librados::IoCtx &io_ctx, librados::AioCompletion *comp = create_rados_callback(on_finish); int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/Utils.h b/src/librbd/Utils.h index 1bdf1b6714a..c2925a33ce0 100644 --- a/src/librbd/Utils.h +++ b/src/librbd/Utils.h @@ -175,7 +175,7 @@ public: template void wait(I &image_ctx, Context *on_finish) { - assert(m_on_finish == nullptr); + ceph_assert(m_on_finish == nullptr); on_finish = create_async_context_callback(image_ctx, on_finish); if (m_refs == 0) { diff --git a/src/librbd/Watcher.cc b/src/librbd/Watcher.cc index 9a72fc13a76..45de8d7eb44 100644 --- a/src/librbd/Watcher.cc +++ b/src/librbd/Watcher.cc @@ -45,7 +45,7 @@ struct C_UnwatchAndFlush : public Context { librados::AioCompletion *aio_comp = create_rados_callback(this); r = rados.aio_watch_flush(aio_comp); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); return; } @@ -79,7 +79,7 @@ Watcher::C_NotifyAck::C_NotifyAck(Watcher *watcher, uint64_t notify_id, void Watcher::C_NotifyAck::finish(int r) { ldout(cct, 10) << "r=" << r << dendl; - assert(r == 0); + ceph_assert(r == 0); watcher->acknowledge_notify(notify_id, handle, out); } @@ -98,20 +98,20 @@ Watcher::Watcher(librados::IoCtx& ioctx, ContextWQ *work_queue, Watcher::~Watcher() { RWLock::RLocker l(m_watch_lock); - 
assert(is_unregistered(m_watch_lock)); + ceph_assert(is_unregistered(m_watch_lock)); } void Watcher::register_watch(Context *on_finish) { ldout(m_cct, 10) << dendl; RWLock::RLocker watch_locker(m_watch_lock); - assert(is_unregistered(m_watch_lock)); + ceph_assert(is_unregistered(m_watch_lock)); m_watch_state = WATCH_STATE_REGISTERING; librados::AioCompletion *aio_comp = create_rados_callback( new C_RegisterWatch(this, on_finish)); int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_watch_handle, &m_watch_ctx); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -122,7 +122,7 @@ void Watcher::handle_register_watch(int r, Context *on_finish) { Context *unregister_watch_ctx = nullptr; { RWLock::WLocker watch_locker(m_watch_lock); - assert(m_watch_state == WATCH_STATE_REGISTERING); + ceph_assert(m_watch_state == WATCH_STATE_REGISTERING); m_watch_state = WATCH_STATE_IDLE; if (r < 0) { @@ -158,7 +158,7 @@ void Watcher::unregister_watch(Context *on_finish) { ldout(m_cct, 10) << "delaying unregister until register completed" << dendl; - assert(m_unregister_watch_ctx == nullptr); + ceph_assert(m_unregister_watch_ctx == nullptr); m_unregister_watch_ctx = new FunctionContext([this, on_finish](int r) { unregister_watch(on_finish); }); @@ -167,7 +167,7 @@ void Watcher::unregister_watch(Context *on_finish) { librados::AioCompletion *aio_comp = create_rados_callback( new C_UnwatchAndFlush(m_ioctx, on_finish)); int r = m_ioctx.aio_unwatch(m_watch_handle, aio_comp); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); m_watch_handle = 0; return; @@ -196,7 +196,7 @@ void Watcher::block_notifies(Context *on_finish) { void Watcher::unblock_notifies() { RWLock::WLocker locker(m_watch_lock); - assert(m_blocked_count > 0); + ceph_assert(m_blocked_count > 0); --m_blocked_count; ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl; } @@ -212,7 +212,7 @@ std::string Watcher::get_oid() const { void Watcher::set_oid(const string& oid) { RWLock::WLocker 
watch_locker(m_watch_lock); - assert(is_unregistered(m_watch_lock)); + ceph_assert(is_unregistered(m_watch_lock)); m_oid = oid; } @@ -243,7 +243,7 @@ void Watcher::rewatch() { Context *unregister_watch_ctx = nullptr; { RWLock::WLocker watch_locker(m_watch_lock); - assert(m_watch_state == WATCH_STATE_REWATCHING); + ceph_assert(m_watch_state == WATCH_STATE_REWATCHING); if (m_unregister_watch_ctx != nullptr) { m_watch_state = WATCH_STATE_IDLE; @@ -269,7 +269,7 @@ void Watcher::handle_rewatch(int r) { Context *unregister_watch_ctx = nullptr; { RWLock::WLocker watch_locker(m_watch_lock); - assert(m_watch_state == WATCH_STATE_REWATCHING); + ceph_assert(m_watch_state == WATCH_STATE_REWATCHING); if (m_unregister_watch_ctx != nullptr) { ldout(m_cct, 10) << "image is closing, skip rewatch" << dendl; @@ -309,7 +309,7 @@ void Watcher::handle_rewatch_callback(int r) { Context *unregister_watch_ctx = nullptr; { RWLock::WLocker watch_locker(m_watch_lock); - assert(m_watch_state == WATCH_STATE_REWATCHING); + ceph_assert(m_watch_state == WATCH_STATE_REWATCHING); if (m_unregister_watch_ctx != nullptr) { m_watch_state = WATCH_STATE_IDLE; diff --git a/src/librbd/api/DiffIterate.cc b/src/librbd/api/DiffIterate.cc index 94e347134bd..a3af7fda4b9 100644 --- a/src/librbd/api/DiffIterate.cc +++ b/src/librbd/api/DiffIterate.cc @@ -76,7 +76,7 @@ public: op.list_snaps(&m_snap_set, &m_snap_ret); int r = m_head_ctx.aio_operate(m_oid, rados_completion, &op, NULL); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -187,7 +187,7 @@ private: } opos += r->second; } - assert(opos == q->offset + q->length); + ceph_assert(opos == q->offset + q->length); } } @@ -411,7 +411,7 @@ int DiffIterate::execute() { template int DiffIterate::diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id, BitVector<2>* object_diff_state) { - assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); CephContext* cct = m_image_ctx.cct; bool diff_from_start = 
(from_snap_id == 0); @@ -433,7 +433,7 @@ int DiffIterate::diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id, if (current_snap_id != CEPH_NOSNAP) { std::map::const_iterator snap_it = m_image_ctx.snap_info.find(current_snap_id); - assert(snap_it != m_image_ctx.snap_info.end()); + ceph_assert(snap_it != m_image_ctx.snap_info.end()); current_size = snap_it->second.size; ++snap_it; diff --git a/src/librbd/api/Group.cc b/src/librbd/api/Group.cc index cfd9d86e37e..c0540881b31 100644 --- a/src/librbd/api/Group.cc +++ b/src/librbd/api/Group.cc @@ -37,7 +37,7 @@ namespace { template snap_t get_group_snap_id(I* ictx, const cls::rbd::SnapshotNamespace& in_snap_namespace) { - assert(ictx->snap_lock.is_locked()); + ceph_assert(ictx->snap_lock.is_locked()); auto it = ictx->snap_ids.lower_bound({in_snap_namespace, ""}); if (it != ictx->snap_ids.end() && it->first.first == in_snap_namespace) { return it->second; diff --git a/src/librbd/api/Image.cc b/src/librbd/api/Image.cc index fee4cde84c2..c8480872e20 100644 --- a/src/librbd/api/Image.cc +++ b/src/librbd/api/Image.cc @@ -135,7 +135,7 @@ int Image::list_children(I *ictx, const ParentSpec &parent_spec, // retrieve clone v2 children attached to this snapshot IoCtx parent_io_ctx; r = rados.ioctx_create2(parent_spec.pool_id, parent_io_ctx); - assert(r == 0); + ceph_assert(r == 0); // TODO support clone v2 parent namespaces parent_io_ctx.set_namespace(ictx->md_ctx.get_namespace()); diff --git a/src/librbd/api/Migration.cc b/src/librbd/api/Migration.cc index 516dd858655..5aff775a975 100644 --- a/src/librbd/api/Migration.cc +++ b/src/librbd/api/Migration.cc @@ -47,7 +47,7 @@ public: m_prog_ctx(prog_ctx), m_cct(reinterpret_cast(io_ctx.cct())), m_lock(util::unique_lock_name("librbd::api::MigrationProgressContext", this)) { - assert(m_prog_ctx != nullptr); + ceph_assert(m_prog_ctx != nullptr); } ~MigrationProgressContext() { @@ -99,7 +99,7 @@ private: void set_state_description() { ldout(m_cct, 20) << "state_description=" << 
m_state_description << dendl; - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); librados::ObjectWriteOperation op; cls_client::migration_set_state(&op, m_state, m_state_description); @@ -108,7 +108,7 @@ private: librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_operate(m_header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); m_in_flight_state_updates++; @@ -810,7 +810,7 @@ int Migration::abort() { ldout(m_cct, 10) << "removing dst image" << dendl; - assert(dst_image_ctx->ignore_migrating); + ceph_assert(dst_image_ctx->ignore_migrating); ThreadPool *thread_pool; ContextWQ *op_work_queue; @@ -1261,7 +1261,7 @@ int Migration::remove_group(I *image_ctx, group_info_t *group_info) { return -ENOENT; } - assert(!image_ctx->id.empty()); + ceph_assert(!image_ctx->id.empty()); ldout(m_cct, 10) << dendl; @@ -1440,7 +1440,7 @@ int Migration::remove_src_image() { } } - assert(m_src_image_ctx->ignore_migrating); + ceph_assert(m_src_image_ctx->ignore_migrating); ThreadPool *thread_pool; ContextWQ *op_work_queue; diff --git a/src/librbd/cache/ObjectCacherObjectDispatch.cc b/src/librbd/cache/ObjectCacherObjectDispatch.cc index 4e4ab73d4e1..3aec9edcc9c 100644 --- a/src/librbd/cache/ObjectCacherObjectDispatch.cc +++ b/src/librbd/cache/ObjectCacherObjectDispatch.cc @@ -42,7 +42,7 @@ struct ObjectCacherObjectDispatch::C_InvalidateCache : public Context { } void finish(int r) override { - assert(dispatcher->m_cache_lock.is_locked()); + ceph_assert(dispatcher->m_cache_lock.is_locked()); auto cct = dispatcher->m_image_ctx->cct; if (r == -EBLACKLISTED) { diff --git a/src/librbd/deep_copy/ImageCopyRequest.cc b/src/librbd/deep_copy/ImageCopyRequest.cc index 8762fc15c66..14dd45235b1 100644 --- a/src/librbd/deep_copy/ImageCopyRequest.cc +++ b/src/librbd/deep_copy/ImageCopyRequest.cc @@ -163,7 +163,7 @@ void ImageCopyRequest::send_object_copies() { template void ImageCopyRequest::send_next_object_copy() { - 
assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); if (m_canceled && m_ret_val == 0) { ldout(m_cct, 10) << "image copy canceled" << dendl; @@ -197,7 +197,7 @@ void ImageCopyRequest::handle_object_copy(uint64_t object_no, int r) { bool complete; { Mutex::Locker locker(m_lock); - assert(m_current_ops > 0); + ceph_assert(m_current_ops > 0); --m_current_ops; if (r < 0 && r != -ENOENT) { @@ -216,7 +216,7 @@ void ImageCopyRequest::handle_object_copy(uint64_t object_no, int r) { m_lock.Unlock(); m_prog_ctx->update_progress(progress_object_no, m_end_object_no); m_lock.Lock(); - assert(m_updating_progress); + ceph_assert(m_updating_progress); m_updating_progress = false; } } diff --git a/src/librbd/deep_copy/ObjectCopyRequest.cc b/src/librbd/deep_copy/ObjectCopyRequest.cc index 235263ac107..2c72fbeb7c8 100644 --- a/src/librbd/deep_copy/ObjectCopyRequest.cc +++ b/src/librbd/deep_copy/ObjectCopyRequest.cc @@ -51,7 +51,7 @@ ObjectCopyRequest::ObjectCopyRequest(I *src_image_ctx, m_dst_image_ctx(dst_image_ctx), m_cct(dst_image_ctx->cct), m_snap_map(snap_map), m_dst_object_number(dst_object_number), m_flatten(flatten), m_on_finish(on_finish) { - assert(!m_snap_map.empty()); + ceph_assert(!m_snap_map.empty()); m_src_io_ctx.dup(m_src_image_ctx->data_ctx); m_dst_io_ctx.dup(m_dst_image_ctx->data_ctx); @@ -71,7 +71,7 @@ void ObjectCopyRequest::send() { template void ObjectCopyRequest::send_list_snaps() { - assert(!m_src_objects.empty()); + ceph_assert(!m_src_objects.empty()); m_src_ono = *m_src_objects.begin(); m_src_oid = m_src_image_ctx->get_object_name(m_src_ono); @@ -88,7 +88,7 @@ void ObjectCopyRequest::send_list_snaps() { m_src_io_ctx.snap_set_read(CEPH_SNAPDIR); int r = m_src_io_ctx.aio_operate(m_src_oid, rados_completion, &op, nullptr); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -142,7 +142,7 @@ void ObjectCopyRequest::send_read_object() { // all snapshots have been read merge_write_ops(); - assert(!m_src_objects.empty()); + 
ceph_assert(!m_src_objects.empty()); m_src_objects.erase(m_src_objects.begin()); if (!m_src_objects.empty()) { @@ -190,7 +190,7 @@ void ObjectCopyRequest::send_read_object() { ldout(m_cct, 20) << "read " << m_src_oid << dendl; int r = m_src_io_ctx.aio_operate(m_src_oid, comp, &op, nullptr); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -221,7 +221,7 @@ void ObjectCopyRequest::handle_read_object(int r) { return; } - assert(!m_read_snaps.empty()); + ceph_assert(!m_read_snaps.empty()); m_read_snaps.erase(m_read_snaps.begin()); send_read_object(); @@ -238,7 +238,7 @@ void ObjectCopyRequest::send_read_from_parent() { ldout(m_cct, 20) << dendl; - assert(m_src_parent_image_ctx != nullptr); + ceph_assert(m_src_parent_image_ctx != nullptr); auto ctx = create_context_callback< ObjectCopyRequest, &ObjectCopyRequest::handle_read_from_parent>(this); @@ -263,7 +263,7 @@ void ObjectCopyRequest::handle_read_from_parent(int r) { } if (!m_read_ops.empty()) { - assert(m_read_ops.size() == 1); + ceph_assert(m_read_ops.size() == 1); auto src_snap_seq = m_read_ops.begin()->first.first; auto &copy_ops = m_read_ops.begin()->second; uint64_t offset = 0; @@ -294,7 +294,7 @@ void ObjectCopyRequest::handle_read_from_parent(int r) { template void ObjectCopyRequest::send_write_object() { - assert(!m_write_ops.empty()); + ceph_assert(!m_write_ops.empty()); auto& copy_ops = m_write_ops.begin()->second; // retrieve the destination snap context for the op @@ -303,21 +303,21 @@ void ObjectCopyRequest::send_write_object() { librados::snap_t src_snap_seq = m_write_ops.begin()->first; if (src_snap_seq != 0) { auto snap_map_it = m_snap_map.find(src_snap_seq); - assert(snap_map_it != m_snap_map.end()); + ceph_assert(snap_map_it != m_snap_map.end()); auto dst_snap_id = snap_map_it->second.front(); auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_id); - assert(dst_may_exist_it != m_dst_object_may_exist.end()); + ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end()); if
(!dst_may_exist_it->second && !copy_ops.empty()) { // if the object cannot exist, the only valid op is to remove it - assert(copy_ops.size() == 1U); - assert(copy_ops.begin()->type == COPY_OP_TYPE_REMOVE); + ceph_assert(copy_ops.size() == 1U); + ceph_assert(copy_ops.begin()->type == COPY_OP_TYPE_REMOVE); } // write snapshot context should be before actual snapshot if (snap_map_it != m_snap_map.begin()) { --snap_map_it; - assert(!snap_map_it->second.empty()); + ceph_assert(!snap_map_it->second.empty()); dst_snap_seq = snap_map_it->second.front(); dst_snap_ids = snap_map_it->second; } @@ -394,7 +394,7 @@ void ObjectCopyRequest::send_write_object() { librados::AioCompletion *comp = create_rados_callback(ctx); int r = m_dst_io_ctx.aio_operate(m_dst_oid, comp, &op, dst_snap_seq, dst_snap_ids, nullptr); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -446,7 +446,7 @@ void ObjectCopyRequest::send_update_object_map() { auto &dst_object_state = *m_dst_object_state.begin(); auto it = m_snap_map.find(dst_object_state.first); - assert(it != m_snap_map.end()); + ceph_assert(it != m_snap_map.end()); auto dst_snap_id = it->second.front(); auto object_state = dst_object_state.second; m_dst_object_state.erase(m_dst_object_state.begin()); @@ -476,7 +476,7 @@ void ObjectCopyRequest::send_update_object_map() { m_dst_image_ctx->snap_lock.put_read(); m_dst_image_ctx->owner_lock.put_read(); if (!sent) { - assert(dst_snap_id == CEPH_NOSNAP); + ceph_assert(dst_snap_id == CEPH_NOSNAP); ctx->complete(0); } } @@ -485,7 +485,7 @@ template void ObjectCopyRequest::handle_update_object_map(int r) { ldout(m_cct, 20) << "r=" << r << dendl; - assert(r == 0); + ceph_assert(r == 0); if (!m_dst_object_state.empty()) { send_update_object_map(); return; @@ -495,7 +495,7 @@ void ObjectCopyRequest::handle_update_object_map(int r) { template Context *ObjectCopyRequest::start_lock_op(RWLock &owner_lock) { - assert(m_dst_image_ctx->owner_lock.is_locked()); + 
ceph_assert(m_dst_image_ctx->owner_lock.is_locked()); if (m_dst_image_ctx->exclusive_lock == nullptr) { return new FunctionContext([](int r) {}); } @@ -508,17 +508,17 @@ uint64_t ObjectCopyRequest::src_to_dst_object_offset(uint64_t objectno, std::vector> image_extents; Striper::extent_to_file(m_cct, &m_src_image_ctx->layout, objectno, offset, 1, image_extents); - assert(image_extents.size() == 1); + ceph_assert(image_extents.size() == 1); auto dst_object_offset = image_extents.begin()->first; std::map> dst_object_extents; Striper::file_to_extents(m_cct, m_dst_image_ctx->format_string, &m_dst_image_ctx->layout, dst_object_offset, 1, 0, dst_object_extents); - assert(dst_object_extents.size() == 1); - assert(dst_object_extents.begin()->second.size() == 1); + ceph_assert(dst_object_extents.size() == 1); + ceph_assert(dst_object_extents.begin()->second.size() == 1); auto &e = *dst_object_extents.begin()->second.begin(); - assert(e.objectno == m_dst_object_number); + ceph_assert(e.objectno == m_dst_object_number); return e.offset; } @@ -542,7 +542,7 @@ void ObjectCopyRequest::compute_src_object_extents() { m_src_objects.insert(s.objectno); total += s.length; while (s.length > 0) { - assert(s.length >= stripe_unit); + ceph_assert(s.length >= stripe_unit); auto dst_object_offset = src_to_dst_object_offset(s.objectno, s.offset); m_src_object_extents[dst_object_offset] = {s.objectno, s.offset, stripe_unit}; @@ -553,7 +553,7 @@ void ObjectCopyRequest::compute_src_object_extents() { } } - assert(total == m_dst_image_ctx->layout.object_size); + ceph_assert(total == m_dst_image_ctx->layout.object_size); ldout(m_cct, 20) << m_src_object_extents.size() << " src extents" << dendl; } @@ -571,7 +571,7 @@ void ObjectCopyRequest::compute_read_ops() { librados::snap_t start_src_snap_id = 0; for (auto &pair : m_snap_map) { - assert(!pair.second.empty()); + ceph_assert(!pair.second.empty()); librados::snap_t end_src_snap_id = pair.first; librados::snap_t end_dst_snap_id = 
pair.second.front(); @@ -626,7 +626,7 @@ void ObjectCopyRequest::compute_read_ops() { // reads should be issued against the newest (existing) snapshot within // the associated snapshot object clone. writes should be issued // against the oldest snapshot in the snap_map. - assert(clone_end_snap_id >= end_src_snap_id); + ceph_assert(clone_end_snap_id >= end_src_snap_id); if (clone_end_snap_id > src_copy_point_snap_id) { // do not read past the copy point snapshot clone_end_snap_id = src_copy_point_snap_id; @@ -668,11 +668,11 @@ void ObjectCopyRequest::compute_read_ops() { << ", dst_object_offset=" << dst_object_offset << ", read: " << read_interval << dendl; - assert(exists || read_interval.empty()); + ceph_assert(exists || read_interval.empty()); for (auto it = read_interval.begin(); it != read_interval.end(); it++) { - assert(it.get_start() >= e.offset); + ceph_assert(it.get_start() >= e.offset); auto offset = it.get_start() - e.offset; ldout(m_cct, 20) << "read/write op: " << it.get_start() << "~" << it.get_len() << " dst: " @@ -772,7 +772,7 @@ void ObjectCopyRequest::compute_read_from_parent_ops( << "~" << e.length << " overlap " << parent_overlap << " parent extents " << image_extents << dendl; - assert(image_extents.size() == 1); + ceph_assert(image_extents.size() == 1); auto src_image_offset = image_extents.begin()->first; auto length = image_extents.begin()->second; @@ -822,7 +822,7 @@ void ObjectCopyRequest::merge_write_ops() { << zero_len << dendl; m_dst_zero_interval[src_snap_seq].insert(dst_offset, zero_len); } else { - assert(dst_offset == copy_op.dst_offset + copy_op.length); + ceph_assert(dst_offset == copy_op.dst_offset + copy_op.length); } m_write_ops[src_snap_seq].emplace_back(std::move(copy_op)); } @@ -842,11 +842,11 @@ void ObjectCopyRequest::compute_zero_ops() { auto &zero_interval = it.second; auto snap_map_it = m_snap_map.find(src_snap_seq); - assert(snap_map_it != m_snap_map.end()); + ceph_assert(snap_map_it != m_snap_map.end()); auto 
dst_snap_seq = snap_map_it->second.front(); auto dst_may_exist_it = m_dst_object_may_exist.find(dst_snap_seq); - assert(dst_may_exist_it != m_dst_object_may_exist.end()); + ceph_assert(dst_may_exist_it != m_dst_object_may_exist.end()); if (!dst_may_exist_it->second && prev_end_size > 0) { ldout(m_cct, 5) << "object DNE for snap_id: " << dst_snap_seq << dendl; m_write_ops[src_snap_seq].emplace_back(COPY_OP_TYPE_REMOVE, 0, 0, 0); @@ -881,7 +881,7 @@ void ObjectCopyRequest::compute_zero_ops() { for (auto e : image_extents) { prev_end_size += e.second; } - assert(prev_end_size <= m_dst_image_ctx->layout.object_size); + ceph_assert(prev_end_size <= m_dst_image_ctx->layout.object_size); } } } diff --git a/src/librbd/deep_copy/SetHeadRequest.cc b/src/librbd/deep_copy/SetHeadRequest.cc index ade87ba384d..35da7471641 100644 --- a/src/librbd/deep_copy/SetHeadRequest.cc +++ b/src/librbd/deep_copy/SetHeadRequest.cc @@ -27,7 +27,7 @@ SetHeadRequest::SetHeadRequest(I *image_ctx, uint64_t size, : m_image_ctx(image_ctx), m_size(size), m_parent_spec(spec), m_parent_overlap(parent_overlap), m_on_finish(on_finish), m_cct(image_ctx->cct) { - assert(m_parent_overlap <= m_size); + ceph_assert(m_parent_overlap <= m_size); } template @@ -68,7 +68,7 @@ void SetHeadRequest::send_set_size() { }); librados::AioCompletion *comp = create_rados_callback(ctx); int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -128,7 +128,7 @@ void SetHeadRequest::send_remove_parent() { }); librados::AioCompletion *comp = create_rados_callback(ctx); int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -181,7 +181,7 @@ void SetHeadRequest::send_set_parent() { }); librados::AioCompletion *comp = create_rados_callback(ctx); int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); 
comp->release(); } diff --git a/src/librbd/deep_copy/SnapshotCopyRequest.cc b/src/librbd/deep_copy/SnapshotCopyRequest.cc index bc32e7e67a8..943e3e7bf11 100644 --- a/src/librbd/deep_copy/SnapshotCopyRequest.cc +++ b/src/librbd/deep_copy/SnapshotCopyRequest.cc @@ -33,7 +33,7 @@ const std::string &get_snapshot_name(I *image_ctx, librados::snap_t snap_id) { librados::snap_t> &pair) { return pair.second == snap_id; }); - assert(snap_it != image_ctx->snap_ids.end()); + ceph_assert(snap_it != image_ctx->snap_ids.end()); return snap_it->first.second; } @@ -401,11 +401,11 @@ void SnapshotCopyRequest::handle_snap_create(int r) { return; } - assert(m_prev_snap_id != CEPH_NOSNAP); + ceph_assert(m_prev_snap_id != CEPH_NOSNAP); auto snap_it = m_dst_image_ctx->snap_ids.find( {cls::rbd::UserSnapshotNamespace(), m_snap_name}); - assert(snap_it != m_dst_image_ctx->snap_ids.end()); + ceph_assert(snap_it != m_dst_image_ctx->snap_ids.end()); librados::snap_t dst_snap_id = snap_it->second; ldout(m_cct, 20) << "mapping source snap id " << m_prev_snap_id << " to " @@ -445,7 +445,7 @@ void SnapshotCopyRequest::send_snap_protect() { // if destination snapshot is not protected, protect it auto snap_seq_it = m_snap_seqs.find(src_snap_id); - assert(snap_seq_it != m_snap_seqs.end()); + ceph_assert(snap_seq_it != m_snap_seqs.end()); m_dst_image_ctx->snap_lock.get_read(); bool dst_protected; @@ -595,7 +595,7 @@ template void SnapshotCopyRequest::handle_resize_object_map(int r) { ldout(m_cct, 20) << "r=" << r << dendl; - assert(r == 0); + ceph_assert(r == 0); finish(0); } @@ -651,7 +651,7 @@ Context *SnapshotCopyRequest::start_lock_op() { template Context *SnapshotCopyRequest::start_lock_op(RWLock &owner_lock) { - assert(m_dst_image_ctx->owner_lock.is_locked()); + ceph_assert(m_dst_image_ctx->owner_lock.is_locked()); if (m_dst_image_ctx->exclusive_lock == nullptr) { return new FunctionContext([](int r) {}); } diff --git a/src/librbd/deep_copy/SnapshotCreateRequest.cc 
b/src/librbd/deep_copy/SnapshotCreateRequest.cc index a9503765ac2..6e110c72c85 100644 --- a/src/librbd/deep_copy/SnapshotCreateRequest.cc +++ b/src/librbd/deep_copy/SnapshotCreateRequest.cc @@ -144,7 +144,7 @@ void SnapshotCreateRequest::send_create_object_map() { }); librados::AioCompletion *comp = create_rados_callback(ctx); int r = m_dst_image_ctx->md_ctx.aio_operate(object_map_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/exclusive_lock/AutomaticPolicy.cc b/src/librbd/exclusive_lock/AutomaticPolicy.cc index a27cf49a72c..4d5f48b1527 100644 --- a/src/librbd/exclusive_lock/AutomaticPolicy.cc +++ b/src/librbd/exclusive_lock/AutomaticPolicy.cc @@ -13,8 +13,8 @@ namespace librbd { namespace exclusive_lock { int AutomaticPolicy::lock_requested(bool force) { - assert(m_image_ctx->owner_lock.is_locked()); - assert(m_image_ctx->exclusive_lock != nullptr); + ceph_assert(m_image_ctx->owner_lock.is_locked()); + ceph_assert(m_image_ctx->exclusive_lock != nullptr); ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force << dendl; diff --git a/src/librbd/exclusive_lock/PostAcquireRequest.cc b/src/librbd/exclusive_lock/PostAcquireRequest.cc index 870cf49b3d2..bea877427ac 100644 --- a/src/librbd/exclusive_lock/PostAcquireRequest.cc +++ b/src/librbd/exclusive_lock/PostAcquireRequest.cc @@ -257,7 +257,7 @@ void PostAcquireRequest::handle_close_object_map(int r) { ldout(cct, 10) << "r=" << r << dendl; // object map should never result in an error - assert(r == 0); + ceph_assert(r == 0); revert(); finish(); } @@ -266,10 +266,10 @@ template void PostAcquireRequest::apply() { { RWLock::WLocker snap_locker(m_image_ctx.snap_lock); - assert(m_image_ctx.object_map == nullptr); + ceph_assert(m_image_ctx.object_map == nullptr); m_image_ctx.object_map = m_object_map; - assert(m_image_ctx.journal == nullptr); + ceph_assert(m_image_ctx.journal == nullptr); m_image_ctx.journal = m_journal; } @@ -286,7 +286,7 @@ void 
PostAcquireRequest::revert() { delete m_object_map; delete m_journal; - assert(m_error_result < 0); + ceph_assert(m_error_result < 0); } template diff --git a/src/librbd/exclusive_lock/PreAcquireRequest.cc b/src/librbd/exclusive_lock/PreAcquireRequest.cc index 663b82bd8cc..ba3da1a24ba 100644 --- a/src/librbd/exclusive_lock/PreAcquireRequest.cc +++ b/src/librbd/exclusive_lock/PreAcquireRequest.cc @@ -78,7 +78,7 @@ void PreAcquireRequest::handle_flush_notifies(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << dendl; - assert(r == 0); + ceph_assert(r == 0); finish(); } diff --git a/src/librbd/exclusive_lock/PreReleaseRequest.cc b/src/librbd/exclusive_lock/PreReleaseRequest.cc index e5ba42b057e..7c663823e6a 100644 --- a/src/librbd/exclusive_lock/PreReleaseRequest.cc +++ b/src/librbd/exclusive_lock/PreReleaseRequest.cc @@ -94,7 +94,7 @@ void PreReleaseRequest::handle_cancel_op_requests(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << "r=" << r << dendl; - assert(r == 0); + ceph_assert(r == 0); send_block_writes(); } @@ -205,7 +205,7 @@ void PreReleaseRequest::handle_flush_notifies(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << dendl; - assert(r == 0); + ceph_assert(r == 0); send_close_journal(); } @@ -272,7 +272,7 @@ void PreReleaseRequest::handle_close_object_map(int r) { ldout(cct, 10) << "r=" << r << dendl; // object map shouldn't return errors - assert(r == 0); + ceph_assert(r == 0); delete m_object_map; send_unlock(); diff --git a/src/librbd/exclusive_lock/StandardPolicy.cc b/src/librbd/exclusive_lock/StandardPolicy.cc index adeaf3f7976..6bdb313b369 100644 --- a/src/librbd/exclusive_lock/StandardPolicy.cc +++ b/src/librbd/exclusive_lock/StandardPolicy.cc @@ -13,8 +13,8 @@ namespace librbd { namespace exclusive_lock { int StandardPolicy::lock_requested(bool force) { - assert(m_image_ctx->owner_lock.is_locked()); - assert(m_image_ctx->exclusive_lock != nullptr); + ceph_assert(m_image_ctx->owner_lock.is_locked()); + 
ceph_assert(m_image_ctx->exclusive_lock != nullptr); ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force << dendl; diff --git a/src/librbd/image/CloneRequest.cc b/src/librbd/image/CloneRequest.cc index 279a46d58dc..b90c777c1e6 100644 --- a/src/librbd/image/CloneRequest.cc +++ b/src/librbd/image/CloneRequest.cc @@ -117,7 +117,7 @@ void CloneRequest::validate_options() { template void CloneRequest::open_parent() { ldout(m_cct, 20) << dendl; - assert(m_parent_snap_name.empty() ^ (m_parent_snap_id == CEPH_NOSNAP)); + ceph_assert(m_parent_snap_name.empty() ^ (m_parent_snap_id == CEPH_NOSNAP)); if (m_parent_snap_id != CEPH_NOSNAP) { m_parent_image_ctx = I::create("", m_parent_image_id, m_parent_snap_id, @@ -232,7 +232,7 @@ void CloneRequest::validate_child() { int r = m_ioctx.aio_operate(util::old_header_name(m_name), comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -330,7 +330,7 @@ void CloneRequest::set_parent() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_imctx->md_ctx.aio_operate(m_imctx->header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -364,7 +364,7 @@ void CloneRequest::v2_set_op_feature() { auto aio_comp = create_rados_callback< CloneRequest, &CloneRequest::handle_v2_set_op_feature>(this); int r = m_ioctx.aio_operate(m_imctx->header_oid, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -393,7 +393,7 @@ void CloneRequest::v2_child_attach() { auto aio_comp = create_rados_callback< CloneRequest, &CloneRequest::handle_v2_child_attach>(this); int r = m_parent_image_ctx->md_ctx.aio_operate(m_parent_image_ctx->header_oid, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -423,7 +423,7 @@ void CloneRequest::v1_add_child() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_ioctx.aio_operate(RBD_CHILDREN, comp, &op); - assert(r == 0); + ceph_assert(r == 0); 
comp->release(); } @@ -539,7 +539,7 @@ void CloneRequest::metadata_set() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_ioctx.aio_operate(m_imctx->header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -632,7 +632,7 @@ template void CloneRequest::close_child() { ldout(m_cct, 20) << dendl; - assert(m_imctx != nullptr); + ceph_assert(m_imctx != nullptr); using klass = CloneRequest; Context *ctx = create_async_context_callback( @@ -691,7 +691,7 @@ void CloneRequest::handle_remove_child(int r) { template void CloneRequest::close_parent() { ldout(m_cct, 20) << dendl; - assert(m_parent_image_ctx != nullptr); + ceph_assert(m_parent_image_ctx != nullptr); Context *ctx = create_async_context_callback( *m_parent_image_ctx, create_context_callback< diff --git a/src/librbd/image/CloseRequest.cc b/src/librbd/image/CloseRequest.cc index f4c0eb8a4d6..57a1eb7d97c 100644 --- a/src/librbd/image/CloseRequest.cc +++ b/src/librbd/image/CloseRequest.cc @@ -29,7 +29,7 @@ template CloseRequest::CloseRequest(I *image_ctx, Context *on_finish) : m_image_ctx(image_ctx), m_on_finish(on_finish), m_error_result(0), m_exclusive_lock(nullptr) { - assert(image_ctx != nullptr); + ceph_assert(image_ctx != nullptr); } template @@ -137,12 +137,12 @@ void CloseRequest::handle_shut_down_exclusive_lock(int r) { { RWLock::RLocker owner_locker(m_image_ctx->owner_lock); - assert(m_image_ctx->exclusive_lock == nullptr); + ceph_assert(m_image_ctx->exclusive_lock == nullptr); // object map and journal closed during exclusive lock shutdown RWLock::RLocker snap_locker(m_image_ctx->snap_lock); - assert(m_image_ctx->journal == nullptr); - assert(m_image_ctx->object_map == nullptr); + ceph_assert(m_image_ctx->journal == nullptr); + ceph_assert(m_image_ctx->object_map == nullptr); } delete m_exclusive_lock; diff --git a/src/librbd/image/CreateRequest.cc b/src/librbd/image/CreateRequest.cc index 6d97dd46dff..782259c6e80 100644 --- 
a/src/librbd/image/CreateRequest.cc +++ b/src/librbd/image/CreateRequest.cc @@ -282,7 +282,7 @@ void CreateRequest::validate_data_pool() { m_outbl.clear(); int r = m_data_io_ctx.aio_operate(RBD_INFO, comp, &op, &m_outbl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -368,7 +368,7 @@ void CreateRequest::add_image_to_directory() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -408,7 +408,7 @@ void CreateRequest::create_id_object() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_operate(m_id_obj, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -451,7 +451,7 @@ void CreateRequest::negotiate_features() { m_outbl.clear(); int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op, &m_outbl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -479,7 +479,7 @@ void CreateRequest::handle_negotiate_features(int r) { template void CreateRequest::create_image() { ldout(m_cct, 20) << dendl; - assert(m_data_pool.empty() || m_data_pool_id != -1); + ceph_assert(m_data_pool.empty() || m_data_pool_id != -1); ostringstream oss; oss << RBD_DATA_PREFIX; @@ -503,7 +503,7 @@ void CreateRequest::create_image() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_operate(m_header_obj, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -538,7 +538,7 @@ void CreateRequest::set_stripe_unit_count() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_operate(m_header_obj, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -574,7 +574,7 @@ void CreateRequest::object_map_resize() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_operate(m_objmap_name, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -611,7 
+611,7 @@ void CreateRequest::fetch_mirror_mode() { create_rados_callback(this); m_outbl.clear(); int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_outbl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -788,7 +788,7 @@ void CreateRequest::remove_object_map() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_remove(m_objmap_name, comp); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -812,7 +812,7 @@ void CreateRequest::remove_header_object() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_remove(m_header_obj, comp); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -836,7 +836,7 @@ void CreateRequest::remove_id_object() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_remove(m_id_obj, comp); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -863,7 +863,7 @@ void CreateRequest::remove_from_dir() { librados::AioCompletion *comp = create_rados_callback(this); int r = m_io_ctx.aio_operate(RBD_DIRECTORY, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/image/DetachChildRequest.cc b/src/librbd/image/DetachChildRequest.cc index 5902e1a6b32..43922750f92 100644 --- a/src/librbd/image/DetachChildRequest.cc +++ b/src/librbd/image/DetachChildRequest.cc @@ -25,7 +25,7 @@ using util::create_rados_callback; template DetachChildRequest::~DetachChildRequest() { - assert(m_parent_image_ctx == nullptr); + ceph_assert(m_parent_image_ctx == nullptr); } template @@ -67,7 +67,7 @@ void DetachChildRequest::clone_v2_child_detach() { librados::Rados rados(m_image_ctx.md_ctx); int r = rados.ioctx_create2(m_parent_spec.pool_id, m_parent_io_ctx); - assert(r == 0); + ceph_assert(r == 0); // TODO support clone v2 parent namespaces m_parent_io_ctx.set_namespace(m_image_ctx.md_ctx.get_namespace()); @@ -78,7 +78,7 @@ void DetachChildRequest::clone_v2_child_detach() { 
DetachChildRequest, &DetachChildRequest::handle_clone_v2_child_detach>(this); r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -111,7 +111,7 @@ void DetachChildRequest::clone_v2_get_snapshot() { &DetachChildRequest::handle_clone_v2_get_snapshot>(this); int r = m_parent_io_ctx.aio_operate(m_parent_header_name, aio_comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -245,7 +245,7 @@ void DetachChildRequest::clone_v1_remove_child() { DetachChildRequest, &DetachChildRequest::handle_clone_v1_remove_child>(this); int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } diff --git a/src/librbd/image/ListWatchersRequest.cc b/src/librbd/image/ListWatchersRequest.cc index 21e8bfa4548..c6251f49253 100644 --- a/src/librbd/image/ListWatchersRequest.cc +++ b/src/librbd/image/ListWatchersRequest.cc @@ -51,7 +51,7 @@ void ListWatchersRequest::list_image_watchers() { int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -90,7 +90,7 @@ void ListWatchersRequest::get_mirror_image() { create_rados_callback(this); m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -122,7 +122,7 @@ void ListWatchersRequest::list_mirror_watchers() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/image/RefreshParentRequest.cc b/src/librbd/image/RefreshParentRequest.cc index 38aeb765c37..233b189c486 100644 --- a/src/librbd/image/RefreshParentRequest.cc +++ b/src/librbd/image/RefreshParentRequest.cc @@ -36,8 +36,8 @@ template bool 
RefreshParentRequest::is_refresh_required( I &child_image_ctx, const ParentInfo &parent_md, const MigrationInfo &migration_info) { - assert(child_image_ctx.snap_lock.is_locked()); - assert(child_image_ctx.parent_lock.is_locked()); + ceph_assert(child_image_ctx.snap_lock.is_locked()); + ceph_assert(child_image_ctx.parent_lock.is_locked()); return (is_open_required(child_image_ctx, parent_md, migration_info) || is_close_required(child_image_ctx, parent_md, migration_info)); } @@ -81,8 +81,8 @@ void RefreshParentRequest::send() { template void RefreshParentRequest::apply() { - assert(m_child_image_ctx.snap_lock.is_wlocked()); - assert(m_child_image_ctx.parent_lock.is_wlocked()); + ceph_assert(m_child_image_ctx.snap_lock.is_wlocked()); + ceph_assert(m_child_image_ctx.parent_lock.is_wlocked()); std::swap(m_child_image_ctx.parent, m_parent_image_ctx); std::swap(m_child_image_ctx.migration_parent, m_migration_parent_image_ctx); } @@ -102,7 +102,7 @@ void RefreshParentRequest::finalize(Context *on_finish) { template void RefreshParentRequest::send_open_parent() { - assert(m_parent_md.spec.pool_id >= 0); + ceph_assert(m_parent_md.spec.pool_id >= 0); CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -172,8 +172,8 @@ Context *RefreshParentRequest::handle_open_parent(int *result) { template void RefreshParentRequest::send_open_migration_parent() { - assert(m_parent_image_ctx != nullptr); - assert(!m_migration_info.empty()); + ceph_assert(m_parent_image_ctx != nullptr); + ceph_assert(!m_migration_info.empty()); CephContext *cct = m_child_image_ctx.cct; ParentSpec parent_spec; @@ -279,7 +279,7 @@ Context *RefreshParentRequest::handle_close_migration_parent(int *result) { template void RefreshParentRequest::send_close_parent() { - assert(m_parent_image_ctx != nullptr); + ceph_assert(m_parent_image_ctx != nullptr); CephContext *cct = m_child_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; diff --git 
a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc index 9b3947c2751..717648a4721 100644 --- a/src/librbd/image/RefreshRequest.cc +++ b/src/librbd/image/RefreshRequest.cc @@ -52,11 +52,11 @@ RefreshRequest::RefreshRequest(I &image_ctx, bool acquiring_lock, template RefreshRequest::~RefreshRequest() { // these require state machine to close - assert(m_exclusive_lock == nullptr); - assert(m_object_map == nullptr); - assert(m_journal == nullptr); - assert(m_refresh_parent == nullptr); - assert(!m_blocked_writes); + ceph_assert(m_exclusive_lock == nullptr); + ceph_assert(m_object_map == nullptr); + ceph_assert(m_journal == nullptr); + ceph_assert(m_refresh_parent == nullptr); + ceph_assert(!m_blocked_writes); } template @@ -166,7 +166,7 @@ void RefreshRequest::send_v1_read_header() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -223,7 +223,7 @@ void RefreshRequest::send_v1_get_snapshots() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -277,7 +277,7 @@ void RefreshRequest::send_v1_get_locks() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -352,7 +352,7 @@ void RefreshRequest::send_v2_get_mutable_metadata() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -474,7 +474,7 @@ void RefreshRequest::send_v2_get_flags() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -533,7 +533,7 @@ void RefreshRequest::send_v2_get_op_features() { m_out_bl.clear(); int r = 
m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -572,7 +572,7 @@ void RefreshRequest::send_v2_get_group() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -622,7 +622,7 @@ void RefreshRequest::send_v2_get_snapshots() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -671,7 +671,7 @@ void RefreshRequest::send_v2_get_snapshots_legacy() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -727,7 +727,7 @@ void RefreshRequest::send_v2_get_snap_timestamps() { m_out_bl.clear(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -1055,7 +1055,7 @@ Context *RefreshRequest::handle_v2_finalize_refresh_parent(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; - assert(m_refresh_parent != nullptr); + ceph_assert(m_refresh_parent != nullptr); delete m_refresh_parent; m_refresh_parent = nullptr; @@ -1093,10 +1093,10 @@ Context *RefreshRequest::handle_v2_shut_down_exclusive_lock(int *result) { { RWLock::WLocker owner_locker(m_image_ctx.owner_lock); - assert(m_image_ctx.exclusive_lock == nullptr); + ceph_assert(m_image_ctx.exclusive_lock == nullptr); } - assert(m_exclusive_lock != nullptr); + ceph_assert(m_exclusive_lock != nullptr); delete m_exclusive_lock; m_exclusive_lock = nullptr; @@ -1131,11 +1131,11 @@ Context *RefreshRequest::handle_v2_close_journal(int *result) { << dendl; } - assert(m_journal != nullptr); + ceph_assert(m_journal != nullptr); delete m_journal; 
m_journal = nullptr; - assert(m_blocked_writes); + ceph_assert(m_blocked_writes); m_blocked_writes = false; m_image_ctx.io_work_queue->unblock_writes(); @@ -1164,8 +1164,8 @@ Context *RefreshRequest::handle_v2_close_object_map(int *result) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; - assert(*result == 0); - assert(m_object_map != nullptr); + ceph_assert(*result == 0); + ceph_assert(m_object_map != nullptr); delete m_object_map; m_object_map = nullptr; @@ -1340,11 +1340,11 @@ void RefreshRequest::apply() { m_image_ctx.snap_lock)) { // disabling exclusive lock will automatically handle closing // object map and journaling - assert(m_exclusive_lock == nullptr); + ceph_assert(m_exclusive_lock == nullptr); m_exclusive_lock = m_image_ctx.exclusive_lock; } else { if (m_exclusive_lock != nullptr) { - assert(m_image_ctx.exclusive_lock == nullptr); + ceph_assert(m_image_ctx.exclusive_lock == nullptr); std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock); } if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING, @@ -1394,7 +1394,7 @@ bool RefreshRequest::get_migration_info(ParentInfo *parent_md, if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST || (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED && m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING)) { - assert(m_migration_spec.header_type == cls::rbd::MIGRATION_HEADER_TYPE_SRC || + ceph_assert(m_migration_spec.header_type == cls::rbd::MIGRATION_HEADER_TYPE_SRC || m_migration_spec.pool_id == -1 || m_migration_spec.state == cls::rbd::MIGRATION_STATE_EXECUTED); diff --git a/src/librbd/image/RemoveRequest.cc b/src/librbd/image/RemoveRequest.cc index 9140b42290d..f06345ab3c3 100644 --- a/src/librbd/image/RemoveRequest.cc +++ b/src/librbd/image/RemoveRequest.cc @@ -181,7 +181,7 @@ void RemoveRequest::handle_exclusive_lock_force(int r) { return; } - assert(m_image_ctx->exclusive_lock == nullptr); + 
ceph_assert(m_image_ctx->exclusive_lock == nullptr); validate_image_removal(); } @@ -291,7 +291,7 @@ void RemoveRequest::check_group() { m_out_bl.clear(); int r = m_image_ctx->md_ctx.aio_operate(m_header_oid, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -357,7 +357,7 @@ void RemoveRequest::handle_remove_snapshot(int r) { return; } - assert(!m_snap_infos.empty()); + ceph_assert(!m_snap_infos.empty()); m_snap_infos.erase(m_snap_infos.begin()); remove_snapshot(); @@ -486,7 +486,7 @@ void RemoveRequest::remove_header() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_remove(m_header_oid, rados_completion); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -514,7 +514,7 @@ void RemoveRequest::remove_header_v2() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_remove(m_header_oid, rados_completion); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -571,7 +571,7 @@ void RemoveRequest::send_object_map_remove() { int r = ObjectMap<>::aio_remove(m_ioctx, m_image_id, rados_completion); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -602,7 +602,7 @@ void RemoveRequest::mirror_image_remove() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_operate(RBD_MIRRORING, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -699,7 +699,7 @@ void RemoveRequest::dir_get_image_id() { create_rados_callback(this); m_out_bl.clear(); int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -738,7 +738,7 @@ void RemoveRequest::dir_get_image_name() { create_rados_callback(this); m_out_bl.clear(); int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op, &m_out_bl); - 
assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -773,7 +773,7 @@ void RemoveRequest::remove_id_object() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_remove(util::id_obj_name(m_image_name), rados_completion); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -802,7 +802,7 @@ void RemoveRequest::dir_remove_image() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_operate(RBD_DIRECTORY, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/image/SetFlagsRequest.cc b/src/librbd/image/SetFlagsRequest.cc index 7d292107f53..71a864244d2 100644 --- a/src/librbd/image/SetFlagsRequest.cc +++ b/src/librbd/image/SetFlagsRequest.cc @@ -54,7 +54,7 @@ void SetFlagsRequest::send_set_flags() { librados::AioCompletion *comp = create_rados_callback(gather_ctx->new_sub()); int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } gather_ctx->activate(); diff --git a/src/librbd/image/SetSnapRequest.cc b/src/librbd/image/SetSnapRequest.cc index 40e25e3e945..e9542491db6 100644 --- a/src/librbd/image/SetSnapRequest.cc +++ b/src/librbd/image/SetSnapRequest.cc @@ -30,7 +30,7 @@ SetSnapRequest::SetSnapRequest(I &image_ctx, uint64_t snap_id, template SetSnapRequest::~SetSnapRequest() { - assert(!m_writes_blocked); + ceph_assert(!m_writes_blocked); delete m_refresh_parent; delete m_object_map; delete m_exclusive_lock; @@ -50,7 +50,7 @@ void SetSnapRequest::send_init_exclusive_lock() { { RWLock::RLocker snap_locker(m_image_ctx.snap_lock); if (m_image_ctx.exclusive_lock != nullptr) { - assert(m_image_ctx.snap_id == CEPH_NOSNAP); + ceph_assert(m_image_ctx.snap_id == CEPH_NOSNAP); send_complete(); return; } @@ -328,7 +328,7 @@ int SetSnapRequest::apply() { RWLock::WLocker snap_locker(m_image_ctx.snap_lock); 
RWLock::WLocker parent_locker(m_image_ctx.parent_lock); if (m_snap_id != CEPH_NOSNAP) { - assert(m_image_ctx.exclusive_lock == nullptr); + ceph_assert(m_image_ctx.exclusive_lock == nullptr); int r = m_image_ctx.snap_set(m_snap_id); if (r < 0) { return r; diff --git a/src/librbd/image_watcher/NotifyLockOwner.cc b/src/librbd/image_watcher/NotifyLockOwner.cc index 2cec8b1bc89..ead5f214c22 100644 --- a/src/librbd/image_watcher/NotifyLockOwner.cc +++ b/src/librbd/image_watcher/NotifyLockOwner.cc @@ -36,7 +36,7 @@ void NotifyLockOwner::send_notify() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << dendl; - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); m_notifier.notify(m_bl, &m_notify_response, create_context_callback< NotifyLockOwner, &NotifyLockOwner::handle_notify>(this)); } diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc index bf19b63d2c3..3285fcab4e0 100644 --- a/src/librbd/internal.cc +++ b/src/librbd/internal.cc @@ -211,8 +211,8 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) void trim_image(ImageCtx *ictx, uint64_t newsize, ProgressContext& prog_ctx) { - assert(ictx->owner_lock.is_locked()); - assert(ictx->exclusive_lock == nullptr || + ceph_assert(ictx->owner_lock.is_locked()); + ceph_assert(ictx->exclusive_lock == nullptr || ictx->exclusive_lock->is_lock_owner()); C_SaferCond ctx; @@ -494,7 +494,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) IMAGE_OPTIONS_TYPE_MAPPING.find(optname); if (i == IMAGE_OPTIONS_TYPE_MAPPING.end()) { - assert((*opts_)->find(optname) == (*opts_)->end()); + ceph_assert((*opts_)->find(optname) == (*opts_)->end()); return -EINVAL; } @@ -634,7 +634,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) } } pctx.update_progress(++i, size); - assert(i <= size); + ceph_assert(i <= size); } return 0; @@ -806,12 +806,12 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) ImageOptions 
opts; int r = opts.set(RBD_IMAGE_OPTION_ORDER, order_); - assert(r == 0); + ceph_assert(r == 0); r = create(io_ctx, imgname, "", size, opts, "", "", false); int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_); - assert(r1 == 0); + ceph_assert(r1 == 0); *order = order_; return r; @@ -830,20 +830,20 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) int r; r = opts.set(RBD_IMAGE_OPTION_FORMAT, format); - assert(r == 0); + ceph_assert(r == 0); r = opts.set(RBD_IMAGE_OPTION_FEATURES, features); - assert(r == 0); + ceph_assert(r == 0); r = opts.set(RBD_IMAGE_OPTION_ORDER, order_); - assert(r == 0); + ceph_assert(r == 0); r = opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit); - assert(r == 0); + ceph_assert(r == 0); r = opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count); - assert(r == 0); + ceph_assert(r == 0); r = create(io_ctx, imgname, "", size, opts, "", "", false); int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_); - assert(r1 == 0); + ceph_assert(r1 == 0); *order = order_; return r; @@ -920,7 +920,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) } int r1 = opts.set(RBD_IMAGE_OPTION_ORDER, order); - assert(r1 == 0); + ceph_assert(r1 == 0); return r; } @@ -954,7 +954,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) const std::string &non_primary_global_image_id, const std::string &primary_mirror_uuid) { - assert((p_id == nullptr) ^ (p_name == nullptr)); + ceph_assert((p_id == nullptr) ^ (p_name == nullptr)); CephContext *cct = (CephContext *)p_ioctx.cct(); if (p_snap_name == nullptr) { @@ -1732,7 +1732,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) int snap_get_timestamp(ImageCtx *ictx, uint64_t snap_id, struct timespec *timestamp) { std::map::iterator snap_it = ictx->snap_info.find(snap_id); - assert(snap_it != ictx->snap_info.end()); + ceph_assert(snap_it != ictx->snap_info.end()); utime_t time = snap_it->second.timestamp; time.to_timespec(timestamp); return 0; @@ 
-1859,7 +1859,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) m_throttle->end_op(r); return; } - assert(m_bl->length() == (size_t)r); + ceph_assert(m_bl->length() == (size_t)r); if (m_bl->is_zero()) { delete m_bl; @@ -1907,7 +1907,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) } } delete m_bl; - assert(gather_ctx->get_sub_created_count() > 0); + ceph_assert(gather_ctx->get_sub_created_count() > 0); gather_ctx->activate(); } @@ -2242,7 +2242,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2) // validate extent against image size; clip to image size if necessary int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len) { - assert(ictx->snap_lock.is_locked()); + ceph_assert(ictx->snap_lock.is_locked()); uint64_t image_size = ictx->get_image_size(ictx->snap_id); bool snap_exists = ictx->snap_exists; diff --git a/src/librbd/io/AioCompletion.cc b/src/librbd/io/AioCompletion.cc index 4ce09c3b49c..661cd57255b 100644 --- a/src/librbd/io/AioCompletion.cc +++ b/src/librbd/io/AioCompletion.cc @@ -41,8 +41,8 @@ int AioCompletion::wait_for_complete() { void AioCompletion::finalize(ssize_t rval) { - assert(lock.is_locked()); - assert(ictx != nullptr); + ceph_assert(lock.is_locked()); + ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; ldout(cct, 20) << "r=" << rval << dendl; @@ -52,8 +52,8 @@ void AioCompletion::finalize(ssize_t rval) } void AioCompletion::complete() { - assert(lock.is_locked()); - assert(ictx != nullptr); + ceph_assert(lock.is_locked()); + ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; tracepoint(librbd, aio_complete_enter, this, rval); @@ -117,8 +117,8 @@ void AioCompletion::init_time(ImageCtx *i, aio_type_t t) { void AioCompletion::start_op(bool ignore_type) { Mutex::Locker locker(lock); - assert(ictx != nullptr); - assert(!async_op.started()); + ceph_assert(ictx != nullptr); + ceph_assert(!async_op.started()); if (state == AIO_STATE_PENDING && (ignore_type || 
aio_type != AIO_TYPE_FLUSH)) { async_op.start_op(*ictx); @@ -128,11 +128,11 @@ void AioCompletion::start_op(bool ignore_type) { void AioCompletion::fail(int r) { lock.Lock(); - assert(ictx != nullptr); + ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; lderr(cct) << cpp_strerror(r) << dendl; - assert(pending_count == 0); + ceph_assert(pending_count == 0); rval = r; complete(); put_unlock(); @@ -140,11 +140,11 @@ void AioCompletion::fail(int r) void AioCompletion::set_request_count(uint32_t count) { lock.Lock(); - assert(ictx != nullptr); + ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; ldout(cct, 20) << "pending=" << count << dendl; - assert(pending_count == 0); + ceph_assert(pending_count == 0); pending_count = count; lock.Unlock(); @@ -155,7 +155,7 @@ void AioCompletion::set_request_count(uint32_t count) { void AioCompletion::complete_request(ssize_t r) { lock.Lock(); - assert(ictx != nullptr); + ceph_assert(ictx != nullptr); CephContext *cct = ictx->cct; if (rval >= 0) { @@ -164,7 +164,7 @@ void AioCompletion::complete_request(ssize_t r) else if (r > 0) rval += r; } - assert(pending_count); + ceph_assert(pending_count); int count = --pending_count; ldout(cct, 20) << "cb=" << complete_cb << ", " diff --git a/src/librbd/io/AioCompletion.h b/src/librbd/io/AioCompletion.h index 6aff99154fa..ef0a3c8999b 100644 --- a/src/librbd/io/AioCompletion.h +++ b/src/librbd/io/AioCompletion.h @@ -142,7 +142,7 @@ struct AioCompletion { void set_request_count(uint32_t num); void add_request() { lock.Lock(); - assert(pending_count > 0); + ceph_assert(pending_count > 0); lock.Unlock(); get(); } @@ -154,13 +154,13 @@ struct AioCompletion { void get() { lock.Lock(); - assert(ref > 0); + ceph_assert(ref > 0); ref++; lock.Unlock(); } void release() { lock.Lock(); - assert(!released); + ceph_assert(!released); released = true; put_unlock(); } @@ -169,7 +169,7 @@ struct AioCompletion { put_unlock(); } void put_unlock() { - assert(ref > 0); + ceph_assert(ref > 0); 
int n = --ref; lock.Unlock(); if (!n) { @@ -194,7 +194,7 @@ struct AioCompletion { } void unblock() { Mutex::Locker l(lock); - assert(blockers > 0); + ceph_assert(blockers > 0); --blockers; if (pending_count == 0 && blockers == 0) { finalize(rval); diff --git a/src/librbd/io/AsyncOperation.cc b/src/librbd/io/AsyncOperation.cc index ca8daa4c098..fddb621e220 100644 --- a/src/librbd/io/AsyncOperation.cc +++ b/src/librbd/io/AsyncOperation.cc @@ -38,7 +38,7 @@ struct C_CompleteFlushes : public Context { } // anonymous namespace void AsyncOperation::start_op(ImageCtx &image_ctx) { - assert(m_image_ctx == NULL); + ceph_assert(m_image_ctx == NULL); m_image_ctx = &image_ctx; ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl; @@ -53,7 +53,7 @@ void AsyncOperation::finish_op() { Mutex::Locker l(m_image_ctx->async_ops_lock); xlist::iterator iter(&m_xlist_item); ++iter; - assert(m_xlist_item.remove_myself()); + ceph_assert(m_xlist_item.remove_myself()); // linked list stored newest -> oldest ops if (!iter.end() && !m_flush_contexts.empty()) { @@ -74,7 +74,7 @@ void AsyncOperation::finish_op() { } void AsyncOperation::add_flush_context(Context *on_finish) { - assert(m_image_ctx->async_ops_lock.is_locked()); + ceph_assert(m_image_ctx->async_ops_lock.is_locked()); ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": " << "flush=" << on_finish << dendl; m_flush_contexts.push_back(on_finish); diff --git a/src/librbd/io/AsyncOperation.h b/src/librbd/io/AsyncOperation.h index 5839a6964ba..1f22e72db81 100644 --- a/src/librbd/io/AsyncOperation.h +++ b/src/librbd/io/AsyncOperation.h @@ -26,7 +26,7 @@ public: ~AsyncOperation() { - assert(!m_xlist_item.is_on_list()); + ceph_assert(!m_xlist_item.is_on_list()); } inline bool started() const { diff --git a/src/librbd/io/CopyupRequest.cc b/src/librbd/io/CopyupRequest.cc index b8d71eadff0..09be635016d 100644 --- a/src/librbd/io/CopyupRequest.cc +++ b/src/librbd/io/CopyupRequest.cc @@ -47,8 +47,8 @@ public: if (snap_id == 
CEPH_NOSNAP) { RWLock::RLocker snap_locker(m_image_ctx.snap_lock); RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock); - assert(m_image_ctx.exclusive_lock->is_lock_owner()); - assert(m_image_ctx.object_map != nullptr); + ceph_assert(m_image_ctx.exclusive_lock->is_lock_owner()); + ceph_assert(m_image_ctx.object_map != nullptr); bool sent = m_image_ctx.object_map->aio_update( CEPH_NOSNAP, m_object_no, OBJECT_EXISTS, {}, m_trace, this); return (sent ? 0 : 1); @@ -68,7 +68,7 @@ public: bool sent = m_image_ctx.object_map->aio_update( snap_id, m_object_no, state, {}, m_trace, this); - assert(sent); + ceph_assert(sent); return 0; } @@ -95,7 +95,7 @@ CopyupRequest::CopyupRequest(I *ictx, const std::string &oid, template CopyupRequest::~CopyupRequest() { - assert(m_pending_requests.empty()); + ceph_assert(m_pending_requests.empty()); m_async_op.finish_op(); } @@ -156,7 +156,7 @@ bool CopyupRequest::send_copyup() { r = m_data_ctx.aio_operate( m_oid, comp, &copyup_op, 0, snaps, (m_trace.valid() ? m_trace.get_info() : nullptr)); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -178,7 +178,7 @@ bool CopyupRequest::send_copyup() { r = m_ictx->data_ctx.aio_operate( m_oid, comp, &write_op, snapc.seq, snaps, (m_trace.valid() ? 
m_trace.get_info() : nullptr)); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } return false; @@ -221,7 +221,7 @@ bool CopyupRequest::is_update_object_map_required(int r) { } auto it = m_ictx->migration_info.snap_map.find(CEPH_NOSNAP); - assert(it != m_ictx->migration_info.snap_map.end()); + ceph_assert(it != m_ictx->migration_info.snap_map.end()); return it->second[0] != CEPH_NOSNAP; } @@ -304,12 +304,12 @@ bool CopyupRequest::should_complete(int *r) { case STATE_OBJECT_MAP_HEAD: ldout(cct, 20) << "OBJECT_MAP_HEAD" << dendl; - assert(*r == 0); + ceph_assert(*r == 0); return send_object_map(); case STATE_OBJECT_MAP: ldout(cct, 20) << "OBJECT_MAP" << dendl; - assert(*r == 0); + ceph_assert(*r == 0); if (!is_copyup_required()) { ldout(cct, 20) << "skipping copyup" << dendl; return true; @@ -319,7 +319,7 @@ bool CopyupRequest::should_complete(int *r) { case STATE_COPYUP: { Mutex::Locker locker(m_lock); - assert(m_pending_copyups > 0); + ceph_assert(m_pending_copyups > 0); pending_copyups = --m_pending_copyups; } ldout(cct, 20) << "COPYUP (" << pending_copyups << " pending)" @@ -352,10 +352,10 @@ void CopyupRequest::remove_from_list() { template void CopyupRequest::remove_from_list(Mutex &lock) { - assert(m_ictx->copyup_list_lock.is_locked()); + ceph_assert(m_ictx->copyup_list_lock.is_locked()); auto it = m_ictx->copyup_list.find(m_object_no); - assert(it != m_ictx->copyup_list.end()); + ceph_assert(it != m_ictx->copyup_list.end()); m_ictx->copyup_list.erase(it); } @@ -371,7 +371,7 @@ bool CopyupRequest::send_object_map_head() { RWLock::RLocker snap_locker(m_ictx->snap_lock); if (m_ictx->object_map != nullptr) { bool copy_on_read = m_pending_requests.empty(); - assert(m_ictx->exclusive_lock->is_lock_owner()); + ceph_assert(m_ictx->exclusive_lock->is_lock_owner()); RWLock::WLocker object_map_locker(m_ictx->object_map_lock); diff --git a/src/librbd/io/ImageRequest.cc b/src/librbd/io/ImageRequest.cc index 4da8595cc2e..a4e00e4fd64 100644 --- 
a/src/librbd/io/ImageRequest.cc +++ b/src/librbd/io/ImageRequest.cc @@ -52,7 +52,7 @@ public: librados::AioCompletion *comp = librbd::util::create_rados_callback(this); int r = m_image_ctx->md_ctx.aio_operate(m_image_ctx->header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -135,8 +135,8 @@ void ImageRequest::aio_compare_and_write(I *ictx, AioCompletion *c, template void ImageRequest::send() { I &image_ctx = this->m_image_ctx; - assert(m_aio_comp->is_initialized(get_aio_type())); - assert(m_aio_comp->is_started() ^ (get_aio_type() == AIO_TYPE_FLUSH)); + ceph_assert(m_aio_comp->is_initialized(get_aio_type())); + ceph_assert(m_aio_comp->is_started() ^ (get_aio_type() == AIO_TYPE_FLUSH)); CephContext *cct = image_ctx.cct; AioCompletion *aio_comp = this->m_aio_comp; @@ -288,7 +288,7 @@ void ImageReadRequest::send_request() { template void ImageReadRequest::send_image_cache_request() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.image_cache != nullptr); + ceph_assert(image_ctx.image_cache != nullptr); AioCompletion *aio_comp = this->m_aio_comp; aio_comp->set_request_count(1); @@ -370,7 +370,7 @@ void AbstractImageWriteRequest::send_request() { if (journaling) { // in-flight ops are flushed prior to closing the journal - assert(image_ctx.journal != NULL); + ceph_assert(image_ctx.journal != NULL); journal_tid = append_journal_event(m_synchronous); } @@ -423,7 +423,7 @@ uint64_t ImageWriteRequest::append_journal_event(bool synchronous) { uint64_t tid = 0; uint64_t buffer_offset = 0; - assert(!this->m_image_extents.empty()); + ceph_assert(!this->m_image_extents.empty()); for (auto &extent : this->m_image_extents) { bufferlist sub_bl; sub_bl.substr_of(m_bl, buffer_offset, extent.second); @@ -439,7 +439,7 @@ uint64_t ImageWriteRequest::append_journal_event(bool synchronous) { template void ImageWriteRequest::send_image_cache_request() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.image_cache != nullptr); + 
ceph_assert(image_ctx.image_cache != nullptr); AioCompletion *aio_comp = this->m_aio_comp; aio_comp->set_request_count(1); @@ -475,7 +475,7 @@ uint64_t ImageDiscardRequest::append_journal_event(bool synchronous) { I &image_ctx = this->m_image_ctx; uint64_t tid = 0; - assert(!this->m_image_extents.empty()); + ceph_assert(!this->m_image_extents.empty()); for (auto &extent : this->m_image_extents) { journal::EventEntry event_entry( journal::AioDiscardEvent(extent.first, @@ -492,7 +492,7 @@ uint64_t ImageDiscardRequest::append_journal_event(bool synchronous) { template void ImageDiscardRequest::send_image_cache_request() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.image_cache != nullptr); + ceph_assert(image_ctx.image_cache != nullptr); AioCompletion *aio_comp = this->m_aio_comp; aio_comp->set_request_count(this->m_image_extents.size()); @@ -584,7 +584,7 @@ void ImageFlushRequest::send_request() { template void ImageFlushRequest::send_image_cache_request() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.image_cache != nullptr); + ceph_assert(image_ctx.image_cache != nullptr); AioCompletion *aio_comp = this->m_aio_comp; aio_comp->set_request_count(1); @@ -597,7 +597,7 @@ uint64_t ImageWriteSameRequest::append_journal_event(bool synchronous) { I &image_ctx = this->m_image_ctx; uint64_t tid = 0; - assert(!this->m_image_extents.empty()); + ceph_assert(!this->m_image_extents.empty()); for (auto &extent : this->m_image_extents) { journal::EventEntry event_entry(journal::AioWriteSameEvent(extent.first, extent.second, @@ -613,7 +613,7 @@ uint64_t ImageWriteSameRequest::append_journal_event(bool synchronous) { template void ImageWriteSameRequest::send_image_cache_request() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.image_cache != nullptr); + ceph_assert(image_ctx.image_cache != nullptr); AioCompletion *aio_comp = this->m_aio_comp; aio_comp->set_request_count(this->m_image_extents.size()); @@ -664,7 +664,7 @@ uint64_t 
ImageCompareAndWriteRequest::append_journal_event( I &image_ctx = this->m_image_ctx; uint64_t tid = 0; - assert(this->m_image_extents.size() == 1); + ceph_assert(this->m_image_extents.size() == 1); auto &extent = this->m_image_extents.front(); journal::EventEntry event_entry( journal::AioCompareAndWriteEvent(extent.first, extent.second, m_cmp_bl, @@ -690,7 +690,7 @@ void ImageCompareAndWriteRequest::assemble_extent( template void ImageCompareAndWriteRequest::send_image_cache_request() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.image_cache != nullptr); + ceph_assert(image_ctx.image_cache != nullptr); AioCompletion *aio_comp = this->m_aio_comp; aio_comp->set_request_count(1); diff --git a/src/librbd/io/ImageRequestWQ.cc b/src/librbd/io/ImageRequestWQ.cc index 2c87e606a20..80ac6de363c 100644 --- a/src/librbd/io/ImageRequestWQ.cc +++ b/src/librbd/io/ImageRequestWQ.cc @@ -488,11 +488,11 @@ void ImageRequestWQ::aio_compare_and_write(AioCompletion *c, template void ImageRequestWQ::shut_down(Context *on_shutdown) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); { RWLock::WLocker locker(m_lock); - assert(!m_shutdown); + ceph_assert(!m_shutdown); m_shutdown = true; CephContext *cct = m_image_ctx.cct; @@ -517,7 +517,7 @@ int ImageRequestWQ::block_writes() { template void ImageRequestWQ::block_writes(Context *on_blocked) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; { @@ -543,7 +543,7 @@ void ImageRequestWQ::unblock_writes() { Contexts waiter_contexts; { RWLock::WLocker locker(m_lock); - assert(m_write_blockers > 0); + ceph_assert(m_write_blockers > 0); --m_write_blockers; ldout(cct, 5) << &m_image_ctx << ", " << "num=" @@ -564,7 +564,7 @@ void ImageRequestWQ::unblock_writes() { template void ImageRequestWQ::wait_on_writes_unblocked(Context *on_unblocked) { - assert(m_image_ctx.owner_lock.is_locked()); + 
ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; { @@ -622,7 +622,7 @@ void ImageRequestWQ::apply_qos_limit(uint64_t limit, const uint64_t flag) { break; } } - assert(throttle != nullptr); + ceph_assert(throttle != nullptr); throttle->set_max(limit); throttle->set_average(limit); if (limit) @@ -636,7 +636,7 @@ void ImageRequestWQ::handle_throttle_ready(int r, ImageDispatchSpec *item, CephContext *cct = m_image_ctx.cct; ldout(cct, 15) << "r=" << r << ", " << "req=" << item << dendl; - assert(m_io_throttled.load() > 0); + ceph_assert(m_io_throttled.load() > 0); item->set_throttled(flag); if (item->were_all_throttled()) { this->requeue(item); @@ -716,7 +716,7 @@ void *ImageRequestWQ::_void_dequeue() { auto item = reinterpret_cast *>( ThreadPool::PointerWQ >::_void_dequeue()); - assert(peek_item == item); + ceph_assert(peek_item == item); if (lock_required) { this->get_pool_lock().Unlock(); @@ -783,10 +783,10 @@ template void ImageRequestWQ::finish_queued_io(ImageDispatchSpec *req) { RWLock::RLocker locker(m_lock); if (req->is_write_op()) { - assert(m_queued_writes > 0); + ceph_assert(m_queued_writes > 0); m_queued_writes--; } else { - assert(m_queued_reads > 0); + ceph_assert(m_queued_reads > 0); m_queued_reads--; } } @@ -796,7 +796,7 @@ void ImageRequestWQ::finish_in_flight_write() { bool writes_blocked = false; { RWLock::RLocker locker(m_lock); - assert(m_in_flight_writes > 0); + ceph_assert(m_in_flight_writes > 0); if (--m_in_flight_writes == 0 && !m_write_blocker_contexts.empty()) { writes_blocked = true; @@ -839,7 +839,7 @@ void ImageRequestWQ::finish_in_flight_io() { CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << "completing shut down" << dendl; - assert(on_shutdown != nullptr); + ceph_assert(on_shutdown != nullptr); flush_image(m_image_ctx, on_shutdown); } @@ -855,14 +855,14 @@ void ImageRequestWQ::fail_in_flight_io( template bool ImageRequestWQ::is_lock_required(bool write_op) const { - assert(m_lock.is_locked()); + 
ceph_assert(m_lock.is_locked()); return ((write_op && m_require_lock_on_write) || (!write_op && m_require_lock_on_read)); } template void ImageRequestWQ::queue(ImageDispatchSpec *req) { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << "ictx=" << &m_image_ctx << ", " @@ -891,7 +891,7 @@ void ImageRequestWQ::handle_acquire_lock( this->requeue(req); } - assert(m_io_blockers.load() > 0); + ceph_assert(m_io_blockers.load() > 0); --m_io_blockers; this->signal(); } @@ -910,7 +910,7 @@ void ImageRequestWQ::handle_refreshed( this->requeue(req); } - assert(m_io_blockers.load() > 0); + ceph_assert(m_io_blockers.load() > 0); --m_io_blockers; this->signal(); } diff --git a/src/librbd/io/ObjectDispatchSpec.cc b/src/librbd/io/ObjectDispatchSpec.cc index 1d0c24e003b..bc60dfe010e 100644 --- a/src/librbd/io/ObjectDispatchSpec.cc +++ b/src/librbd/io/ObjectDispatchSpec.cc @@ -23,7 +23,7 @@ void ObjectDispatchSpec::C_Dispatcher::complete(int r) { finish(r); break; case DISPATCH_RESULT_INVALID: - assert(false); + ceph_assert(false); break; } } @@ -38,7 +38,7 @@ void ObjectDispatchSpec::send() { } void ObjectDispatchSpec::fail(int r) { - assert(r < 0); + ceph_assert(r < 0); dispatcher_ctx.complete(r); } diff --git a/src/librbd/io/ObjectDispatcher.cc b/src/librbd/io/ObjectDispatcher.cc index 881e76cb02c..befc075062c 100644 --- a/src/librbd/io/ObjectDispatcher.cc +++ b/src/librbd/io/ObjectDispatcher.cc @@ -186,7 +186,7 @@ ObjectDispatcher::ObjectDispatcher(I* image_ctx) template ObjectDispatcher::~ObjectDispatcher() { - assert(m_object_dispatches.empty()); + ceph_assert(m_object_dispatches.empty()); } template @@ -214,11 +214,11 @@ void ObjectDispatcher::register_object_dispatch( ldout(cct, 5) << "object_dispatch_layer=" << type << dendl; RWLock::WLocker locker(m_lock); - assert(type < OBJECT_DISPATCH_LAYER_LAST); + ceph_assert(type < OBJECT_DISPATCH_LAYER_LAST); auto result = 
m_object_dispatches.insert( {type, {object_dispatch, new AsyncOpTracker()}}); - assert(result.second); + ceph_assert(result.second); } template @@ -226,13 +226,13 @@ void ObjectDispatcher::shut_down_object_dispatch( ObjectDispatchLayer object_dispatch_layer, Context* on_finish) { auto cct = m_image_ctx->cct; ldout(cct, 5) << "object_dispatch_layer=" << object_dispatch_layer << dendl; - assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST); + ceph_assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST); ObjectDispatchMeta object_dispatch_meta; { RWLock::WLocker locker(m_lock); auto it = m_object_dispatches.find(object_dispatch_layer); - assert(it != m_object_dispatches.end()); + ceph_assert(it != m_object_dispatches.end()); object_dispatch_meta = it->second; m_object_dispatches.erase(it); @@ -306,7 +306,7 @@ void ObjectDispatcher::send(ObjectDispatchSpec* object_dispatch_spec) { ldout(cct, 20) << "object_dispatch_spec=" << object_dispatch_spec << dendl; auto object_dispatch_layer = object_dispatch_spec->object_dispatch_layer; - assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST); + ceph_assert(object_dispatch_layer + 1 < OBJECT_DISPATCH_LAYER_LAST); // apply the IO request to all layers -- this method will be re-invoked // by the dispatch layer if continuing / restarting the IO diff --git a/src/librbd/io/ObjectRequest.cc b/src/librbd/io/ObjectRequest.cc index 9bb1f50e31c..b1cb5f67640 100644 --- a/src/librbd/io/ObjectRequest.cc +++ b/src/librbd/io/ObjectRequest.cc @@ -133,8 +133,8 @@ void ObjectRequest::add_write_hint(I& image_ctx, template bool ObjectRequest::compute_parent_extents(Extents *parent_extents, bool read_request) { - assert(m_ictx->snap_lock.is_locked()); - assert(m_ictx->parent_lock.is_locked()); + ceph_assert(m_ictx->snap_lock.is_locked()); + ceph_assert(m_ictx->parent_lock.is_locked()); m_has_parent = false; parent_extents->clear(); @@ -239,7 +239,7 @@ void ObjectReadRequest::read_object() { int r = 
image_ctx->data_ctx.aio_operate( this->m_oid, rados_completion, &op, flags, nullptr, (this->m_trace.valid() ? this->m_trace.get_info() : nullptr)); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -417,7 +417,7 @@ void AbstractObjectWriteRequest::send() { m_object_may_exist = true; } else { // should have been flushed prior to releasing lock - assert(image_ctx->exclusive_lock->is_lock_owner()); + ceph_assert(image_ctx->exclusive_lock->is_lock_owner()); m_object_may_exist = image_ctx->object_map->object_may_exist( this->m_object_no); } @@ -475,7 +475,7 @@ void AbstractObjectWriteRequest::handle_pre_write_object_map_update(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; - assert(r == 0); + ceph_assert(r == 0); write_object(); } @@ -497,7 +497,7 @@ void AbstractObjectWriteRequest::write_object() { add_write_hint(&write); add_write_ops(&write); - assert(write.size() != 0); + ceph_assert(write.size() != 0); librados::AioCompletion *rados_completion = util::create_rados_callback< AbstractObjectWriteRequest, @@ -505,7 +505,7 @@ void AbstractObjectWriteRequest::write_object() { int r = image_ctx->data_ctx.aio_operate( this->m_oid, rados_completion, &write, m_snap_seq, m_snaps, (this->m_trace.valid() ? 
this->m_trace.get_info() : nullptr)); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -539,7 +539,7 @@ void AbstractObjectWriteRequest::copyup() { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << dendl; - assert(!m_copyup_in_progress); + ceph_assert(!m_copyup_in_progress); m_copyup_in_progress = true; image_ctx->copyup_list_lock.Lock(); @@ -567,7 +567,7 @@ void AbstractObjectWriteRequest::handle_copyup(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; - assert(m_copyup_in_progress); + ceph_assert(m_copyup_in_progress); m_copyup_in_progress = false; if (r < 0) { @@ -600,7 +600,7 @@ void AbstractObjectWriteRequest::post_write_object_map_update() { ldout(image_ctx->cct, 20) << dendl; // should have been flushed prior to releasing lock - assert(image_ctx->exclusive_lock->is_lock_owner()); + ceph_assert(image_ctx->exclusive_lock->is_lock_owner()); image_ctx->object_map_lock.get_write(); if (image_ctx->object_map->template aio_update< AbstractObjectWriteRequest, @@ -622,7 +622,7 @@ void AbstractObjectWriteRequest::handle_post_write_object_map_update(int r) { I *image_ctx = this->m_ictx; ldout(image_ctx->cct, 20) << "r=" << r << dendl; - assert(r == 0); + ceph_assert(r == 0); this->finish(0); } @@ -682,7 +682,7 @@ int ObjectCompareAndWriteRequest::filter_write_result(int r) const { Striper::extent_to_file(image_ctx->cct, &image_ctx->layout, this->m_object_no, offset, this->m_object_len, image_extents); - assert(image_extents.size() == 1); + ceph_assert(image_extents.size() == 1); if (m_mismatch_offset) { *m_mismatch_offset = image_extents[0].first; diff --git a/src/librbd/io/ObjectRequest.h b/src/librbd/io/ObjectRequest.h index 689a5e0edc1..d1ce8f1461c 100644 --- a/src/librbd/io/ObjectRequest.h +++ b/src/librbd/io/ObjectRequest.h @@ -327,7 +327,7 @@ public: case DISCARD_ACTION_ZERO: return "zero"; } - assert(false); + ceph_assert(false); return nullptr; } @@ -371,7 +371,7 @@ protected: 
wr->zero(this->m_object_off, this->m_object_len); break; default: - assert(false); + ceph_assert(false); break; } } diff --git a/src/librbd/io/ReadResult.cc b/src/librbd/io/ReadResult.cc index 58795202b2d..c24d8b4a7fc 100644 --- a/src/librbd/io/ReadResult.cc +++ b/src/librbd/io/ReadResult.cc @@ -23,7 +23,7 @@ struct ReadResult::SetClipLengthVisitor : public boost::static_visitor { } void operator()(Linear &linear) const { - assert(length <= linear.buf_len); + ceph_assert(length <= linear.buf_len); linear.buf_len = length; } @@ -66,7 +66,7 @@ struct ReadResult::AssembleResultVisitor : public boost::static_visitor { it.copy(len, static_cast(vector.iov[idx].iov_base)); offset += len; } - assert(offset == bl.length()); + ceph_assert(offset == bl.length()); } void operator()(Bufferlist &bufferlist) const { @@ -94,7 +94,7 @@ void ReadResult::C_ImageReadRequest::finish(int r) { for (auto &image_extent : image_extents) { length += image_extent.second; } - assert(length == bl.length()); + ceph_assert(length == bl.length()); aio_completion->lock.Lock(); aio_completion->read_result.m_destriper.add_partial_result( diff --git a/src/librbd/journal/DemoteRequest.cc b/src/librbd/journal/DemoteRequest.cc index bef477d5645..c851beae57d 100644 --- a/src/librbd/journal/DemoteRequest.cc +++ b/src/librbd/journal/DemoteRequest.cc @@ -31,7 +31,7 @@ DemoteRequest::DemoteRequest(I &image_ctx, Context *on_finish) template DemoteRequest::~DemoteRequest() { - assert(m_journaler == nullptr); + ceph_assert(m_journaler == nullptr); } template diff --git a/src/librbd/journal/Replay.cc b/src/librbd/journal/Replay.cc index 89c2bcd858e..82bae75e81e 100644 --- a/src/librbd/journal/Replay.cc +++ b/src/librbd/journal/Replay.cc @@ -178,12 +178,12 @@ Replay::Replay(I &image_ctx) template Replay::~Replay() { - assert(m_in_flight_aio_flush == 0); - assert(m_in_flight_aio_modify == 0); - assert(m_aio_modify_unsafe_contexts.empty()); - assert(m_aio_modify_safe_contexts.empty()); - assert(m_op_events.empty()); 
- assert(m_in_flight_op_events == 0); + ceph_assert(m_in_flight_aio_flush == 0); + ceph_assert(m_in_flight_aio_modify == 0); + ceph_assert(m_aio_modify_unsafe_contexts.empty()); + ceph_assert(m_aio_modify_safe_contexts.empty()); + ceph_assert(m_op_events.empty()); + ceph_assert(m_in_flight_op_events == 0); } template @@ -234,7 +234,7 @@ void Replay::shut_down(bool cancel_ops, Context *on_finish) { // safely commit any remaining AIO modify operations if ((m_in_flight_aio_flush + m_in_flight_aio_modify) != 0) { flush_comp = create_aio_flush_completion(nullptr); - assert(flush_comp != nullptr); + ceph_assert(flush_comp != nullptr); } for (auto &op_event_pair : m_op_events) { @@ -259,10 +259,10 @@ void Replay::shut_down(bool cancel_ops, Context *on_finish) { } } - assert(!m_shut_down); + ceph_assert(!m_shut_down); m_shut_down = true; - assert(m_flush_ctx == nullptr); + ceph_assert(m_flush_ctx == nullptr); if (m_in_flight_op_events > 0 || flush_comp != nullptr) { std::swap(m_flush_ctx, on_finish); } @@ -303,10 +303,10 @@ void Replay::replay_op_ready(uint64_t op_tid, Context *on_resume) { Mutex::Locker locker(m_lock); auto op_it = m_op_events.find(op_tid); - assert(op_it != m_op_events.end()); + ceph_assert(op_it != m_op_events.end()); OpEvent &op_event = op_it->second; - assert(op_event.op_in_progress && + ceph_assert(op_event.op_in_progress && op_event.on_op_finish_event == nullptr && op_event.on_finish_ready == nullptr && op_event.on_finish_safe == nullptr); @@ -510,7 +510,7 @@ void Replay::handle_event(const journal::OpFinishEvent &event, } OpEvent &op_event = op_it->second; - assert(op_event.on_finish_safe == nullptr); + ceph_assert(op_event.on_finish_safe == nullptr); op_event.on_finish_ready = on_ready; op_event.on_finish_safe = on_safe; op_in_progress = op_event.op_in_progress; @@ -907,7 +907,7 @@ void Replay::handle_aio_modify_complete(Context *on_ready, Context *on_safe, m_aio_modify_safe_contexts.insert(on_safe); } else { // IO is safely stored on disk - 
assert(m_in_flight_aio_modify > 0); + ceph_assert(m_in_flight_aio_modify > 0); --m_in_flight_aio_modify; if (m_on_aio_ready != nullptr) { @@ -935,8 +935,8 @@ void Replay::handle_aio_flush_complete(Context *on_flush_safe, Context *on_flush = nullptr; { Mutex::Locker locker(m_lock); - assert(m_in_flight_aio_flush > 0); - assert(m_in_flight_aio_modify >= on_safe_ctxs.size()); + ceph_assert(m_in_flight_aio_flush > 0); + ceph_assert(m_in_flight_aio_modify >= on_safe_ctxs.size()); --m_in_flight_aio_flush; m_in_flight_aio_modify -= on_safe_ctxs.size(); @@ -988,7 +988,7 @@ Context *Replay::create_op_context_callback(uint64_t op_tid, return nullptr; } - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); if (m_op_events.count(op_tid) != 0) { lderr(cct) << ": duplicate op tid detected: " << op_tid << dendl; @@ -1019,27 +1019,27 @@ void Replay::handle_op_complete(uint64_t op_tid, int r) { { Mutex::Locker locker(m_lock); auto op_it = m_op_events.find(op_tid); - assert(op_it != m_op_events.end()); + ceph_assert(op_it != m_op_events.end()); op_event = std::move(op_it->second); m_op_events.erase(op_it); if (m_shut_down) { - assert(m_flush_ctx != nullptr); + ceph_assert(m_flush_ctx != nullptr); shutting_down = true; } } - assert(op_event.on_start_ready == nullptr || (r < 0 && r != -ERESTART)); + ceph_assert(op_event.on_start_ready == nullptr || (r < 0 && r != -ERESTART)); if (op_event.on_start_ready != nullptr) { // blocking op event failed before it became ready - assert(op_event.on_finish_ready == nullptr && + ceph_assert(op_event.on_finish_ready == nullptr && op_event.on_finish_safe == nullptr); op_event.on_start_ready->complete(0); } else { // event kicked off by OpFinishEvent - assert((op_event.on_finish_ready != nullptr && + ceph_assert((op_event.on_finish_ready != nullptr && op_event.on_finish_safe != nullptr) || shutting_down); } @@ -1066,7 +1066,7 @@ void Replay::handle_op_complete(uint64_t op_tid, int r) { Context *on_flush = nullptr; { Mutex::Locker 
locker(m_lock); - assert(m_in_flight_op_events > 0); + ceph_assert(m_in_flight_op_events > 0); --m_in_flight_op_events; if (m_in_flight_op_events == 0 && (m_in_flight_aio_flush + m_in_flight_aio_modify) == 0) { @@ -1087,7 +1087,7 @@ Replay::create_aio_modify_completion(Context *on_ready, std::set &&filters) { Mutex::Locker locker(m_lock); CephContext *cct = m_image_ctx.cct; - assert(m_on_aio_ready == nullptr); + ceph_assert(m_on_aio_ready == nullptr); if (m_shut_down) { ldout(cct, 5) << ": ignoring event after shut down" << dendl; @@ -1123,7 +1123,7 @@ Replay::create_aio_modify_completion(Context *on_ready, if (m_in_flight_aio_modify == IN_FLIGHT_IO_HIGH_WATER_MARK) { ldout(cct, 10) << ": hit AIO replay high-water mark: pausing replay" << dendl; - assert(m_on_aio_ready == nullptr); + ceph_assert(m_on_aio_ready == nullptr); std::swap(m_on_aio_ready, on_ready); } @@ -1139,7 +1139,7 @@ Replay::create_aio_modify_completion(Context *on_ready, template io::AioCompletion *Replay::create_aio_flush_completion(Context *on_safe) { - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); CephContext *cct = m_image_ctx.cct; if (m_shut_down) { diff --git a/src/librbd/journal/StandardPolicy.cc b/src/librbd/journal/StandardPolicy.cc index 7e5e329d700..58631801703 100644 --- a/src/librbd/journal/StandardPolicy.cc +++ b/src/librbd/journal/StandardPolicy.cc @@ -15,7 +15,7 @@ namespace journal { template void StandardPolicy::allocate_tag_on_lock(Context *on_finish) { - assert(m_image_ctx->journal != nullptr); + ceph_assert(m_image_ctx->journal != nullptr); if (!m_image_ctx->journal->is_tag_owner()) { lderr(m_image_ctx->cct) << "local image not promoted" << dendl; diff --git a/src/librbd/librbd.cc b/src/librbd/librbd.cc index 9616be97f83..e09f92ea657 100644 --- a/src/librbd/librbd.cc +++ b/src/librbd/librbd.cc @@ -2532,7 +2532,7 @@ extern "C" int rbd_mirror_image_status_list(rados_ioctx_t p, size_t i = 0; for (auto &it : cpp_images) { - assert(i < max); + ceph_assert(i < max); 
const std::string &image_id = it.first; image_ids[i] = strdup(image_id.c_str()); mirror_image_status_cpp_to_c(it.second, &images[i]); diff --git a/src/librbd/managed_lock/AcquireRequest.cc b/src/librbd/managed_lock/AcquireRequest.cc index 83f5dbac1a8..9b3e5e8776c 100644 --- a/src/librbd/managed_lock/AcquireRequest.cc +++ b/src/librbd/managed_lock/AcquireRequest.cc @@ -115,7 +115,7 @@ void AcquireRequest::send_lock() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/managed_lock/BreakRequest.cc b/src/librbd/managed_lock/BreakRequest.cc index 9a7fe10f35a..8caaea6ff96 100644 --- a/src/librbd/managed_lock/BreakRequest.cc +++ b/src/librbd/managed_lock/BreakRequest.cc @@ -76,7 +76,7 @@ void BreakRequest::send_get_watchers() { create_rados_callback(this); m_out_bl.clear(); int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -204,7 +204,7 @@ void BreakRequest::send_break_lock() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/managed_lock/GetLockerRequest.cc b/src/librbd/managed_lock/GetLockerRequest.cc index ede93cba95c..157c2dc6777 100644 --- a/src/librbd/managed_lock/GetLockerRequest.cc +++ b/src/librbd/managed_lock/GetLockerRequest.cc @@ -48,7 +48,7 @@ void GetLockerRequest::send_get_lockers() { create_rados_callback(this); m_out_bl.clear(); int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/managed_lock/ReacquireRequest.cc b/src/librbd/managed_lock/ReacquireRequest.cc index 65e63656f51..dc5624d4410 100644 --- 
a/src/librbd/managed_lock/ReacquireRequest.cc +++ b/src/librbd/managed_lock/ReacquireRequest.cc @@ -54,7 +54,7 @@ void ReacquireRequest::set_cookie() { librados::AioCompletion *rados_completion = create_rados_callback< ReacquireRequest, &ReacquireRequest::handle_set_cookie>(this); int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/managed_lock/ReleaseRequest.cc b/src/librbd/managed_lock/ReleaseRequest.cc index 0ccf674f00c..d74e1462522 100644 --- a/src/librbd/managed_lock/ReleaseRequest.cc +++ b/src/librbd/managed_lock/ReleaseRequest.cc @@ -65,7 +65,7 @@ void ReleaseRequest::send_unlock() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_ioctx.aio_operate(m_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/managed_lock/Utils.cc b/src/librbd/managed_lock/Utils.cc index 64210beb74e..89ac6aae564 100644 --- a/src/librbd/managed_lock/Utils.cc +++ b/src/librbd/managed_lock/Utils.cc @@ -30,7 +30,7 @@ bool decode_lock_cookie(const std::string &tag, uint64_t *handle) { } std::string encode_lock_cookie(uint64_t watch_handle) { - assert(watch_handle != 0); + ceph_assert(watch_handle != 0); std::ostringstream ss; ss << WATCHER_LOCK_COOKIE_PREFIX << " " << watch_handle; return ss.str(); diff --git a/src/librbd/mirror/DisableRequest.cc b/src/librbd/mirror/DisableRequest.cc index d9b303fd27c..6a21c5608d7 100644 --- a/src/librbd/mirror/DisableRequest.cc +++ b/src/librbd/mirror/DisableRequest.cc @@ -49,7 +49,7 @@ void DisableRequest::send_get_mirror_image() { create_rados_callback(this); m_out_bl.clear(); int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -131,7 +131,7 @@ void DisableRequest::send_set_mirror_image() { create_rados_callback(this); m_out_bl.clear(); int r = 
m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -190,7 +190,7 @@ void DisableRequest::send_promote_image() { ldout(cct, 10) << this << " " << __func__ << dendl; // Not primary -- shouldn't have the journal open - assert(m_image_ctx->journal == nullptr); + ceph_assert(m_image_ctx->journal == nullptr); using klass = DisableRequest; Context *ctx = util::create_context_callback< @@ -241,7 +241,7 @@ Context *DisableRequest::handle_get_clients(int *result) { Mutex::Locker locker(m_lock); - assert(m_current_ops.empty()); + ceph_assert(m_current_ops.empty()); for (auto client : m_clients) { journal::ClientData client_data; @@ -306,7 +306,7 @@ void DisableRequest::send_remove_snap(const std::string &client_id, ldout(cct, 10) << this << " " << __func__ << ": client_id=" << client_id << ", snap_name=" << snap_name << dendl; - assert(m_lock.is_locked()); + ceph_assert(m_lock.is_locked()); m_current_ops[client_id]++; @@ -330,7 +330,7 @@ Context *DisableRequest::handle_remove_snap(int *result, Mutex::Locker locker(m_lock); - assert(m_current_ops[client_id] > 0); + ceph_assert(m_current_ops[client_id] > 0); m_current_ops[client_id]--; if (*result < 0 && *result != -ENOENT) { @@ -353,8 +353,8 @@ void DisableRequest::send_unregister_client( CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; - assert(m_lock.is_locked()); - assert(m_current_ops[client_id] == 0); + ceph_assert(m_lock.is_locked()); + ceph_assert(m_current_ops[client_id] == 0); Context *ctx = create_context_callback( &DisableRequest::handle_unregister_client, client_id); @@ -370,7 +370,7 @@ void DisableRequest::send_unregister_client( librados::AioCompletion *comp = create_rados_callback(ctx); int r = m_image_ctx->md_ctx.aio_operate(header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -382,7 +382,7 @@ Context *DisableRequest::handle_unregister_client( ldout(cct, 10) << this << 
" " << __func__ << ": r=" << *result << dendl; Mutex::Locker locker(m_lock); - assert(m_current_ops[client_id] == 0); + ceph_assert(m_current_ops[client_id] == 0); m_current_ops.erase(client_id); if (*result < 0 && *result != -ENOENT) { @@ -417,7 +417,7 @@ void DisableRequest::send_remove_mirror_image() { create_rados_callback(this); m_out_bl.clear(); int r = m_image_ctx->md_ctx.aio_operate(RBD_MIRRORING, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/mirror/EnableRequest.cc b/src/librbd/mirror/EnableRequest.cc index b803ee94328..a5c5b1255fa 100644 --- a/src/librbd/mirror/EnableRequest.cc +++ b/src/librbd/mirror/EnableRequest.cc @@ -48,7 +48,7 @@ void EnableRequest::send_get_mirror_image() { create_rados_callback(this); m_out_bl.clear(); int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -139,7 +139,7 @@ void EnableRequest::send_set_mirror_image() { create_rados_callback(this); m_out_bl.clear(); int r = m_io_ctx.aio_operate(RBD_MIRRORING, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/mirror/GetInfoRequest.cc b/src/librbd/mirror/GetInfoRequest.cc index 0d2d87aa1da..1bd692ae3ea 100644 --- a/src/librbd/mirror/GetInfoRequest.cc +++ b/src/librbd/mirror/GetInfoRequest.cc @@ -66,7 +66,7 @@ void GetInfoRequest::get_mirror_image() { librados::AioCompletion *comp = create_rados_callback< GetInfoRequest, &GetInfoRequest::handle_get_mirror_image>(this); int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/mirror/GetStatusRequest.cc b/src/librbd/mirror/GetStatusRequest.cc index 1259ff96017..570968831e3 100644 --- a/src/librbd/mirror/GetStatusRequest.cc +++ b/src/librbd/mirror/GetStatusRequest.cc @@ -72,7 +72,7 @@ void GetStatusRequest::get_status() { librados::AioCompletion *comp = 
create_rados_callback< GetStatusRequest, &GetStatusRequest::handle_get_status>(this); int r = m_image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/object_map/CreateRequest.cc b/src/librbd/object_map/CreateRequest.cc index 399b5cb25f1..f4eee5594fa 100644 --- a/src/librbd/object_map/CreateRequest.cc +++ b/src/librbd/object_map/CreateRequest.cc @@ -70,7 +70,7 @@ void CreateRequest::send_object_map_resize() { std::string oid(ObjectMap<>::object_map_name(m_image_ctx->id, snap_id)); librados::AioCompletion *comp = create_rados_callback(gather_ctx->new_sub()); int r = m_image_ctx->md_ctx.aio_operate(oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } gather_ctx->activate(); diff --git a/src/librbd/object_map/InvalidateRequest.cc b/src/librbd/object_map/InvalidateRequest.cc index e744add68db..754b0902018 100644 --- a/src/librbd/object_map/InvalidateRequest.cc +++ b/src/librbd/object_map/InvalidateRequest.cc @@ -23,8 +23,8 @@ InvalidateRequest* InvalidateRequest::create(I &image_ctx, template void InvalidateRequest::send() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); - assert(image_ctx.snap_lock.is_wlocked()); + ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.snap_lock.is_wlocked()); uint64_t snap_flags; int r = image_ctx.get_flags(m_snap_id, &snap_flags); @@ -65,7 +65,7 @@ void InvalidateRequest::send() { this->create_callback_completion(); r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/object_map/LockRequest.cc b/src/librbd/object_map/LockRequest.cc index 7b143447d2b..4ed4fa1b823 100644 --- a/src/librbd/object_map/LockRequest.cc +++ b/src/librbd/object_map/LockRequest.cc @@ -42,7 +42,7 @@ void LockRequest::send_lock() { librados::AioCompletion *rados_completion = 
create_rados_callback(this); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -81,7 +81,7 @@ void LockRequest::send_get_lock_info() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -130,7 +130,7 @@ void LockRequest::send_break_locks() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/object_map/RefreshRequest.cc b/src/librbd/object_map/RefreshRequest.cc index 769cca5cf5b..c668fc2305e 100644 --- a/src/librbd/object_map/RefreshRequest.cc +++ b/src/librbd/object_map/RefreshRequest.cc @@ -55,7 +55,7 @@ void RefreshRequest::apply() { num_objs = Striper::get_num_objects( m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id)); } - assert(m_on_disk_object_map.size() >= num_objs); + ceph_assert(m_on_disk_object_map.size() >= num_objs); *m_object_map = m_on_disk_object_map; } @@ -87,7 +87,7 @@ Context *RefreshRequest::handle_lock(int *ret_val) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; - assert(*ret_val == 0); + ceph_assert(*ret_val == 0); send_load(); return nullptr; } @@ -106,7 +106,7 @@ void RefreshRequest::send_load() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -181,7 +181,7 @@ Context *RefreshRequest::handle_invalidate(int *ret_val) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl; - assert(*ret_val == 0); + 
ceph_assert(*ret_val == 0); apply(); return m_on_finish; } @@ -211,7 +211,7 @@ Context *RefreshRequest::handle_resize_invalidate(int *ret_val) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl; - assert(*ret_val == 0); + ceph_assert(*ret_val == 0); send_resize(); return nullptr; } @@ -235,7 +235,7 @@ void RefreshRequest::send_resize() { librados::AioCompletion *rados_completion = create_rados_callback(this); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -275,7 +275,7 @@ Context *RefreshRequest::handle_invalidate_and_close(int *ret_val) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *ret_val << dendl; - assert(*ret_val == 0); + ceph_assert(*ret_val == 0); *ret_val = -EFBIG; m_object_map->clear(); diff --git a/src/librbd/object_map/RemoveRequest.cc b/src/librbd/object_map/RemoveRequest.cc index fa6c7b4189a..ee6e2746409 100644 --- a/src/librbd/object_map/RemoveRequest.cc +++ b/src/librbd/object_map/RemoveRequest.cc @@ -43,7 +43,7 @@ void RemoveRequest::send_remove_object_map() { } Mutex::Locker locker(m_lock); - assert(m_ref_counter == 0); + ceph_assert(m_ref_counter == 0); for (auto snap_id : snap_ids) { m_ref_counter++; @@ -53,7 +53,7 @@ void RemoveRequest::send_remove_object_map() { create_rados_callback(this); int r = m_image_ctx->md_ctx.aio_remove(oid, comp); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } } @@ -65,7 +65,7 @@ Context *RemoveRequest::handle_remove_object_map(int *result) { { Mutex::Locker locker(m_lock); - assert(m_ref_counter > 0); + ceph_assert(m_ref_counter > 0); m_ref_counter--; if (*result < 0 && *result != -ENOENT) { diff --git a/src/librbd/object_map/ResizeRequest.cc b/src/librbd/object_map/ResizeRequest.cc index 33e6a219013..1ec0d99afad 100644 --- a/src/librbd/object_map/ResizeRequest.cc +++ 
b/src/librbd/object_map/ResizeRequest.cc @@ -48,7 +48,7 @@ void ResizeRequest::send() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/object_map/SnapshotCreateRequest.cc b/src/librbd/object_map/SnapshotCreateRequest.cc index aae615aabdc..eec5a642950 100644 --- a/src/librbd/object_map/SnapshotCreateRequest.cc +++ b/src/librbd/object_map/SnapshotCreateRequest.cc @@ -75,8 +75,8 @@ bool SnapshotCreateRequest::should_complete(int r) { } void SnapshotCreateRequest::send_read_map() { - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.get_snap_info(m_snap_id) != NULL); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.get_snap_info(m_snap_id) != NULL); CephContext *cct = m_image_ctx.cct; std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP)); @@ -90,7 +90,7 @@ void SnapshotCreateRequest::send_read_map() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op, &m_read_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -106,7 +106,7 @@ void SnapshotCreateRequest::send_write_map() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -127,7 +127,7 @@ bool SnapshotCreateRequest::send_add_snapshot() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); return false; } diff --git a/src/librbd/object_map/SnapshotRemoveRequest.cc b/src/librbd/object_map/SnapshotRemoveRequest.cc index 
a092dda5ab2..ec33df7c1ce 100644 --- a/src/librbd/object_map/SnapshotRemoveRequest.cc +++ b/src/librbd/object_map/SnapshotRemoveRequest.cc @@ -43,15 +43,15 @@ std::ostream& operator<<(std::ostream& os, } // anonymous namespace void SnapshotRemoveRequest::send() { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.snap_lock.is_wlocked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_wlocked()); if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) { compute_next_snap_id(); uint64_t flags; int r = m_image_ctx.get_flags(m_snap_id, &flags); - assert(r == 0); + ceph_assert(r == 0); if ((flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0) { send_invalidate_next_map(); @@ -123,7 +123,7 @@ void SnapshotRemoveRequest::send_load_map() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -141,13 +141,13 @@ void SnapshotRemoveRequest::send_remove_snapshot() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } void SnapshotRemoveRequest::send_invalidate_next_map() { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_image_ctx.snap_lock.is_wlocked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_wlocked()); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -170,17 +170,17 @@ void SnapshotRemoveRequest::send_remove_map() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } void SnapshotRemoveRequest::compute_next_snap_id() { - 
assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); m_next_snap_id = CEPH_NOSNAP; std::map::const_iterator it = m_image_ctx.snap_info.find(m_snap_id); - assert(it != m_image_ctx.snap_info.end()); + ceph_assert(it != m_image_ctx.snap_info.end()); ++it; if (it != m_image_ctx.snap_info.end()) { diff --git a/src/librbd/object_map/SnapshotRollbackRequest.cc b/src/librbd/object_map/SnapshotRollbackRequest.cc index 6c8b955253d..d32123ffcf2 100644 --- a/src/librbd/object_map/SnapshotRollbackRequest.cc +++ b/src/librbd/object_map/SnapshotRollbackRequest.cc @@ -89,7 +89,7 @@ void SnapshotRollbackRequest::send_read_map() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op, &m_read_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -109,7 +109,7 @@ void SnapshotRollbackRequest::send_write_map() { librados::AioCompletion *rados_completion = create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(snap_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/object_map/SnapshotRollbackRequest.h b/src/librbd/object_map/SnapshotRollbackRequest.h index c62849e8564..e26b1e0a37b 100644 --- a/src/librbd/object_map/SnapshotRollbackRequest.h +++ b/src/librbd/object_map/SnapshotRollbackRequest.h @@ -47,7 +47,7 @@ public: Context *on_finish) : Request(image_ctx, CEPH_NOSNAP, on_finish), m_snap_id(snap_id), m_ret_val(0) { - assert(snap_id != CEPH_NOSNAP); + ceph_assert(snap_id != CEPH_NOSNAP); } void send() override; diff --git a/src/librbd/object_map/UnlockRequest.cc b/src/librbd/object_map/UnlockRequest.cc index ad9d3d93ab7..0220ec900d8 100644 --- a/src/librbd/object_map/UnlockRequest.cc +++ b/src/librbd/object_map/UnlockRequest.cc @@ -41,7 +41,7 @@ void UnlockRequest::send_unlock() { librados::AioCompletion *rados_completion = 
create_rados_callback(this); int r = m_image_ctx.md_ctx.aio_operate(oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/object_map/UpdateRequest.cc b/src/librbd/object_map/UpdateRequest.cc index 36ccc6159aa..36f72dd40a7 100644 --- a/src/librbd/object_map/UpdateRequest.cc +++ b/src/librbd/object_map/UpdateRequest.cc @@ -33,8 +33,8 @@ void UpdateRequest::send() { template void UpdateRequest::update_object_map() { - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.object_map_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.object_map_lock.is_locked()); CephContext *cct = m_image_ctx.cct; // break very large requests into manageable batches @@ -64,7 +64,7 @@ void UpdateRequest::update_object_map() { int r = m_image_ctx.md_ctx.aio_operate( oid, rados_completion, &op, 0, snaps, (m_trace.valid() ? m_trace.get_info() : nullptr)); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -90,8 +90,8 @@ void UpdateRequest::handle_update_object_map(int r) { template void UpdateRequest::update_in_memory_object_map() { - assert(m_image_ctx.snap_lock.is_locked()); - assert(m_image_ctx.object_map_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.object_map_lock.is_locked()); // rebuilding the object map might update on-disk only if (m_snap_id == m_image_ctx.snap_id) { diff --git a/src/librbd/operation/DisableFeaturesRequest.cc b/src/librbd/operation/DisableFeaturesRequest.cc index 3f1d338a4ae..afb99e34b92 100644 --- a/src/librbd/operation/DisableFeaturesRequest.cc +++ b/src/librbd/operation/DisableFeaturesRequest.cc @@ -41,7 +41,7 @@ template void DisableFeaturesRequest::send_op() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); ldout(cct, 20) << this << " " << __func__ << ": 
features=" << m_features << dendl; @@ -225,7 +225,7 @@ void DisableFeaturesRequest::send_get_mirror_mode() { create_rados_callback(this); m_out_bl.clear(); int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -273,7 +273,7 @@ void DisableFeaturesRequest::send_get_mirror_image() { create_rados_callback(this); m_out_bl.clear(); int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -374,7 +374,7 @@ Context *DisableFeaturesRequest::handle_close_journal(int *result) { << dendl; } - assert(m_journal != nullptr); + ceph_assert(m_journal != nullptr); delete m_journal; m_journal = nullptr; @@ -495,7 +495,7 @@ void DisableFeaturesRequest::send_set_features() { librados::AioCompletion *comp = create_rados_callback(this); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/operation/EnableFeaturesRequest.cc b/src/librbd/operation/EnableFeaturesRequest.cc index b9d7420cd1a..8cd3f00b55e 100644 --- a/src/librbd/operation/EnableFeaturesRequest.cc +++ b/src/librbd/operation/EnableFeaturesRequest.cc @@ -38,7 +38,7 @@ template void EnableFeaturesRequest::send_op() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features << dendl; @@ -135,7 +135,7 @@ void EnableFeaturesRequest::send_get_mirror_mode() { create_rados_callback(this); m_out_bl.clear(); int r = image_ctx.md_ctx.aio_operate(RBD_MIRRORING, comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -336,7 +336,7 @@ void EnableFeaturesRequest::send_set_features() { librados::AioCompletion *comp = create_rados_callback(this); int r = 
image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/operation/FlattenRequest.cc b/src/librbd/operation/FlattenRequest.cc index 877384bb6df..071cbe43d17 100644 --- a/src/librbd/operation/FlattenRequest.cc +++ b/src/librbd/operation/FlattenRequest.cc @@ -31,7 +31,7 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; if (image_ctx.exclusive_lock != nullptr && @@ -99,7 +99,7 @@ bool FlattenRequest::should_complete(int r) { template void FlattenRequest::send_op() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " send" << dendl; @@ -116,11 +116,11 @@ void FlattenRequest::send_op() { template bool FlattenRequest::send_detach_child() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); // if there are no snaps, remove from the children object as well @@ -146,14 +146,14 @@ bool FlattenRequest::send_detach_child() { template bool FlattenRequest::send_update_header() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " send_update_header" << dendl; m_state = STATE_UPDATE_HEADER; // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || 
image_ctx.exclusive_lock->is_lock_owner()); { @@ -173,7 +173,7 @@ bool FlattenRequest::send_update_header() { librados::AioCompletion *rados_completion = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); return false; } diff --git a/src/librbd/operation/MetadataRemoveRequest.cc b/src/librbd/operation/MetadataRemoveRequest.cc index e8f00a5a239..828e7a5b6fd 100644 --- a/src/librbd/operation/MetadataRemoveRequest.cc +++ b/src/librbd/operation/MetadataRemoveRequest.cc @@ -40,7 +40,7 @@ bool MetadataRemoveRequest::should_complete(int r) { template void MetadataRemoveRequest::send_metadata_remove() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; @@ -50,7 +50,7 @@ void MetadataRemoveRequest::send_metadata_remove() { librados::AioCompletion *comp = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/operation/MetadataSetRequest.cc b/src/librbd/operation/MetadataSetRequest.cc index 0cc2bc7646c..760e9b1e32e 100644 --- a/src/librbd/operation/MetadataSetRequest.cc +++ b/src/librbd/operation/MetadataSetRequest.cc @@ -41,7 +41,7 @@ bool MetadataSetRequest::should_complete(int r) { template void MetadataSetRequest::send_metadata_set() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; @@ -52,7 +52,7 @@ void MetadataSetRequest::send_metadata_set() { librados::AioCompletion *comp = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, 
&op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } diff --git a/src/librbd/operation/MigrateRequest.cc b/src/librbd/operation/MigrateRequest.cc index 18737f01e28..5cf33887900 100644 --- a/src/librbd/operation/MigrateRequest.cc +++ b/src/librbd/operation/MigrateRequest.cc @@ -40,7 +40,7 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; if (image_ctx.exclusive_lock != nullptr && @@ -62,11 +62,11 @@ private: void start_async_op() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 10) << dendl; - assert(m_async_op == nullptr); + ceph_assert(m_async_op == nullptr); m_async_op = new io::AsyncOperation(); m_async_op->start_op(image_ctx); @@ -110,7 +110,7 @@ private: void migrate_object() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; auto ctx = create_context_callback< @@ -128,7 +128,7 @@ private: req->send(); } else { - assert(image_ctx.parent != nullptr); + ceph_assert(image_ctx.parent != nullptr); auto req = deep_copy::ObjectCopyRequest::create( image_ctx.parent, image_ctx.migration_parent, &image_ctx, @@ -160,7 +160,7 @@ private: template void MigrateRequest::send_op() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 10) << dendl; @@ -184,7 +184,7 @@ template void MigrateRequest::migrate_objects() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); uint64_t overlap_objects = get_num_overlap_objects(); diff --git 
a/src/librbd/operation/MigrateRequest.h b/src/librbd/operation/MigrateRequest.h index a74dab38f4c..654356b6887 100644 --- a/src/librbd/operation/MigrateRequest.h +++ b/src/librbd/operation/MigrateRequest.h @@ -30,7 +30,7 @@ protected: return true; } journal::Event create_event(uint64_t op_tid) const override { - assert(0); + ceph_assert(0); return journal::UnknownEvent(); } diff --git a/src/librbd/operation/ObjectMapIterate.cc b/src/librbd/operation/ObjectMapIterate.cc index 007300ee3a1..2cd75a76bc8 100644 --- a/src/librbd/operation/ObjectMapIterate.cc +++ b/src/librbd/operation/ObjectMapIterate.cc @@ -91,7 +91,7 @@ private: void send_list_snaps() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); ldout(image_ctx.cct, 5) << m_oid << " C_VerifyObjectCallback::send_list_snaps" << dendl; @@ -101,7 +101,7 @@ private: librados::AioCompletion *comp = util::create_rados_callback(this); int r = m_io_ctx.aio_operate(m_oid, comp, &op, NULL); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -137,7 +137,7 @@ private: uint64_t next_valid_snap_id(uint64_t snap_id) { I &image_ctx = this->m_image_ctx; - assert(image_ctx.snap_lock.is_locked()); + ceph_assert(image_ctx.snap_lock.is_locked()); std::map::iterator it = image_ctx.snap_info.lower_bound(snap_id); @@ -153,11 +153,11 @@ private: RWLock::RLocker owner_locker(image_ctx.owner_lock); // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); RWLock::RLocker snap_locker(image_ctx.snap_lock); - assert(image_ctx.object_map != nullptr); + ceph_assert(image_ctx.object_map != nullptr); RWLock::WLocker l(image_ctx.object_map_lock); uint8_t state = (*image_ctx.object_map)[m_object_no]; @@ -170,7 +170,7 @@ private: if (state != new_state) { int r = 0; - assert(m_handle_mismatch); + 
ceph_assert(m_handle_mismatch); r = m_handle_mismatch(image_ctx, m_object_no, state, new_state); if (r) { lderr(cct) << "object map error: object " @@ -237,7 +237,7 @@ bool ObjectMapIterateRequest::should_complete(int r) { template void ObjectMapIterateRequest::send_verify_objects() { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; uint64_t snap_id; @@ -264,7 +264,7 @@ void ObjectMapIterateRequest::send_verify_objects() { template uint64_t ObjectMapIterateRequest::get_image_size() const { - assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); if (m_image_ctx.snap_id == CEPH_NOSNAP) { if (!m_image_ctx.resize_reqs.empty()) { return m_image_ctx.resize_reqs.front()->get_image_size(); @@ -287,7 +287,7 @@ void ObjectMapIterateRequest::send_invalidate_object_map() { true, this->create_callback_context()); - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); RWLock::WLocker snap_locker(m_image_ctx.snap_lock); req->send(); } diff --git a/src/librbd/operation/RebuildObjectMapRequest.cc b/src/librbd/operation/RebuildObjectMapRequest.cc index 74e71741665..b93258b9a42 100644 --- a/src/librbd/operation/RebuildObjectMapRequest.cc +++ b/src/librbd/operation/RebuildObjectMapRequest.cc @@ -93,11 +93,11 @@ bool RebuildObjectMapRequest::should_complete(int r) { template void RebuildObjectMapRequest::send_resize_object_map() { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; m_image_ctx.snap_lock.get_read(); - assert(m_image_ctx.object_map != nullptr); + ceph_assert(m_image_ctx.object_map != nullptr); uint64_t size = get_image_size(); uint64_t num_objects = Striper::get_num_objects(m_image_ctx.layout, size); @@ -112,7 +112,7 @@ void RebuildObjectMapRequest::send_resize_object_map() { m_state = STATE_RESIZE_OBJECT_MAP; // should have been 
canceled prior to releasing lock - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); m_image_ctx.object_map->aio_resize(size, OBJECT_NONEXISTENT, @@ -127,7 +127,7 @@ void RebuildObjectMapRequest::send_trim_image() { RWLock::RLocker l(m_image_ctx.owner_lock); // should have been canceled prior to releasing lock - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); ldout(cct, 5) << this << " send_trim_image" << dendl; m_state = STATE_TRIM_IMAGE; @@ -136,7 +136,7 @@ void RebuildObjectMapRequest::send_trim_image() { uint64_t orig_size; { RWLock::RLocker l(m_image_ctx.snap_lock); - assert(m_image_ctx.object_map != nullptr); + ceph_assert(m_image_ctx.object_map != nullptr); new_size = get_image_size(); orig_size = m_image_ctx.get_object_size() * @@ -173,7 +173,7 @@ bool update_object_map(I& image_ctx, uint64_t object_no, uint8_t current_state, template void RebuildObjectMapRequest::send_verify_objects() { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; m_state = STATE_VERIFY_OBJECTS; @@ -189,27 +189,27 @@ void RebuildObjectMapRequest::send_verify_objects() { template void RebuildObjectMapRequest::send_save_object_map() { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " send_save_object_map" << dendl; m_state = STATE_SAVE_OBJECT_MAP; // should have been canceled prior to releasing lock - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); RWLock::RLocker snap_locker(m_image_ctx.snap_lock); - assert(m_image_ctx.object_map != nullptr); + ceph_assert(m_image_ctx.object_map != nullptr); 
m_image_ctx.object_map->aio_save(this->create_callback_context()); } template void RebuildObjectMapRequest::send_update_header() { - assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(m_image_ctx.owner_lock.is_locked()); // should have been canceled prior to releasing lock - assert(m_image_ctx.exclusive_lock == nullptr || + ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); ldout(m_image_ctx.cct, 5) << this << " send_update_header" << dendl; @@ -222,7 +222,7 @@ void RebuildObjectMapRequest::send_update_header() { librados::AioCompletion *comp = this->create_callback_completion(); int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); RWLock::WLocker snap_locker(m_image_ctx.snap_lock); @@ -231,7 +231,7 @@ void RebuildObjectMapRequest::send_update_header() { template uint64_t RebuildObjectMapRequest::get_image_size() const { - assert(m_image_ctx.snap_lock.is_locked()); + ceph_assert(m_image_ctx.snap_lock.is_locked()); if (m_image_ctx.snap_id == CEPH_NOSNAP) { if (!m_image_ctx.resize_reqs.empty()) { return m_image_ctx.resize_reqs.front()->get_image_size(); diff --git a/src/librbd/operation/RenameRequest.cc b/src/librbd/operation/RenameRequest.cc index 1e837472948..44c5e2cd468 100644 --- a/src/librbd/operation/RenameRequest.cc +++ b/src/librbd/operation/RenameRequest.cc @@ -135,7 +135,7 @@ void RenameRequest::send_read_source_header() { librados::AioCompletion *rados_completion = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(m_source_oid, rados_completion, &op, &m_header_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -152,7 +152,7 @@ void RenameRequest::send_write_destination_header() { librados::AioCompletion *rados_completion = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(m_dest_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); 
rados_completion->release(); } @@ -180,7 +180,7 @@ void RenameRequest::send_update_directory() { librados::AioCompletion *rados_completion = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(RBD_DIRECTORY, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -196,7 +196,7 @@ void RenameRequest::send_remove_source_header() { librados::AioCompletion *rados_completion = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(m_source_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/operation/Request.cc b/src/librbd/operation/Request.cc index d6201d4cb46..429cc01e180 100644 --- a/src/librbd/operation/Request.cc +++ b/src/librbd/operation/Request.cc @@ -22,7 +22,7 @@ Request::Request(I &image_ctx, Context *on_finish, uint64_t journal_op_tid) template void Request::send() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); // automatically create the event if we don't need to worry // about affecting concurrent IO ops @@ -66,7 +66,7 @@ void Request::finish(int r) { CephContext *cct = image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; - assert(!m_appended_op_event || m_committed_op_event); + ceph_assert(!m_appended_op_event || m_committed_op_event); AsyncRequest::finish(r); } @@ -74,7 +74,7 @@ template bool Request::append_op_event() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); RWLock::RLocker snap_locker(image_ctx.snap_lock); if (image_ctx.journal != nullptr && image_ctx.journal->is_journal_appending()) { @@ -94,8 +94,8 @@ bool Request::commit_op_event(int r) { return false; } - assert(m_op_tid != 0); - assert(!m_committed_op_event); + ceph_assert(m_op_tid != 0); + ceph_assert(!m_committed_op_event); m_committed_op_event = 
true; if (image_ctx.journal != nullptr && @@ -104,7 +104,7 @@ bool Request::commit_op_event(int r) { ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; // ops will be canceled / completed before closing journal - assert(image_ctx.journal->is_journal_ready()); + ceph_assert(image_ctx.journal->is_journal_ready()); image_ctx.journal->commit_op_event(m_op_tid, r, new C_CommitOpEvent(this, r)); return true; @@ -131,9 +131,9 @@ void Request::handle_commit_op_event(int r, int original_ret_val) { template void Request::replay_op_ready(Context *on_safe) { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); - assert(image_ctx.snap_lock.is_locked()); - assert(m_op_tid != 0); + ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.snap_lock.is_locked()); + ceph_assert(m_op_tid != 0); m_appended_op_event = true; image_ctx.journal->replay_op_ready( @@ -143,8 +143,8 @@ void Request::replay_op_ready(Context *on_safe) { template void Request::append_op_event(Context *on_safe) { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); - assert(image_ctx.snap_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.snap_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -167,7 +167,7 @@ void Request::handle_op_event_safe(int r) { this->finish(r); delete this; } else { - assert(!can_affect_io()); + ceph_assert(!can_affect_io()); // haven't started the request state machine yet RWLock::RLocker owner_locker(image_ctx.owner_lock); diff --git a/src/librbd/operation/Request.h b/src/librbd/operation/Request.h index 8eaca29b1d1..315a3c964ec 100644 --- a/src/librbd/operation/Request.h +++ b/src/librbd/operation/Request.h @@ -37,7 +37,7 @@ protected: bool append_op_event(T *request) { ImageCtxT &image_ctx = this->m_image_ctx; - assert(can_affect_io()); + ceph_assert(can_affect_io()); RWLock::RLocker 
owner_locker(image_ctx.owner_lock); RWLock::RLocker snap_locker(image_ctx.snap_lock); if (image_ctx.journal != nullptr) { diff --git a/src/librbd/operation/ResizeRequest.cc b/src/librbd/operation/ResizeRequest.cc index 31827f562f5..17bc7ff17c6 100644 --- a/src/librbd/operation/ResizeRequest.cc +++ b/src/librbd/operation/ResizeRequest.cc @@ -43,7 +43,7 @@ ResizeRequest::~ResizeRequest() { ResizeRequest *next_req = NULL; { RWLock::WLocker snap_locker(image_ctx.snap_lock); - assert(m_xlist_item.remove_myself()); + ceph_assert(m_xlist_item.remove_myself()); if (!image_ctx.resize_reqs.empty()) { next_req = image_ctx.resize_reqs.front(); } @@ -58,7 +58,7 @@ ResizeRequest::~ResizeRequest() { template void ResizeRequest::send() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); { RWLock::WLocker snap_locker(image_ctx.snap_lock); @@ -69,7 +69,7 @@ void ResizeRequest::send() { } } - assert(image_ctx.resize_reqs.front() == this); + ceph_assert(image_ctx.resize_reqs.front() == this); m_original_size = image_ctx.size; compute_parent_overlap(); } @@ -80,7 +80,7 @@ void ResizeRequest::send() { template void ResizeRequest::send_op() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); if (this->is_canceled()) { this->async_complete(-ERESTART); @@ -278,7 +278,7 @@ Context *ResizeRequest::send_grow_object_map() { ldout(cct, 5) << this << " " << __func__ << dendl; // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); image_ctx.object_map->aio_resize( @@ -295,7 +295,7 @@ Context *ResizeRequest::handle_grow_object_map(int *result) { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl; - assert(*result == 0); + ceph_assert(*result == 0); 
send_post_block_writes(); return nullptr; } @@ -320,7 +320,7 @@ Context *ResizeRequest::send_shrink_object_map() { << "new_size=" << m_new_size << dendl; // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); image_ctx.object_map->aio_resize( @@ -338,7 +338,7 @@ Context *ResizeRequest::handle_shrink_object_map(int *result) { ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl; update_size_and_overlap(); - assert(*result == 0); + ceph_assert(*result == 0); return this->create_context_finisher(0); } @@ -380,7 +380,7 @@ void ResizeRequest::send_update_header() { // should have been canceled prior to releasing lock RWLock::RLocker owner_locker(image_ctx.owner_lock); - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); librados::ObjectWriteOperation op; @@ -398,7 +398,7 @@ void ResizeRequest::send_update_header() { ResizeRequest, &ResizeRequest::handle_update_header>(this); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/operation/SnapshotCreateRequest.cc b/src/librbd/operation/SnapshotCreateRequest.cc index dc11b28f7d3..2b693e032dc 100644 --- a/src/librbd/operation/SnapshotCreateRequest.cc +++ b/src/librbd/operation/SnapshotCreateRequest.cc @@ -63,7 +63,7 @@ Context *SnapshotCreateRequest::handle_suspend_requests(int *result) { template void SnapshotCreateRequest::send_suspend_aio() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -163,7 +163,7 @@ void SnapshotCreateRequest::send_create_snap() { RWLock::RLocker 
parent_locker(image_ctx.parent_lock); // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); // save current size / parent info for creating snapshot record in ImageCtx @@ -182,7 +182,7 @@ void SnapshotCreateRequest::send_create_snap() { &SnapshotCreateRequest::handle_create_snap>(this); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } @@ -238,7 +238,7 @@ Context *SnapshotCreateRequest::handle_create_object_map(int *result) { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl; - assert(*result == 0); + ceph_assert(*result == 0); image_ctx.io_work_queue->unblock_writes(); return this->create_context_finisher(0); @@ -250,7 +250,7 @@ void SnapshotCreateRequest::send_release_snap_id() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; - assert(m_snap_id != CEPH_NOSNAP); + ceph_assert(m_snap_id != CEPH_NOSNAP); librados::AioCompletion *rados_completion = create_rados_callback< SnapshotCreateRequest, @@ -265,7 +265,7 @@ Context *SnapshotCreateRequest::handle_release_snap_id(int *result) { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl; - assert(m_ret_val < 0); + ceph_assert(m_ret_val < 0); *result = m_ret_val; image_ctx.io_work_queue->unblock_writes(); @@ -290,7 +290,7 @@ void SnapshotCreateRequest::update_snap_context() { ldout(cct, 5) << this << " " << __func__ << dendl; // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); // immediately add a reference to the new snapshot @@ -313,8 +313,8 @@ void 
SnapshotCreateRequest::update_snap_context() { if (!image_ctx.migration_info.empty()) { auto it = image_ctx.migration_info.snap_map.find(CEPH_NOSNAP); - assert(it != image_ctx.migration_info.snap_map.end()); - assert(!it->second.empty()); + ceph_assert(it != image_ctx.migration_info.snap_map.end()); + ceph_assert(!it->second.empty()); if (it->second[0] == CEPH_NOSNAP) { ldout(cct, 5) << this << " " << __func__ << ": updating migration snap_map" << dendl; diff --git a/src/librbd/operation/SnapshotLimitRequest.cc b/src/librbd/operation/SnapshotLimitRequest.cc index 74f4723e916..d47dc3a86af 100644 --- a/src/librbd/operation/SnapshotLimitRequest.cc +++ b/src/librbd/operation/SnapshotLimitRequest.cc @@ -40,7 +40,7 @@ bool SnapshotLimitRequest::should_complete(int r) { template void SnapshotLimitRequest::send_limit_snaps() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -56,7 +56,7 @@ void SnapshotLimitRequest::send_limit_snaps() { this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } } diff --git a/src/librbd/operation/SnapshotProtectRequest.cc b/src/librbd/operation/SnapshotProtectRequest.cc index 018155d7aff..9a0375fc7c2 100644 --- a/src/librbd/operation/SnapshotProtectRequest.cc +++ b/src/librbd/operation/SnapshotProtectRequest.cc @@ -61,7 +61,7 @@ bool SnapshotProtectRequest::should_complete(int r) { template void SnapshotProtectRequest::send_protect_snap() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -108,7 +108,7 @@ int SnapshotProtectRequest::verify_and_send_protect_snap() { 
this->create_callback_completion(); r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); return 0; } diff --git a/src/librbd/operation/SnapshotRemoveRequest.cc b/src/librbd/operation/SnapshotRemoveRequest.cc index d0c81777b62..3bd1c2123db 100644 --- a/src/librbd/operation/SnapshotRemoveRequest.cc +++ b/src/librbd/operation/SnapshotRemoveRequest.cc @@ -36,7 +36,7 @@ void SnapshotRemoveRequest::send_op() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); { RWLock::RLocker snap_locker(image_ctx.snap_lock); RWLock::RLocker object_map_locker(image_ctx.object_map_lock); @@ -83,7 +83,7 @@ void SnapshotRemoveRequest::trash_snap() { SnapshotRemoveRequest, &SnapshotRemoveRequest::handle_trash_snap>(this); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -122,7 +122,7 @@ void SnapshotRemoveRequest::get_snap() { &SnapshotRemoveRequest::handle_get_snap>(this); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op, &m_out_bl); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -308,7 +308,7 @@ void SnapshotRemoveRequest::remove_snap() { SnapshotRemoveRequest, &SnapshotRemoveRequest::handle_remove_snap>(this); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -341,8 +341,8 @@ void SnapshotRemoveRequest::remove_snap_context() { template int SnapshotRemoveRequest::scan_for_parents(ParentSpec &pspec) { I &image_ctx = this->m_image_ctx; - assert(image_ctx.snap_lock.is_locked()); - assert(image_ctx.parent_lock.is_locked()); + ceph_assert(image_ctx.snap_lock.is_locked()); + ceph_assert(image_ctx.parent_lock.is_locked()); if (pspec.pool_id != -1) { map::iterator it; 
diff --git a/src/librbd/operation/SnapshotRenameRequest.cc b/src/librbd/operation/SnapshotRenameRequest.cc index a11489eb54e..b25b991f4d1 100644 --- a/src/librbd/operation/SnapshotRenameRequest.cc +++ b/src/librbd/operation/SnapshotRenameRequest.cc @@ -41,7 +41,7 @@ SnapshotRenameRequest::SnapshotRenameRequest(I &image_ctx, template journal::Event SnapshotRenameRequest::create_event(uint64_t op_tid) const { I &image_ctx = this->m_image_ctx; - assert(image_ctx.snap_lock.is_locked()); + ceph_assert(image_ctx.snap_lock.is_locked()); std::string src_snap_name; auto snap_info_it = image_ctx.snap_info.find(m_snap_id); @@ -77,7 +77,7 @@ bool SnapshotRenameRequest::should_complete(int r) { template void SnapshotRenameRequest::send_rename_snap() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); RWLock::RLocker md_locker(image_ctx.md_lock); RWLock::RLocker snap_locker(image_ctx.snap_lock); @@ -94,7 +94,7 @@ void SnapshotRenameRequest::send_rename_snap() { librados::AioCompletion *rados_completion = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, rados_completion, &op); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); } diff --git a/src/librbd/operation/SnapshotRollbackRequest.cc b/src/librbd/operation/SnapshotRollbackRequest.cc index 2b98af3278e..0cd85463bf7 100644 --- a/src/librbd/operation/SnapshotRollbackRequest.cc +++ b/src/librbd/operation/SnapshotRollbackRequest.cc @@ -222,7 +222,7 @@ Context *SnapshotRollbackRequest::handle_get_snap_object_map(int *result) { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl; - assert(*result == 0); + ceph_assert(*result == 0); send_rollback_object_map(); return nullptr; } @@ -256,7 +256,7 @@ Context *SnapshotRollbackRequest::handle_rollback_object_map(int *result) { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << 
__func__ << ": r=" << *result << dendl; - assert(*result == 0); + ceph_assert(*result == 0); send_rollback_objects(); return nullptr; } @@ -337,7 +337,7 @@ Context *SnapshotRollbackRequest::handle_refresh_object_map(int *result) { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": r=" << *result << dendl; - assert(*result == 0); + ceph_assert(*result == 0); return send_invalidate_cache(); } diff --git a/src/librbd/operation/SnapshotUnprotectRequest.cc b/src/librbd/operation/SnapshotUnprotectRequest.cc index e456bbfdead..ce5ba9a6fb2 100644 --- a/src/librbd/operation/SnapshotUnprotectRequest.cc +++ b/src/librbd/operation/SnapshotUnprotectRequest.cc @@ -64,7 +64,7 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 10) << this << " scanning pool '" << m_pool.second << "'" @@ -108,7 +108,7 @@ public: util::create_rados_callback(this); r = m_pool_ioctx.aio_operate(RBD_CHILDREN, rados_completion, &op, &m_children_bl); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); return 0; } @@ -228,7 +228,7 @@ bool SnapshotUnprotectRequest::should_complete_error() { template void SnapshotUnprotectRequest::send_unprotect_snap_start() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -243,7 +243,7 @@ void SnapshotUnprotectRequest::send_unprotect_snap_start() { template void SnapshotUnprotectRequest::send_scan_pool_children() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -273,7 +273,7 @@ void SnapshotUnprotectRequest::send_scan_pool_children() { template 
void SnapshotUnprotectRequest::send_unprotect_snap_finish() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -286,14 +286,14 @@ void SnapshotUnprotectRequest::send_unprotect_snap_finish() { librados::AioCompletion *comp = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } template void SnapshotUnprotectRequest::send_unprotect_snap_rollback() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -306,7 +306,7 @@ void SnapshotUnprotectRequest::send_unprotect_snap_rollback() { librados::AioCompletion *comp = this->create_callback_completion(); int r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -344,7 +344,7 @@ int SnapshotUnprotectRequest::verify_and_send_unprotect_snap_start() { librados::AioCompletion *comp = this->create_callback_completion(); r = image_ctx.md_ctx.aio_operate(image_ctx.header_oid, comp, &op); - assert(r == 0); + ceph_assert(r == 0); comp->release(); // TODO legacy code threw a notification post UNPROTECTING update -- required? 
diff --git a/src/librbd/operation/TrimRequest.cc b/src/librbd/operation/TrimRequest.cc index 99d144e064b..8bc183e1c77 100644 --- a/src/librbd/operation/TrimRequest.cc +++ b/src/librbd/operation/TrimRequest.cc @@ -39,8 +39,8 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); string oid = image_ctx.get_object_name(m_object_no); @@ -69,8 +69,8 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); { @@ -87,7 +87,7 @@ public: librados::AioCompletion *rados_completion = util::create_rados_callback(this); int r = image_ctx.data_ctx.aio_remove(oid, rados_completion); - assert(r == 0); + ceph_assert(r == 0); rados_completion->release(); return 0; } @@ -180,7 +180,7 @@ void TrimRequest::send() { template void TrimRequest::send_pre_trim() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); if (m_delete_start >= m_num_objects) { send_clean_boundary(); @@ -195,7 +195,7 @@ void TrimRequest::send_pre_trim() { << " num_objects=" << m_num_objects << dendl; m_state = STATE_PRE_TRIM; - assert(image_ctx.exclusive_lock->is_lock_owner()); + ceph_assert(image_ctx.exclusive_lock->is_lock_owner()); RWLock::WLocker object_map_locker(image_ctx.object_map_lock); if (image_ctx.object_map->template aio_update >( @@ -212,7 +212,7 @@ void TrimRequest::send_pre_trim() { template void TrimRequest::send_copyup_objects() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + 
ceph_assert(image_ctx.owner_lock.is_locked()); ::SnapContext snapc; bool has_snapshots; @@ -224,7 +224,7 @@ void TrimRequest::send_copyup_objects() { snapc = image_ctx.snapc; has_snapshots = !image_ctx.snaps.empty(); int r = image_ctx.get_parent_overlap(CEPH_NOSNAP, &parent_overlap); - assert(r == 0); + ceph_assert(r == 0); } // copyup is only required for portion of image that overlaps parent @@ -259,7 +259,7 @@ void TrimRequest::send_copyup_objects() { template void TrimRequest::send_remove_objects() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); ldout(image_ctx.cct, 5) << this << " send_remove_objects: " << " delete_start=" << m_delete_start @@ -279,7 +279,7 @@ void TrimRequest::send_remove_objects() { template void TrimRequest::send_post_trim() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); { RWLock::RLocker snap_locker(image_ctx.snap_lock); @@ -289,7 +289,7 @@ void TrimRequest::send_post_trim() { << " num_objects=" << m_num_objects << dendl; m_state = STATE_POST_TRIM; - assert(image_ctx.exclusive_lock->is_lock_owner()); + ceph_assert(image_ctx.exclusive_lock->is_lock_owner()); RWLock::WLocker object_map_locker(image_ctx.object_map_lock); if (image_ctx.object_map->template aio_update >( @@ -306,7 +306,7 @@ void TrimRequest::send_post_trim() { template void TrimRequest::send_clean_boundary() { I &image_ctx = this->m_image_ctx; - assert(image_ctx.owner_lock.is_locked()); + ceph_assert(image_ctx.owner_lock.is_locked()); CephContext *cct = image_ctx.cct; if (m_delete_off <= m_new_size) { send_finish(0); @@ -314,7 +314,7 @@ void TrimRequest::send_clean_boundary() { } // should have been canceled prior to releasing lock - assert(image_ctx.exclusive_lock == nullptr || + ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); uint64_t delete_len = m_delete_off - 
m_new_size; ldout(image_ctx.cct, 5) << this << " send_clean_boundary: " diff --git a/src/librbd/trash/MoveRequest.cc b/src/librbd/trash/MoveRequest.cc index cf80c853b36..526e92abca2 100644 --- a/src/librbd/trash/MoveRequest.cc +++ b/src/librbd/trash/MoveRequest.cc @@ -36,7 +36,7 @@ void MoveRequest::trash_add() { auto aio_comp = create_rados_callback< MoveRequest, &MoveRequest::handle_trash_add>(this); int r = m_io_ctx.aio_operate(RBD_TRASH, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -65,7 +65,7 @@ void MoveRequest::remove_id() { MoveRequest, &MoveRequest::handle_remove_id>(this); int r = m_io_ctx.aio_remove(util::id_obj_name(m_trash_image_spec.name), aio_comp); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -94,7 +94,7 @@ void MoveRequest::directory_remove() { auto aio_comp = create_rados_callback< MoveRequest, &MoveRequest::handle_directory_remove>(this); int r = m_io_ctx.aio_operate(RBD_DIRECTORY, aio_comp, &op); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } diff --git a/src/librbd/watcher/Notifier.cc b/src/librbd/watcher/Notifier.cc index 80ed25abace..dfb95aec0b8 100644 --- a/src/librbd/watcher/Notifier.cc +++ b/src/librbd/watcher/Notifier.cc @@ -45,7 +45,7 @@ Notifier::Notifier(ContextWQ *work_queue, IoCtx &ioctx, const std::string &oid) Notifier::~Notifier() { Mutex::Locker aio_notify_locker(m_aio_notify_lock); - assert(m_pending_aio_notifies == 0); + ceph_assert(m_pending_aio_notifies == 0); } void Notifier::flush(Context *on_finish) { @@ -70,7 +70,7 @@ void Notifier::notify(bufferlist &bl, NotifyResponse *response, C_AioNotify *ctx = new C_AioNotify(this, response, on_finish); librados::AioCompletion *comp = util::create_rados_callback(ctx); int r = m_ioctx.aio_notify(m_oid, comp, bl, NOTIFY_TIMEOUT, &ctx->out_bl); - assert(r == 0); + ceph_assert(r == 0); comp->release(); } @@ -78,7 +78,7 @@ void Notifier::handle_notify(int r, Context *on_finish) { ldout(m_cct, 20) << "r=" << r << 
dendl; Mutex::Locker aio_notify_locker(m_aio_notify_lock); - assert(m_pending_aio_notifies > 0); + ceph_assert(m_pending_aio_notifies > 0); --m_pending_aio_notifies; ldout(m_cct, 20) << "pending=" << m_pending_aio_notifies << dendl; diff --git a/src/librbd/watcher/RewatchRequest.cc b/src/librbd/watcher/RewatchRequest.cc index 22d2bb8e567..9deba2d457b 100644 --- a/src/librbd/watcher/RewatchRequest.cc +++ b/src/librbd/watcher/RewatchRequest.cc @@ -34,8 +34,8 @@ void RewatchRequest::send() { } void RewatchRequest::unwatch() { - assert(m_watch_lock.is_wlocked()); - assert(*m_watch_handle != 0); + ceph_assert(m_watch_lock.is_wlocked()); + ceph_assert(*m_watch_handle != 0); CephContext *cct = reinterpret_cast(m_ioctx.cct()); ldout(cct, 10) << dendl; @@ -46,7 +46,7 @@ void RewatchRequest::unwatch() { librados::AioCompletion *aio_comp = create_rados_callback< RewatchRequest, &RewatchRequest::handle_unwatch>(this); int r = m_ioctx.aio_unwatch(watch_handle, aio_comp); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } @@ -71,7 +71,7 @@ void RewatchRequest::rewatch() { librados::AioCompletion *aio_comp = create_rados_callback< RewatchRequest, &RewatchRequest::handle_rewatch>(this); int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_rewatch_handle, m_watch_ctx); - assert(r == 0); + ceph_assert(r == 0); aio_comp->release(); } -- 2.39.5