]> git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
osd: s/assert/ceph_assert/g'
authorMatan Breizman <mbreizma@redhat.com>
Tue, 20 May 2025 08:01:17 +0000 (08:01 +0000)
committerMatan Breizman <mbreizma@redhat.com>
Tue, 20 May 2025 08:01:17 +0000 (08:01 +0000)
Now that we fall back to RelWithDebInfo [1], C asserts are not compiled in.
Replace some of the existing asserts with ceph_asserts instead.

[1] https://github.com/ceph/ceph/pull/61637

Fixes: https://tracker.ceph.com/issues/71360
Signed-off-by: Matan Breizman <mbreizma@redhat.com>
16 files changed:
src/osd/ECBackend.cc
src/osd/ECBackendL.cc
src/osd/ECUtilL.h
src/osd/OSD.cc
src/osd/OSDMap.cc
src/osd/OSDMap.h
src/osd/PGLog.cc
src/osd/PGLog.h
src/osd/PeeringState.cc
src/osd/PeeringState.h
src/osd/PrimaryLogPG.cc
src/osd/ReplicatedBackend.cc
src/osd/SnapMapper.cc
src/osd/objclass.cc
src/osd/scheduler/mClockScheduler.cc
src/osd/scrubber/scrub_machine.h

index 9259f959f239de8464bc3be5f7fac82e5503ac4e..f418025d1b287b3a5a40e0413fe5d8e95a8fb21a 100644 (file)
@@ -1432,7 +1432,7 @@ struct ECClassicalOp : ECCommon::RMWPipeline::Op {
     shard_id_map<ObjectStore::Transaction> *transactions,
     DoutPrefixProvider *dpp,
     const OSDMapRef &osdmap) final {
-    assert(t);
+    ceph_assert(t);
     ECTransaction::generate_transactions(
       t.get(),
       plan,
index 8d685a1aa32ba99beffb7a1117b0cda4a3f88749..b17f3fd69566f91b4156066f2290a605918fc919 100644 (file)
@@ -1444,7 +1444,7 @@ struct ECClassicalOp : ECCommonL::RMWPipeline::Op {
       DoutPrefixProvider *dpp,
       const ceph_release_t require_osd_release) final
   {
-    assert(t);
+    ceph_assert(t);
     ECTransactionL::generate_transactions(
       t.get(),
       plan,
index 18038b1118439d88edc8efa50b60d030b94bc842..e2433ff3513e513097f47da6f41ab42cfa8ce0a5 100644 (file)
@@ -172,7 +172,7 @@ public:
   }
   std::pair<uint64_t, uint64_t> offset_length_to_data_chunk_indices(
     uint64_t off, uint64_t len) const {
-    assert(chunk_size > 0);
+    ceph_assert(chunk_size > 0);
     const auto first_chunk_idx = (off / chunk_size);
     const auto last_chunk_idx = (chunk_size - 1 + off + len) / chunk_size;
     return {first_chunk_idx, last_chunk_idx};
@@ -182,7 +182,7 @@ public:
     if (len == 0) {
       return true;
     }
-    assert(chunk_size > 0);
+    ceph_assert(chunk_size > 0);
     const auto first_stripe_idx = off / stripe_width;
     const auto last_inc_stripe_idx = (off + len - 1) / stripe_width;
     return first_stripe_idx == last_inc_stripe_idx;
index c46ce54b202b38a687d7a2a81f0e2886af1109eb..9b9784fb652993fe3c23b03761a27ea6749a6cfc 100644 (file)
@@ -1951,7 +1951,7 @@ void OSDService::set_ready_to_merge_source(PG *pg, eversion_t version)
   std::lock_guard l(merge_lock);
   dout(10) << __func__ << " " << pg->pg_id << dendl;
   ready_to_merge_source[pg->pg_id.pgid] = version;
-  assert(not_ready_to_merge_source.count(pg->pg_id.pgid) == 0);
+  ceph_assert(not_ready_to_merge_source.count(pg->pg_id.pgid) == 0);
   _send_ready_to_merge();
 }
 
@@ -1966,7 +1966,7 @@ void OSDService::set_ready_to_merge_target(PG *pg,
                                         make_tuple(version,
                                                    last_epoch_started,
                                                    last_epoch_clean)));
-  assert(not_ready_to_merge_target.count(pg->pg_id.pgid) == 0);
+  ceph_assert(not_ready_to_merge_target.count(pg->pg_id.pgid) == 0);
   _send_ready_to_merge();
 }
 
@@ -1975,7 +1975,7 @@ void OSDService::set_not_ready_to_merge_source(pg_t source)
   std::lock_guard l(merge_lock);
   dout(10) << __func__ << " " << source << dendl;
   not_ready_to_merge_source.insert(source);
-  assert(ready_to_merge_source.count(source) == 0);
+  ceph_assert(ready_to_merge_source.count(source) == 0);
   _send_ready_to_merge();
 }
 
@@ -1984,7 +1984,7 @@ void OSDService::set_not_ready_to_merge_target(pg_t target, pg_t source)
   std::lock_guard l(merge_lock);
   dout(10) << __func__ << " " << target << " source " << source << dendl;
   not_ready_to_merge_target[target] = source;
-  assert(ready_to_merge_target.count(target) == 0);
+  ceph_assert(ready_to_merge_target.count(target) == 0);
   _send_ready_to_merge();
 }
 
@@ -4055,13 +4055,13 @@ int OSD::init()
        for (auto shard : shards) {
          shard->prime_splits(osdmap, &new_children);
        }
-       assert(new_children.empty());
+       ceph_assert(new_children.empty());
       }
       if (!merge_pgs.empty()) {
        for (auto shard : shards) {
          shard->prime_merges(osdmap, &merge_pgs);
        }
-       assert(merge_pgs.empty());
+       ceph_assert(merge_pgs.empty());
       }
     }
   }
@@ -5449,7 +5449,7 @@ void OSD::load_pgs()
     }
     {
       uint32_t shard_index = pgid.hash_to_shard(shards.size());
-      assert(NULL != shards[shard_index]);
+      ceph_assert(NULL != shards[shard_index]);
       store->set_collection_commit_queue(pg->coll, &(shards[shard_index]->context_queue));
     }
 
@@ -5518,7 +5518,7 @@ PGRef OSD::handle_pg_create_info(const OSDMapRef& osdmap,
 
   {
     uint32_t shard_index = pgid.hash_to_shard(shards.size());
-    assert(NULL != shards[shard_index]);
+    ceph_assert(NULL != shards[shard_index]);
     store->set_collection_commit_queue(pg->coll, &(shards[shard_index]->context_queue));
   }
 
@@ -5673,7 +5673,7 @@ void OSD::_add_heartbeat_peer(int p)
     pair<ConnectionRef,ConnectionRef> cons = service.get_con_osd_hb(p, get_osdmap_epoch());
     if (!cons.first)
       return;
-    assert(cons.second);
+    ceph_assert(cons.second);
 
     hi = &heartbeat_peers[p];
     hi->peer = p;
@@ -7943,7 +7943,7 @@ MPGStats* OSD::collect_pg_stats()
       per_pool_stats = false;
       break;
     } else {
-      assert(r >= 0);
+      ceph_assert(r >= 0);
       m->pool_stat[p] = st;
     }
   }
@@ -9365,7 +9365,7 @@ void OSD::split_pgs(
 
     {
       uint32_t shard_index = i->hash_to_shard(shards.size());
-      assert(NULL != shards[shard_index]);
+      ceph_assert(NULL != shards[shard_index]);
       store->set_collection_commit_queue(child->coll, &(shards[shard_index]->context_queue));
     }
 
index cff5a6153842b1922cf761ebc97e8b6ff3569f2d..30ae1bc37b944359c790e4af001148a36eaa448d 100644 (file)
@@ -2905,7 +2905,7 @@ const std::vector<int> OSDMap::pgtemp_undo_primaryfirst(const pg_pool_t& pool,
       std::vector<int> result;
       int primaryshard = 0;
       int nonprimaryshard = pool.size - pool.nonprimary_shards.size();
-      assert(acting.size() == pool.size);
+      ceph_assert(acting.size() == pool.size);
       for (auto shard = 0; shard < pool.size; shard++) {
        if (pool.is_nonprimary_shard(shard_id_t(shard))) {
          result.emplace_back(acting[nonprimaryshard++]);
@@ -7888,7 +7888,7 @@ void OSDMap::get_random_up_osds_by_subtree(int n,     // whoami
 float OSDMap::pool_raw_used_rate(int64_t poolid) const
 {
   const pg_pool_t *pool = get_pg_pool(poolid);
-  assert(pool != nullptr);
+  ceph_assert(pool != nullptr);
 
   switch (pool->get_type()) {
   case pg_pool_t::TYPE_REPLICATED:
index 81f3d914edab432c62d756e78f946fb24e82847d..38fd697f797c489adc1f271927e0ffd4f3ff47ae 100644 (file)
@@ -893,7 +893,7 @@ public:
   }
 
   bool exists(int osd) const {
-    //assert(osd >= 0);
+    //ceph_assert(osd >= 0);
     return osd >= 0 && osd < max_osd && (osd_state[osd] & CEPH_OSD_EXISTS);
   }
 
index 07e3f30b6c820e6a1f321f8611ab061f76127822..22846c2b68765de06351c47d086879a63958f3db 100644 (file)
@@ -1178,7 +1178,7 @@ namespace {
           }).then([this] {
             if (info.pgid.is_no_shard()) {
               // replicated pool pg does not persist this key
-              assert(on_disk_rollback_info_trimmed_to == eversion_t());
+              ceph_assert(on_disk_rollback_info_trimmed_to == eversion_t());
               on_disk_rollback_info_trimmed_to = info.last_update;
             }
             log = PGLog::IndexedLog(
index 455fc4464185016d829c0ad4967f7e9f20103bba..4c09761f09a9aa47ef1616b13312bfcb90e5da8a 100644 (file)
@@ -1587,7 +1587,7 @@ public:
       });
     if (info.pgid.is_no_shard()) {
       // replicated pool pg does not persist this key
-      assert(on_disk_rollback_info_trimmed_to == eversion_t());
+      ceph_assert(on_disk_rollback_info_trimmed_to == eversion_t());
       on_disk_rollback_info_trimmed_to = info.last_update;
     }
     log = IndexedLog(
index de2d275ed87a0d79d89b4ed1a03650226e7ba0a7..9820867485a5f0024b7ecd872f0a01acc268dbc9 100644 (file)
@@ -1247,7 +1247,7 @@ void PeeringState::send_lease()
 
 void PeeringState::proc_lease(const pg_lease_t& l)
 {
-  assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+  ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
   if (!is_nonprimary()) {
     psdout(20) << "no-op, !nonprimary" << dendl;
     return;
@@ -1289,7 +1289,7 @@ void PeeringState::proc_lease(const pg_lease_t& l)
 
 void PeeringState::proc_lease_ack(int from, const pg_lease_ack_t& a)
 {
-  assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+  ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
   auto now = pl->get_mnow();
   bool was_min = false;
   for (unsigned i = 0; i < acting.size(); ++i) {
@@ -1315,7 +1315,7 @@ void PeeringState::proc_lease_ack(int from, const pg_lease_ack_t& a)
 
 void PeeringState::proc_renew_lease()
 {
-  assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+  ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
   renew_lease(pl->get_mnow());
   if (actingset.size() > 1) {
     send_lease();
@@ -1327,7 +1327,7 @@ void PeeringState::proc_renew_lease()
 
 void PeeringState::recalc_readable_until()
 {
-  assert(is_primary());
+  ceph_assert(is_primary());
   ceph::signedspan min = readable_until_ub_sent;
   for (unsigned i = 0; i < acting.size(); ++i) {
     if (acting[i] == pg_whoami.osd || acting[i] == CRUSH_ITEM_NONE) {
@@ -1347,7 +1347,7 @@ void PeeringState::recalc_readable_until()
 
 bool PeeringState::check_prior_readable_down_osds(const OSDMapRef& map)
 {
-  assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+  ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
   bool changed = false;
   auto p = prior_readable_down_osds.begin();
   while (p != prior_readable_down_osds.end()) {
@@ -1762,7 +1762,7 @@ PeeringState::select_replicated_primary(
       !primary->second.is_incomplete() &&
       primary->second.last_update >=
         auth_log_shard->second.log_tail) {
-    assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
+    ceph_assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
     auto approx_missing_objects =
       primary->second.stats.stats.sum.num_objects_missing;
     auto auth_version = auth_log_shard->second.last_update.version;
@@ -1961,7 +1961,7 @@ class bucket_candidates_t {
 public:
   void add_osd(osd_ord_t ord, osd_id_t osd) {
     // osds will be added in smallest to largest order
-    assert(osds.empty() || osds.back().first <= ord);
+    ceph_assert(osds.empty() || osds.back().first <= ord);
     osds.push_back(std::make_pair(ord, osd));
   }
   osd_id_t pop_osd() {
@@ -2295,7 +2295,7 @@ void PeeringState::choose_async_recovery_ec(
     // past the authoritative last_update the same as those equal to it.
     version_t auth_version = auth_info.last_update.version;
     version_t candidate_version = shard_info.last_update.version;
-    assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
+    ceph_assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
     auto approx_missing_objects =
       shard_info.stats.stats.sum.num_objects_missing;
     if (auth_version > candidate_version) {
@@ -2353,7 +2353,7 @@ void PeeringState::choose_async_recovery_replicated(
     // logs plus historical missing objects as the cost of recovery
     version_t auth_version = auth_info.last_update.version;
     version_t candidate_version = shard_info.last_update.version;
-    assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
+    ceph_assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
     auto approx_missing_objects =
       shard_info.stats.stats.sum.num_objects_missing;
     if (auth_version > candidate_version) {
@@ -2886,7 +2886,7 @@ void PeeringState::activate(
     purged.intersection_of(to_trim, info.purged_snaps);
     to_trim.subtract(purged);
 
-    assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+    ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
     renew_lease(pl->get_mnow());
     // do not schedule until we are actually activated
 
@@ -4584,7 +4584,7 @@ void PeeringState::recover_got(
     psdout(10) << "last_complete now " << info.last_complete
               << " log.complete_to at end" << dendl;
     //below is not true in the repair case.
-    //assert(missing.num_missing() == 0);  // otherwise, complete_to was wrong.
+    //ceph_assert(missing.num_missing() == 0);  // otherwise, complete_to was wrong.
     ceph_assert(info.last_complete == info.last_update);
   }
 
@@ -4774,8 +4774,8 @@ void PeeringState::calc_trim_to()
     }
     psdout(10) << "calc_trim_to " << pg_trim_to << " -> " << new_trim_to << dendl;
     pg_trim_to = new_trim_to;
-    assert(pg_trim_to <= pg_log.get_head());
-    assert(pg_trim_to <= min_last_complete_ondisk);
+    ceph_assert(pg_trim_to <= pg_log.get_head());
+    ceph_assert(pg_trim_to <= min_last_complete_ondisk);
   }
 }
 
@@ -6584,7 +6584,7 @@ boost::statechart::result PeeringState::Active::react(const AllReplicasActivated
       if (merge_target) {
        pg_t src = pgid;
        src.set_ps(ps->pool.info.get_pg_num_pending());
-       assert(src.get_parent() == pgid);
+       ceph_assert(src.get_parent() == pgid);
        pl->set_not_ready_to_merge_target(pgid, src);
       } else {
        pl->set_not_ready_to_merge_source(pgid);
@@ -6679,7 +6679,7 @@ void PeeringState::Active::all_activated_and_committed()
   ceph_assert(!ps->acting_recovery_backfill.empty());
   ceph_assert(ps->blocked_by.empty());
 
-  assert(HAVE_FEATURE(ps->upacting_features, SERVER_OCTOPUS));
+  ceph_assert(HAVE_FEATURE(ps->upacting_features, SERVER_OCTOPUS));
   // this is overkill when the activation is quick, but when it is slow it
   // is important, because the lease was renewed by the activate itself but we
   // don't know how long ago that was, and simply scheduling now may leave
@@ -8069,7 +8069,7 @@ std::vector<pg_shard_t> PeeringState::get_replica_recovery_order() const
       continue;
     }
     auto pm = get_peer_missing().find(p);
-    assert(pm != get_peer_missing().end());
+    ceph_assert(pm != get_peer_missing().end());
     auto nm = pm->second.num_missing();
     if (nm != 0) {
       if (is_async_recovery_target(p)) {
index 82c0082bb6e668921dce6127ba6bd19e827e6df0..13a2a5284228c21eb29fe0e115c6b2bb4ee7895a 100644 (file)
@@ -633,7 +633,7 @@ public:
     }
 
     PeeringCtxWrapper &get_recovery_ctx() {
-      assert(state->rctx);
+      ceph_assert(state->rctx);
       return *(state->rctx);
     }
 
@@ -2395,13 +2395,13 @@ public:
       return pg_log.get_missing();
     } else {
       auto it = peer_missing.find(peer);
-      assert(it != peer_missing.end());
+      ceph_assert(it != peer_missing.end());
       return it->second;
     }
   }
   const pg_info_t&get_peer_info(pg_shard_t peer) const {
     auto it = peer_info.find(peer);
-    assert(it != peer_info.end());
+    ceph_assert(it != peer_info.end());
     return it->second;
   }
   bool has_peer_info(pg_shard_t peer) const {
index 984c981dff49113ee44642478699257026f7aea9..f4bb9a41235ab81de56c77c99e50782aea7441b9 100644 (file)
@@ -852,7 +852,7 @@ void PrimaryLogPG::maybe_force_recovery()
 
 bool PrimaryLogPG::check_laggy(OpRequestRef& op)
 {
-  assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
+  ceph_assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
                      SERVER_OCTOPUS));
   if (state_test(PG_STATE_WAIT)) {
     dout(10) << __func__ << " PG is WAIT state" << dendl;
@@ -884,7 +884,7 @@ bool PrimaryLogPG::check_laggy(OpRequestRef& op)
 
 bool PrimaryLogPG::check_laggy_requeue(OpRequestRef& op)
 {
-  assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
+  ceph_assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
                      SERVER_OCTOPUS));
   if (!state_test(PG_STATE_WAIT) && !state_test(PG_STATE_LAGGY)) {
     return true; // not laggy
@@ -3988,7 +3988,7 @@ void PrimaryLogPG::finish_proxy_write(hobject_t oid, ceph_tid_t tid, int r)
 
   if (!pwop->sent_reply) {
     // send commit.
-    assert(pwop->ctx->reply == nullptr);
+    ceph_assert(pwop->ctx->reply == nullptr);
     MOSDOpReply *reply = new MOSDOpReply(m, r, get_osdmap_epoch(), 0,
                                         true /* we claim it below */);
     reply->set_reply_versions(eversion_t(), pwop->user_version);
@@ -10729,7 +10729,7 @@ std::pair<int, hobject_t> PrimaryLogPG::get_fpoid_from_chunk(
       case pg_pool_t::TYPE_FINGERPRINT_SHA512:
        return ceph::crypto::digest<ceph::crypto::SHA512>(chunk).to_str();
       default:
-       assert(0 == "unrecognized fingerprint type");
+       ceph_assert(0 == "unrecognized fingerprint type");
        return {};
     }
   }();    
@@ -14109,7 +14109,7 @@ uint64_t PrimaryLogPG::recover_backfill(
        dout(20) << " BACKFILL keeping " << check
                 << " with ver " << obj_v
                 << " on peers " << keep_ver_targs << dendl;
-       //assert(!waiting_for_degraded_object.count(check));
+       //ceph_assert(!waiting_for_degraded_object.count(check));
       }
       if (!need_ver_targs.empty() || !missing_targs.empty()) {
        ObjectContextRef obc = get_object_context(backfill_info.begin, false);
index 7a6b29dca855523f313d67330c4def9d43222293..36fec467de9030eb04f0dfd2642dbae526215824 100644 (file)
@@ -1334,9 +1334,9 @@ void ReplicatedBackend::calc_head_subsets(
   if (size)
     data_subset.insert(0, size);
 
-  assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
+  ceph_assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
   const auto it = missing.get_items().find(head);
-  assert(it != missing.get_items().end());
+  ceph_assert(it != missing.get_items().end());
   data_subset.intersection_of(it->second.clean_regions.get_dirty_regions());
   dout(10) << "calc_head_subsets " << head
           << " data_subset " << data_subset << dendl;
@@ -1572,7 +1572,7 @@ void ReplicatedBackend::prepare_pull(
     // pulling head or unversioned object.
     // always pull the whole thing.
     recovery_info.copy_subset.insert(0, (uint64_t)-1);
-    assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
+    ceph_assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
     recovery_info.copy_subset.intersection_of(missing_iter->second.clean_regions.get_dirty_regions());
     recovery_info.size = ((uint64_t)-1);
     recovery_info.object_exist = missing_iter->second.clean_regions.object_is_exist();
@@ -1701,7 +1701,7 @@ int ReplicatedBackend::prep_push(
   get_parent()->begin_peer_recover(peer, soid);
   const auto pmissing_iter = get_parent()->get_shard_missing().find(peer);
   const auto missing_iter = pmissing_iter->second.get_items().find(soid);
-  assert(missing_iter != pmissing_iter->second.get_items().end());
+  ceph_assert(missing_iter != pmissing_iter->second.get_items().end());
   // take note.
   push_info_t &push_info = pushing[soid][peer];
   push_info.obc = obc;
@@ -1805,7 +1805,7 @@ void ReplicatedBackend::submit_push_data(
     if (!complete) {
       //clone overlap content in local object
       if (recovery_info.object_exist) {
-        assert(r == 0);
+        ceph_assert(r == 0);
         uint64_t local_size = std::min(recovery_info.size, (uint64_t)st.st_size);
         interval_set<uint64_t> local_intervals_included, local_intervals_excluded;
         if (local_size) {
@@ -1831,7 +1831,7 @@ void ReplicatedBackend::submit_push_data(
   // Punch zeros for data, if fiemap indicates nothing but it is marked dirty
   if (data_zeros.size() > 0) {
     data_zeros.intersection_of(recovery_info.copy_subset);
-    assert(intervals_included.subset_of(data_zeros));
+    ceph_assert(intervals_included.subset_of(data_zeros));
     data_zeros.subtract(intervals_included);
 
     dout(20) << __func__ <<" recovering object " << recovery_info.soid
@@ -1967,7 +1967,7 @@ bool ReplicatedBackend::handle_pull_response(
     if (attrset.find(SS_ATTR) != attrset.end()) {
       bufferlist ssbv = attrset.at(SS_ATTR);
       SnapSet ss(ssbv);
-      assert(!pull_info.obc->ssc->exists || ss.seq  == pull_info.obc->ssc->snapset.seq);
+      ceph_assert(!pull_info.obc->ssc->exists || ss.seq  == pull_info.obc->ssc->snapset.seq);
     }
     pull_info.recovery_info.oi = pull_info.obc->obs.oi;
     pull_info.recovery_info = recalc_subsets(
@@ -2443,7 +2443,7 @@ void ReplicatedBackend::handle_pull(pg_shard_t peer, PullOp &op, PushOp *reply)
       } else {
         recovery_info.copy_subset.clear();
       }
-      assert(recovery_info.clone_subset.empty());
+      ceph_assert(recovery_info.clone_subset.empty());
     }
 
     r = build_push_op(recovery_info, progress, 0, reply);
@@ -2500,7 +2500,7 @@ void ReplicatedBackend::_failed_pull(pg_shard_t from, const hobject_t &soid)
 {
   dout(20) << __func__ << ": " << soid << " from " << from << dendl;
   auto it = pulling.find(soid);
-  assert(it != pulling.end());
+  ceph_assert(it != pulling.end());
   get_parent()->on_failed_pull(
     { from },
     soid,
index b507b6c1d7e12641826a358fa1488814ce57d987..8b471be5321e8575f8e1b19cf2a92a51b56ea83d 100644 (file)
@@ -97,7 +97,7 @@ int OSDriver::get_keys(
     reinterpret_cast<FuturizedStore::Shard::omap_values_t&>(*out) = std::move(vals);
     return 0;
   }, FuturizedStore::Shard::read_errorator::all_same_way([] (auto& e) {
-    assert(e.value() > 0);
+    ceph_assert(e.value() > 0);
     return -e.value();
   }))); // this requires seastar::thread
 }
@@ -118,7 +118,7 @@ int OSDriver::get_next(
       return -ENOENT;
     } else {
       CRIMSON_DEBUG("OSDriver::get_next returning next: {}, ", nit->first);
-      assert(nit->first > key);
+      ceph_assert(nit->first > key);
       *next = *nit;
       return 0;
     }
@@ -139,7 +139,7 @@ int OSDriver::get_next_or_current(
     ch, hoid, FuturizedStore::Shard::omap_keys_t{key}
   ).safe_then([&key, next_or_current] (FuturizedStore::Shard::omap_values_t&& vals) {
     CRIMSON_DEBUG("OSDriver::get_next_or_current returning {}", key);
-    assert(vals.size() == 1);
+    ceph_assert(vals.size() == 1);
     *next_or_current = std::make_pair(key, std::move(vals.begin()->second));
     return 0;
   }, FuturizedStore::Shard::read_errorator::all_same_way(
@@ -1022,9 +1022,9 @@ void SnapMapper::Scrubber::run()
                       __func__, mapping.hoid, mapping.snap, pool, begin, end)
                  << dendl;
       } else {
-        assert(mapping.snap >= begin);
-        assert(mapping.snap < end);
-        assert(mapping.hoid.pool == pool);
+        ceph_assert(mapping.snap >= begin);
+        ceph_assert(mapping.snap < end);
+        ceph_assert(mapping.hoid.pool == pool);
         // invalid
         dout(10) << fmt::format(
                       "{} stray {} snap {} in pool {} shard {} purged_snaps[{}, {})",
index cc3109b246df03e6a2fff7edcea8026e8086255c..899be58f3ebef3eaeec4e34865ff60f79b8676b8 100644 (file)
@@ -731,7 +731,7 @@ int cls_cxx_gather(cls_method_context_t hctx, const std::set<std::string> &src_o
   int subop_num = (*pctx)->current_osd_subop_num;
   OSDOp *osd_op = &(*(*pctx)->ops)[subop_num];
   auto [iter, inserted] = (*pctx)->op_finishers.emplace(std::make_pair(subop_num, std::make_unique<GatherFinisher>(osd_op)));
-  assert(inserted);
+  ceph_assert(inserted);
   auto &gather = *static_cast<GatherFinisher*>(iter->second.get());
   for (const auto &obj : src_objs) {
     gather.src_obj_buffs[obj] = bufferlist();
@@ -741,7 +741,7 @@ int cls_cxx_gather(cls_method_context_t hctx, const std::set<std::string> &src_o
 
 int cls_cxx_get_gathered_data(cls_method_context_t hctx, std::map<std::string, bufferlist> *results)
 {
-  assert(results);
+  ceph_assert(results);
   PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext**)hctx;
   PrimaryLogPG::OpFinisher* op_finisher = nullptr;
   int r = 0;
index 225873a2f6436d24eb3f3c02542d2c87412889de..7050828d31f07e451d76af6f81cfb0cf03968d74 100644 (file)
@@ -522,7 +522,7 @@ WorkItem mClockScheduler::dequeue()
   if (!high_priority.empty()) {
     auto iter = high_priority.begin();
     // invariant: high_priority entries are never empty
-    assert(!iter->second.empty());
+    ceph_assert(!iter->second.empty());
     WorkItem ret{std::move(iter->second.back())};
     iter->second.pop_back();
     if (iter->second.empty()) {
index 48b78a7b38d11216bba65131ea7701eb85aad488..c7840fcfd6f6efc3c60d95356458077661b12bcc 100644 (file)
@@ -334,7 +334,7 @@ private:
        * retain the token until the event either fires or is canceled.
        * If a user needs/wants to relax that requirement, this assert can
        * be removed */
-      assert(!cb_token);
+      ceph_assert(!cb_token);
     }
   };
 public:
@@ -361,7 +361,7 @@ public:
       ScrubMachine *parent,
       std::shared_ptr<scheduled_event_state_t> event_state)
       :  parent(parent), event_state(event_state) {
-      assert(*this);
+      ceph_assert(*this);
     }
 
     void swap(timer_event_token_t &rhs) {
@@ -373,17 +373,17 @@ public:
     timer_event_token_t() = default;
     timer_event_token_t(timer_event_token_t &&rhs) {
       swap(rhs);
-      assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
+      ceph_assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
     }
 
     timer_event_token_t &operator=(timer_event_token_t &&rhs) {
       swap(rhs);
-      assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
+      ceph_assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
       return *this;
     }
 
     operator bool() const {
-      assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
+      ceph_assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
       return parent;
     }
 
@@ -425,7 +425,7 @@ public:
          token->cb_token = nullptr;
          process_event(std::move(event));
        } else {
-         assert(nullptr == token->cb_token);
+         ceph_assert(nullptr == token->cb_token);
        }
       }
     );