Now that we fall back to RelWithDebInfo [1], which defines NDEBUG, plain C
asserts are compiled out.
Replace some of the existing asserts with ceph_asserts, which are always
compiled in.
[1] https://github.com/ceph/ceph/pull/61637
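
For context, a minimal sketch of the difference (f and t are hypothetical;
assumes NDEBUG, as defined by the RelWithDebInfo build type):

    #include <cassert>
    #include "include/ceph_assert.h"

    void f(int *t) {
      assert(t);       // compiled out when NDEBUG is defined
      ceph_assert(t);  // always compiled in; aborts with a backtrace on failure
    }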
Fixes: https://tracker.ceph.com/issues/71360
Signed-off-by: Matan Breizman <mbreizma@redhat.com>
shard_id_map<ObjectStore::Transaction> *transactions,
DoutPrefixProvider *dpp,
const OSDMapRef &osdmap) final {
- assert(t);
+ ceph_assert(t);
ECTransaction::generate_transactions(
t.get(),
plan,
DoutPrefixProvider *dpp,
const ceph_release_t require_osd_release) final
{
- assert(t);
+ ceph_assert(t);
ECTransactionL::generate_transactions(
t.get(),
plan,
}
std::pair<uint64_t, uint64_t> offset_length_to_data_chunk_indices(
uint64_t off, uint64_t len) const {
- assert(chunk_size > 0);
+ ceph_assert(chunk_size > 0);
const auto first_chunk_idx = (off / chunk_size);
const auto last_chunk_idx = (chunk_size - 1 + off + len) / chunk_size;
return {first_chunk_idx, last_chunk_idx};
if (len == 0) {
return true;
}
- assert(chunk_size > 0);
+ ceph_assert(chunk_size > 0);
const auto first_stripe_idx = off / stripe_width;
const auto last_inc_stripe_idx = (off + len - 1) / stripe_width;
return first_stripe_idx == last_inc_stripe_idx;
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << pg->pg_id << dendl;
ready_to_merge_source[pg->pg_id.pgid] = version;
- assert(not_ready_to_merge_source.count(pg->pg_id.pgid) == 0);
+ ceph_assert(not_ready_to_merge_source.count(pg->pg_id.pgid) == 0);
_send_ready_to_merge();
}
make_tuple(version,
last_epoch_started,
last_epoch_clean)));
- assert(not_ready_to_merge_target.count(pg->pg_id.pgid) == 0);
+ ceph_assert(not_ready_to_merge_target.count(pg->pg_id.pgid) == 0);
_send_ready_to_merge();
}
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << source << dendl;
not_ready_to_merge_source.insert(source);
- assert(ready_to_merge_source.count(source) == 0);
+ ceph_assert(ready_to_merge_source.count(source) == 0);
_send_ready_to_merge();
}
std::lock_guard l(merge_lock);
dout(10) << __func__ << " " << target << " source " << source << dendl;
not_ready_to_merge_target[target] = source;
- assert(ready_to_merge_target.count(target) == 0);
+ ceph_assert(ready_to_merge_target.count(target) == 0);
_send_ready_to_merge();
}
for (auto shard : shards) {
shard->prime_splits(osdmap, &new_children);
}
- assert(new_children.empty());
+ ceph_assert(new_children.empty());
}
if (!merge_pgs.empty()) {
for (auto shard : shards) {
shard->prime_merges(osdmap, &merge_pgs);
}
- assert(merge_pgs.empty());
+ ceph_assert(merge_pgs.empty());
}
}
}
}
{
uint32_t shard_index = pgid.hash_to_shard(shards.size());
- assert(NULL != shards[shard_index]);
+ ceph_assert(NULL != shards[shard_index]);
store->set_collection_commit_queue(pg->coll, &(shards[shard_index]->context_queue));
}
{
uint32_t shard_index = pgid.hash_to_shard(shards.size());
- assert(NULL != shards[shard_index]);
+ ceph_assert(NULL != shards[shard_index]);
store->set_collection_commit_queue(pg->coll, &(shards[shard_index]->context_queue));
}
pair<ConnectionRef,ConnectionRef> cons = service.get_con_osd_hb(p, get_osdmap_epoch());
if (!cons.first)
return;
- assert(cons.second);
+ ceph_assert(cons.second);
hi = &heartbeat_peers[p];
hi->peer = p;
per_pool_stats = false;
break;
} else {
- assert(r >= 0);
+ ceph_assert(r >= 0);
m->pool_stat[p] = st;
}
}
{
uint32_t shard_index = i->hash_to_shard(shards.size());
- assert(NULL != shards[shard_index]);
+ ceph_assert(NULL != shards[shard_index]);
store->set_collection_commit_queue(child->coll, &(shards[shard_index]->context_queue));
}
std::vector<int> result;
int primaryshard = 0;
int nonprimaryshard = pool.size - pool.nonprimary_shards.size();
- assert(acting.size() == pool.size);
+ ceph_assert(acting.size() == pool.size);
for (auto shard = 0; shard < pool.size; shard++) {
if (pool.is_nonprimary_shard(shard_id_t(shard))) {
result.emplace_back(acting[nonprimaryshard++]);
float OSDMap::pool_raw_used_rate(int64_t poolid) const
{
const pg_pool_t *pool = get_pg_pool(poolid);
- assert(pool != nullptr);
+ ceph_assert(pool != nullptr);
switch (pool->get_type()) {
case pg_pool_t::TYPE_REPLICATED:
}
bool exists(int osd) const {
- //assert(osd >= 0);
+ //ceph_assert(osd >= 0);
return osd >= 0 && osd < max_osd && (osd_state[osd] & CEPH_OSD_EXISTS);
}
}).then([this] {
if (info.pgid.is_no_shard()) {
// replicated pool pg does not persist this key
- assert(on_disk_rollback_info_trimmed_to == eversion_t());
+ ceph_assert(on_disk_rollback_info_trimmed_to == eversion_t());
on_disk_rollback_info_trimmed_to = info.last_update;
}
log = PGLog::IndexedLog(
});
if (info.pgid.is_no_shard()) {
// replicated pool pg does not persist this key
- assert(on_disk_rollback_info_trimmed_to == eversion_t());
+ ceph_assert(on_disk_rollback_info_trimmed_to == eversion_t());
on_disk_rollback_info_trimmed_to = info.last_update;
}
log = IndexedLog(
void PeeringState::proc_lease(const pg_lease_t& l)
{
- assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
if (!is_nonprimary()) {
psdout(20) << "no-op, !nonprimary" << dendl;
return;
void PeeringState::proc_lease_ack(int from, const pg_lease_ack_t& a)
{
- assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
auto now = pl->get_mnow();
bool was_min = false;
for (unsigned i = 0; i < acting.size(); ++i) {
void PeeringState::proc_renew_lease()
{
- assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
renew_lease(pl->get_mnow());
if (actingset.size() > 1) {
send_lease();
void PeeringState::recalc_readable_until()
{
- assert(is_primary());
+ ceph_assert(is_primary());
ceph::signedspan min = readable_until_ub_sent;
for (unsigned i = 0; i < acting.size(); ++i) {
if (acting[i] == pg_whoami.osd || acting[i] == CRUSH_ITEM_NONE) {
bool PeeringState::check_prior_readable_down_osds(const OSDMapRef& map)
{
- assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
bool changed = false;
auto p = prior_readable_down_osds.begin();
while (p != prior_readable_down_osds.end()) {
!primary->second.is_incomplete() &&
primary->second.last_update >=
auth_log_shard->second.log_tail) {
- assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
+ ceph_assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
auto approx_missing_objects =
primary->second.stats.stats.sum.num_objects_missing;
auto auth_version = auth_log_shard->second.last_update.version;
public:
void add_osd(osd_ord_t ord, osd_id_t osd) {
// osds will be added in smallest to largest order
- assert(osds.empty() || osds.back().first <= ord);
+ ceph_assert(osds.empty() || osds.back().first <= ord);
osds.push_back(std::make_pair(ord, osd));
}
osd_id_t pop_osd() {
// past the authoritative last_update the same as those equal to it.
version_t auth_version = auth_info.last_update.version;
version_t candidate_version = shard_info.last_update.version;
- assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
+ ceph_assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
auto approx_missing_objects =
shard_info.stats.stats.sum.num_objects_missing;
if (auth_version > candidate_version) {
// logs plus historical missing objects as the cost of recovery
version_t auth_version = auth_info.last_update.version;
version_t candidate_version = shard_info.last_update.version;
- assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
+ ceph_assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
auto approx_missing_objects =
shard_info.stats.stats.sum.num_objects_missing;
if (auth_version > candidate_version) {
purged.intersection_of(to_trim, info.purged_snaps);
to_trim.subtract(purged);
- assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
renew_lease(pl->get_mnow());
// do not schedule until we are actually activated
psdout(10) << "last_complete now " << info.last_complete
<< " log.complete_to at end" << dendl;
//below is not true in the repair case.
- //assert(missing.num_missing() == 0); // otherwise, complete_to was wrong.
+ //ceph_assert(missing.num_missing() == 0); // otherwise, complete_to was wrong.
ceph_assert(info.last_complete == info.last_update);
}
}
psdout(10) << "calc_trim_to " << pg_trim_to << " -> " << new_trim_to << dendl;
pg_trim_to = new_trim_to;
- assert(pg_trim_to <= pg_log.get_head());
- assert(pg_trim_to <= min_last_complete_ondisk);
+ ceph_assert(pg_trim_to <= pg_log.get_head());
+ ceph_assert(pg_trim_to <= min_last_complete_ondisk);
}
}
if (merge_target) {
pg_t src = pgid;
src.set_ps(ps->pool.info.get_pg_num_pending());
- assert(src.get_parent() == pgid);
+ ceph_assert(src.get_parent() == pgid);
pl->set_not_ready_to_merge_target(pgid, src);
} else {
pl->set_not_ready_to_merge_source(pgid);
ceph_assert(!ps->acting_recovery_backfill.empty());
ceph_assert(ps->blocked_by.empty());
- assert(HAVE_FEATURE(ps->upacting_features, SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(ps->upacting_features, SERVER_OCTOPUS));
// this is overkill when the activation is quick, but when it is slow it
// is important, because the lease was renewed by the activate itself but we
// don't know how long ago that was, and simply scheduling now may leave
continue;
}
auto pm = get_peer_missing().find(p);
- assert(pm != get_peer_missing().end());
+ ceph_assert(pm != get_peer_missing().end());
auto nm = pm->second.num_missing();
if (nm != 0) {
if (is_async_recovery_target(p)) {
}
PeeringCtxWrapper &get_recovery_ctx() {
- assert(state->rctx);
+ ceph_assert(state->rctx);
return *(state->rctx);
}
return pg_log.get_missing();
} else {
auto it = peer_missing.find(peer);
- assert(it != peer_missing.end());
+ ceph_assert(it != peer_missing.end());
return it->second;
}
}
const pg_info_t&get_peer_info(pg_shard_t peer) const {
auto it = peer_info.find(peer);
- assert(it != peer_info.end());
+ ceph_assert(it != peer_info.end());
return it->second;
}
bool has_peer_info(pg_shard_t peer) const {
bool PrimaryLogPG::check_laggy(OpRequestRef& op)
{
- assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
+ ceph_assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
SERVER_OCTOPUS));
if (state_test(PG_STATE_WAIT)) {
dout(10) << __func__ << " PG is WAIT state" << dendl;
bool PrimaryLogPG::check_laggy_requeue(OpRequestRef& op)
{
- assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
+ ceph_assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
SERVER_OCTOPUS));
if (!state_test(PG_STATE_WAIT) && !state_test(PG_STATE_LAGGY)) {
return true; // not laggy
if (!pwop->sent_reply) {
// send commit.
- assert(pwop->ctx->reply == nullptr);
+ ceph_assert(pwop->ctx->reply == nullptr);
MOSDOpReply *reply = new MOSDOpReply(m, r, get_osdmap_epoch(), 0,
true /* we claim it below */);
reply->set_reply_versions(eversion_t(), pwop->user_version);
case pg_pool_t::TYPE_FINGERPRINT_SHA512:
return ceph::crypto::digest<ceph::crypto::SHA512>(chunk).to_str();
default:
- assert(0 == "unrecognized fingerprint type");
+ ceph_assert(0 == "unrecognized fingerprint type");
return {};
}
}();
dout(20) << " BACKFILL keeping " << check
<< " with ver " << obj_v
<< " on peers " << keep_ver_targs << dendl;
- //assert(!waiting_for_degraded_object.count(check));
+ //ceph_assert(!waiting_for_degraded_object.count(check));
}
if (!need_ver_targs.empty() || !missing_targs.empty()) {
ObjectContextRef obc = get_object_context(backfill_info.begin, false);
if (size)
data_subset.insert(0, size);
- assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
const auto it = missing.get_items().find(head);
- assert(it != missing.get_items().end());
+ ceph_assert(it != missing.get_items().end());
data_subset.intersection_of(it->second.clean_regions.get_dirty_regions());
dout(10) << "calc_head_subsets " << head
<< " data_subset " << data_subset << dendl;
// pulling head or unversioned object.
// always pull the whole thing.
recovery_info.copy_subset.insert(0, (uint64_t)-1);
- assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
+ ceph_assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
recovery_info.copy_subset.intersection_of(missing_iter->second.clean_regions.get_dirty_regions());
recovery_info.size = ((uint64_t)-1);
recovery_info.object_exist = missing_iter->second.clean_regions.object_is_exist();
get_parent()->begin_peer_recover(peer, soid);
const auto pmissing_iter = get_parent()->get_shard_missing().find(peer);
const auto missing_iter = pmissing_iter->second.get_items().find(soid);
- assert(missing_iter != pmissing_iter->second.get_items().end());
+ ceph_assert(missing_iter != pmissing_iter->second.get_items().end());
// take note.
push_info_t &push_info = pushing[soid][peer];
push_info.obc = obc;
if (!complete) {
//clone overlap content in local object
if (recovery_info.object_exist) {
- assert(r == 0);
+ ceph_assert(r == 0);
uint64_t local_size = std::min(recovery_info.size, (uint64_t)st.st_size);
interval_set<uint64_t> local_intervals_included, local_intervals_excluded;
if (local_size) {
// Punch zeros for data, if fiemap indicates nothing but it is marked dirty
if (data_zeros.size() > 0) {
data_zeros.intersection_of(recovery_info.copy_subset);
- assert(intervals_included.subset_of(data_zeros));
+ ceph_assert(intervals_included.subset_of(data_zeros));
data_zeros.subtract(intervals_included);
dout(20) << __func__ <<" recovering object " << recovery_info.soid
if (attrset.find(SS_ATTR) != attrset.end()) {
bufferlist ssbv = attrset.at(SS_ATTR);
SnapSet ss(ssbv);
- assert(!pull_info.obc->ssc->exists || ss.seq == pull_info.obc->ssc->snapset.seq);
+ ceph_assert(!pull_info.obc->ssc->exists || ss.seq == pull_info.obc->ssc->snapset.seq);
}
pull_info.recovery_info.oi = pull_info.obc->obs.oi;
pull_info.recovery_info = recalc_subsets(
} else {
recovery_info.copy_subset.clear();
}
- assert(recovery_info.clone_subset.empty());
+ ceph_assert(recovery_info.clone_subset.empty());
}
r = build_push_op(recovery_info, progress, 0, reply);
{
dout(20) << __func__ << ": " << soid << " from " << from << dendl;
auto it = pulling.find(soid);
- assert(it != pulling.end());
+ ceph_assert(it != pulling.end());
get_parent()->on_failed_pull(
{ from },
soid,
reinterpret_cast<FuturizedStore::Shard::omap_values_t&>(*out) = std::move(vals);
return 0;
}, FuturizedStore::Shard::read_errorator::all_same_way([] (auto& e) {
- assert(e.value() > 0);
+ ceph_assert(e.value() > 0);
return -e.value();
}))); // this requires seastar::thread
}
return -ENOENT;
} else {
CRIMSON_DEBUG("OSDriver::get_next returning next: {}, ", nit->first);
- assert(nit->first > key);
+ ceph_assert(nit->first > key);
*next = *nit;
return 0;
}
ch, hoid, FuturizedStore::Shard::omap_keys_t{key}
).safe_then([&key, next_or_current] (FuturizedStore::Shard::omap_values_t&& vals) {
CRIMSON_DEBUG("OSDriver::get_next_or_current returning {}", key);
- assert(vals.size() == 1);
+ ceph_assert(vals.size() == 1);
*next_or_current = std::make_pair(key, std::move(vals.begin()->second));
return 0;
}, FuturizedStore::Shard::read_errorator::all_same_way(
__func__, mapping.hoid, mapping.snap, pool, begin, end)
<< dendl;
} else {
- assert(mapping.snap >= begin);
- assert(mapping.snap < end);
- assert(mapping.hoid.pool == pool);
+ ceph_assert(mapping.snap >= begin);
+ ceph_assert(mapping.snap < end);
+ ceph_assert(mapping.hoid.pool == pool);
// invalid
dout(10) << fmt::format(
"{} stray {} snap {} in pool {} shard {} purged_snaps[{}, {})",
int subop_num = (*pctx)->current_osd_subop_num;
OSDOp *osd_op = &(*(*pctx)->ops)[subop_num];
auto [iter, inserted] = (*pctx)->op_finishers.emplace(std::make_pair(subop_num, std::make_unique<GatherFinisher>(osd_op)));
- assert(inserted);
+ ceph_assert(inserted);
auto &gather = *static_cast<GatherFinisher*>(iter->second.get());
for (const auto &obj : src_objs) {
gather.src_obj_buffs[obj] = bufferlist();
int cls_cxx_get_gathered_data(cls_method_context_t hctx, std::map<std::string, bufferlist> *results)
{
- assert(results);
+ ceph_assert(results);
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext**)hctx;
PrimaryLogPG::OpFinisher* op_finisher = nullptr;
int r = 0;
if (!high_priority.empty()) {
auto iter = high_priority.begin();
// invariant: high_priority entries are never empty
- assert(!iter->second.empty());
+ ceph_assert(!iter->second.empty());
WorkItem ret{std::move(iter->second.back())};
iter->second.pop_back();
if (iter->second.empty()) {
* retain the token until the event either fires or is canceled.
* If a user needs/wants to relax that requirement, this assert can
* be removed */
- assert(!cb_token);
+ ceph_assert(!cb_token);
}
};
public:
ScrubMachine *parent,
std::shared_ptr<scheduled_event_state_t> event_state)
: parent(parent), event_state(event_state) {
- assert(*this);
+ ceph_assert(*this);
}
void swap(timer_event_token_t &rhs) {
timer_event_token_t() = default;
timer_event_token_t(timer_event_token_t &&rhs) {
swap(rhs);
- assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
+ ceph_assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
}
timer_event_token_t &operator=(timer_event_token_t &&rhs) {
swap(rhs);
- assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
+ ceph_assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
return *this;
}
operator bool() const {
- assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
+ ceph_assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
return parent;
}
token->cb_token = nullptr;
process_event(std::move(event));
} else {
- assert(nullptr == token->cb_token);
+ ceph_assert(nullptr == token->cb_token);
}
}
);