From 9d239edf2c24b6caabda9c173c29a13dd96cbc24 Mon Sep 17 00:00:00 2001 From: "Adam C. Emerson" Date: Thu, 23 Aug 2018 11:26:10 -0400 Subject: [PATCH] osd: Use ceph_assert for asserts. Signed-off-by: Adam C. Emerson --- src/osd/ClassHandler.cc | 6 +- src/osd/ECBackend.cc | 154 ++++---- src/osd/ECBackend.h | 4 +- src/osd/ECTransaction.cc | 82 ++--- src/osd/ECTransaction.h | 16 +- src/osd/ECUtil.cc | 54 +-- src/osd/ECUtil.h | 10 +- src/osd/ExtentCache.cc | 26 +- src/osd/ExtentCache.h | 30 +- src/osd/HitSet.h | 2 +- src/osd/OSD.cc | 268 +++++++------- src/osd/OSD.h | 30 +- src/osd/OSDMap.cc | 46 +-- src/osd/OSDMap.h | 68 ++-- src/osd/OSDMapMapping.cc | 10 +- src/osd/OSDMapMapping.h | 14 +- src/osd/OpRequest.cc | 2 +- src/osd/PG.cc | 392 ++++++++++----------- src/osd/PG.h | 72 ++-- src/osd/PGBackend.cc | 42 +-- src/osd/PGBackend.h | 4 +- src/osd/PGLog.cc | 20 +- src/osd/PGLog.h | 82 ++--- src/osd/PGTransaction.h | 44 +-- src/osd/PrimaryLogPG.cc | 606 ++++++++++++++++---------------- src/osd/PrimaryLogPG.h | 32 +- src/osd/ReplicatedBackend.cc | 102 +++--- src/osd/ReplicatedBackend.h | 2 +- src/osd/ScrubStore.cc | 6 +- src/osd/Session.cc | 12 +- src/osd/Session.h | 10 +- src/osd/SnapMapper.cc | 32 +- src/osd/SnapMapper.h | 4 +- src/osd/TierAgentState.h | 4 +- src/osd/Watch.cc | 30 +- src/osd/mClockOpClassSupport.cc | 4 +- src/osd/osd_internal_types.h | 46 +-- src/osd/osd_types.cc | 68 ++-- src/osd/osd_types.h | 56 +-- 39 files changed, 1246 insertions(+), 1246 deletions(-) diff --git a/src/osd/ClassHandler.cc b/src/osd/ClassHandler.cc index a77acbaeccb21..51a8fdb510f18 100644 --- a/src/osd/ClassHandler.cc +++ b/src/osd/ClassHandler.cc @@ -28,9 +28,9 @@ void ClassHandler::add_embedded_class(const string& cname) { - assert(mutex.is_locked()); + ceph_assert(mutex.is_locked()); ClassData *cls = _get_class(cname, false); - assert(cls->status == ClassData::CLASS_UNKNOWN); + ceph_assert(cls->status == ClassData::CLASS_UNKNOWN); cls->status = ClassData::CLASS_INITIALIZING; } @@ -211,7 +211,7 @@ int ClassHandler::_load_class(ClassData *cls) ClassHandler::ClassData *ClassHandler::register_class(const char *cname) { - assert(mutex.is_locked()); + ceph_assert(mutex.is_locked()); ClassData *cls = _get_class(cname, false); ldout(cct, 10) << "register_class " << cname << " status " << cls->status << dendl; diff --git a/src/osd/ECBackend.cc b/src/osd/ECBackend.cc index d5af9f792b979..eba12bec67748 100644 --- a/src/osd/ECBackend.cc +++ b/src/osd/ECBackend.cc @@ -46,7 +46,7 @@ ostream &operator<<(ostream &lhs, const ECBackend::pipeline_state_t &rhs) { case ECBackend::pipeline_state_t::CACHE_INVALID: return lhs << "CACHE_INVALID"; default: - assert(0 == "invalid pipeline state"); + ceph_assert(0 == "invalid pipeline state"); } return lhs; // unreachable } @@ -199,7 +199,7 @@ ECBackend::ECBackend( : PGBackend(cct, pg, store, coll, ch), ec_impl(ec_impl), sinfo(ec_impl->get_data_chunk_count(), stripe_width) { - assert((ec_impl->get_data_chunk_count() * + ceph_assert((ec_impl->get_data_chunk_count() * ec_impl->get_chunk_size(stripe_width)) == stripe_width); } @@ -216,7 +216,7 @@ void ECBackend::_failed_push(const hobject_t &hoid, << res.r << " errors=" << res.errors << dendl; dout(10) << __func__ << ": canceling recovery op for obj " << hoid << dendl; - assert(recovery_ops.count(hoid)); + ceph_assert(recovery_ops.count(hoid)); eversion_t v = recovery_ops[hoid].v; recovery_ops.erase(hoid); @@ -241,7 +241,7 @@ struct OnRecoveryReadComplete : pg->_failed_push(hoid, in); return; } - assert(res.returned.size() == 1); + 
ceph_assert(res.returned.size() == 1); pg->handle_recovery_read_complete( hoid, res.returned.back(), @@ -262,7 +262,7 @@ struct RecoveryMessages { bool attrs) { list > to_read; to_read.push_back(boost::make_tuple(off, len, 0)); - assert(!reads.count(hoid)); + ceph_assert(!reads.count(hoid)); want_to_read.insert(make_pair(hoid, std::move(_want_to_read))); reads.insert( make_pair( @@ -317,7 +317,7 @@ void ECBackend::handle_recovery_push( if (!op.data_included.empty()) { uint64_t start = op.data_included.range_start(); uint64_t end = op.data_included.range_end(); - assert(op.data.length() == (end - start)); + ceph_assert(op.data.length() == (end - start)); m->t.write( coll, @@ -326,11 +326,11 @@ void ECBackend::handle_recovery_push( op.data.length(), op.data); } else { - assert(op.data.length() == 0); + ceph_assert(op.data.length() == 0); } if (op.before_progress.first) { - assert(op.attrset.count(string("_"))); + ceph_assert(op.attrset.count(string("_"))); m->t.setattrs( coll, tobj, @@ -350,8 +350,8 @@ void ECBackend::handle_recovery_push( } if (op.after_progress.data_complete) { if ((get_parent()->pgb_is_primary())) { - assert(recovery_ops.count(op.soid)); - assert(recovery_ops[op.soid].obc); + ceph_assert(recovery_ops.count(op.soid)); + ceph_assert(recovery_ops[op.soid].obc); get_parent()->on_local_recover( op.soid, op.recovery_info, @@ -379,7 +379,7 @@ void ECBackend::handle_recovery_push_reply( if (!recovery_ops.count(op.soid)) return; RecoveryOp &rop = recovery_ops[op.soid]; - assert(rop.waiting_on_pushes.count(from)); + ceph_assert(rop.waiting_on_pushes.count(from)); rop.waiting_on_pushes.erase(from); continue_recovery_op(rop, m); } @@ -396,9 +396,9 @@ void ECBackend::handle_recovery_read_complete( << ", " << to_read.get<2>() << ")" << dendl; - assert(recovery_ops.count(hoid)); + ceph_assert(recovery_ops.count(hoid)); RecoveryOp &op = recovery_ops[hoid]; - assert(op.returned_data.empty()); + ceph_assert(op.returned_data.empty()); map target; for (set::iterator i = op.missing_on_shards.begin(); i != op.missing_on_shards.end(); @@ -414,7 +414,7 @@ void ECBackend::handle_recovery_read_complete( dout(10) << __func__ << ": " << from << dendl; int r; r = ECUtil::decode(sinfo, ec_impl, from, target); - assert(r == 0); + ceph_assert(r == 0); if (attrs) { op.xattrs.swap(*attrs); @@ -435,21 +435,21 @@ void ECBackend::handle_recovery_read_complete( map sanitized_attrs(op.xattrs); sanitized_attrs.erase(ECUtil::get_hinfo_key()); op.obc = get_parent()->get_obc(hoid, sanitized_attrs); - assert(op.obc); + ceph_assert(op.obc); op.recovery_info.size = op.obc->obs.oi.size; op.recovery_info.oi = op.obc->obs.oi; } ECUtil::HashInfo hinfo(ec_impl->get_chunk_count()); if (op.obc->obs.oi.size > 0) { - assert(op.xattrs.count(ECUtil::get_hinfo_key())); + ceph_assert(op.xattrs.count(ECUtil::get_hinfo_key())); auto bp = op.xattrs[ECUtil::get_hinfo_key()].cbegin(); decode(hinfo, bp); } op.hinfo = unstable_hashinfo_registry.lookup_or_create(hoid, hinfo); } - assert(op.xattrs.size()); - assert(op.obc); + ceph_assert(op.xattrs.size()); + ceph_assert(op.obc); continue_recovery_op(op, m); } @@ -544,7 +544,7 @@ void ECBackend::continue_recovery_op( case RecoveryOp::IDLE: { // start read op.state = RecoveryOp::READING; - assert(!op.recovery_progress.data_complete); + ceph_assert(!op.recovery_progress.data_complete); set want(op.missing_on_shards.begin(), op.missing_on_shards.end()); uint64_t from = op.recovery_progress.data_recovered_to; uint64_t amount = get_recovery_chunk_size(); @@ -552,7 +552,7 @@ void 
ECBackend::continue_recovery_op( if (op.recovery_progress.first && op.obc) { /* We've got the attrs and the hinfo, might as well use them */ op.hinfo = get_hash_info(op.hoid); - assert(op.hinfo); + ceph_assert(op.hinfo); op.xattrs = op.obc->attr_cache; encode(*(op.hinfo), op.xattrs[ECUtil::get_hinfo_key()]); } @@ -562,7 +562,7 @@ void ECBackend::continue_recovery_op( op.hoid, want, true, false, &to_read); if (r != 0) { // we must have lost a recovery source - assert(!op.recovery_progress.first); + ceph_assert(!op.recovery_progress.first); dout(10) << __func__ << ": canceling recovery op for obj " << op.hoid << dendl; get_parent()->cancel_pull(op.hoid); @@ -585,8 +585,8 @@ void ECBackend::continue_recovery_op( } case RecoveryOp::READING: { // read completed, start write - assert(op.xattrs.size()); - assert(op.returned_data.size()); + ceph_assert(op.xattrs.size()); + ceph_assert(op.returned_data.size()); op.state = RecoveryOp::WRITING; ObjectRecoveryProgress after_progress = op.recovery_progress; after_progress.data_recovered_to += op.extent_requested.second; @@ -600,7 +600,7 @@ void ECBackend::continue_recovery_op( for (set::iterator mi = op.missing_on.begin(); mi != op.missing_on.end(); ++mi) { - assert(op.returned_data.count(mi->shard)); + ceph_assert(op.returned_data.count(mi->shard)); m->pushes[*mi].push_back(PushOp()); PushOp &pop = m->pushes[*mi].back(); pop.soid = op.hoid; @@ -610,7 +610,7 @@ void ECBackend::continue_recovery_op( << ", after_progress=" << after_progress << ", pop.data.length()=" << pop.data.length() << ", size=" << op.obc->obs.oi.size << dendl; - assert( + ceph_assert( pop.data.length() == sinfo.aligned_logical_offset_to_chunk_offset( after_progress.data_recovered_to - @@ -690,7 +690,7 @@ void ECBackend::run_recovery_op( i != h->ops.end(); ++i) { dout(10) << __func__ << ": starting " << *i << dendl; - assert(!recovery_ops.count(i->hoid)); + ceph_assert(!recovery_ops.count(i->hoid)); RecoveryOp &op = recovery_ops.insert(make_pair(i->hoid, *i)).first->second; continue_recovery_op(op, &m); } @@ -720,13 +720,13 @@ int ECBackend::recover_object( } if (hoid.is_snap()) { if (obc) { - assert(obc->ssc); + ceph_assert(obc->ssc); h->ops.back().recovery_info.ss = obc->ssc->snapset; } else if (head) { - assert(head->ssc); + ceph_assert(head->ssc); h->ops.back().recovery_info.ss = head->ssc->snapset; } else { - assert(0 == "neither obc nor head set for a snap object"); + ceph_assert(0 == "neither obc nor head set for a snap object"); } } h->ops.back().recovery_progress.omap_complete = true; @@ -1026,7 +1026,7 @@ void ECBackend::handle_sub_read( // are read in sections, so the digest check here won't be done here. // Do NOT check osd_read_eio_on_bad_digest here. We need to report // the state of our chunk in case other chunks could substitute. 
- assert(hinfo->has_chunk_hash()); + ceph_assert(hinfo->has_chunk_hash()); if ((bl.length() == hinfo->get_total_chunk_size()) && (j->get<0>() == 0)) { dout(20) << __func__ << ": Checking hash of " << i->first << dendl; @@ -1079,10 +1079,10 @@ void ECBackend::handle_sub_write_reply( const ZTracer::Trace &trace) { map::iterator i = tid_to_op_map.find(op.tid); - assert(i != tid_to_op_map.end()); + ceph_assert(i != tid_to_op_map.end()); if (op.committed) { trace.event("sub write committed"); - assert(i->second.pending_commit.count(from)); + ceph_assert(i->second.pending_commit.count(from)); i->second.pending_commit.erase(from); if (from != get_parent()->whoami_shard()) { get_parent()->update_peer_last_complete_ondisk(from, op.last_complete); @@ -1090,7 +1090,7 @@ void ECBackend::handle_sub_write_reply( } if (op.applied) { trace.event("sub write applied"); - assert(i->second.pending_apply.count(from)); + ceph_assert(i->second.pending_apply.count(from)); i->second.pending_apply.erase(from); } @@ -1124,7 +1124,7 @@ void ECBackend::handle_sub_read_reply( for (auto i = op.buffers_read.begin(); i != op.buffers_read.end(); ++i) { - assert(!op.errors.count(i->first)); // If attribute error we better not have sent a buffer + ceph_assert(!op.errors.count(i->first)); // If attribute error we better not have sent a buffer if (!rop.to_read.count(i->first)) { // We canceled this read! @see filter_read_op dout(20) << __func__ << " to_read skipping" << dendl; @@ -1139,19 +1139,19 @@ void ECBackend::handle_sub_read_reply( for (list >::iterator j = i->second.begin(); j != i->second.end(); ++j, ++req_iter, ++riter) { - assert(req_iter != rop.to_read.find(i->first)->second.to_read.end()); - assert(riter != rop.complete[i->first].returned.end()); + ceph_assert(req_iter != rop.to_read.find(i->first)->second.to_read.end()); + ceph_assert(riter != rop.complete[i->first].returned.end()); pair adjusted = sinfo.aligned_offset_len_to_chunk( make_pair(req_iter->get<0>(), req_iter->get<1>())); - assert(adjusted.first == j->first); + ceph_assert(adjusted.first == j->first); riter->get<2>()[from].claim(j->second); } } for (auto i = op.attrs_read.begin(); i != op.attrs_read.end(); ++i) { - assert(!op.errors.count(i->first)); // if read error better not have sent an attribute + ceph_assert(!op.errors.count(i->first)); // if read error better not have sent an attribute if (!rop.to_read.count(i->first)) { // We canceled this read! 
@see filter_read_op dout(20) << __func__ << " to_read skipping" << dendl; @@ -1172,11 +1172,11 @@ void ECBackend::handle_sub_read_reply( map >::iterator siter = shard_to_read_map.find(from); - assert(siter != shard_to_read_map.end()); - assert(siter->second.count(op.tid)); + ceph_assert(siter != shard_to_read_map.end()); + ceph_assert(siter->second.count(op.tid)); siter->second.erase(op.tid); - assert(rop.in_progress.count(from)); + ceph_assert(rop.in_progress.count(from)); rop.in_progress.erase(from); unsigned is_complete = 0; // For redundant reads check for completion as each shard comes in, @@ -1219,7 +1219,7 @@ void ECBackend::handle_sub_read_reply( ++is_complete; } } else { - assert(rop.complete[iter->first].r == 0); + ceph_assert(rop.complete[iter->first].r == 0); if (!rop.complete[iter->first].errors.empty()) { if (cct->_conf->osd_read_ec_check_for_errors) { dout(10) << __func__ << ": Not ignoring errors, use one shard err=" << err << dendl; @@ -1252,7 +1252,7 @@ void ECBackend::complete_read_op(ReadOp &rop, RecoveryMessages *m) rop.to_read.begin(); map::iterator resiter = rop.complete.begin(); - assert(rop.to_read.size() == rop.complete.size()); + ceph_assert(rop.to_read.size() == rop.complete.size()); for (; reqiter != rop.to_read.end(); ++reqiter, ++resiter) { if (reqiter->second.cb) { pair arg( @@ -1277,7 +1277,7 @@ struct FinishReadOp : public GenContext { FinishReadOp(ECBackend *ec, ceph_tid_t tid) : ec(ec), tid(tid) {} void finish(ThreadPool::TPHandle &handle) override { auto ropiter = ec->tid_to_read_map.find(tid); - assert(ropiter != ec->tid_to_read_map.end()); + ceph_assert(ropiter != ec->tid_to_read_map.end()); int priority = ropiter->second.priority; RecoveryMessages rm; ec->complete_read_op(ropiter->second, &rm); @@ -1317,7 +1317,7 @@ void ECBackend::filter_read_op( if (i->second.empty()) { op.source_to_obj.erase(i++); } else { - assert(!osdmap->is_down(i->first.osd)); + ceph_assert(!osdmap->is_down(i->first.osd)); ++i; } } @@ -1327,11 +1327,11 @@ void ECBackend::filter_read_op( ++i) { get_parent()->cancel_pull(*i); - assert(op.to_read.count(*i)); + ceph_assert(op.to_read.count(*i)); read_request_t &req = op.to_read.find(*i)->second; dout(10) << __func__ << ": canceling " << req << " for obj " << *i << dendl; - assert(req.cb); + ceph_assert(req.cb); delete req.cb; req.cb = nullptr; @@ -1365,7 +1365,7 @@ void ECBackend::check_recovery_sources(const OSDMapRef& osdmap) i != tids_to_filter.end(); ++i) { map::iterator j = tid_to_read_map.find(*i); - assert(j != tid_to_read_map.end()); + ceph_assert(j != tid_to_read_map.end()); filter_read_op(osdmap, j->second); } } @@ -1445,7 +1445,7 @@ void ECBackend::submit_transaction( OpRequestRef client_op ) { - assert(!tid_to_op_map.count(tid)); + ceph_assert(!tid_to_op_map.count(tid)); Op *op = &(tid_to_op_map[tid]); op->hoid = hoid; op->delta_stats = delta_stats; @@ -1492,9 +1492,9 @@ void ECBackend::get_all_avail_shards( if (error_shards.find(*i) != error_shards.end()) continue; if (!missing.is_missing(hoid)) { - assert(!have.count(i->shard)); + ceph_assert(!have.count(i->shard)); have.insert(i->shard); - assert(!shards.count(i->shard)); + ceph_assert(!shards.count(i->shard)); shards.insert(make_pair(i->shard, *i)); } } @@ -1507,11 +1507,11 @@ void ECBackend::get_all_avail_shards( if (error_shards.find(*i) != error_shards.end()) continue; if (have.count(i->shard)) { - assert(shards.count(i->shard)); + ceph_assert(shards.count(i->shard)); continue; } dout(10) << __func__ << ": checking backfill " << *i << dendl; - 
assert(!shards.count(i->shard)); + ceph_assert(!shards.count(i->shard)); const pg_info_t &info = get_parent()->get_shard_info(*i); const pg_missing_t &missing = get_parent()->get_shard_missing(*i); if (hoid < info.last_backfill && @@ -1530,7 +1530,7 @@ void ECBackend::get_all_avail_shards( dout(10) << __func__ << ": checking missing_loc " << *i << dendl; auto m = get_parent()->maybe_get_shard_missing(*i); if (m) { - assert(!(*m).is_missing(hoid)); + ceph_assert(!(*m).is_missing(hoid)); } if (error_shards.find(*i) != error_shards.end()) continue; @@ -1549,7 +1549,7 @@ int ECBackend::get_min_avail_to_read_shards( map>> *to_read) { // Make sure we don't do redundant reads for recovery - assert(!for_recovery || !do_redundant_reads); + ceph_assert(!for_recovery || !do_redundant_reads); set have; map shards; @@ -1574,7 +1574,7 @@ int ECBackend::get_min_avail_to_read_shards( return 0; for (auto &&i:need) { - assert(shards.count(shard_id_t(i.first))); + ceph_assert(shards.count(shard_id_t(i.first))); to_read->insert(make_pair(shards[shard_id_t(i.first)], i.second)); } return 0; @@ -1588,7 +1588,7 @@ int ECBackend::get_remaining_shards( map>> *to_read, bool for_recovery) { - assert(to_read); + ceph_assert(to_read); set have; map shards; @@ -1619,8 +1619,8 @@ int ECBackend::get_remaining_shards( for (set::iterator i = shards_left.begin(); i != shards_left.end(); ++i) { - assert(shards.count(shard_id_t(*i))); - assert(avail.find(*i) == avail.end()); + ceph_assert(shards.count(shard_id_t(*i))); + ceph_assert(avail.find(*i) == avail.end()); to_read->insert(make_pair(shards[shard_id_t(*i)], subchunks)); } return 0; @@ -1635,7 +1635,7 @@ void ECBackend::start_read_op( bool for_recovery) { ceph_tid_t tid = get_parent()->get_tid(); - assert(!tid_to_read_map.count(tid)); + ceph_assert(!tid_to_read_map.count(tid)); auto &op = tid_to_read_map.emplace( tid, ReadOp( @@ -1693,7 +1693,7 @@ void ECBackend::do_read_op(ReadOp &op) chunk_off_len.second, j->get<2>())); } - assert(!need_attrs); + ceph_assert(!need_attrs); } } @@ -1785,7 +1785,7 @@ ECUtil::HashInfoRef ECBackend::get_hash_info( void ECBackend::start_rmw(Op *op, PGTransactionUPtr &&t) { - assert(op); + ceph_assert(op); op->plan = ECTransaction::get_write_plan( sinfo, @@ -1816,7 +1816,7 @@ bool ECBackend::try_state_to_reads() Op *op = &(waiting_state.front()); if (op->requires_rmw() && pipeline_state.cache_invalid()) { - assert(get_parent()->get_pool().allows_ecoverwrites()); + ceph_assert(get_parent()->get_pool().allows_ecoverwrites()); dout(20) << __func__ << ": blocking " << *op << " because it requires an rmw and the cache is invalid " << pipeline_state @@ -1870,7 +1870,7 @@ bool ECBackend::try_state_to_reads() dout(10) << __func__ << ": " << *op << dendl; if (!op->remote_read.empty()) { - assert(get_parent()->get_pool().allows_ecoverwrites()); + ceph_assert(get_parent()->get_pool().allows_ecoverwrites()); objects_read_async_no_cache( op->remote_read, [this, op](map > &&results) { @@ -1911,7 +1911,7 @@ bool ECBackend::try_reads_to_commit() } op->pending_read.clear(); } else { - assert(op->pending_read.empty()); + ceph_assert(op->pending_read.empty()); } map trans; @@ -1959,7 +1959,7 @@ bool ECBackend::try_reads_to_commit() written_set[i.first] = i.second.get_interval_set(); } dout(20) << __func__ << ": written_set: " << written_set << dendl; - assert(written_set == op->plan.will_write); + ceph_assert(written_set == op->plan.will_write); if (op->using_cache) { for (auto &&hpair: written) { @@ -1982,7 +1982,7 @@ bool ECBackend::try_reads_to_commit() 
op->pending_commit.insert(*i); map::iterator iter = trans.find(i->shard); - assert(iter != trans.end()); + ceph_assert(iter != trans.end()); bool should_send = get_parent()->should_send_op(*i, op->hoid); const pg_stat_t &stats = (should_send || !backfill_shards.count(*i)) ? @@ -2180,13 +2180,13 @@ void ECBackend::objects_read_async( if (r == 0) r = got.first; } else { - assert(read.second.first); + ceph_assert(read.second.first); uint64_t offset = read.first.get<0>(); uint64_t length = read.first.get<1>(); auto range = got.second.get_containing_range(offset, length); - assert(range.first != range.second); - assert(range.first.get_off() <= offset); - assert( + ceph_assert(range.first != range.second); + ceph_assert(range.first.get_off() <= offset); + ceph_assert( (offset + length) <= (range.first.get_off() + range.first.get_len())); read.second.first->substr_of( @@ -2239,13 +2239,13 @@ struct CallClientContexts : extent_map result; if (res.r != 0) goto out; - assert(res.returned.size() == to_read.size()); - assert(res.errors.empty()); + ceph_assert(res.returned.size() == to_read.size()); + ceph_assert(res.errors.empty()); for (auto &&read: to_read) { pair adjusted = ec->sinfo.offset_len_to_stripe_bounds( make_pair(read.get<0>(), read.get<1>())); - assert(res.returned.front().get<0>() == adjusted.first && + ceph_assert(res.returned.front().get<0>() == adjusted.first && res.returned.front().get<1>() == adjusted.second); map to_decode; bufferlist bl; @@ -2307,7 +2307,7 @@ void ECBackend::objects_read_and_reconstruct( false, fast_read, &shards); - assert(r == 0); + ceph_assert(r == 0); CallClientContexts *c = new CallClientContexts( to_read.first, @@ -2402,7 +2402,7 @@ void ECBackend::rollback_append( uint64_t old_size, ObjectStore::Transaction *t) { - assert(old_size % sinfo.get_stripe_width() == 0); + ceph_assert(old_size % sinfo.get_stripe_width() == 0); t->truncate( coll, ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard), @@ -2474,7 +2474,7 @@ int ECBackend::be_deep_scrub( return 0; } else { if (!get_parent()->get_pool().allows_ecoverwrites()) { - assert(hinfo->has_chunk_hash()); + ceph_assert(hinfo->has_chunk_hash()); if (hinfo->get_total_chunk_size() != (unsigned)pos.data_pos) { dout(0) << "_scan_list " << poid << " got incorrect size on read 0x" << std::hex << pos diff --git a/src/osd/ECBackend.h b/src/osd/ECBackend.h index 7fc8a25717947..d8d4a71d664bf 100644 --- a/src/osd/ECBackend.h +++ b/src/osd/ECBackend.h @@ -155,9 +155,9 @@ public: const hobject_t &hoid, int err, extent_map &&buffers) { - assert(objects_to_read); + ceph_assert(objects_to_read); --objects_to_read; - assert(!results.count(hoid)); + ceph_assert(!results.count(hoid)); results.emplace(hoid, make_pair(err, std::move(buffers))); } bool is_complete() const { diff --git a/src/osd/ECTransaction.cc b/src/osd/ECTransaction.cc index fe1304003633a..ee791d633c6ea 100644 --- a/src/osd/ECTransaction.cc +++ b/src/osd/ECTransaction.cc @@ -36,14 +36,14 @@ void encode_and_write( map *transactions, DoutPrefixProvider *dpp) { const uint64_t before_size = hinfo->get_total_logical_size(sinfo); - assert(sinfo.logical_offset_is_stripe_aligned(offset)); - assert(sinfo.logical_offset_is_stripe_aligned(bl.length())); - assert(bl.length()); + ceph_assert(sinfo.logical_offset_is_stripe_aligned(offset)); + ceph_assert(sinfo.logical_offset_is_stripe_aligned(bl.length())); + ceph_assert(bl.length()); map buffers; int r = ECUtil::encode( sinfo, ecimpl, bl, want, &buffers); - assert(r == 0); + ceph_assert(r == 0); 
written.insert(offset, bl.length(), bl); @@ -53,14 +53,14 @@ void encode_and_write( << dendl; if (offset >= before_size) { - assert(offset == before_size); + ceph_assert(offset == before_size); hinfo->append( sinfo.aligned_logical_offset_to_chunk_offset(offset), buffers); } for (auto &&i : *transactions) { - assert(buffers.count(i.first)); + ceph_assert(buffers.count(i.first)); bufferlist &enc_bl = buffers[i.first]; if (offset >= before_size) { i.second.set_alloc_hint( @@ -107,11 +107,11 @@ void ECTransaction::generate_transactions( set *temp_removed, DoutPrefixProvider *dpp) { - assert(written_map); - assert(transactions); - assert(temp_added); - assert(temp_removed); - assert(plan.t); + ceph_assert(written_map); + ceph_assert(transactions); + ceph_assert(temp_added); + ceph_assert(temp_removed); + ceph_assert(plan.t); auto &t = *(plan.t); auto &hash_infos = plan.hash_infos; @@ -137,15 +137,15 @@ void ECTransaction::generate_transactions( obc = obiter->second; } if (entry) { - assert(obc); + ceph_assert(obc); } else { - assert(oid.is_temp()); + ceph_assert(oid.is_temp()); } ECUtil::HashInfoRef hinfo; { auto iter = hash_infos.find(oid); - assert(iter != hash_infos.end()); + ceph_assert(iter != hash_infos.end()); hinfo = iter->second; } @@ -185,17 +185,17 @@ void ECTransaction::generate_transactions( } map > xattr_rollback; - assert(hinfo); + ceph_assert(hinfo); bufferlist old_hinfo; encode(*hinfo, old_hinfo); xattr_rollback[ECUtil::get_hinfo_key()] = old_hinfo; if (op.is_none() && op.truncate && op.truncate->first == 0) { - assert(op.truncate->first == 0); - assert(op.truncate->first == + ceph_assert(op.truncate->first == 0); + ceph_assert(op.truncate->first == op.truncate->second); - assert(entry); - assert(obc); + ceph_assert(entry); + ceph_assert(obc); if (op.truncate->first != op.truncate->second) { op.truncate->first = op.truncate->second; @@ -278,17 +278,17 @@ void ECTransaction::generate_transactions( } auto siter = hash_infos.find(op.source); - assert(siter != hash_infos.end()); + ceph_assert(siter != hash_infos.end()); hinfo->update_to(*(siter->second)); if (obc) { auto cobciter = obc_map.find(op.source); - assert(cobciter != obc_map.end()); + ceph_assert(cobciter != obc_map.end()); obc->attr_cache = cobciter->second->attr_cache; } }, [&](const PGTransaction::ObjectOperation::Init::Rename &op) { - assert(op.source.is_temp()); + ceph_assert(op.source.is_temp()); for (auto &&st: *transactions) { st.second.collection_move_rename( coll_t(spg_t(pgid, st.first)), @@ -297,19 +297,19 @@ void ECTransaction::generate_transactions( ghobject_t(oid, ghobject_t::NO_GEN, st.first)); } auto siter = hash_infos.find(op.source); - assert(siter != hash_infos.end()); + ceph_assert(siter != hash_infos.end()); hinfo->update_to(*(siter->second)); if (obc) { auto cobciter = obc_map.find(op.source); - assert(cobciter == obc_map.end()); + ceph_assert(cobciter == obc_map.end()); obc->attr_cache.clear(); } }); // omap not supported (except 0, handled above) - assert(!(op.clear_omap)); - assert(!(op.omap_header)); - assert(op.omap_updates.empty()); + ceph_assert(!(op.clear_omap)); + ceph_assert(!(op.omap_header)); + ceph_assert(op.omap_updates.empty()); if (!op.attr_updates.empty()) { map to_set; @@ -347,7 +347,7 @@ void ECTransaction::generate_transactions( obc->attr_cache.erase(citer); } } else { - assert(!entry); + ceph_assert(!entry); } } for (auto &&st : *transactions) { @@ -356,7 +356,7 @@ void ECTransaction::generate_transactions( ghobject_t(oid, ghobject_t::NO_GEN, st.first), to_set); } - 
assert(!xattr_rollback.empty()); + ceph_assert(!xattr_rollback.empty()); } if (entry && !xattr_rollback.empty()) { entry->mod_desc.setattrs(xattr_rollback); @@ -397,7 +397,7 @@ void ECTransaction::generate_transactions( uint64_t append_after = new_size; ldpp_dout(dpp, 20) << __func__ << ": new_size start " << new_size << dendl; if (op.truncate && op.truncate->first < new_size) { - assert(!op.is_fresh_object()); + ceph_assert(!op.is_fresh_object()); new_size = sinfo.logical_to_next_stripe_offset( op.truncate->first); ldpp_dout(dpp, 20) << __func__ << ": new_size truncate down " @@ -424,7 +424,7 @@ void ECTransaction::generate_transactions( uint64_t restore_len = sinfo.aligned_logical_offset_to_chunk_offset( orig_size - sinfo.logical_to_prev_stripe_offset(op.truncate->first)); - assert(rollback_extents.empty()); + ceph_assert(rollback_extents.empty()); ldpp_dout(dpp, 20) << __func__ << ": saving extent " << make_pair(restore_from, restore_len) @@ -473,7 +473,7 @@ void ECTransaction::generate_transactions( bl.append_zero(extent.get_len()); }, [&](const BufferUpdate::CloneRange &) { - assert( + ceph_assert( 0 == "CloneRange is not allowed, do_op should have returned ENOTSUPP"); }); @@ -484,9 +484,9 @@ void ECTransaction::generate_transactions( ldpp_dout(dpp, 20) << __func__ << ": adding buffer_update " << make_pair(off, len) << dendl; - assert(len > 0); + ceph_assert(len > 0); if (off > new_size) { - assert(off > append_after); + ceph_assert(off > append_after); bl.prepend_zero(off - new_size); len += off - new_size; ldpp_dout(dpp, 20) << __func__ << ": prepending zeroes to align " @@ -514,7 +514,7 @@ void ECTransaction::generate_transactions( if (op.truncate && op.truncate->second > new_size) { - assert(op.truncate->second > append_after); + ceph_assert(op.truncate->second > append_after); uint64_t truncate_to = sinfo.logical_to_next_stripe_offset( op.truncate->second); @@ -540,9 +540,9 @@ void ECTransaction::generate_transactions( << to_overwrite << dendl; for (auto &&extent: to_overwrite) { - assert(extent.get_off() + extent.get_len() <= append_after); - assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off())); - assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len())); + ceph_assert(extent.get_off() + extent.get_len() <= append_after); + ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off())); + ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len())); if (entry) { uint64_t restore_from = sinfo.aligned_logical_offset_to_chunk_offset( extent.get_off()); @@ -591,8 +591,8 @@ void ECTransaction::generate_transactions( << to_append << dendl; for (auto &&extent: to_append) { - assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off())); - assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len())); + ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off())); + ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len())); ldpp_dout(dpp, 20) << __func__ << ": appending " << extent.get_off() << "~" << extent.get_len() << dendl; @@ -627,7 +627,7 @@ void ECTransaction::generate_transactions( hinfo->set_total_chunk_size_clear_hash( sinfo.aligned_logical_offset_to_chunk_offset(new_size)); } else { - assert(hinfo->get_total_logical_size(sinfo) == new_size); + ceph_assert(hinfo->get_total_logical_size(sinfo) == new_size); } if (entry && !to_append.empty()) { diff --git a/src/osd/ECTransaction.h b/src/osd/ECTransaction.h index f6719380c5f73..ae0faf5df4de4 100644 --- a/src/osd/ECTransaction.h +++ b/src/osd/ECTransaction.h @@ -89,7 
+89,7 @@ namespace ECTransaction { for (auto &&extent: i.second.buffer_updates) { using BufferUpdate = PGTransaction::ObjectOperation::BufferUpdate; if (boost::get(&(extent.get_val()))) { - assert( + ceph_assert( 0 == "CloneRange is not allowed, do_op should have returned ENOTSUPP"); } @@ -109,8 +109,8 @@ namespace ECTransaction { } if (head_start != head_finish && head_start < orig_size) { - assert(head_finish <= orig_size); - assert(head_finish - head_start == sinfo.get_stripe_width()); + ceph_assert(head_finish <= orig_size); + ceph_assert(head_finish - head_start == sinfo.get_stripe_width()); ldpp_dout(dpp, 20) << __func__ << ": reading partial head stripe " << head_start << "~" << sinfo.get_stripe_width() << dendl; @@ -127,8 +127,8 @@ namespace ECTransaction { if (tail_start != tail_finish && (head_start == head_finish || tail_start != head_start) && tail_start < orig_size) { - assert(tail_finish <= orig_size); - assert(tail_finish - tail_start == sinfo.get_stripe_width()); + ceph_assert(tail_finish <= orig_size); + ceph_assert(tail_finish - tail_start == sinfo.get_stripe_width()); ldpp_dout(dpp, 20) << __func__ << ": reading partial tail stripe " << tail_start << "~" << sinfo.get_stripe_width() << dendl; @@ -137,7 +137,7 @@ namespace ECTransaction { } if (head_start != tail_finish) { - assert( + ceph_assert( sinfo.logical_offset_is_stripe_aligned( tail_finish - head_start) ); @@ -146,7 +146,7 @@ namespace ECTransaction { if (tail_finish > projected_size) projected_size = tail_finish; } else { - assert(tail_finish <= projected_size); + ceph_assert(tail_finish <= projected_size); } } @@ -174,7 +174,7 @@ namespace ECTransaction { * to_read should have an entry for i.first iff it isn't empty * and if we are reading from i.first, we can't be renaming or * cloning it */ - assert(plan.to_read.count(i.first) == 0 || + ceph_assert(plan.to_read.count(i.first) == 0 || (!plan.to_read.at(i.first).empty() && !i.second.has_source())); }); diff --git a/src/osd/ECUtil.cc b/src/osd/ECUtil.cc index 786e2cea4a709..311e8526a769d 100644 --- a/src/osd/ECUtil.cc +++ b/src/osd/ECUtil.cc @@ -11,18 +11,18 @@ int ECUtil::decode( ErasureCodeInterfaceRef &ec_impl, map &to_decode, bufferlist *out) { - assert(to_decode.size()); + ceph_assert(to_decode.size()); uint64_t total_data_size = to_decode.begin()->second.length(); - assert(total_data_size % sinfo.get_chunk_size() == 0); + ceph_assert(total_data_size % sinfo.get_chunk_size() == 0); - assert(out); - assert(out->length() == 0); + ceph_assert(out); + ceph_assert(out->length() == 0); for (map::iterator i = to_decode.begin(); i != to_decode.end(); ++i) { - assert(i->second.length() == total_data_size); + ceph_assert(i->second.length() == total_data_size); } if (total_data_size == 0) @@ -37,8 +37,8 @@ int ECUtil::decode( } bufferlist bl; int r = ec_impl->decode_concat(chunks, &bl); - assert(r == 0); - assert(bl.length() == sinfo.get_stripe_width()); + ceph_assert(r == 0); + ceph_assert(bl.length() == sinfo.get_stripe_width()); out->claim_append(bl); } return 0; @@ -50,7 +50,7 @@ int ECUtil::decode( map &to_decode, map &out) { - assert(to_decode.size()); + ceph_assert(to_decode.size()); for (auto &&i : to_decode) { if(i.second.length() == 0) @@ -61,20 +61,20 @@ int ECUtil::decode( for (map::iterator i = out.begin(); i != out.end(); ++i) { - assert(i->second); - assert(i->second->length() == 0); + ceph_assert(i->second); + ceph_assert(i->second->length() == 0); need.insert(i->first); } set avail; for (auto &&i : to_decode) { - assert(i.second.length() != 0); + 
ceph_assert(i.second.length() != 0); avail.insert(i.first); } map>> min; int r = ec_impl->minimum_to_decode(need, avail, &min); - assert(r == 0); + ceph_assert(r == 0); int chunks_count = 0; int repair_data_per_chunk = 0; @@ -104,15 +104,15 @@ int ECUtil::decode( } map out_bls; r = ec_impl->decode(need, chunks, &out_bls, sinfo.get_chunk_size()); - assert(r == 0); + ceph_assert(r == 0); for (auto j = out.begin(); j != out.end(); ++j) { - assert(out_bls.count(j->first)); - assert(out_bls[j->first].length() == sinfo.get_chunk_size()); + ceph_assert(out_bls.count(j->first)); + ceph_assert(out_bls[j->first].length() == sinfo.get_chunk_size()); j->second->claim_append(out_bls[j->first]); } } for (auto &&i : out) { - assert(i.second->length() == chunks_count * sinfo.get_chunk_size()); + ceph_assert(i.second->length() == chunks_count * sinfo.get_chunk_size()); } return 0; } @@ -126,9 +126,9 @@ int ECUtil::encode( uint64_t logical_size = in.length(); - assert(logical_size % sinfo.get_stripe_width() == 0); - assert(out); - assert(out->empty()); + ceph_assert(logical_size % sinfo.get_stripe_width() == 0); + ceph_assert(out); + ceph_assert(out->empty()); if (logical_size == 0) return 0; @@ -138,11 +138,11 @@ int ECUtil::encode( bufferlist buf; buf.substr_of(in, i, sinfo.get_stripe_width()); int r = ec_impl->encode(want, buf, &encoded); - assert(r == 0); + ceph_assert(r == 0); for (map::iterator i = encoded.begin(); i != encoded.end(); ++i) { - assert(i->second.length() == sinfo.get_chunk_size()); + ceph_assert(i->second.length() == sinfo.get_chunk_size()); (*out)[i->first].claim_append(i->second); } } @@ -150,8 +150,8 @@ int ECUtil::encode( for (map::iterator i = out->begin(); i != out->end(); ++i) { - assert(i->second.length() % sinfo.get_chunk_size() == 0); - assert( + ceph_assert(i->second.length() % sinfo.get_chunk_size() == 0); + ceph_assert( sinfo.aligned_chunk_offset_to_logical_offset(i->second.length()) == logical_size); } @@ -160,15 +160,15 @@ int ECUtil::encode( void ECUtil::HashInfo::append(uint64_t old_size, map &to_append) { - assert(old_size == total_chunk_size); + ceph_assert(old_size == total_chunk_size); uint64_t size_to_append = to_append.begin()->second.length(); if (has_chunk_hash()) { - assert(to_append.size() == cumulative_shard_hashes.size()); + ceph_assert(to_append.size() == cumulative_shard_hashes.size()); for (map::iterator i = to_append.begin(); i != to_append.end(); ++i) { - assert(size_to_append == i->second.length()); - assert((unsigned)i->first < cumulative_shard_hashes.size()); + ceph_assert(size_to_append == i->second.length()); + ceph_assert((unsigned)i->first < cumulative_shard_hashes.size()); uint32_t new_hash = i->second.crc32c(cumulative_shard_hashes[i->first]); cumulative_shard_hashes[i->first] = new_hash; } diff --git a/src/osd/ECUtil.h b/src/osd/ECUtil.h index 610e0ff451d06..e843d83c4579c 100644 --- a/src/osd/ECUtil.h +++ b/src/osd/ECUtil.h @@ -31,7 +31,7 @@ public: stripe_info_t(uint64_t stripe_size, uint64_t stripe_width) : stripe_width(stripe_width), chunk_size(stripe_width / stripe_size) { - assert(stripe_width % stripe_size == 0); + ceph_assert(stripe_width % stripe_size == 0); } bool logical_offset_is_stripe_aligned(uint64_t logical) const { return (logical % stripe_width) == 0; @@ -57,11 +57,11 @@ public: offset); } uint64_t aligned_logical_offset_to_chunk_offset(uint64_t offset) const { - assert(offset % stripe_width == 0); + ceph_assert(offset % stripe_width == 0); return (offset / stripe_width) * chunk_size; } uint64_t 
aligned_chunk_offset_to_logical_offset(uint64_t offset) const { - assert(offset % chunk_size == 0); + ceph_assert(offset % chunk_size == 0); return (offset / chunk_size) * stripe_width; } std::pair aligned_offset_len_to_chunk( @@ -120,7 +120,7 @@ public: void dump(Formatter *f) const; static void generate_test_instances(std::list& o); uint32_t get_chunk_hash(int shard) const { - assert((unsigned)shard < cumulative_shard_hashes.size()); + ceph_assert((unsigned)shard < cumulative_shard_hashes.size()); return cumulative_shard_hashes[shard]; } uint64_t get_total_chunk_size() const { @@ -140,7 +140,7 @@ public: void set_projected_total_logical_size( const stripe_info_t &sinfo, uint64_t logical_size) { - assert(sinfo.logical_offset_is_stripe_aligned(logical_size)); + ceph_assert(sinfo.logical_offset_is_stripe_aligned(logical_size)); projected_total_chunk_size = sinfo.aligned_logical_offset_to_chunk_offset( logical_size); } diff --git a/src/osd/ExtentCache.cc b/src/osd/ExtentCache.cc index 5368298a450de..a09fc86efa94d 100644 --- a/src/osd/ExtentCache.cc +++ b/src/osd/ExtentCache.cc @@ -16,16 +16,16 @@ void ExtentCache::extent::_link_pin_state(pin_state &pin_state) { - assert(parent_extent_set); - assert(!parent_pin_state); + ceph_assert(parent_extent_set); + ceph_assert(!parent_pin_state); parent_pin_state = &pin_state; pin_state.pin_list.push_back(*this); } void ExtentCache::extent::_unlink_pin_state() { - assert(parent_extent_set); - assert(parent_pin_state); + ceph_assert(parent_extent_set); + ceph_assert(parent_pin_state); auto liter = pin_state::list::s_iterator_to(*this); parent_pin_state->pin_list.erase(liter); parent_pin_state = nullptr; @@ -33,8 +33,8 @@ void ExtentCache::extent::_unlink_pin_state() void ExtentCache::extent::unlink() { - assert(parent_extent_set); - assert(parent_pin_state); + ceph_assert(parent_extent_set); + ceph_assert(parent_pin_state); _unlink_pin_state(); @@ -42,19 +42,19 @@ void ExtentCache::extent::unlink() { auto siter = object_extent_set::set::s_iterator_to(*this); auto &set = object_extent_set::set::container_from_iterator(siter); - assert(&set == &(parent_extent_set->extent_set)); + ceph_assert(&set == &(parent_extent_set->extent_set)); set.erase(siter); } parent_extent_set = nullptr; - assert(!parent_pin_state); + ceph_assert(!parent_pin_state); } void ExtentCache::extent::link( object_extent_set &extent_set, pin_state &pin_state) { - assert(!parent_extent_set); + ceph_assert(!parent_extent_set); parent_extent_set = &extent_set; extent_set.extent_set.insert(*this); @@ -73,7 +73,7 @@ void ExtentCache::remove_and_destroy_if_empty(object_extent_set &eset) if (eset.extent_set.empty()) { auto siter = cache_set::s_iterator_to(eset); auto &set = cache_set::container_from_iterator(siter); - assert(&set == &per_object_caches); + ceph_assert(&set == &per_object_caches); // per_object_caches owns eset per_object_caches.erase(eset); @@ -175,10 +175,10 @@ extent_map ExtentCache::get_remaining_extents_for_rmw( res.second, [&](uint64_t off, uint64_t len, extent *ext, object_extent_set::update_action *action) { - assert(off == cur); + ceph_assert(off == cur); cur = off + len; action->action = object_extent_set::update_action::NONE; - assert(ext && ext->bl && ext->pinned_by_write()); + ceph_assert(ext && ext->bl && ext->pinned_by_write()); bl.substr_of( *(ext->bl), off - ext->offset, @@ -206,7 +206,7 @@ void ExtentCache::present_rmw_update( [&](uint64_t off, uint64_t len, extent *ext, object_extent_set::update_action *action) { action->action = 
object_extent_set::update_action::NONE; - assert(ext && ext->pinned_by_write()); + ceph_assert(ext && ext->pinned_by_write()); action->bl = bufferlist(); action->bl->substr_of( res.get_val(), diff --git a/src/osd/ExtentCache.h b/src/osd/ExtentCache.h index ffde71d632247..7f6e3e2e51a63 100644 --- a/src/osd/ExtentCache.h +++ b/src/osd/ExtentCache.h @@ -142,12 +142,12 @@ private: } bool pinned_by_write() const { - assert(parent_pin_state); + ceph_assert(parent_pin_state); return parent_pin_state->is_write(); } uint64_t pin_tid() const { - assert(parent_pin_state); + ceph_assert(parent_pin_state); return parent_pin_state->tid; } @@ -220,14 +220,14 @@ private: update_action action; f(offset, extlen, nullptr, &action); - assert(!action.bl || action.bl->length() == extlen); + ceph_assert(!action.bl || action.bl->length() == extlen); if (action.action == update_action::UPDATE_PIN) { extent *ext = action.bl ? new extent(offset, *action.bl) : new extent(offset, extlen); ext->link(*this, pin); } else { - assert(!action.bl); + ceph_assert(!action.bl); } } @@ -242,7 +242,7 @@ private: update_action action; f(extoff, extlen, ext, &action); - assert(!action.bl || action.bl->length() == extlen); + ceph_assert(!action.bl || action.bl->length() == extlen); extent *final_extent = nullptr; if (action.action == update_action::NONE) { final_extent = ext; @@ -302,8 +302,8 @@ private: } if (action.bl) { - assert(final_extent); - assert(final_extent->length == action.bl->length()); + ceph_assert(final_extent); + ceph_assert(final_extent->length == action.bl->length()); final_extent->bl = *(action.bl); } @@ -315,14 +315,14 @@ private: update_action action; f(tailoff, taillen, nullptr, &action); - assert(!action.bl || action.bl->length() == taillen); + ceph_assert(!action.bl || action.bl->length() == taillen); if (action.action == update_action::UPDATE_PIN) { extent *ext = action.bl ? 
new extent(tailoff, *action.bl) : new extent(tailoff, taillen); ext->link(*this, pin); } else { - assert(!action.bl); + ceph_assert(!action.bl); } } } @@ -367,13 +367,13 @@ private: using list = boost::intrusive::list; list pin_list; ~pin_state() { - assert(pin_list.empty()); - assert(tid == 0); - assert(pin_type == NONE); + ceph_assert(pin_list.empty()); + ceph_assert(tid == 0); + ceph_assert(pin_type == NONE); } void _open(uint64_t in_tid, pin_type_t in_type) { - assert(pin_type == NONE); - assert(in_tid > 0); + ceph_assert(pin_type == NONE); + ceph_assert(in_tid > 0); tid = in_tid; pin_type = in_type; } @@ -383,7 +383,7 @@ private: for (auto iter = p.pin_list.begin(); iter != p.pin_list.end(); ) { unique_ptr extent(&*iter); // we now own this iter++; // unlink will invalidate - assert(extent->parent_extent_set); + ceph_assert(extent->parent_extent_set); auto &eset = *(extent->parent_extent_set); extent->unlink(); remove_and_destroy_if_empty(eset); diff --git a/src/osd/HitSet.h b/src/osd/HitSet.h index 5f41321c33f2d..c6337c2eeb4c4 100644 --- a/src/osd/HitSet.h +++ b/src/osd/HitSet.h @@ -153,7 +153,7 @@ public: return impl->approx_unique_insert_count(); } void seal() { - assert(!sealed); + ceph_assert(!sealed); sealed = true; impl->seal(); } diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index 10f6ebc41245e..7e9ef685648e0 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -308,8 +308,8 @@ void OSDService::add_pgid(spg_t pgid, PG *pg){ void OSDService::remove_pgid(spg_t pgid, PG *pg) { Mutex::Locker l(pgid_lock); - assert(pgid_tracker.count(pgid)); - assert(pgid_tracker[pgid] > 0); + ceph_assert(pgid_tracker.count(pgid)); + ceph_assert(pgid_tracker[pgid] > 0); pgid_tracker[pgid]--; if (pgid_tracker[pgid] == 0) { pgid_tracker.erase(pgid); @@ -519,12 +519,12 @@ void OSDService::agent_stop() Mutex::Locker l(agent_lock); // By this time all ops should be cancelled - assert(agent_ops == 0); + ceph_assert(agent_ops == 0); // By this time all PGs are shutdown and dequeued if (!agent_queue.empty()) { set& top = agent_queue.rbegin()->second; derr << "agent queue not empty, for example " << (*top.begin())->get_pgid() << dendl; - assert(0 == "agent queue not empty"); + ceph_assert(0 == "agent queue not empty"); } agent_stop_flag = true; @@ -829,7 +829,7 @@ void OSDService::send_message_osd_cluster(int peer, Message *m, epoch_t from_epo { OSDMapRef next_map = get_nextmap_reserved(); // service map is always newer/newest - assert(from_epoch <= next_map->get_epoch()); + ceph_assert(from_epoch <= next_map->get_epoch()); if (next_map->is_down(peer) || next_map->get_info(peer).up_from > from_epoch) { @@ -848,7 +848,7 @@ ConnectionRef OSDService::get_con_osd_cluster(int peer, epoch_t from_epoch) { OSDMapRef next_map = get_nextmap_reserved(); // service map is always newer/newest - assert(from_epoch <= next_map->get_epoch()); + ceph_assert(from_epoch <= next_map->get_epoch()); if (next_map->is_down(peer) || next_map->get_info(peer).up_from > from_epoch) { @@ -865,7 +865,7 @@ pair OSDService::get_con_osd_hb(int peer, epoch_t f { OSDMapRef next_map = get_nextmap_reserved(); // service map is always newer/newest - assert(from_epoch <= next_map->get_epoch()); + ceph_assert(from_epoch <= next_map->get_epoch()); pair ret; if (next_map->is_down(peer) || @@ -1163,7 +1163,7 @@ void OSDService::dec_scrubs_pending() dout(20) << "dec_scrubs_pending " << scrubs_pending << " -> " << (scrubs_pending-1) << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl; --scrubs_pending; - 
assert(scrubs_pending >= 0); + ceph_assert(scrubs_pending >= 0); sched_scrub_lock.Unlock(); } @@ -1176,7 +1176,7 @@ void OSDService::inc_scrubs_active(bool reserved) dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active << " (max " << cct->_conf->osd_max_scrubs << ", pending " << (scrubs_pending+1) << " -> " << scrubs_pending << ")" << dendl; - assert(scrubs_pending >= 0); + ceph_assert(scrubs_pending >= 0); } else { dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active << " (max " << cct->_conf->osd_max_scrubs @@ -1191,7 +1191,7 @@ void OSDService::dec_scrubs_active() dout(20) << "dec_scrubs_active " << scrubs_active << " -> " << (scrubs_active-1) << " (max " << cct->_conf->osd_max_scrubs << ", pending " << scrubs_pending << ")" << dendl; --scrubs_active; - assert(scrubs_active >= 0); + ceph_assert(scrubs_active >= 0); sched_scrub_lock.Unlock(); } @@ -1212,15 +1212,15 @@ void OSDService::set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch { Mutex::Locker l(epoch_lock); if (_boot_epoch) { - assert(*_boot_epoch == 0 || *_boot_epoch >= boot_epoch); + ceph_assert(*_boot_epoch == 0 || *_boot_epoch >= boot_epoch); boot_epoch = *_boot_epoch; } if (_up_epoch) { - assert(*_up_epoch == 0 || *_up_epoch >= up_epoch); + ceph_assert(*_up_epoch == 0 || *_up_epoch >= up_epoch); up_epoch = *_up_epoch; } if (_bind_epoch) { - assert(*_bind_epoch == 0 || *_bind_epoch >= bind_epoch); + ceph_assert(*_bind_epoch == 0 || *_bind_epoch >= bind_epoch); bind_epoch = *_bind_epoch; } } @@ -1483,7 +1483,7 @@ void OSDService::reply_op_error(OpRequestRef op, int err, eversion_t v, version_t uv) { const MOSDOp *m = static_cast(op->get_req()); - assert(m->get_type() == CEPH_MSG_OSD_OP); + ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); int flags; flags = m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK); @@ -1499,9 +1499,9 @@ void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op) } const MOSDOp *m = static_cast(op->get_req()); - assert(m->get_type() == CEPH_MSG_OSD_OP); + ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); - assert(m->get_map_epoch() >= pg->get_history().same_primary_since); + ceph_assert(m->get_map_epoch() >= pg->get_history().same_primary_since); if (pg->is_ec_pg()) { /** @@ -1520,7 +1520,7 @@ void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op) * splitting. The simplest thing is to detect such cases here and drop * them without an error (the client will resend anyway). */ - assert(m->get_map_epoch() <= superblock.newest_map); + ceph_assert(m->get_map_epoch() <= superblock.newest_map); OSDMapRef opmap = try_get_map(m->get_map_epoch()); if (!opmap) { dout(7) << __func__ << ": " << *pg << " no longer have map for " @@ -1638,7 +1638,7 @@ void OSDService::_queue_for_recovery( std::pair p, uint64_t reserved_pushes) { - assert(recovery_lock.is_locked_by_me()); + ceph_assert(recovery_lock.is_locked_by_me()); enqueue_back( OpQueueItem( unique_ptr( @@ -1977,7 +1977,7 @@ void cls_initialize(ClassHandler *ch); void OSD::handle_signal(int signum) { - assert(signum == SIGINT || signum == SIGTERM); + ceph_assert(signum == SIGINT || signum == SIGTERM); derr << "*** Got signal " << sig_str(signum) << " ***" << dendl; shutdown(); } @@ -2253,7 +2253,7 @@ will start to track new ops received afterwards."; } f->close_section(); } else { - assert(0 == "broken asok registration"); + ceph_assert(0 == "broken asok registration"); } f->flush(ss); delete f; @@ -2386,7 +2386,7 @@ int OSD::init() << " (looks like " << (store_is_rotational ? 
"hdd" : "ssd") << ")" << dendl; dout(2) << "journal " << journal_path << dendl; - assert(store); // call pre_init() first! + ceph_assert(store); // call pre_init() first! store->set_cache_shards(get_num_op_shards()); @@ -2568,7 +2568,7 @@ int OSD::init() { struct store_statfs_t stbuf; int r = store->statfs(&stbuf); - assert(r == 0); + ceph_assert(r == 0); service.set_statfs(stbuf); } @@ -2693,69 +2693,69 @@ void OSD::final_init() asok_hook = new OSDSocketHook(this); int r = admin_socket->register_command("status", "status", asok_hook, "high-level status of OSD"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("flush_journal", "flush_journal", asok_hook, "flush the journal to permanent store"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_ops_in_flight", "dump_ops_in_flight " \ "name=filterstr,type=CephString,n=N,req=false", asok_hook, "show the ops currently in flight"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("ops", "ops " \ "name=filterstr,type=CephString,n=N,req=false", asok_hook, "show the ops currently in flight"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_blocked_ops", "dump_blocked_ops " \ "name=filterstr,type=CephString,n=N,req=false", asok_hook, "show the blocked ops currently in flight"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_historic_ops", "dump_historic_ops " \ "name=filterstr,type=CephString,n=N,req=false", asok_hook, "show recent ops"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_historic_slow_ops", "dump_historic_slow_ops " \ "name=filterstr,type=CephString,n=N,req=false", asok_hook, "show slowest recent ops"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_historic_ops_by_duration", "dump_historic_ops_by_duration " \ "name=filterstr,type=CephString,n=N,req=false", asok_hook, "show slowest recent ops, sorted by duration"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_op_pq_state", "dump_op_pq_state", asok_hook, "dump op priority queue state"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_blacklist", "dump_blacklist", asok_hook, "dump blacklisted clients and times"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_watchers", "dump_watchers", asok_hook, "show clients which have active watches," " and on which objects"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_reservations", "dump_reservations", asok_hook, "show recovery reservations"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("get_latest_osdmap", "get_latest_osdmap", asok_hook, "force osd to update the latest map from " "the mon"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "heap", "heap " \ @@ -2763,7 +2763,7 @@ void OSD::final_init() asok_hook, "show heap usage info (available only if " "compiled with tcmalloc)"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("set_heap_property", "set_heap_property " \ @@ -2771,60 +2771,60 @@ void OSD::final_init() "name=value,type=CephInt", asok_hook, "update malloc extension heap property"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("get_heap_property", "get_heap_property " \ "name=property,type=CephString", asok_hook, "get malloc extension heap property"); 
- assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_objectstore_kv_stats", "dump_objectstore_kv_stats", asok_hook, "print statistics of kvdb which used by bluestore"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_scrubs", "dump_scrubs", asok_hook, "print scheduled scrubs"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("calc_objectstore_db_histogram", "calc_objectstore_db_histogram", asok_hook, "Generate key value histogram of kvdb(rocksdb) which used by bluestore"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("flush_store_cache", "flush_store_cache", asok_hook, "Flush bluestore internal cache"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_pgstate_history", "dump_pgstate_history", asok_hook, "show recent state history"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("compact", "compact", asok_hook, "Commpact object store's omap." " WARNING: Compaction probably slows your requests"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("get_mapped_pools", "get_mapped_pools", asok_hook, "dump pools whose PG(s) are mapped to this OSD."); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("smart", "smart name=devid,type=CephString,req=False", asok_hook, "probe OSD devices for SMART data."); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("list_devices", "list_devices", asok_hook, @@ -2842,7 +2842,7 @@ void OSD::final_init() "name=val,type=CephString", test_ops_hook, "set omap key"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "rmomapkey", "rmomapkey " \ @@ -2851,7 +2851,7 @@ void OSD::final_init() "name=key,type=CephString", test_ops_hook, "remove omap key"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "setomapheader", "setomapheader " \ @@ -2860,7 +2860,7 @@ void OSD::final_init() "name=header,type=CephString", test_ops_hook, "set omap header"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "getomap", @@ -2869,7 +2869,7 @@ void OSD::final_init() "name=objname,type=CephObjectname", test_ops_hook, "output entire object map"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "truncobj", @@ -2879,7 +2879,7 @@ void OSD::final_init() "name=len,type=CephInt", test_ops_hook, "truncate object to length"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "injectdataerr", @@ -2889,7 +2889,7 @@ void OSD::final_init() "name=shardid,type=CephInt,req=false,range=0|255", test_ops_hook, "inject data error to an object"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "injectmdataerr", @@ -2899,21 +2899,21 @@ void OSD::final_init() "name=shardid,type=CephInt,req=false,range=0|255", test_ops_hook, "inject metadata error to an object"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "set_recovery_delay", "set_recovery_delay " \ "name=utime,type=CephInt,req=false", test_ops_hook, "Delay osd recovery by specified seconds"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "trigger_scrub", "trigger_scrub " \ "name=pgid,type=CephString ", test_ops_hook, "Trigger a scheduled scrub "); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command( "injectfull", "injectfull " \ @@ -2921,7 +2921,7 @@ 
void OSD::final_init() "name=count,type=CephInt,req=false ", test_ops_hook, "Inject a full disk (optional count times)"); - assert(r == 0); + ceph_assert(r == 0); } void OSD::create_logger() @@ -3575,7 +3575,7 @@ void OSD::clear_temp_objects() while (1) { vector objects; auto ch = store->open_collection(*p); - assert(ch); + ceph_assert(ch); store->collection_list(ch, next, ghobject_t::get_max(), store->get_ideal_list_max(), &objects, &next); @@ -3646,12 +3646,12 @@ void OSD::recursive_remove_collection(CephContext* cct, t.remove(tmp, p); } int r = store->queue_transaction(ch, std::move(t)); - assert(r == 0); + ceph_assert(r == 0); t = ObjectStore::Transaction(); } t.remove_collection(tmp); int r = store->queue_transaction(ch, std::move(t)); - assert(r == 0); + ceph_assert(r == 0); C_SaferCond waiter; if (!ch->flush_commit(&waiter)) { @@ -3746,7 +3746,7 @@ void OSD::register_pg(PGRef pg) auto sdata = shards[shard_index]; Mutex::Locker l(sdata->shard_lock); auto r = sdata->pg_slots.emplace(pgid, make_unique()); - assert(r.second); + ceph_assert(r.second); auto *slot = r.first->second.get(); dout(20) << __func__ << " " << pgid << " " << pg << dendl; sdata->_attach_pg(slot, pg.get()); @@ -3755,7 +3755,7 @@ void OSD::register_pg(PGRef pg) void OSD::unregister_pg(PG *pg) { auto sdata = pg->osd_shard; - assert(sdata); + ceph_assert(sdata); Mutex::Locker l(sdata->shard_lock); auto p = sdata->pg_slots.find(pg->pg_id); if (p != sdata->pg_slots.end() && @@ -3800,7 +3800,7 @@ PGRef OSD::lookup_lock_pg(spg_t pgid) void OSD::load_pgs() { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); dout(0) << "load_pgs" << dendl; vector ls; @@ -3850,7 +3850,7 @@ void OSD::load_pgs() derr << __func__ << ": have pgid " << pgid << " at epoch " << map_epoch << ", but missing map. Crashing." << dendl; - assert(0 == "Missing map in load_pgs"); + ceph_assert(0 == "Missing map in load_pgs"); } } pg = _make_pg(pgosdmap, pgid); @@ -3885,7 +3885,7 @@ void OSD::load_pgs() for (auto shard : shards) { shard->prime_splits(osdmap, &new_children); } - assert(new_children.empty()); + ceph_assert(new_children.empty()); } pg->reg_next_scrub(); @@ -3940,7 +3940,7 @@ PGRef OSD::handle_pg_create_info(const OSDMapRef& osdmap, pg->lock(true); // we are holding the shard lock - assert(!pg->is_deleted()); + ceph_assert(!pg->is_deleted()); pg->init( role, @@ -4178,7 +4178,7 @@ bool OSD::project_pg_history(spg_t pgid, pg_history_t& h, epoch_t from, dout(15) << __func__ << ": found map gap, returning false" << dendl; return false; } - assert(oldmap->have_pg_pool(pgid.pool())); + ceph_assert(oldmap->have_pg_pool(pgid.pool())); int upprimary, actingprimary; vector up, acting; @@ -4287,7 +4287,7 @@ void OSD::_add_heartbeat_peer(int p) void OSD::_remove_heartbeat_peer(int n) { map::iterator q = heartbeat_peers.find(n); - assert(q != heartbeat_peers.end()); + ceph_assert(q != heartbeat_peers.end()); dout(20) << " removing heartbeat peer osd." << n << " " << q->second.con_back->get_peer_addr() << " " << (q->second.con_front ? 
q->second.con_front->get_peer_addr() : entity_addr_t()) @@ -4309,7 +4309,7 @@ void OSD::need_heartbeat_peer_update() void OSD::maybe_update_heartbeat_peers() { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); if (is_waiting_for_healthy() || is_active()) { utime_t now = ceph_clock_now(); @@ -4416,7 +4416,7 @@ void OSD::maybe_update_heartbeat_peers() void OSD::reset_heartbeat_peers() { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); dout(10) << "reset_heartbeat_peers" << dendl; Mutex::Locker l(heartbeat_lock); while (!heartbeat_peers.empty()) { @@ -4531,12 +4531,12 @@ void OSD::handle_osd_ping(MOSDPing *m) << " last_rx_front " << i->second.last_rx_front << dendl; i->second.last_rx_back = now; - assert(unacknowledged > 0); + ceph_assert(unacknowledged > 0); --unacknowledged; // if there is no front con, set both stamps. if (i->second.con_front == NULL) { i->second.last_rx_front = now; - assert(unacknowledged > 0); + ceph_assert(unacknowledged > 0); --unacknowledged; } } else if (m->get_connection() == i->second.con_front) { @@ -4547,7 +4547,7 @@ void OSD::handle_osd_ping(MOSDPing *m) << " last_rx_front " << i->second.last_rx_front << " -> " << now << dendl; i->second.last_rx_front = now; - assert(unacknowledged > 0); + ceph_assert(unacknowledged > 0); --unacknowledged; } @@ -4633,7 +4633,7 @@ void OSD::heartbeat_entry() void OSD::heartbeat_check() { - assert(heartbeat_lock.is_locked()); + ceph_assert(heartbeat_lock.is_locked()); utime_t now = ceph_clock_now(); // check for incoming heartbeats (move me elsewhere?) @@ -4711,7 +4711,7 @@ void OSD::heartbeat() auto new_stat = service.set_osd_stat(hb_peers, get_num_pgs()); dout(5) << __func__ << " " << new_stat << dendl; - assert(new_stat.kb); + ceph_assert(new_stat.kb); float ratio = ((float)new_stat.kb_used) / ((float)new_stat.kb); service.check_full_status(ratio); @@ -4809,7 +4809,7 @@ bool OSD::heartbeat_reset(Connection *con) void OSD::tick() { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); dout(10) << "tick" << dendl; if (is_active() || is_waiting_for_healthy()) { @@ -4827,7 +4827,7 @@ void OSD::tick() void OSD::tick_without_osd_lock() { - assert(tick_timer_lock.is_locked()); + ceph_assert(tick_timer_lock.is_locked()); dout(10) << "tick_without_osd_lock" << dendl; logger->set(l_osd_buf, buffer::get_total_alloc()); @@ -4840,7 +4840,7 @@ void OSD::tick_without_osd_lock() // refresh osd stats struct store_statfs_t stbuf; int r = store->statfs(&stbuf); - assert(r == 0); + ceph_assert(r == 0); service.set_statfs(stbuf); // osd_lock is not being held, which means the OSD state @@ -5180,7 +5180,7 @@ void OSD::ms_handle_fast_connect(Connection *con) dout(10) << " new session (outgoing) " << s << " con=" << s->con << " addr=" << s->con->get_peer_addr() << dendl; // we don't connect to clients - assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD); + ceph_assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD); s->entity_name.set_type(CEPH_ENTITY_TYPE_OSD); } } @@ -5198,7 +5198,7 @@ void OSD::ms_handle_fast_accept(Connection *con) dout(10) << "new session (incoming)" << s << " con=" << con << " addr=" << con->get_peer_addr() << " must have raced with connect" << dendl; - assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD); + ceph_assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD); s->entity_name.set_type(CEPH_ENTITY_TYPE_OSD); } } @@ -5294,7 +5294,7 @@ void OSD::_got_mon_epochs(epoch_t oldest, epoch_t newest) void OSD::_preboot(epoch_t oldest, epoch_t newest) { - assert(is_preboot()); + 
ceph_assert(is_preboot()); dout(10) << __func__ << " _preboot mon has osdmaps " << oldest << ".." << newest << dendl; @@ -5519,7 +5519,7 @@ void OSD::queue_want_up_thru(epoch_t want) void OSD::send_alive() { - assert(mon_report_lock.is_locked()); + ceph_assert(mon_report_lock.is_locked()); if (!osdmap->exists(whoami)) return; epoch_t up_thru = osdmap->get_up_thru(whoami); @@ -5535,10 +5535,10 @@ void OSD::request_full_map(epoch_t first, epoch_t last) dout(10) << __func__ << " " << first << ".." << last << ", previously requested " << requested_full_first << ".." << requested_full_last << dendl; - assert(osd_lock.is_locked()); - assert(first > 0 && last > 0); - assert(first <= last); - assert(first >= requested_full_first); // we shouldn't ever ask for older maps + ceph_assert(osd_lock.is_locked()); + ceph_assert(first > 0 && last > 0); + ceph_assert(first <= last); + ceph_assert(first >= requested_full_first); // we shouldn't ever ask for older maps if (requested_full_first == 0) { // first request requested_full_first = first; @@ -5558,8 +5558,8 @@ void OSD::request_full_map(epoch_t first, epoch_t last) void OSD::got_full_map(epoch_t e) { - assert(requested_full_first <= requested_full_last); - assert(osd_lock.is_locked()); + ceph_assert(requested_full_first <= requested_full_last); + ceph_assert(osd_lock.is_locked()); if (requested_full_first == 0) { dout(20) << __func__ << " " << e << ", nothing requested" << dendl; return; @@ -5599,8 +5599,8 @@ void OSD::requeue_failures() void OSD::send_failures() { - assert(map_lock.is_locked()); - assert(mon_report_lock.is_locked()); + ceph_assert(map_lock.is_locked()); + ceph_assert(mon_report_lock.is_locked()); Mutex::Locker l(heartbeat_lock); utime_t now = ceph_clock_now(); while (!failure_queue.empty()) { @@ -6423,12 +6423,12 @@ void OSD::maybe_share_map( void OSD::dispatch_session_waiting(SessionRef session, OSDMapRef osdmap) { - assert(session->session_dispatch_lock.is_locked()); + ceph_assert(session->session_dispatch_lock.is_locked()); auto i = session->waiting_on_map.begin(); while (i != session->waiting_on_map.end()) { OpRequestRef op = &(*i); - assert(ms_can_fast_dispatch(op->get_req())); + ceph_assert(ms_can_fast_dispatch(op->get_req())); const MOSDFastDispatchOp *m = static_cast( op->get_req()); if (m->get_min_epoch() > osdmap->get_epoch()) { @@ -6524,7 +6524,7 @@ void OSD::ms_fast_dispatch(Message *m) // note sender epoch, min req's epoch op->sent_epoch = static_cast(m)->get_map_epoch(); op->min_epoch = static_cast(m)->get_min_epoch(); - assert(op->min_epoch <= op->sent_epoch); // sanity check! + ceph_assert(op->min_epoch <= op->sent_epoch); // sanity check! 
service.maybe_inject_dispatch_delay(); @@ -6675,7 +6675,7 @@ bool OSD::ms_verify_authorizer( void OSD::do_waiters() { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); dout(10) << "do_waiters -- start" << dendl; while (!finished.empty()) { @@ -6698,7 +6698,7 @@ void OSD::dispatch_op(OpRequestRef op) void OSD::_dispatch(Message *m) { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); dout(20) << "_dispatch " << m << " " << *m << dendl; switch (m->get_type()) { @@ -7092,7 +7092,7 @@ void OSD::wait_for_new_map(OpRequestRef op) void OSD::note_down_osd(int peer) { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); cluster_messenger->mark_down_addrs(osdmap->get_cluster_addrs(peer)); heartbeat_lock.Lock(); @@ -7159,7 +7159,7 @@ void OSD::trim_maps(epoch_t oldest, int nreceived, bool skip_maps) service.publish_superblock(superblock); write_superblock(t); int tr = store->queue_transaction(service.meta_ch, std::move(t), nullptr); - assert(tr == 0); + ceph_assert(tr == 0); num = 0; if (!skip_maps) { // skip_maps leaves us with a range of old maps if we fail to remove all @@ -7175,15 +7175,15 @@ void OSD::trim_maps(epoch_t oldest, int nreceived, bool skip_maps) service.publish_superblock(superblock); write_superblock(t); int tr = store->queue_transaction(service.meta_ch, std::move(t), nullptr); - assert(tr == 0); + ceph_assert(tr == 0); } // we should not remove the cached maps - assert(min <= service.map_cache.cached_key_lower_bound()); + ceph_assert(min <= service.map_cache.cached_key_lower_bound()); } void OSD::handle_osd_map(MOSDMap *m) { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); // Keep a ref in the list until we get the newly received map written // onto disk. This is important because as long as the refs are alive, // the OSDMaps will be pinned in the cache and we won't try to read it @@ -7232,7 +7232,7 @@ void OSD::handle_osd_map(MOSDMap *m) logger->inc(l_osd_mape_dup, superblock.newest_map - first + 1); if (service.max_oldest_map < m->oldest_map) { service.max_oldest_map = m->oldest_map; - assert(service.max_oldest_map >= superblock.oldest_map); + ceph_assert(service.max_oldest_map >= superblock.oldest_map); } // make sure there is something new, here, before we bother flushing @@ -7273,7 +7273,7 @@ void OSD::handle_osd_map(MOSDMap *m) // up with. epoch_t max_lag = cct->_conf->osd_map_cache_size * m_osd_pg_epoch_max_lag_factor; - assert(max_lag > 0); + ceph_assert(max_lag > 0); if (osdmap->get_epoch() > max_lag) { epoch_t need = osdmap->get_epoch() - max_lag; for (auto shard : shards) { @@ -7298,7 +7298,7 @@ void OSD::handle_osd_map(MOSDMap *m) for (epoch_t e = start; e <= last; e++) { if (txn_size >= t.get_num_bytes()) { derr << __func__ << " transaction size overflowed" << dendl; - assert(txn_size < t.get_num_bytes()); + ceph_assert(txn_size < t.get_num_bytes()); } txn_size = t.get_num_bytes(); map::iterator p; @@ -7331,7 +7331,7 @@ void OSD::handle_osd_map(MOSDMap *m) bool got = get_map_bl(e - 1, obl); if (!got) { auto p = added_maps_bl.find(e - 1); - assert(p != added_maps_bl.end()); + ceph_assert(p != added_maps_bl.end()); obl = p->second; } o->decode(obl); @@ -7342,7 +7342,7 @@ void OSD::handle_osd_map(MOSDMap *m) inc.decode(p); if (o->apply_incremental(inc) < 0) { derr << "ERROR: bad fsid? 
i have " << osdmap->get_fsid() << " and inc has " << inc.fsid << dendl; - assert(0 == "bad fsid"); + ceph_assert(0 == "bad fsid"); } bufferlist fbl; @@ -7377,7 +7377,7 @@ void OSD::handle_osd_map(MOSDMap *m) continue; } - assert(0 == "MOSDMap lied about what maps it had?"); + ceph_assert(0 == "MOSDMap lied about what maps it had?"); } // even if this map isn't from a mon, we may have satisfied our subscription @@ -7416,7 +7416,7 @@ void OSD::handle_osd_map(MOSDMap *m) continue; } } - assert(lastmap->get_epoch() + 1 == i.second->get_epoch()); + ceph_assert(lastmap->get_epoch() + 1 == i.second->get_epoch()); for (auto& j : lastmap->get_pools()) { if (!i.second->have_pg_pool(j.first)) { dout(10) << __func__ << " recording final pg_pool_t for pool " @@ -7476,7 +7476,7 @@ void OSD::_committed_osd_maps(epoch_t first, epoch_t last, MOSDMap *m) << ")" << dendl; OSDMapRef newmap = get_map(cur); - assert(newmap); // we just cached it above! + ceph_assert(newmap); // we just cached it above! // start blacklisting messages sent to peers that go down. service.pre_publish_map(newmap); @@ -7776,7 +7776,7 @@ void OSD::check_osdmap_features() ObjectStore::Transaction t; write_superblock(t); int err = store->queue_transaction(service.meta_ch, std::move(t), NULL); - assert(err == 0); + ceph_assert(err == 0); } } } @@ -7823,9 +7823,9 @@ void OSD::advance_pg( ThreadPool::TPHandle &handle, PG::RecoveryCtx *rctx) { - assert(pg->is_locked()); + ceph_assert(pg->is_locked()); OSDMapRef lastmap = pg->get_osdmap(); - assert(lastmap->get_epoch() < osd_epoch); + ceph_assert(lastmap->get_epoch() < osd_epoch); set new_pgs; // any split children for (epoch_t next_epoch = pg->get_osdmap_epoch() + 1; next_epoch <= osd_epoch; @@ -7871,7 +7871,7 @@ void OSD::advance_pg( void OSD::consume_map() { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); dout(7) << "consume_map version " << osdmap->get_epoch() << dendl; /** make sure the cluster is speaking in SORTBITWISE, because we don't @@ -7896,7 +7896,7 @@ void OSD::consume_map() for (auto& shard : shards) { shard->prime_splits(osdmap, &newly_split); } - assert(newly_split.empty()); + ceph_assert(newly_split.empty()); } unsigned pushes_to_free = 0; @@ -7961,7 +7961,7 @@ void OSD::consume_map() void OSD::activate_map() { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); dout(7) << "activate_map version " << osdmap->get_epoch() << dendl; @@ -8081,7 +8081,7 @@ bool OSD::require_same_or_newer_map(OpRequestRef& op, epoch_t epoch, dout(15) << "require_same_or_newer_map " << epoch << " (i am " << osdmap->get_epoch() << ") " << m << dendl; - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); // do they have a newer map? 
if (epoch > osdmap->get_epoch()) { @@ -8128,7 +8128,7 @@ void OSD::split_pgs( for (set::const_iterator i = childpgids.begin(); i != childpgids.end(); ++i, ++stat_iter) { - assert(stat_iter != updated_stats.end()); + ceph_assert(stat_iter != updated_stats.end()); dout(10) << __func__ << " splitting " << *parent << " into " << *i << dendl; PG* child = _make_pg(nextmap, *i); child->lock(true); @@ -8153,7 +8153,7 @@ void OSD::split_pgs( child->finish_split_stats(*stat_iter, rctx->transaction); child->unlock(); } - assert(stat_iter != updated_stats.end()); + ceph_assert(stat_iter != updated_stats.end()); parent->finish_split_stats(*stat_iter, rctx->transaction); } @@ -8163,7 +8163,7 @@ void OSD::split_pgs( void OSD::handle_pg_create(OpRequestRef op) { const MOSDPGCreate *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_CREATE); + ceph_assert(m->get_type() == MSG_OSD_PG_CREATE); dout(10) << "handle_pg_create " << *m << dendl; @@ -8180,7 +8180,7 @@ void OSD::handle_pg_create(OpRequestRef op) for (map::const_iterator p = m->mkpg.begin(); p != m->mkpg.end(); ++p, ++ci) { - assert(ci != m->ctimes.end() && ci->first == p->first); + ceph_assert(ci != m->ctimes.end() && ci->first == p->first); epoch_t created = p->second.created; if (p->second.split_bits) // Skip split pgs continue; @@ -8208,7 +8208,7 @@ void OSD::handle_pg_create(OpRequestRef op) spg_t pgid; bool mapped = osdmap->get_primary_shard(on, &pgid); - assert(mapped); + ceph_assert(mapped); PastIntervals pi; pg_history_t history; @@ -8274,7 +8274,7 @@ void OSD::dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg, int tr = store->queue_transaction( pg->ch, std::move(*ctx.transaction), TrackedOpRef(), handle); - assert(tr == 0); + ceph_assert(tr == 0); delete (ctx.transaction); ctx.transaction = new ObjectStore::Transaction; } @@ -8297,7 +8297,7 @@ void OSD::dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap, pg->ch, std::move(*ctx.transaction), TrackedOpRef(), handle); - assert(tr == 0); + ceph_assert(tr == 0); } delete ctx.notify_list; delete ctx.query_map; @@ -8669,7 +8669,7 @@ void OSD::handle_pg_query_nopg(const MQuery& q) // RECOVERY void OSDService::_maybe_queue_recovery() { - assert(recovery_lock.is_locked_by_me()); + ceph_assert(recovery_lock.is_locked_by_me()); uint64_t available_pushes; while (!awaiting_throttle.empty() && _recover_now(&available_pushes)) { @@ -8784,7 +8784,7 @@ void OSD::do_recovery( } out: - assert(started <= reserved_pushes); + ceph_assert(started <= reserved_pushes); service.release_reserved_pushes(reserved_pushes); } @@ -8799,7 +8799,7 @@ void OSDService::start_recovery_op(PG *pg, const hobject_t& soid) #ifdef DEBUG_RECOVERY_OIDS dout(20) << " active was " << recovery_oids[pg->pg_id] << dendl; - assert(recovery_oids[pg->pg_id].count(soid) == 0); + ceph_assert(recovery_oids[pg->pg_id].count(soid) == 0); recovery_oids[pg->pg_id].insert(soid); #endif } @@ -8813,12 +8813,12 @@ void OSDService::finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue) << dendl; // adjust count - assert(recovery_ops_active > 0); + ceph_assert(recovery_ops_active > 0); recovery_ops_active--; #ifdef DEBUG_RECOVERY_OIDS dout(20) << " active oids was " << recovery_oids[pg->pg_id] << dendl; - assert(recovery_oids[pg->pg_id].count(soid)); + ceph_assert(recovery_oids[pg->pg_id].count(soid)); recovery_oids[pg->pg_id].erase(soid); #endif @@ -8836,7 +8836,7 @@ void OSDService::release_reserved_pushes(uint64_t pushes) dout(10) << __func__ << "(" << pushes << "), recovery_ops_reserved " << 
recovery_ops_reserved << " -> " << (recovery_ops_reserved-pushes) << dendl; - assert(recovery_ops_reserved >= pushes); + ceph_assert(recovery_ops_reserved >= pushes); recovery_ops_reserved -= pushes; _maybe_queue_recovery(); } @@ -9617,7 +9617,7 @@ void OSDShard::_prime_splits(set *pgids) r.first->second->waiting_for_split = true; } else { auto q = r.first; - assert(q != pg_slots.end()); + ceph_assert(q != pg_slots.end()); if (q->second->waiting_for_split) { dout(10) << "slot " << *p << " already primed" << dendl; } else { @@ -9638,10 +9638,10 @@ void OSDShard::register_and_wake_split_child(PG *pg) Mutex::Locker l(shard_lock); dout(10) << pg->pg_id << " " << pg << dendl; auto p = pg_slots.find(pg->pg_id); - assert(p != pg_slots.end()); + ceph_assert(p != pg_slots.end()); auto *slot = p->second.get(); - assert(!slot->pg); - assert(slot->waiting_for_split); + ceph_assert(!slot->pg); + ceph_assert(slot->waiting_for_split); _attach_pg(slot, pg); _wake_pg_slot(pg->pg_id, slot); } @@ -9701,7 +9701,7 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb) { uint32_t shard_index = thread_index % osd->num_shards; auto& sdata = osd->shards[shard_index]; - assert(sdata); + ceph_assert(sdata); // peek at spg_t sdata->shard_lock.Lock(); if (sdata->pqueue->empty()) { @@ -9908,7 +9908,7 @@ void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb) for (auto shard : osd->shards) { shard->prime_splits(osdmap, &new_children); } - assert(new_children.empty()); + ceph_assert(new_children.empty()); } // osd_opwq_process marks the point at which an operation has been dequeued @@ -9976,7 +9976,7 @@ void OSD::ShardedOpWQ::_enqueue_front(OpQueueItem&& item) { auto shard_index = item.get_ordering_token().hash_to_shard(osd->shards.size()); auto& sdata = osd->shards[shard_index]; - assert(sdata); + ceph_assert(sdata); sdata->shard_lock.Lock(); auto p = sdata->pg_slots.find(item.get_ordering_token()); if (p != sdata->pg_slots.end() && diff --git a/src/osd/OSD.h b/src/osd/OSD.h index 21bb7d236bab5..91a64264a1a1c 100644 --- a/src/osd/OSD.h +++ b/src/osd/OSD.h @@ -371,8 +371,8 @@ public: Mutex::Locker l(pre_publish_lock); map::iterator i = map_reservations.find(osdmap->get_epoch()); - assert(i != map_reservations.end()); - assert(i->second > 0); + ceph_assert(i != map_reservations.end()); + ceph_assert(i->second > 0); if (--(i->second) == 0) { map_reservations.erase(i); } @@ -381,7 +381,7 @@ public: /// blocks until there are no reserved maps prior to next_osdmap void await_reserved_maps() { Mutex::Locker l(pre_publish_lock); - assert(next_osdmap); + ceph_assert(next_osdmap); while (true) { map::const_iterator i = map_reservations.cbegin(); if (i == map_reservations.cend() || i->first >= next_osdmap->get_epoch()) { @@ -472,7 +472,7 @@ public: void unreg_pg_scrub(spg_t pgid, utime_t t) { Mutex::Locker l(sched_scrub_lock); size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t)); - assert(removed); + ceph_assert(removed); } bool first_scrub_stamp(ScrubJob *out) { Mutex::Locker l(sched_scrub_lock); @@ -498,7 +498,7 @@ public: } void dumps_scrub(Formatter *f) { - assert(f != nullptr); + ceph_assert(f != nullptr); Mutex::Locker l(sched_scrub_lock); f->open_array_section("scrubs"); @@ -564,7 +564,7 @@ public: void _dequeue(PG *pg, uint64_t old_priority) { set& oq = agent_queue[old_priority]; set::iterator p = oq.find(pg); - assert(p != oq.end()); + ceph_assert(p != oq.end()); if (p == agent_queue_pos) ++agent_queue_pos; oq.erase(p); @@ -584,7 +584,7 @@ public: /// adjust 
priority for an enagled pg void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) { Mutex::Locker l(agent_lock); - assert(new_priority != old_priority); + ceph_assert(new_priority != old_priority); _enqueue(pg, new_priority); _dequeue(pg, old_priority); } @@ -604,7 +604,7 @@ public: /// note finish or cancellation of an async (evict) op void agent_finish_evict_op() { Mutex::Locker l(agent_lock); - assert(agent_ops > 0); + ceph_assert(agent_ops > 0); --agent_ops; agent_cond.Signal(); } @@ -613,16 +613,16 @@ public: void agent_start_op(const hobject_t& oid) { Mutex::Locker l(agent_lock); ++agent_ops; - assert(agent_oids.count(oid) == 0); + ceph_assert(agent_oids.count(oid) == 0); agent_oids.insert(oid); } /// note finish or cancellation of an async (flush) op void agent_finish_op(const hobject_t& oid) { Mutex::Locker l(agent_lock); - assert(agent_ops > 0); + ceph_assert(agent_ops > 0); --agent_ops; - assert(agent_oids.count(oid) == 1); + ceph_assert(agent_oids.count(oid) == 1); agent_oids.erase(oid); agent_cond.Signal(); } @@ -817,7 +817,7 @@ public: OSDMapRef try_get_map(epoch_t e); OSDMapRef get_map(epoch_t e) { OSDMapRef ret(try_get_map(e)); - assert(ret); + ceph_assert(ret); return ret; } OSDMapRef add_map(OSDMap *o) { @@ -1629,7 +1629,7 @@ private: list finished; void take_waiters(list& ls) { - assert(osd_lock.is_locked()); + ceph_assert(osd_lock.is_locked()); finished.splice(finished.end(), ls); } void do_waiters(); @@ -1730,7 +1730,7 @@ protected: char queue_name[32] = {0}; snprintf(queue_name, sizeof(queue_name), "%s%" PRIu32, "OSD:ShardedOpWQ:", i); - assert(NULL != sdata); + ceph_assert(NULL != sdata); sdata->shard_lock.Lock(); f->open_object_section(queue_name); @@ -1743,7 +1743,7 @@ protected: bool is_shard_empty(uint32_t thread_index) override { uint32_t shard_index = thread_index % osd->num_shards; auto &&sdata = osd->shards[shard_index]; - assert(sdata); + ceph_assert(sdata); Mutex::Locker l(sdata->shard_lock); return sdata->pqueue->empty(); } diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc index 0274623c80695..89fcf4cc1792d 100644 --- a/src/osd/OSDMap.cc +++ b/src/osd/OSDMap.cc @@ -203,7 +203,7 @@ int OSDMap::Incremental::identify_osd(uuid_d u) const int OSDMap::Incremental::propagate_snaps_to_tiers(CephContext *cct, const OSDMap& osdmap) { - assert(epoch == osdmap.get_epoch() + 1); + ceph_assert(epoch == osdmap.get_epoch() + 1); for (auto &new_pool : new_pools) { if (!new_pool.second.tiers.empty()) { @@ -291,7 +291,7 @@ bool OSDMap::containing_subtree_is_down(CephContext *cct, int id, int subtree_ty } else { type = crush->get_bucket_type(current); } - assert(type >= 0); + ceph_assert(type >= 0); if (!subtree_is_down(current, down_cache)) { ldout(cct, 30) << "containing_subtree_is_down(" << id << ") = false" << dendl; @@ -502,7 +502,7 @@ void OSDMap::Incremental::encode(bufferlist& bl, uint64_t features) const // OSDMaps. others should be passing around the canonical encoded // buffers from on high. select out those callers by passing in an // "impossible" feature bit. 
- assert(features & CEPH_FEATURE_RESERVED); + ceph_assert(features & CEPH_FEATURE_RESERVED); features &= ~CEPH_FEATURE_RESERVED; size_t start_offset = bl.length(); @@ -1288,9 +1288,9 @@ void OSDMap::get_full_pools(CephContext *cct, set *backfillfull, set *nearfull) const { - assert(full); - assert(backfillfull); - assert(nearfull); + ceph_assert(full); + ceph_assert(backfillfull); + ceph_assert(nearfull); full->clear(); backfillfull->clear(); nearfull->clear(); @@ -1850,7 +1850,7 @@ int OSDMap::apply_incremental(const Incremental &inc) else if (inc.fsid != fsid) return -EINVAL; - assert(inc.epoch == epoch+1); + ceph_assert(inc.epoch == epoch+1); epoch++; modified = inc.modified; @@ -1902,7 +1902,7 @@ int OSDMap::apply_incremental(const Incremental &inc) p != new_purged_snaps.end(); ++p) { auto q = removed_snaps_queue.find(p->first); - assert(q != removed_snaps_queue.end()); + ceph_assert(q != removed_snaps_queue.end()); q->second.subtract(p->second); if (q->second.empty()) { removed_snaps_queue.erase(q); @@ -2626,7 +2626,7 @@ void OSDMap::encode(bufferlist& bl, uint64_t features) const // OSDMaps. others should be passing around the canonical encoded // buffers from on high. select out those callers by passing in an // "impossible" feature bit. - assert(features & CEPH_FEATURE_RESERVED); + ceph_assert(features & CEPH_FEATURE_RESERVED); features &= ~CEPH_FEATURE_RESERVED; size_t start_offset = bl.length(); @@ -2707,8 +2707,8 @@ void OSDMap::encode(bufferlist& bl, uint64_t features) const encode(pg_upmap, bl); encode(pg_upmap_items, bl); } else { - assert(pg_upmap.empty()); - assert(pg_upmap_items.empty()); + ceph_assert(pg_upmap.empty()); + ceph_assert(pg_upmap_items.empty()); } if (v >= 6) { encode(crush_version, bl); @@ -3640,7 +3640,7 @@ void OSDMap::print_tree(Formatter *f, ostream *out, unsigned filter, string buck if (f) { OSDTreeFormattingDumper(crush.get(), this, filter).dump(f, bucket); } else { - assert(out); + ceph_assert(out); TextTable tbl; OSDTreePlainDumper(crush.get(), this, filter).dump(&tbl, bucket); *out << tbl; @@ -3776,12 +3776,12 @@ int OSDMap::build_simple_optioned(CephContext *cct, epoch_t e, uuid_d &fsid, r = build_simple_crush_map(cct, *crush, nosd, &ss); else r = build_simple_crush_map_from_conf(cct, *crush, &ss); - assert(r == 0); + ceph_assert(r == 0); int poolbase = get_max_osd() ? get_max_osd() : 1; const int default_replicated_rule = crush->get_osd_pool_default_crush_replicated_ruleset(cct); - assert(default_replicated_rule >= 0); + ceph_assert(default_replicated_rule >= 0); if (default_pool) { // pgp_num <= pg_num @@ -3867,7 +3867,7 @@ int OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush, int rootid; int r = crush.add_bucket(0, 0, CRUSH_HASH_DEFAULT, root_type, 0, NULL, NULL, &rootid); - assert(r == 0); + ceph_assert(r == 0); crush.set_item_name(rootid, "default"); for (int o=0; o overfull; for (auto& i : pgs_by_osd) { // make sure osd is still there (belongs to this crush-tree) - assert(osd_weight.count(i.first)); + ceph_assert(osd_weight.count(i.first)); float target = osd_weight[i.first] * pgs_per_weight; float deviation = (float)i.second.size() - target; ldout(cct, 20) << " osd." << i.first @@ -4347,7 +4347,7 @@ int OSDMap::calc_pg_upmaps( int osd = p->second; float deviation = p->first; float target = osd_weight[osd] * pgs_per_weight; - assert(target > 0); + ceph_assert(target > 0); if (deviation/target < max_deviation_ratio) { ldout(cct, 10) << " osd." 
<< osd << " target " << target @@ -4399,7 +4399,7 @@ int OSDMap::calc_pg_upmaps( if (orig.size() != out.size()) { continue; } - assert(orig != out); + ceph_assert(orig != out); auto& rmi = tmp.pg_upmap_items[pg]; for (unsigned i = 0; i < out.size(); ++i) { if (orig[i] != out[i]) { @@ -4440,13 +4440,13 @@ void OSDMap::get_pool_ids_by_osd(CephContext *cct, int osd, set *pool_ids) const { - assert(pool_ids); + ceph_assert(pool_ids); set raw_rules; int r = crush->get_rules_by_osd(osd, &raw_rules); if (r < 0) { lderr(cct) << __func__ << " get_rules_by_osd failed: " << cpp_strerror(r) << dendl; - assert(r >= 0); + ceph_assert(r >= 0); } set rules; for (auto &i: raw_rules) { @@ -4917,7 +4917,7 @@ void OSDMap::check_health(health_check_map_t *checks) const } } num_down_in_osds = down_in_osds.size(); - assert(num_down_in_osds <= num_in_osds); + ceph_assert(num_down_in_osds <= num_in_osds); if (num_down_in_osds > 0) { // summary of down subtree types and osds for (int type = max_type; type > 0; type--) { diff --git a/src/osd/OSDMap.h b/src/osd/OSDMap.h index 8e510a3b10189..d3e8853f32d81 100644 --- a/src/osd/OSDMap.h +++ b/src/osd/OSDMap.h @@ -169,7 +169,7 @@ struct PGTempMap { void init_current() { if (it != end) { current.first = it->first; - assert(it->second); + ceph_assert(it->second); current.second.resize(*it->second); int32_t *p = it->second + 1; for (int n = 0; n < *it->second; ++n, ++p) { @@ -455,7 +455,7 @@ public: /// filter out osds with any pending state changing size_t get_pending_state_osds(vector *osds) { - assert(osds); + ceph_assert(osds); osds->clear(); for (auto &p : new_state) { @@ -717,27 +717,27 @@ public: static void calc_state_set(int state, set& st); int get_state(int o) const { - assert(o < max_osd); + ceph_assert(o < max_osd); return osd_state[o]; } int get_state(int o, set& st) const { - assert(o < max_osd); + ceph_assert(o < max_osd); unsigned t = osd_state[o]; calc_state_set(t, st); return osd_state[o]; } void set_state(int o, unsigned s) { - assert(o < max_osd); + ceph_assert(o < max_osd); osd_state[o] = s; } void set_weight(int o, unsigned w) { - assert(o < max_osd); + ceph_assert(o < max_osd); osd_weight[o] = w; if (w) osd_state[o] |= CEPH_OSD_EXISTS; } unsigned get_weight(int o) const { - assert(o < max_osd); + ceph_assert(o < max_osd); return osd_weight[o]; } float get_weightf(int o) const { @@ -746,7 +746,7 @@ public: void adjust_osd_weights(const map& weights, Incremental& inc) const; void set_primary_affinity(int o, int w) { - assert(o < max_osd); + ceph_assert(o < max_osd); if (!osd_primary_affinity) osd_primary_affinity.reset( new mempool::osdmap::vector<__u32>( @@ -754,7 +754,7 @@ public: (*osd_primary_affinity)[o] = w; } unsigned get_primary_affinity(int o) const { - assert(o < max_osd); + ceph_assert(o < max_osd); if (!osd_primary_affinity) return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY; return (*osd_primary_affinity)[o]; @@ -833,7 +833,7 @@ public: } void get_noup_osds(vector *osds) const { - assert(osds); + ceph_assert(osds); osds->clear(); for (int i = 0; i < max_osd; i++) { @@ -844,7 +844,7 @@ public: } void get_nodown_osds(vector *osds) const { - assert(osds); + ceph_assert(osds); osds->clear(); for (int i = 0; i < max_osd; i++) { @@ -855,7 +855,7 @@ public: } void get_noin_osds(vector *osds) const { - assert(osds); + ceph_assert(osds); osds->clear(); for (int i = 0; i < max_osd; i++) { @@ -866,7 +866,7 @@ public: } void get_noout_osds(vector *osds) const { - assert(osds); + ceph_assert(osds); osds->clear(); for (int i = 0; i < max_osd; i++) { @@ -895,7 
+895,7 @@ public: int find_osd_on_ip(const entity_addr_t& ip) const; const entity_addrvec_t& get_addrs(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return osd_addrs->client_addrs[osd] ? *osd_addrs->client_addrs[osd] : _blank_addrvec; } @@ -903,45 +903,45 @@ public: return get_addrs(osd); } const entity_addrvec_t &get_cluster_addrs(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return osd_addrs->cluster_addrs[osd] ? *osd_addrs->cluster_addrs[osd] : _blank_addrvec; } const entity_addrvec_t &get_hb_back_addrs(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return osd_addrs->hb_back_addrs[osd] ? *osd_addrs->hb_back_addrs[osd] : _blank_addrvec; } const entity_addrvec_t &get_hb_front_addrs(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return osd_addrs->hb_front_addrs[osd] ? *osd_addrs->hb_front_addrs[osd] : _blank_addrvec; } const uuid_d& get_uuid(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return (*osd_uuid)[osd]; } const epoch_t& get_up_from(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return osd_info[osd].up_from; } const epoch_t& get_up_thru(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return osd_info[osd].up_thru; } const epoch_t& get_down_at(int osd) const { - assert(exists(osd)); + ceph_assert(exists(osd)); return osd_info[osd].down_at; } const osd_info_t& get_info(int osd) const { - assert(osd < max_osd); + ceph_assert(osd < max_osd); return osd_info[osd]; } const osd_xinfo_t& get_xinfo(int osd) const { - assert(osd < max_osd); + ceph_assert(osd < max_osd); return osd_xinfo[osd]; } @@ -1042,7 +1042,7 @@ public: const object_locator_t& loc) const { pg_t pg; int ret = object_locator_to_pg(oid, loc, pg); - assert(ret == 0); + ceph_assert(ret == 0); return pg; } @@ -1062,7 +1062,7 @@ public: int get_pg_num(int pg_pool) const { const pg_pool_t *pool = get_pg_pool(pg_pool); - assert(NULL != pool); + ceph_assert(NULL != pool); return pool->get_pg_num(); } @@ -1076,7 +1076,7 @@ public: return -ENOENT; } const pg_pool_t *p = get_pg_pool(pgid.pool()); - assert(p); + ceph_assert(p); return p->get_min_size(); } @@ -1085,7 +1085,7 @@ public: return -ENOENT; } const pg_pool_t *p = get_pg_pool(pgid.pool()); - assert(p); + ceph_assert(p); return p->get_size(); } @@ -1094,7 +1094,7 @@ public: return -ENOENT; } const pg_pool_t *p = get_pg_pool(pgid.pool()); - assert(p); + ceph_assert(p); return p->get_crush_rule(); } @@ -1172,7 +1172,7 @@ public: } bool pg_is_ec(pg_t pg) const { auto i = pools.find(pg.pool()); - assert(i != pools.end()); + ceph_assert(i != pools.end()); return i->second.is_erasure(); } bool get_primary_shard(const pg_t& pgid, spg_t *out) const { @@ -1246,7 +1246,7 @@ public: return pools; } void get_pool_ids_by_rule(int rule_id, set *pool_ids) const { - assert(pool_ids); + ceph_assert(pool_ids); for (auto &p: pools) { if (p.second.get_crush_rule() == rule_id) { pool_ids->insert(p.first); @@ -1258,7 +1258,7 @@ public: set *pool_ids) const; const string& get_pool_name(int64_t p) const { auto i = pool_name.find(p); - assert(i != pool_name.end()); + ceph_assert(i != pool_name.end()); return i->second; } const mempool::osdmap::map& get_pool_names() const { @@ -1275,19 +1275,19 @@ public: } unsigned get_pg_size(pg_t pg) const { auto p = pools.find(pg.pool()); - assert(p != pools.end()); + ceph_assert(p != pools.end()); return p->second.get_size(); } int get_pg_type(pg_t pg) const { auto p = pools.find(pg.pool()); - assert(p != pools.end()); 
+ ceph_assert(p != pools.end()); return p->second.get_type(); } pg_t raw_pg_to_pg(pg_t pg) const { auto p = pools.find(pg.pool()); - assert(p != pools.end()); + ceph_assert(p != pools.end()); return p->second.raw_pg_to_pg(pg); } diff --git a/src/osd/OSDMapMapping.cc b/src/osd/OSDMapMapping.cc index 6566acaadb3ae..649ac6c433228 100644 --- a/src/osd/OSDMapMapping.cc +++ b/src/osd/OSDMapMapping.cc @@ -39,7 +39,7 @@ void OSDMapMapping::_init_mappings(const OSDMap& osdmap) p.second.is_erasure())); } pools.erase(q, pools.end()); - assert(pools.size() == osdmap.get_pools().size()); + ceph_assert(pools.size() == osdmap.get_pools().size()); } void OSDMapMapping::update(const OSDMap& osdmap) @@ -109,9 +109,9 @@ void OSDMapMapping::_update_range( unsigned pg_end) { auto i = pools.find(pool); - assert(i != pools.end()); - assert(pg_begin <= pg_end); - assert(pg_end <= i->second.pg_num); + ceph_assert(i != pools.end()); + ceph_assert(pg_begin <= pg_end); + ceph_assert(pg_end <= i->second.pg_num); for (unsigned ps = pg_begin; ps < pg_end; ++ps) { vector up, acting; int up_primary, acting_primary; @@ -169,5 +169,5 @@ void ParallelPGMapper::queue( any = true; } } - assert(any); + ceph_assert(any); } diff --git a/src/osd/OSDMapMapping.h b/src/osd/OSDMapMapping.h index 86ad9743b84c2..3f14cb8f0c7a3 100644 --- a/src/osd/OSDMapMapping.h +++ b/src/osd/OSDMapMapping.h @@ -28,7 +28,7 @@ public: Job(const OSDMap *om) : start(ceph_clock_now()), osdmap(om) {} virtual ~Job() { - assert(shards == 0); + ceph_assert(shards == 0); } // child must implement this @@ -142,7 +142,7 @@ protected: void _process(Item *i, ThreadPool::TPHandle &h) override; void _clear() override { - assert(_empty()); + ceph_assert(_empty()); } bool _empty() override { @@ -286,8 +286,8 @@ public: std::vector *acting, int *acting_primary) const { auto p = pools.find(pgid.pool()); - assert(p != pools.end()); - assert(pgid.ps() < p->second.pg_num); + ceph_assert(p != pools.end()); + ceph_assert(pgid.ps() < p->second.pg_num); p->second.get(pgid.ps(), up, up_primary, acting, acting_primary); } @@ -295,8 +295,8 @@ public: int *acting_primary, spg_t *spgid) { auto p = pools.find(pgid.pool()); - assert(p != pools.end()); - assert(pgid.ps() < p->second.pg_num); + ceph_assert(p != pools.end()); + ceph_assert(pgid.ps() < p->second.pg_num); vector acting; p->second.get(pgid.ps(), nullptr, nullptr, &acting, acting_primary); if (p->second.erasure) { @@ -314,7 +314,7 @@ public: } const mempool::osdmap_mapping::vector& get_osd_acting_pgs(unsigned osd) { - assert(osd < acting_rmap.size()); + ceph_assert(osd < acting_rmap.size()); return acting_rmap[osd]; } diff --git a/src/osd/OpRequest.cc b/src/osd/OpRequest.cc index 3784453da6747..1e8c6dbcdb59d 100644 --- a/src/osd/OpRequest.cc +++ b/src/osd/OpRequest.cc @@ -80,7 +80,7 @@ void OpRequest::_unregistered() { } bool OpRequest::check_rmw(int flag) const { - assert(rmw_flags != 0); + ceph_assert(rmw_flags != 0); return rmw_flags & flag; } bool OpRequest::may_read() const { diff --git a/src/osd/PG.cc b/src/osd/PG.cc index 70e452a4d6515..00448f6bf98d7 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -165,7 +165,7 @@ void PG::put(const char* tag) { Mutex::Locker l(_ref_id_lock); auto tag_counts_entry = _tag_counts.find(tag); - assert(tag_counts_entry != _tag_counts.end()); + ceph_assert(tag_counts_entry != _tag_counts.end()); --tag_counts_entry->second; if (tag_counts_entry->second == 0) { _tag_counts.erase(tag_counts_entry); @@ -186,7 +186,7 @@ uint64_t PG::get_with_id() stringstream ss; bt.print(ss); dout(20) << 
__func__ << ": " << info.pgid << " got id " << id << " (new) ref==" << ref << dendl; - assert(!_live_ids.count(id)); + ceph_assert(!_live_ids.count(id)); _live_ids.insert(make_pair(id, ss.str())); return id; } @@ -196,7 +196,7 @@ void PG::put_with_id(uint64_t id) dout(20) << __func__ << ": " << info.pgid << " put id " << id << " (current) ref==" << ref << dendl; { Mutex::Locker l(_ref_id_lock); - assert(_live_ids.count(id)); + ceph_assert(_live_ids.count(id)); _live_ids.erase(id); } if (--ref == 0) @@ -294,7 +294,7 @@ void PGPool::update(CephContext *cct, OSDMapRef map) << " pool.cached_removed_snaps " << cached_removed_snaps << dendl; } - assert(actual_removed_snaps == cached_removed_snaps); + ceph_assert(actual_removed_snaps == cached_removed_snaps); } } if (info.is_pool_snaps_mode() && updated) { @@ -377,8 +377,8 @@ void PG::lock(bool no_lockdep) const { _lock.Lock(no_lockdep); // if we have unrecorded dirty state with the lock dropped, there is a bug - assert(!dirty_info); - assert(!dirty_big_info); + ceph_assert(!dirty_info); + ceph_assert(!dirty_big_info); dout(30) << "lock" << dendl; } @@ -406,7 +406,7 @@ void PG::proc_master_log( { dout(10) << "proc_master_log for osd." << from << ": " << olog << " " << omissing << dendl; - assert(!is_peered() && is_primary()); + ceph_assert(!is_peered() && is_primary()); // merge log into our own log to build master log. no need to // make any adjustments to their missing map; we are taking their @@ -427,7 +427,7 @@ void PG::proc_master_log( dirty_info = true; } update_history(oinfo.history); - assert(cct->_conf->osd_find_best_info_ignore_history_les || + ceph_assert(cct->_conf->osd_find_best_info_ignore_history_les || info.last_epoch_started >= info.history.last_epoch_started); peer_missing[from].claim(omissing); @@ -474,7 +474,7 @@ bool PG::proc_replica_info( } dout(10) << " got osd." << from << " " << oinfo << dendl; - assert(is_primary()); + ceph_assert(is_primary()); peer_info[from] = oinfo; might_have_unfound.insert(from); @@ -524,7 +524,7 @@ void PG::update_object_snap_mapping( ObjectStore::Transaction *t, const hobject_t &soid, const set &snaps) { OSDriver::OSTransaction _t(osdriver.get_transaction(t)); - assert(soid.snap < CEPH_MAXSNAP); + ceph_assert(soid.snap < CEPH_MAXSNAP); int r = snap_mapper.remove_oid( soid, &_t); @@ -841,7 +841,7 @@ void PG::discover_all_missing(map > &query_map) /******* PG ***********/ bool PG::needs_recovery() const { - assert(is_primary()); + ceph_assert(is_primary()); auto &missing = pg_log.get_missing(); @@ -851,7 +851,7 @@ bool PG::needs_recovery() const return true; } - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); set::const_iterator end = acting_recovery_backfill.end(); set::const_iterator a = acting_recovery_backfill.begin(); for (; a != end; ++a) { @@ -876,7 +876,7 @@ bool PG::needs_recovery() const bool PG::needs_backfill() const { - assert(is_primary()); + ceph_assert(is_primary()); // We can assume that only possible osds that need backfill // are on the backfill_targets vector nodes. 
@@ -918,7 +918,7 @@ void PG::check_past_interval_bounds() const derr << info.pgid << " required past_interval bounds are" << " not empty [" << rpib << ") but past_intervals " << past_intervals << " is empty" << dendl; - assert(!past_intervals.empty()); + ceph_assert(!past_intervals.empty()); } auto apib = past_intervals.get_bounds(); @@ -929,7 +929,7 @@ void PG::check_past_interval_bounds() const derr << info.pgid << " past_intervals [" << apib << ") start interval does not contain the required" << " bound [" << rpib << ") start" << dendl; - assert(0 == "past_interval start interval mismatch"); + ceph_assert(0 == "past_interval start interval mismatch"); } if (apib.second != rpib.second) { osd->clog->error() << info.pgid << " past_interal bound [" << apib @@ -938,7 +938,7 @@ void PG::check_past_interval_bounds() const derr << info.pgid << " past_interal bound [" << apib << ") end does not match required [" << rpib << ") end" << dendl; - assert(0 == "past_interval end mismatch"); + ceph_assert(0 == "past_interval end mismatch"); } } } @@ -983,7 +983,7 @@ void PG::remove_down_peer_info(const OSDMapRef osdmap) */ bool PG::all_unfound_are_queried_or_lost(const OSDMapRef osdmap) const { - assert(is_primary()); + ceph_assert(is_primary()); set::const_iterator peer = might_have_unfound.begin(); set::const_iterator mend = might_have_unfound.end(); @@ -1015,7 +1015,7 @@ PastIntervals::PriorSet PG::build_prior() for (map::iterator it = peer_info.begin(); it != peer_info.end(); ++it) { - assert(info.history.last_epoch_started >= it->second.history.last_epoch_started); + ceph_assert(info.history.last_epoch_started >= it->second.history.last_epoch_started); } } @@ -1128,7 +1128,7 @@ map::const_iterator PG::find_best_info( bool restrict_to_up_acting, bool *history_les_bound) const { - assert(history_les_bound); + ceph_assert(history_les_bound); /* See doc/dev/osd_internals/last_epoch_started.rst before attempting * to make changes to this process. Also, make sure to update it * when you find bugs! */ @@ -1264,7 +1264,7 @@ void PG::calc_ec_acting( for (set::iterator j = all_info_by_shard[shard_id_t(i)].begin(); j != all_info_by_shard[shard_id_t(i)].end(); ++j) { - assert(j->shard == i); + ceph_assert(j->shard == i); if (!all_info.find(*j)->second.is_incomplete() && all_info.find(*j)->second.last_update >= auth_log_shard->second.log_tail) { @@ -1321,7 +1321,7 @@ void PG::calc_replicated_acting( auth_log_shard->second.log_tail) { ss << "up_primary: " << up_primary << ") selected as primary" << std::endl; } else { - assert(!auth_log_shard->second.is_incomplete()); + ceph_assert(!auth_log_shard->second.is_incomplete()); ss << "up[0] needs backfill, osd." 
<< auth_log_shard_id << " selected as primary instead" << std::endl; primary = auth_log_shard; @@ -1399,7 +1399,7 @@ void PG::calc_replicated_acting( ); for (auto &p: candidate_by_last_update) { - assert(usable < size); + ceph_assert(usable < size); want->push_back(p.second); pg_shard_t s = pg_shard_t(p.second, shard_id_t::NO_SHARD); acting_backfill->insert(s); @@ -1454,7 +1454,7 @@ void PG::calc_replicated_acting( ); for (auto &p: candidate_by_last_update) { - assert(usable < size); + ceph_assert(usable < size); want->push_back(p.second); pg_shard_t s = pg_shard_t(p.second, shard_id_t::NO_SHARD); acting_backfill->insert(s); @@ -1653,12 +1653,12 @@ bool PG::choose_acting(pg_shard_t &auth_log_shard_id, osd->queue_want_pg_temp(info.pgid.pgid, empty); } else { dout(10) << __func__ << " failed" << dendl; - assert(want_acting.empty()); + ceph_assert(want_acting.empty()); } return false; } - assert(!auth_log_shard->second.is_incomplete()); + ceph_assert(!auth_log_shard->second.is_incomplete()); auth_log_shard_id = auth_log_shard->first; set want_backfill, want_acting_backfill; @@ -1712,7 +1712,7 @@ bool PG::choose_acting(pg_shard_t &auth_log_shard_id, if (want_acting == up) { // There can't be any pending backfill if // want is the same as crush map up OSDs. - assert(want_backfill.empty()); + ceph_assert(want_backfill.empty()); vector empty; osd->queue_want_pg_temp(info.pgid.pgid, empty); } else @@ -1722,13 +1722,13 @@ bool PG::choose_acting(pg_shard_t &auth_log_shard_id, want_acting.clear(); acting_recovery_backfill = want_acting_backfill; dout(10) << "acting_recovery_backfill is " << acting_recovery_backfill << dendl; - assert(backfill_targets.empty() || backfill_targets == want_backfill); + ceph_assert(backfill_targets.empty() || backfill_targets == want_backfill); if (backfill_targets.empty()) { // Caller is GetInfo backfill_targets = want_backfill; } // Adding !needs_recovery() to let the async_recovery_targets reset after recovery is complete - assert(async_recovery_targets.empty() || async_recovery_targets == want_async_recovery || !needs_recovery()); + ceph_assert(async_recovery_targets.empty() || async_recovery_targets == want_async_recovery || !needs_recovery()); if (async_recovery_targets.empty() || !needs_recovery()) { async_recovery_targets = want_async_recovery; } @@ -1737,7 +1737,7 @@ bool PG::choose_acting(pg_shard_t &auth_log_shard_id, for (set::iterator i = want_backfill.begin(); i != want_backfill.end(); ++i) { - assert(stray_set.find(*i) == stray_set.end()); + ceph_assert(stray_set.find(*i) == stray_set.end()); } dout(10) << "choose_acting want=" << want << " backfill_targets=" << want_backfill << " async_recovery_targets=" @@ -1755,8 +1755,8 @@ bool PG::choose_acting(pg_shard_t &auth_log_shard_id, */ void PG::build_might_have_unfound() { - assert(might_have_unfound.empty()); - assert(is_primary()); + ceph_assert(might_have_unfound.empty()); + ceph_assert(is_primary()); dout(10) << __func__ << dendl; @@ -1784,9 +1784,9 @@ void PG::activate(ObjectStore::Transaction& t, PastIntervals> > > *activator_map, RecoveryCtx *ctx) { - assert(!is_peered()); - assert(scrubber.callbacks.empty()); - assert(callbacks_for_degraded_object.empty()); + ceph_assert(!is_peered()); + ceph_assert(scrubber.callbacks.empty()); + ceph_assert(callbacks_for_degraded_object.empty()); // twiddle pg state state_clear(PG_STATE_DOWN); @@ -1796,7 +1796,7 @@ void PG::activate(ObjectStore::Transaction& t, if (is_primary()) { // only update primary last_epoch_started if we will go active if (acting.size() >= 
pool.info.min_size) { - assert(cct->_conf->osd_find_best_info_ignore_history_les || + ceph_assert(cct->_conf->osd_find_best_info_ignore_history_les || info.last_epoch_started <= activation_epoch); info.last_epoch_started = activation_epoch; info.last_interval_started = info.history.same_interval_since; @@ -1881,22 +1881,22 @@ void PG::activate(ObjectStore::Transaction& t, // if primary.. if (is_primary()) { - assert(ctx); + ceph_assert(ctx); // start up replicas - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { if (*i == pg_whoami) continue; pg_shard_t peer = *i; - assert(peer_info.count(peer)); + ceph_assert(peer_info.count(peer)); pg_info_t& pi = peer_info[peer]; dout(10) << "activate peer osd." << peer << " " << pi << dendl; MOSDPGLog *m = 0; - assert(peer_missing.count(peer)); + ceph_assert(peer_missing.count(peer)); pg_missing_t& pm = peer_missing[peer]; bool needs_past_intervals = pi.dne(); @@ -1977,7 +1977,7 @@ void PG::activate(ObjectStore::Transaction& t, pm.clear(); } else { // catch up - assert(pg_log.get_tail() <= pi.last_update); + ceph_assert(pg_log.get_tail() <= pi.last_update); m = new MOSDPGLog( i->shard, pg_whoami.shard, get_osdmap()->get_epoch(), info, @@ -2038,7 +2038,7 @@ void PG::activate(ObjectStore::Transaction& t, complete_shards.insert(*i); } else { auto peer_missing_entry = peer_missing.find(*i); - assert(peer_missing_entry != peer_missing.end()); + ceph_assert(peer_missing_entry != peer_missing.end()); missing_loc.add_active_missing(peer_missing_entry->second); if (!peer_missing_entry->second.have_missing() && peer_info[*i].last_backfill.is_max()) @@ -2065,8 +2065,8 @@ void PG::activate(ObjectStore::Transaction& t, ++i) { if (*i == pg_whoami) continue; dout(10) << __func__ << ": adding " << *i << " as a source" << dendl; - assert(peer_missing.count(*i)); - assert(peer_info.count(*i)); + ceph_assert(peer_missing.count(*i)); + ceph_assert(peer_info.count(*i)); missing_loc.add_source_info( *i, peer_info[*i], @@ -2079,7 +2079,7 @@ void PG::activate(ObjectStore::Transaction& t, ++i) { if (is_acting_recovery_backfill(i->first)) continue; - assert(peer_info.count(i->first)); + ceph_assert(peer_info.count(i->first)); search_for_missing( peer_info[i->first], i->second, @@ -2160,14 +2160,14 @@ void PG::_activate_committed(epoch_t epoch, epoch_t activation_epoch) dout(10) << "_activate_committed " << epoch << ", that was an old interval" << dendl; } else if (is_primary()) { - assert(!peer_activated.count(pg_whoami)); + ceph_assert(!peer_activated.count(pg_whoami)); peer_activated.insert(pg_whoami); dout(10) << "_activate_committed " << epoch << " peer_activated now " << peer_activated << " last_interval_started " << info.history.last_interval_started << " last_epoch_started " << info.history.last_epoch_started << " same_interval_since " << info.history.same_interval_since << dendl; - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); if (peer_activated.size() == acting_recovery_backfill.size()) all_activated_and_committed(); } else { @@ -2197,12 +2197,12 @@ void PG::_activate_committed(epoch_t epoch, epoch_t activation_epoch) dout(10) << __func__ << " flushes in progress, moving " << waiting_for_peered.size() << " items to waiting_for_flush" << dendl; - assert(waiting_for_flush.empty()); + ceph_assert(waiting_for_flush.empty()); waiting_for_flush.swap(waiting_for_peered); } } - 
assert(!dirty_info); + ceph_assert(!dirty_info); unlock(); } @@ -2215,10 +2215,10 @@ void PG::_activate_committed(epoch_t epoch, epoch_t activation_epoch) void PG::all_activated_and_committed() { dout(10) << "all_activated_and_committed" << dendl; - assert(is_primary()); - assert(peer_activated.size() == acting_recovery_backfill.size()); - assert(!acting_recovery_backfill.empty()); - assert(blocked_by.empty()); + ceph_assert(is_primary()); + ceph_assert(peer_activated.size() == acting_recovery_backfill.size()); + ceph_assert(!acting_recovery_backfill.empty()); + ceph_assert(blocked_by.empty()); // Degraded? _update_calc_stats(); @@ -2238,7 +2238,7 @@ void PG::all_activated_and_committed() bool PG::requeue_scrub(bool high_priority) { - assert(is_locked()); + ceph_assert(is_locked()); if (scrub_queued) { dout(10) << __func__ << ": already queued" << dendl; return false; @@ -2254,7 +2254,7 @@ void PG::queue_recovery() { if (!is_primary() || !is_peered()) { dout(10) << "queue_recovery -- not primary or not peered " << dendl; - assert(!recovery_queued); + ceph_assert(!recovery_queued); } else if (recovery_queued) { dout(10) << "queue_recovery -- already queued" << dendl; } else { @@ -2266,7 +2266,7 @@ void PG::queue_recovery() bool PG::queue_scrub() { - assert(is_locked()); + ceph_assert(is_locked()); if (is_scrubbing()) { return false; } @@ -2396,7 +2396,7 @@ unsigned PG::get_backfill_priority() } else if (is_undersized()) { // undersized: OSD_BACKFILL_DEGRADED_PRIORITY_BASE + num missing replicas - assert(pool.info.size > actingset.size()); + ceph_assert(pool.info.size > actingset.size()); ret = OSD_BACKFILL_DEGRADED_PRIORITY_BASE + (pool.info.size - actingset.size()); } else if (is_degraded()) { @@ -2430,7 +2430,7 @@ unsigned PG::get_delete_priority() Context *PG::finish_recovery() { dout(10) << "finish_recovery" << dendl; - assert(info.last_complete == info.last_update); + ceph_assert(info.last_complete == info.last_update); clear_recovery_state(); @@ -2474,7 +2474,7 @@ void PG::start_recovery_op(const hobject_t& soid) << " (" << recovering_oids << ")" #endif << dendl; - assert(recovery_ops_active >= 0); + ceph_assert(recovery_ops_active >= 0); recovery_ops_active++; #ifdef DEBUG_RECOVERY_OIDS recovering_oids.insert(soid); @@ -2489,10 +2489,10 @@ void PG::finish_recovery_op(const hobject_t& soid, bool dequeue) << " (" << recovering_oids << ")" #endif << dendl; - assert(recovery_ops_active > 0); + ceph_assert(recovery_ops_active > 0); recovery_ops_active--; #ifdef DEBUG_RECOVERY_OIDS - assert(recovering_oids.count(soid)); + ceph_assert(recovering_oids.count(soid)); recovering_oids.erase(recovering_oids.find(soid)); #endif osd->finish_recovery_op(this, soid, dequeue); @@ -2674,7 +2674,7 @@ void PG::release_backoffs(const hobject_t& begin, const hobject_t& end) Mutex::Locker l(b->lock); dout(10) << __func__ << " " << *b << dendl; if (b->session) { - assert(b->pg == this); + ceph_assert(b->pg == this); ConnectionRef con = b->session->con; if (con) { // OSD::ms_handle_reset clears s->con without a lock con->send_message( @@ -2710,7 +2710,7 @@ void PG::clear_backoffs() Mutex::Locker l(b->lock); dout(10) << __func__ << " " << *b << dendl; if (b->session) { - assert(b->pg == this); + ceph_assert(b->pg == this); if (b->is_new()) { b->state = Backoff::STATE_DELETING; } else { @@ -2728,8 +2728,8 @@ void PG::rm_backoff(BackoffRef b) { dout(10) << __func__ << " " << *b << dendl; Mutex::Locker l(backoff_lock); - assert(b->lock.is_locked_by_me()); - assert(b->pg == this); + 
ceph_assert(b->lock.is_locked_by_me()); + ceph_assert(b->pg == this); auto p = backoffs.find(b->begin); // may race with release_backoffs() if (p != backoffs.end()) { @@ -2781,7 +2781,7 @@ void PG::purge_strays() for (set::iterator p = stray_set.begin(); p != stray_set.end(); ++p) { - assert(!is_acting_recovery_backfill(*p)); + ceph_assert(!is_acting_recovery_backfill(*p)); if (get_osdmap()->is_up(p->osd)) { dout(10) << "sending PGRemove to osd." << *p << dendl; vector to_remove; @@ -2830,7 +2830,7 @@ void PG::clear_probe_targets() void PG::update_heartbeat_peers() { - assert(is_locked()); + ceph_assert(is_locked()); if (!is_primary()) return; @@ -2930,7 +2930,7 @@ void PG::_update_calc_stats() << upset << " acting_recovery_backfill " << acting_recovery_backfill << dendl; dout(20) << __func__ << " acting " << acting << " up " << up << dendl; - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); bool estimate = false; @@ -2950,7 +2950,7 @@ void PG::_update_calc_stats() // Primary first missing = pg_log.get_missing().num_missing(); - assert(acting_recovery_backfill.count(pg_whoami)); + ceph_assert(acting_recovery_backfill.count(pg_whoami)); if (upset.count(pg_whoami)) { missing_target_objects.insert(make_pair(missing, pg_whoami)); } else { @@ -3019,7 +3019,7 @@ void PG::_update_calc_stats() // Copies on other osds but limited to the possible degraded int more_osds = std::min(missing_shards, ml.first.other); int omisplaced = ml.second * more_osds; - assert(omisplaced <= odegraded); + ceph_assert(omisplaced <= odegraded); odegraded -= omisplaced; misplaced += omisplaced; @@ -3033,7 +3033,7 @@ void PG::_update_calc_stats() // Handle undersized case if (pool.info.is_replicated()) { // Add degraded for missing targets (num_objects missing) - assert(target >= upset.size()); + ceph_assert(target >= upset.size()); unsigned needed = target - upset.size(); degraded += num_objects * needed; } else { @@ -3066,7 +3066,7 @@ void PG::_update_calc_stats() // Handle undersized case if (pool.info.is_replicated()) { // Add to missing_target_objects - assert(target >= missing_target_objects.size()); + ceph_assert(target >= missing_target_objects.size()); unsigned needed = target - missing_target_objects.size(); if (needed) missing_target_objects.insert(make_pair(num_objects * needed, pg_shard_t(pg_shard_t::NO_OSD))); @@ -3341,13 +3341,13 @@ void PG::upgrade(ObjectStore *store) { dout(0) << __func__ << " " << info_struct_v << " -> " << latest_struct_v << dendl; - assert(info_struct_v <= 10); + ceph_assert(info_struct_v <= 10); ObjectStore::Transaction t; // // finished upgrade! 
- assert(info_struct_v == 10); + ceph_assert(info_struct_v == 10); // update infover_key if (info_struct_v < latest_struct_v) { @@ -3368,7 +3368,7 @@ void PG::upgrade(ObjectStore *store) << cpp_strerror(r) << dendl; ceph_abort(); } - assert(r == 0); + ceph_assert(r == 0); C_SaferCond waiter; if (!ch->flush_commit(&waiter)) { @@ -3402,7 +3402,7 @@ int PG::_prepare_write_info(CephContext* cct, pg_fast_info_t fast; fast.populate_from(info); bool did = fast.try_apply_to(&last_written_info); - assert(did); // we verified last_update increased above + ceph_assert(did); // we verified last_update increased above if (info == last_written_info) { encode(fast, (*km)[fastinfo_key]); if (logger) @@ -3486,7 +3486,7 @@ void PG::prepare_write_info(map *km) dirty_big_info, need_update_epoch, cct->_conf->osd_fast_info, osd->logger); - assert(ret == 0); + ceph_assert(ret == 0); if (need_update_epoch) last_epoch = get_osdmap()->get_epoch(); last_persisted_osdmap = last_epoch; @@ -3510,7 +3510,7 @@ bool PG::_has_removal_flag(ObjectStore *store, keys.insert("_remove"); map values; auto ch = store->open_collection(coll); - assert(ch); + ceph_assert(ch); if (store->omap_get_values(ch, pgmeta_oid, keys, &values) == 0 && values.size() == 1) return true; @@ -3528,7 +3528,7 @@ int PG::peek_map_epoch(ObjectStore *store, epoch_t cur_epoch = 0; // validate collection name - assert(coll.is_pg()); + ceph_assert(coll.is_pg()); // try for v8 set keys; @@ -3536,16 +3536,16 @@ int PG::peek_map_epoch(ObjectStore *store, keys.insert(epoch_key); map values; auto ch = store->open_collection(coll); - assert(ch); + ceph_assert(ch); int r = store->omap_get_values(ch, pgmeta_oid, keys, &values); if (r == 0) { - assert(values.size() == 2); + ceph_assert(values.size() == 2); // sanity check version auto bp = values[infover_key].cbegin(); __u8 struct_v = 0; decode(struct_v, bp); - assert(struct_v >= 8); + ceph_assert(struct_v >= 8); // get epoch bp = values[epoch_key].begin(); @@ -3579,7 +3579,7 @@ void PG::add_log_entry(const pg_log_entry_t& e, bool applied) info.last_complete = e.version; // raise last_update. - assert(e.version > info.last_update); + ceph_assert(e.version > info.last_update); info.last_update = e.version; // raise user_version, if it increased (it may have not get bumped @@ -3703,15 +3703,15 @@ int PG::read_info( ghobject_t pgmeta_oid(pgid.make_pgmeta_oid()); map values; auto ch = store->open_collection(coll); - assert(ch); + ceph_assert(ch); int r = store->omap_get_values(ch, pgmeta_oid, keys, &values); - assert(r == 0); - assert(values.size() == 3 || + ceph_assert(r == 0); + ceph_assert(values.size() == 3 || values.size() == 4); auto p = values[infover_key].cbegin(); decode(struct_v, p); - assert(struct_v >= 10); + ceph_assert(struct_v >= 10); p = values[info_key].begin(); decode(info, p); @@ -3733,12 +3733,12 @@ void PG::read_state(ObjectStore *store) { int r = read_info(store, pg_id, coll, info, past_intervals, info_struct_v); - assert(r >= 0); + ceph_assert(r >= 0); if (info_struct_v < compat_struct_v) { derr << "PG needs upgrade, but on-disk data is too old; upgrade to" << " an older version first." 
<< dendl; - assert(0 == "PG too old to upgrade"); + ceph_assert(0 == "PG too old to upgrade"); } last_written_info = info; @@ -3831,9 +3831,9 @@ void PG::update_snap_map( int r = snap_mapper.remove_oid( i->soid, &_t); - assert(r == 0); + ceph_assert(r == 0); } else if (i->is_update()) { - assert(i->snaps.length() > 0); + ceph_assert(i->snaps.length() > 0); vector snaps; bufferlist snapbl = i->snaps; auto p = snapbl.cbegin(); @@ -3856,9 +3856,9 @@ void PG::update_snap_map( _snaps, 0, &_t); - assert(r == 0); + ceph_assert(r == 0); } else { - assert(i->is_clean()); + ceph_assert(i->is_clean()); } } } @@ -3993,7 +3993,7 @@ void PG::requeue_map_waiters() bool PG::sched_scrub() { bool nodeep_scrub = false; - assert(is_locked()); + ceph_assert(is_locked()); if (!(is_primary() && is_active() && is_clean() && !is_scrubbing())) { return false; } @@ -4022,7 +4022,7 @@ bool PG::sched_scrub() } if (!scrubber.must_scrub) { - assert(!scrubber.must_deep_scrub); + ceph_assert(!scrubber.must_deep_scrub); //NOSCRUB so skip regular scrubs if ((osd->osd->get_osdmap()->test_flag(CEPH_OSDMAP_NOSCRUB) || @@ -4055,7 +4055,7 @@ bool PG::sched_scrub() bool ret = true; if (!scrubber.reserved) { - assert(scrubber.reserved_peers.empty()); + ceph_assert(scrubber.reserved_peers.empty()); if ((cct->_conf->osd_scrub_during_recovery || !osd->is_recovery_active()) && osd->inc_scrubs_pending()) { dout(20) << __func__ << ": reserved locally, reserving replicas" << dendl; @@ -4126,7 +4126,7 @@ void PG::reg_next_scrub() double scrub_min_interval = 0, scrub_max_interval = 0; pool.info.opts.get(pool_opts_t::SCRUB_MIN_INTERVAL, &scrub_min_interval); pool.info.opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &scrub_max_interval); - assert(scrubber.scrub_reg_stamp == utime_t()); + ceph_assert(scrubber.scrub_reg_stamp == utime_t()); scrubber.scrub_reg_stamp = osd->reg_pg_scrub(info.pgid, reg_stamp, scrub_min_interval, @@ -4169,7 +4169,7 @@ void PG::do_replica_scrub_map(OpRequestRef op) dout(10) << __func__ << " waiting_on_whom was " << scrubber.waiting_on_whom << dendl; - assert(scrubber.waiting_on_whom.count(m->from)); + ceph_assert(scrubber.waiting_on_whom.count(m->from)); scrubber.waiting_on_whom.erase(m->from); if (m->preempted) { dout(10) << __func__ << " replica was preempted, setting flag" << dendl; @@ -4187,7 +4187,7 @@ void PG::_request_scrub_map( bool deep, bool allow_preemption) { - assert(replica != pg_whoami); + ceph_assert(replica != pg_whoami); dout(10) << "scrub requesting scrubmap from osd." 
<< replica << " deep " << (int)deep << dendl; MOSDRepScrub *repscrubop = new MOSDRepScrub( @@ -4316,7 +4316,7 @@ void PG::clear_scrub_reserved() void PG::scrub_reserve_replicas() { - assert(backfill_targets.empty()); + ceph_assert(backfill_targets.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { @@ -4333,7 +4333,7 @@ void PG::scrub_reserve_replicas() void PG::scrub_unreserve_replicas() { - assert(backfill_targets.empty()); + ceph_assert(backfill_targets.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { @@ -4389,7 +4389,7 @@ void PG::_scan_snaps(ScrubMap &smap) dout(20) << __func__ << " " << hoid << dendl; - assert(!hoid.is_snapdir()); + ceph_assert(!hoid.is_snapdir()); if (hoid.is_head()) { // parse the SnapSet bufferlist bl; @@ -4571,7 +4571,7 @@ int PG::build_scrub_map_chunk( // finish dout(20) << __func__ << " finishing" << dendl; - assert(pos.done()); + ceph_assert(pos.done()); _repair_oinfo_oid(map); if (!is_primary()) { ScrubMap for_meta_scrub; @@ -4599,7 +4599,7 @@ void PG::Scrubber::cleanup_store(ObjectStore::Transaction *t) { }; store->cleanup(t); t->register_on_complete(new OnComplete(std::move(store))); - assert(!store); + ceph_assert(!store); } void PG::repair_object( @@ -4622,13 +4622,13 @@ void PG::repair_object( decode(oi, bliter); } catch (...) { dout(0) << __func__ << ": Need version of replica, bad object_info_t: " << soid << dendl; - assert(0); + ceph_assert(0); } if (bad_peer != primary) { peer_missing[bad_peer].add(soid, oi.version, eversion_t(), false); } else { // We should only be scrubbing if the PG is clean. - assert(waiting_for_unreadable_object.empty()); + ceph_assert(waiting_for_unreadable_object.empty()); pg_log.missing_add(soid, oi.version, eversion_t()); @@ -4659,7 +4659,7 @@ void PG::replica_scrub( ThreadPool::TPHandle &handle) { const MOSDRepScrub *msg = static_cast(op->get_req()); - assert(!scrubber.active_rep_scrub); + ceph_assert(!scrubber.active_rep_scrub); dout(7) << "replica_scrub" << dendl; if (msg->map_epoch < info.history.same_interval_since) { @@ -4669,7 +4669,7 @@ void PG::replica_scrub( return; } - assert(msg->chunky); + ceph_assert(msg->chunky); if (active_pushes > 0) { dout(10) << "waiting for active pushes to finish" << dendl; scrubber.active_rep_scrub = op; @@ -4746,7 +4746,7 @@ void PG::scrub(epoch_t queued, ThreadPool::TPHandle &handle) if (pg_has_reset_since(queued)) { return; } - assert(scrub_queued); + ceph_assert(scrub_queued); scrub_queued = false; scrubber.needs_sleep = true; @@ -4767,7 +4767,7 @@ void PG::scrub(epoch_t queued, ThreadPool::TPHandle &handle) } if (!scrubber.active) { - assert(backfill_targets.empty()); + ceph_assert(backfill_targets.empty()); scrubber.deep = state_test(PG_STATE_DEEP_SCRUB); @@ -4878,7 +4878,7 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) switch (scrubber.state) { case PG::Scrubber::INACTIVE: dout(10) << "scrub start" << dendl; - assert(is_primary()); + ceph_assert(is_primary()); publish_stats_to_osd(); scrubber.epoch_start = info.history.same_interval_since; @@ -4960,7 +4960,7 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) max, &objects, &candidate_end); - assert(ret >= 0); + ceph_assert(ret >= 0); if (!objects.empty()) { hobject_t back = objects.back(); @@ -4969,18 +4969,18 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) candidate_end = back; objects.pop_back(); if (objects.empty()) { - assert(0 == + ceph_assert(0 == "Somehow we got more than 2 objects 
which" "have the same head but are not clones"); } back = objects.back(); } if (candidate_end.is_head()) { - assert(candidate_end != back.get_head()); + ceph_assert(candidate_end != back.get_head()); candidate_end = candidate_end.get_object_boundary(); } } else { - assert(candidate_end.is_max()); + ceph_assert(candidate_end.is_max()); } if (!_range_available_for_scrub(scrubber.start, candidate_end)) { @@ -5061,7 +5061,7 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) break; case PG::Scrubber::BUILD_MAP: - assert(last_update_applied >= scrubber.subset_last_update); + ceph_assert(last_update_applied >= scrubber.subset_last_update); // build my own scrub map if (scrub_preempted) { @@ -5093,7 +5093,7 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) } dout(10) << __func__ << " waiting_on_whom was " << scrubber.waiting_on_whom << dendl; - assert(scrubber.waiting_on_whom.count(pg_whoami)); + ceph_assert(scrubber.waiting_on_whom.count(pg_whoami)); scrubber.waiting_on_whom.erase(pg_whoami); scrubber.state = PG::Scrubber::WAIT_REPLICAS; @@ -5117,8 +5117,8 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) break; case PG::Scrubber::COMPARE_MAPS: - assert(last_update_applied >= scrubber.subset_last_update); - assert(scrubber.waiting_on_whom.empty()); + ceph_assert(last_update_applied >= scrubber.subset_last_update); + ceph_assert(scrubber.waiting_on_whom.empty()); scrub_compare_maps(); scrubber.start = scrubber.end; @@ -5242,7 +5242,7 @@ bool PG::range_intersects_scrub(const hobject_t &start, const hobject_t& end) void PG::scrub_clear_state() { - assert(is_locked()); + ceph_assert(is_locked()); state_clear(PG_STATE_SCRUBBING); state_clear(PG_STATE_REPAIR); state_clear(PG_STATE_DEEP_SCRUB); @@ -5479,7 +5479,7 @@ void PG::scrub_finish() // when every one has been fixed. 
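Several of the assertions converted in the hunks above use the form ceph_assert(0 == "some message"), for example "PG too old to upgrade" earlier in this file and the "Somehow we got more than 2 objects ..." case just before this point. As a short standalone illustration of why that expression can never be true — the string literal decays to a non-null const char*, so comparing it against the null pointer constant 0 is always false, and the text rides along into the failure output — with plain assert() standing in for ceph_assert() and assuming a build without NDEBUG:

#include <cassert>

void unreachable_branch()
{
  // A string literal is never a null pointer, so this comparison is always
  // false; if control ever reaches this line the assertion fires and the
  // message text appears in the diagnostic.
  assert(0 == "we should never get here");
}

int main()
{
  // unreachable_branch() is deliberately not called: invoking it would
  // abort, which is exactly the point of the idiom.
  return 0;
}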
if (repair) { if (scrubber.fixed == scrubber.shallow_errors + scrubber.deep_errors) { - assert(deep_scrub); + ceph_assert(deep_scrub); scrubber.shallow_errors = scrubber.deep_errors = 0; } else { // Deep scrub in order to get corrected error counts @@ -5509,7 +5509,7 @@ void PG::scrub_finish() dirty_info = true; write_if_dirty(t); int tr = osd->store->queue_transaction(ch, std::move(t), NULL); - assert(tr == 0); + ceph_assert(tr == 0); } @@ -5535,7 +5535,7 @@ void PG::share_pg_info() dout(10) << "share_pg_info" << dendl; // share new pg_info_t with replicas - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { @@ -5565,8 +5565,8 @@ bool PG::append_log_entries_update_missing( ObjectStore::Transaction &t, boost::optional trim_to, boost::optional roll_forward_to) { - assert(!entries.empty()); - assert(entries.begin()->version > info.last_update); + ceph_assert(!entries.empty()); + ceph_assert(entries.begin()->version > info.last_update); PGLogEntryHandler rollbacker{this, &t}; bool invalidate_stats = @@ -5607,7 +5607,7 @@ void PG::merge_new_log_entries( boost::optional roll_forward_to) { dout(10) << __func__ << " " << entries << dendl; - assert(is_primary()); + ceph_assert(is_primary()); bool rebuild_missing = append_log_entries_update_missing(entries, t, trim_to, roll_forward_to); for (set::const_iterator i = acting_recovery_backfill.begin(); @@ -5615,8 +5615,8 @@ void PG::merge_new_log_entries( ++i) { pg_shard_t peer(*i); if (peer == pg_whoami) continue; - assert(peer_missing.count(peer)); - assert(peer_info.count(peer)); + ceph_assert(peer_missing.count(peer)); + ceph_assert(peer_info.count(peer)); pg_missing_t& pmissing(peer_missing[peer]); dout(20) << __func__ << " peer_missing for " << peer << " = " << pmissing << dendl; pg_info_t& pinfo(peer_info[peer]); @@ -5669,8 +5669,8 @@ void PG::fulfill_info( pg_shard_t from, const pg_query_t &query, pair ¬ify_info) { - assert(from == primary); - assert(query.type == pg_query_t::INFO); + ceph_assert(from == primary); + ceph_assert(query.type == pg_query_t::INFO); // info dout(10) << "sending info" << dendl; @@ -5681,8 +5681,8 @@ void PG::fulfill_log( pg_shard_t from, const pg_query_t &query, epoch_t query_epoch) { dout(10) << "log request from " << from << dendl; - assert(from == primary); - assert(query.type != pg_query_t::INFO); + ceph_assert(from == primary); + ceph_assert(query.type != pg_query_t::INFO); ConnectionRef con = osd->get_con_osd_cluster( from.osd, get_osdmap()->get_epoch()); if (!con) return; @@ -5917,7 +5917,7 @@ void PG::start_peering_interval( info.history.same_interval_since = osdmap->get_epoch(); } else { std::stringstream debug; - assert(info.history.same_interval_since != 0); + ceph_assert(info.history.same_interval_since != 0); boost::scoped_ptr recoverable( get_is_recoverable_predicate()); bool new_interval = PastIntervals::check_new_interval( @@ -6005,7 +6005,7 @@ void PG::start_peering_interval( projected_last_update = eversion_t(); - assert(!deleting); + ceph_assert(!deleting); // should we tell the primary we are here? 
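The conversions above, like the rest of this patch, swap libc assert() for ceph_assert(). As a rough, hypothetical sketch of what a project-level assertion macro buys — the real ceph_assert is defined elsewhere in the Ceph tree and differs in detail (it also reports through the usual Ceph logging/backtrace paths) — an always-compiled check might look like the following; example_ceph_assert and example_assert_fail are made-up names used only for illustration:

#include <cstdio>
#include <cstdlib>

[[noreturn]] inline void example_assert_fail(const char *expr, const char *file,
                                             int line, const char *func)
{
  // Report the failed expression before aborting; unlike assert(3), nothing
  // here is compiled out when NDEBUG is defined.
  std::fprintf(stderr, "%s:%d: %s: assertion '%s' failed\n",
               file, line, func, expr);
  std::abort();
}

#define example_ceph_assert(expr)                                        \
  ((expr) ? (void)0                                                      \
          : example_assert_fail(#expr, __FILE__, __LINE__, __func__))

int main()
{
  int values_found = 2;
  example_ceph_assert(values_found == 2);   // passes silently
  example_ceph_assert(values_found == 3);   // always aborts with a message
  return 0;
}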
send_notify = !is_primary(); @@ -6077,7 +6077,7 @@ void PG::on_new_interval() void PG::proc_primary_info(ObjectStore::Transaction &t, const pg_info_t &oinfo) { - assert(!is_primary()); + ceph_assert(!is_primary()); update_history(oinfo.history); if (!info.stats.stats_invalid && info.stats.stats.sum.num_scrub_errors) { @@ -6243,7 +6243,7 @@ template bool PG::can_discard_replica_op(OpRequestRef& op) { const T *m = static_cast(op->get_req()); - assert(m->get_type() == MSGTYPE); + ceph_assert(m->get_type() == MSGTYPE); int from = m->get_source().num(); @@ -6278,7 +6278,7 @@ bool PG::can_discard_replica_op(OpRequestRef& op) bool PG::can_discard_scan(OpRequestRef op) { const MOSDPGScan *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_SCAN); + ceph_assert(m->get_type() == MSG_OSD_PG_SCAN); if (old_peering_msg(m->map_epoch, m->query_epoch)) { dout(10) << " got old scan, ignoring" << dendl; @@ -6290,7 +6290,7 @@ bool PG::can_discard_scan(OpRequestRef op) bool PG::can_discard_backfill(OpRequestRef op) { const MOSDPGBackfill *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_BACKFILL); + ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL); if (old_peering_msg(m->map_epoch, m->query_epoch)) { dout(10) << " got old backfill, ignoring" << dendl; @@ -6365,7 +6365,7 @@ void PG::take_waiters() void PG::do_peering_event(PGPeeringEventRef evt, RecoveryCtx *rctx) { dout(10) << __func__ << ": " << evt->get_desc() << dendl; - assert(have_same_or_newer_map(evt->get_epoch_sent())); + ceph_assert(have_same_or_newer_map(evt->get_epoch_sent())); if (old_peering_evt(evt)) { dout(10) << "discard old " << evt->get_desc() << dendl; } else { @@ -6447,8 +6447,8 @@ void PG::handle_advance_map( vector& newacting, int acting_primary, RecoveryCtx *rctx) { - assert(lastmap->get_epoch() == osdmap_ref->get_epoch()); - assert(lastmap == osdmap_ref); + ceph_assert(lastmap->get_epoch() == osdmap_ref->get_epoch()); + ceph_assert(lastmap == osdmap_ref); dout(10) << "handle_advance_map " << newup << "/" << newacting << " -- " << up_primary << "/" << acting_primary @@ -6521,7 +6521,7 @@ struct C_DeleteMore : public Context { ceph_abort(); } void complete(int r) override { - assert(r == 0); + ceph_assert(r == 0); pg->lock(); if (!pg->pg_has_reset_since(epoch)) { pg->osd->queue_for_pg_delete(pg->get_pgid(), epoch); @@ -6608,7 +6608,7 @@ PG::RecoveryState::Crashed::Crashed(my_context ctx) NamedState(context< RecoveryMachine >().pg, "Crashed") { context< RecoveryMachine >().log_enter(state_name); - assert(0 == "we got a bad state machine event"); + ceph_assert(0 == "we got a bad state machine event"); } @@ -6632,7 +6632,7 @@ boost::statechart::result PG::RecoveryState::Initial::react(const MNotifyRec& no boost::statechart::result PG::RecoveryState::Initial::react(const MInfoRec& i) { PG *pg = context< RecoveryMachine >().pg; - assert(!pg->is_primary()); + ceph_assert(!pg->is_primary()); post_event(i); return transit< Stray >(); } @@ -6640,7 +6640,7 @@ boost::statechart::result PG::RecoveryState::Initial::react(const MInfoRec& i) boost::statechart::result PG::RecoveryState::Initial::react(const MLogRec& i) { PG *pg = context< RecoveryMachine >().pg; - assert(!pg->is_primary()); + ceph_assert(!pg->is_primary()); post_event(i); return transit< Stray >(); } @@ -6825,7 +6825,7 @@ PG::RecoveryState::Primary::Primary(my_context ctx) { context< RecoveryMachine >().log_enter(state_name); PG *pg = context< RecoveryMachine >().pg; - assert(pg->want_acting.empty()); + ceph_assert(pg->want_acting.empty()); // set 
CREATING bit until we have peered for the first time. if (pg->info.history.last_epoch_started == 0) { @@ -6932,9 +6932,9 @@ PG::RecoveryState::Peering::Peering(my_context ctx) context< RecoveryMachine >().log_enter(state_name); PG *pg = context< RecoveryMachine >().pg; - assert(!pg->is_peered()); - assert(!pg->is_peering()); - assert(pg->is_primary()); + ceph_assert(!pg->is_peered()); + ceph_assert(!pg->is_peering()); + ceph_assert(pg->is_primary()); pg->state_set(PG_STATE_PEERING); } @@ -7041,7 +7041,7 @@ void PG::RecoveryState::Backfilling::backfill_release_reservations() for (set::iterator it = pg->backfill_targets.begin(); it != pg->backfill_targets.end(); ++it) { - assert(*it != pg->pg_whoami); + ceph_assert(*it != pg->pg_whoami); ConnectionRef con = pg->osd->get_con_osd_cluster( it->osd, pg->get_osdmap()->get_epoch()); if (con) { @@ -7148,7 +7148,7 @@ PG::RecoveryState::WaitRemoteBackfillReserved::react(const RemoteBackfillReserve if (backfill_osd_it != context< Active >().remote_shards_to_reserve_backfill.end()) { //The primary never backfills itself - assert(*backfill_osd_it != pg->pg_whoami); + ceph_assert(*backfill_osd_it != pg->pg_whoami); ConnectionRef con = pg->osd->get_con_osd_cluster( backfill_osd_it->osd, pg->get_osdmap()->get_epoch()); if (con) { @@ -7184,10 +7184,10 @@ void PG::RecoveryState::WaitRemoteBackfillReserved::retry() set::const_iterator it, begin, end; begin = context< Active >().remote_shards_to_reserve_backfill.begin(); end = context< Active >().remote_shards_to_reserve_backfill.end(); - assert(begin != end); + ceph_assert(begin != end); for (it = begin; it != backfill_osd_it; ++it) { //The primary never backfills itself - assert(*it != pg->pg_whoami); + ceph_assert(*it != pg->pg_whoami); ConnectionRef con = pg->osd->get_con_osd_cluster( it->osd, pg->get_osdmap()->get_epoch()); if (con) { @@ -7638,7 +7638,7 @@ PG::RecoveryState::WaitRemoteRecoveryReserved::react(const RemoteRecoveryReserve PG *pg = context< RecoveryMachine >().pg; if (remote_recovery_reservation_it != context< Active >().remote_shards_to_reserve_recovery.end()) { - assert(*remote_recovery_reservation_it != pg->pg_whoami); + ceph_assert(*remote_recovery_reservation_it != pg->pg_whoami); ConnectionRef con = pg->osd->get_con_osd_cluster( remote_recovery_reservation_it->osd, pg->get_osdmap()->get_epoch()); if (con) { @@ -7675,7 +7675,7 @@ PG::RecoveryState::Recovering::Recovering(my_context ctx) pg->state_clear(PG_STATE_RECOVERY_WAIT); pg->state_clear(PG_STATE_RECOVERY_TOOFULL); pg->state_set(PG_STATE_RECOVERING); - assert(!pg->state_test(PG_STATE_ACTIVATING)); + ceph_assert(!pg->state_test(PG_STATE_ACTIVATING)); pg->publish_stats_to_osd(); pg->queue_recovery(); } @@ -7683,7 +7683,7 @@ PG::RecoveryState::Recovering::Recovering(my_context ctx) void PG::RecoveryState::Recovering::release_reservations(bool cancel) { PG *pg = context< RecoveryMachine >().pg; - assert(cancel || !pg->pg_log.get_missing().have_missing()); + ceph_assert(cancel || !pg->pg_log.get_missing().have_missing()); // release remote reservations for (set::const_iterator i = @@ -7775,11 +7775,11 @@ PG::RecoveryState::Recovered::Recovered(my_context ctx) PG *pg = context< RecoveryMachine >().pg; - assert(!pg->needs_recovery()); + ceph_assert(!pg->needs_recovery()); // if we finished backfill, all acting are active; recheck if // DEGRADED | UNDERSIZED is appropriate. 
- assert(!pg->acting_recovery_backfill.empty()); + ceph_assert(!pg->acting_recovery_backfill.empty()); if (pg->get_osdmap()->get_pg_size(pg->info.pgid.pgid) <= pg->acting_recovery_backfill.size()) { pg->state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY); @@ -7790,7 +7790,7 @@ PG::RecoveryState::Recovered::Recovered(my_context ctx) bool history_les_bound = false; if (pg->acting != pg->up && !pg->choose_acting(auth_log_shard, true, &history_les_bound)) { - assert(pg->want_acting.size()); + ceph_assert(pg->want_acting.size()); } else if (!pg->async_recovery_targets.empty()) { pg->choose_acting(auth_log_shard, true, &history_les_bound); } @@ -7874,9 +7874,9 @@ PG::RecoveryState::Active::Active(my_context ctx) PG *pg = context< RecoveryMachine >().pg; - assert(!pg->backfill_reserving); - assert(!pg->backfill_reserved); - assert(pg->is_primary()); + ceph_assert(!pg->backfill_reserving); + ceph_assert(!pg->backfill_reserved); + ceph_assert(pg->is_primary()); ldout(pg->cct, 10) << "In Active, about to call activate" << dendl; pg->start_flush(context< RecoveryMachine >().get_cur_transaction()); pg->activate(*context< RecoveryMachine >().get_cur_transaction(), @@ -7948,7 +7948,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const AdvMap& advmap) } ldout(pg->cct,10) << __func__ << " new removed_snaps " << i->second << ", snap_trimq now " << pg->snap_trimq << dendl; - assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps); + ceph_assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps); pg->dirty_info = true; pg->dirty_big_info = true; } @@ -7983,7 +7983,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const AdvMap& advmap) } ldout(pg->cct,10) << __func__ << " new purged_snaps " << j->second << ", now " << pg->info.purged_snaps << dendl; - assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps); + ceph_assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps); pg->dirty_info = true; pg->dirty_big_info = true; } @@ -8005,7 +8005,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const AdvMap& advmap) int osd = pg->want_acting[i]; if (!advmap.osdmap->is_up(osd)) { pg_shard_t osd_with_shard(osd, shard_id_t(i)); - assert(pg->is_acting(osd_with_shard) || pg->is_up(osd_with_shard)); + ceph_assert(pg->is_acting(osd_with_shard) || pg->is_up(osd_with_shard)); } } @@ -8039,7 +8039,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const ActMap&) { PG *pg = context< RecoveryMachine >().pg; ldout(pg->cct, 10) << "Active: handling ActMap" << dendl; - assert(pg->is_primary()); + ceph_assert(pg->is_primary()); if (pg->have_unfound()) { // object may have become unfound @@ -8079,7 +8079,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const ActMap&) boost::statechart::result PG::RecoveryState::Active::react(const MNotifyRec& notevt) { PG *pg = context< RecoveryMachine >().pg; - assert(pg->is_primary()); + ceph_assert(pg->is_primary()); if (pg->peer_info.count(notevt.from)) { ldout(pg->cct, 10) << "Active: got notify from " << notevt.from << ", already have info from that osd, ignoring" @@ -8104,7 +8104,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const MNotifyRec& not boost::statechart::result PG::RecoveryState::Active::react(const MTrim& trim) { PG *pg = context< RecoveryMachine >().pg; - assert(pg->is_primary()); + ceph_assert(pg->is_primary()); // peer is informing us of their last_complete_ondisk ldout(pg->cct,10) << " replica osd." 
<< trim.from << " lcod " << trim.trim_to << dendl; @@ -8118,9 +8118,9 @@ boost::statechart::result PG::RecoveryState::Active::react(const MTrim& trim) boost::statechart::result PG::RecoveryState::Active::react(const MInfoRec& infoevt) { PG *pg = context< RecoveryMachine >().pg; - assert(pg->is_primary()); + ceph_assert(pg->is_primary()); - assert(!pg->acting_recovery_backfill.empty()); + ceph_assert(!pg->acting_recovery_backfill.empty()); // don't update history (yet) if we are active and primary; the replica // may be telling us they have activated (and committed) but we can't // share that until _everyone_ does the same. @@ -8251,7 +8251,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const AllReplicasActi << pg->waiting_for_peered.size() << " items to waiting_for_flush" << dendl; - assert(pg->waiting_for_flush.empty()); + ceph_assert(pg->waiting_for_flush.empty()); pg->waiting_for_flush.swap(pg->waiting_for_peered); } @@ -8319,7 +8319,7 @@ boost::statechart::result PG::RecoveryState::ReplicaActive::react(const MLogRec& ldout(pg->cct, 10) << "received log from " << logevt.from << dendl; ObjectStore::Transaction* t = context().get_cur_transaction(); pg->merge_log(*t, logevt.msg->info, logevt.msg->log, logevt.from); - assert(pg->pg_log.get_head() == pg->info.last_update); + ceph_assert(pg->pg_log.get_head() == pg->info.last_update); return discard_event(); } @@ -8384,9 +8384,9 @@ PG::RecoveryState::Stray::Stray(my_context ctx) context< RecoveryMachine >().log_enter(state_name); PG *pg = context< RecoveryMachine >().pg; - assert(!pg->is_peered()); - assert(!pg->is_peering()); - assert(!pg->is_primary()); + ceph_assert(!pg->is_peered()); + ceph_assert(!pg->is_peering()); + ceph_assert(!pg->is_primary()); if (!pg->get_osdmap()->have_pg_pool(pg->get_pgid().pool())) { ldout(pg->cct,10) << __func__ << " pool is deleted" << dendl; @@ -8419,7 +8419,7 @@ boost::statechart::result PG::RecoveryState::Stray::react(const MLogRec& logevt) pg->merge_log(*t, msg->info, msg->log, logevt.from); } - assert(pg->pg_log.get_head() == pg->info.last_update); + ceph_assert(pg->pg_log.get_head() == pg->info.last_update); post_event(Activate(logevt.msg->info.last_epoch_started)); return transit(); @@ -8438,8 +8438,8 @@ boost::statechart::result PG::RecoveryState::Stray::react(const MInfoRec& infoev pg->info.hit_set = infoevt.info.hit_set; } - assert(infoevt.info.last_update == pg->info.last_update); - assert(pg->pg_log.get_head() == pg->info.last_update); + ceph_assert(infoevt.info.last_update == pg->info.last_update); + ceph_assert(pg->pg_log.get_head() == pg->info.last_update); post_event(Activate(infoevt.info.last_epoch_started)); return transit(); @@ -8576,7 +8576,7 @@ PG::RecoveryState::GetInfo::GetInfo(my_context ctx) pg->check_past_interval_bounds(); PastIntervals::PriorSet &prior_set = context< Peering >().prior_set; - assert(pg->blocked_by.empty()); + ceph_assert(pg->blocked_by.empty()); prior_set = pg->build_prior(); @@ -8748,7 +8748,7 @@ PG::RecoveryState::GetLog::GetLog(my_context ctx) // how much log to request? 
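The recovery-state constructors converted above (Peering, Active, Stray and friends) assert their entry invariants: Peering, for instance, requires a primary that is neither peered nor already peering, while Stray requires a non-primary. A minimal sketch of that pattern, using a stand-in FakePG type rather than the real PG class or the boost::statechart machinery:

#include <cassert>

struct FakePG {                       // stand-in for the real PG, illustration only
  bool primary = true;
  bool peered = false;
  bool is_primary() const { return primary; }
  bool is_peered() const { return peered; }
};

struct PeeringStateSketch {
  explicit PeeringStateSketch(FakePG *pg)
  {
    // Entering this state only makes sense on a primary that has not yet
    // finished peering; violating either condition is a coding bug, so it
    // is asserted rather than handled.
    assert(pg->is_primary());
    assert(!pg->is_peered());
  }
};

int main()
{
  FakePG pg;
  PeeringStateSketch state(&pg);      // ok: primary, not yet peered
  (void)state;
  return 0;
}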
eversion_t request_log_from = pg->info.last_update; - assert(!pg->acting_recovery_backfill.empty()); + ceph_assert(!pg->acting_recovery_backfill.empty()); for (set::iterator p = pg->acting_recovery_backfill.begin(); p != pg->acting_recovery_backfill.end(); ++p) { @@ -8769,7 +8769,7 @@ PG::RecoveryState::GetLog::GetLog(my_context ctx) request_log_from, pg->info.history, pg->get_osdmap()->get_epoch())); - assert(pg->blocked_by.empty()); + ceph_assert(pg->blocked_by.empty()); pg->blocked_by.insert(auth_log_shard.osd); pg->publish_stats_to_osd(); } @@ -8794,7 +8794,7 @@ boost::statechart::result PG::RecoveryState::GetLog::react(const AdvMap& advmap) boost::statechart::result PG::RecoveryState::GetLog::react(const MLogRec& logevt) { PG *pg = context< RecoveryMachine >().pg; - assert(!msg); + ceph_assert(!msg); if (logevt.from != auth_log_shard) { ldout(pg->cct, 10) << "GetLog: discarding log from " << "non-auth_log_shard osd." << logevt.from << dendl; @@ -8915,7 +8915,7 @@ PG::RecoveryState::Down::Down(my_context ctx) pg->state_set(PG_STATE_DOWN); auto &prior_set = context< Peering >().prior_set; - assert(pg->blocked_by.empty()); + ceph_assert(pg->blocked_by.empty()); pg->blocked_by.insert(prior_set.down.begin(), prior_set.down.end()); pg->publish_stats_to_osd(); } @@ -8947,7 +8947,7 @@ boost::statechart::result PG::RecoveryState::Down::react(const MNotifyRec& infoe { PG *pg = context< RecoveryMachine >().pg; - assert(pg->is_primary()); + ceph_assert(pg->is_primary()); epoch_t old_start = pg->info.history.last_epoch_started; if (!pg->peer_info.count(infoevt.from) && pg->get_osdmap()->has_been_up_since(infoevt.from.osd, infoevt.notify.epoch_sent)) { @@ -8977,7 +8977,7 @@ PG::RecoveryState::Incomplete::Incomplete(my_context ctx) pg->state_set(PG_STATE_INCOMPLETE); PastIntervals::PriorSet &prior_set = context< Peering >().prior_set; - assert(pg->blocked_by.empty()); + ceph_assert(pg->blocked_by.empty()); pg->blocked_by.insert(prior_set.down.begin(), prior_set.down.end()); pg->publish_stats_to_osd(); } @@ -9040,7 +9040,7 @@ PG::RecoveryState::GetMissing::GetMissing(my_context ctx) context< RecoveryMachine >().log_enter(state_name); PG *pg = context< RecoveryMachine >().pg; - assert(!pg->acting_recovery_backfill.empty()); + ceph_assert(!pg->acting_recovery_backfill.empty()); eversion_t since; for (set::iterator i = pg->acting_recovery_backfill.begin(); i != pg->acting_recovery_backfill.end(); @@ -9081,7 +9081,7 @@ PG::RecoveryState::GetMissing::GetMissing(my_context ctx) // We pull the log from the peer's last_epoch_started to ensure we // get enough log to detect divergent updates. since.epoch = pi.last_epoch_started; - assert(pi.last_update >= pg->info.log_tail); // or else choose_acting() did a bad thing + ceph_assert(pi.last_update >= pg->info.log_tail); // or else choose_acting() did a bad thing if (pi.log_tail <= since) { ldout(pg->cct, 10) << " requesting log+missing since " << since << " from osd." << *i << dendl; context< RecoveryMachine >().send_query( @@ -9248,8 +9248,8 @@ void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_ #define dout_prefix ((debug_pg ? 
debug_pg->gen_prefix(*_dout) : *_dout) << " PriorSet: ") void PG::RecoveryState::start_handle(RecoveryCtx *new_ctx) { - assert(!rctx); - assert(!orig_ctx); + ceph_assert(!rctx); + ceph_assert(!orig_ctx); orig_ctx = new_ctx; if (new_ctx) { if (messages_pending_flush) { @@ -9262,23 +9262,23 @@ void PG::RecoveryState::start_handle(RecoveryCtx *new_ctx) { } void PG::RecoveryState::begin_block_outgoing() { - assert(!messages_pending_flush); - assert(orig_ctx); - assert(rctx); + ceph_assert(!messages_pending_flush); + ceph_assert(orig_ctx); + ceph_assert(rctx); messages_pending_flush = BufferedRecoveryMessages(); rctx = RecoveryCtx(*messages_pending_flush, *orig_ctx); } void PG::RecoveryState::clear_blocked_outgoing() { - assert(orig_ctx); - assert(rctx); + ceph_assert(orig_ctx); + ceph_assert(rctx); messages_pending_flush = boost::optional(); } void PG::RecoveryState::end_block_outgoing() { - assert(messages_pending_flush); - assert(orig_ctx); - assert(rctx); + ceph_assert(messages_pending_flush); + ceph_assert(orig_ctx); + ceph_assert(rctx); rctx = RecoveryCtx(*orig_ctx); rctx->accept_buffered_messages(*messages_pending_flush); diff --git a/src/osd/PG.h b/src/osd/PG.h index bd2d196e68eff..37f72c6575499 100644 --- a/src/osd/PG.h +++ b/src/osd/PG.h @@ -267,8 +267,8 @@ public: } OSDMapRef get_osdmap() const { - assert(is_locked()); - assert(osdmap_ref); + ceph_assert(is_locked()); + ceph_assert(osdmap_ref); return osdmap_ref; } epoch_t get_osdmap_epoch() const { @@ -283,8 +283,8 @@ public: void lock(bool no_lockdep = false) const; void unlock() const { //generic_dout(0) << this << " " << info.pgid << " unlock" << dendl; - assert(!dirty_info); - assert(!dirty_big_info); + ceph_assert(!dirty_info); + ceph_assert(!dirty_big_info); _lock.Unlock(); } bool is_locked() const { @@ -328,7 +328,7 @@ public: return pg_whoami == primary; } bool pg_has_reset_since(epoch_t e) { - assert(is_locked()); + ceph_assert(is_locked()); return deleted || e < get_last_peering_reset(); } @@ -565,7 +565,7 @@ protected: void requeue_map_waiters(); void update_osdmap_ref(OSDMapRef newmap) { - assert(_lock.is_locked_by_me()); + ceph_assert(_lock.is_locked_by_me()); osdmap_ref = std::move(newmap); } @@ -626,8 +626,8 @@ protected: (l.other < r.other))); } friend ostream& operator<<(ostream& out, const loc_count_t& l) { - assert(l.up >= 0); - assert(l.other >= 0); + ceph_assert(l.up >= 0); + ceph_assert(l.other >= 0); return out << "(" << l.up << "+" << l.other << ")"; } }; @@ -682,7 +682,7 @@ protected: pgs_by_shard_id(s, pgsbs); for (auto shard: pgsbs) { auto p = missing_by_count[shard.first].find(_get_count(shard.second)); - assert(p != missing_by_count[shard.first].end()); + ceph_assert(p != missing_by_count[shard.first].end()); if (--p->second == 0) { missing_by_count[shard.first].erase(p); } @@ -815,7 +815,7 @@ protected: lgeneric_dout(pg->cct, 0) << this << " " << pg->info.pgid << " unexpected need for " << i->first << " have " << j->second << " tried to add " << i->second << dendl; - assert(i->second.need == j->second.need); + ceph_assert(i->second.need == j->second.need); } } } @@ -825,7 +825,7 @@ protected: } void revise_need(const hobject_t &hoid, eversion_t need) { auto it = needs_recovery_map.find(hoid); - assert(it != needs_recovery_map.end()); + ceph_assert(it != needs_recovery_map.end()); it->second.need = need; } @@ -875,7 +875,7 @@ protected: if (i == self) continue; auto pmiter = pmissing.find(i); - assert(pmiter != pmissing.end()); + ceph_assert(pmiter != pmissing.end()); miter = 
pmiter->second.get_items().find(hoid); if (miter != pmiter->second.get_items().end()) { item = miter->second; @@ -891,15 +891,15 @@ protected: return; auto mliter = missing_loc.insert(make_pair(hoid, set())).first; - assert(info.last_backfill.is_max()); - assert(info.last_update >= item->need); + ceph_assert(info.last_backfill.is_max()); + ceph_assert(info.last_update >= item->need); if (!missing.is_missing(hoid)) mliter->second.insert(self); for (auto &&i: pmissing) { if (i.first == self) continue; auto pinfoiter = pinfo.find(i.first); - assert(pinfoiter != pinfo.end()); + ceph_assert(pinfoiter != pinfo.end()); if (item->need <= pinfoiter->second.last_update && hoid <= pinfoiter->second.last_backfill && !i.second.is_missing(hoid)) @@ -1004,9 +1004,9 @@ public: handle(rctx.handle) {} void accept_buffered_messages(BufferedRecoveryMessages &m) { - assert(query_map); - assert(info_map); - assert(notify_list); + ceph_assert(query_map); + ceph_assert(info_map); + ceph_assert(notify_list); for (map >::iterator i = m.query_map.begin(); i != m.query_map.end(); ++i) { @@ -1039,7 +1039,7 @@ public: void send_notify(pg_shard_t to, const pg_notify_t &info, const PastIntervals &pi) { - assert(notify_list); + ceph_assert(notify_list); (*notify_list)[to.osd].push_back(make_pair(info, pi)); } }; @@ -1158,7 +1158,7 @@ public: /// drop first entry, and adjust @begin accordingly void pop_front() { - assert(!objects.empty()); + ceph_assert(!objects.empty()); objects.erase(objects.begin()); trim(); } @@ -1356,7 +1356,7 @@ protected: void calc_min_last_complete_ondisk() { eversion_t min = last_complete_ondisk; - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { @@ -1395,7 +1395,7 @@ protected: pg->get_pgbackend()->try_stash(hoid, v, t); } void rollback(const pg_log_entry_t &entry) override { - assert(entry.can_rollback()); + ceph_assert(entry.can_rollback()); pg->get_pgbackend()->rollback(entry, t); } void rollforward(const pg_log_entry_t &entry) override { @@ -1969,27 +1969,27 @@ protected: /* Accessor functions for state methods */ ObjectStore::Transaction* get_cur_transaction() { - assert(state->rctx); - assert(state->rctx->transaction); + ceph_assert(state->rctx); + ceph_assert(state->rctx->transaction); return state->rctx->transaction; } void send_query(pg_shard_t to, const pg_query_t &query) { - assert(state->rctx); - assert(state->rctx->query_map); + ceph_assert(state->rctx); + ceph_assert(state->rctx->query_map); (*state->rctx->query_map)[to.osd][spg_t(pg->info.pgid.pgid, to.shard)] = query; } map > *get_query_map() { - assert(state->rctx); - assert(state->rctx->query_map); + ceph_assert(state->rctx); + ceph_assert(state->rctx->query_map); return state->rctx->query_map; } map > > *get_info_map() { - assert(state->rctx); - assert(state->rctx->info_map); + ceph_assert(state->rctx); + ceph_assert(state->rctx->info_map); return state->rctx->info_map; } @@ -1997,7 +1997,7 @@ protected: void send_notify(pg_shard_t to, const pg_notify_t &info, const PastIntervals &pi) { - assert(state->rctx); + ceph_assert(state->rctx); state->rctx->send_notify(to, info, pi); } }; @@ -2797,8 +2797,8 @@ protected: break; } } - assert(up_primary.osd == new_up_primary); - assert(primary.osd == new_acting_primary); + ceph_assert(up_primary.osd == new_up_primary); + ceph_assert(primary.osd == new_acting_primary); } void set_role(int r) { @@ -2871,9 +2871,9 @@ protected: eversion_t 
at_version( get_osdmap()->get_epoch(), projected_last_update.version+1); - assert(at_version > info.last_update); - assert(at_version > pg_log.get_head()); - assert(at_version > projected_last_update); + ceph_assert(at_version > info.last_update); + ceph_assert(at_version > pg_log.get_head()); + ceph_assert(at_version > projected_last_update); return at_version; } diff --git a/src/osd/PGBackend.cc b/src/osd/PGBackend.cc index a0b4f726f033f..03dc4b41283d3 100644 --- a/src/osd/PGBackend.cc +++ b/src/osd/PGBackend.cc @@ -42,7 +42,7 @@ static ostream& _prefix(std::ostream *_dout, PGBackend *pgb) { void PGBackend::recover_delete_object(const hobject_t &oid, eversion_t v, RecoveryHandle *h) { - assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0); + ceph_assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0); for (const auto& shard : get_parent()->get_acting_recovery_backfill_shards()) { if (shard == get_parent()->whoami_shard()) continue; @@ -117,7 +117,7 @@ bool PGBackend::handle_message(OpRequestRef op) void PGBackend::handle_recovery_delete(OpRequestRef op) { const MOSDPGRecoveryDelete *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE); + ceph_assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE); dout(20) << __func__ << " " << op << dendl; op->mark_started(); @@ -150,7 +150,7 @@ void PGBackend::handle_recovery_delete(OpRequestRef op) void PGBackend::handle_recovery_delete_reply(OpRequestRef op) { const MOSDPGRecoveryDeleteReply *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE_REPLY); + ceph_assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE_REPLY); dout(20) << __func__ << " " << op << dendl; for (const auto &p : m->objects) { @@ -237,7 +237,7 @@ void PGBackend::rollback( } }; - assert(entry.mod_desc.can_rollback()); + ceph_assert(entry.mod_desc.can_rollback()); RollbackVisitor vis(entry.soid, this); entry.mod_desc.visit(&vis); t->append(vis.t); @@ -305,7 +305,7 @@ void PGBackend::try_stash( void PGBackend::remove( const hobject_t &hoid, ObjectStore::Transaction *t) { - assert(!hoid.is_temp()); + ceph_assert(!hoid.is_temp()); t->remove( coll, ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard)); @@ -335,7 +335,7 @@ int PGBackend::objects_list_partial( vector *ls, hobject_t *next) { - assert(ls); + ceph_assert(ls); // Starts with the smallest generation to make sure the result list // has the marker object (it might have multiple generations // though, which would be filtered). 
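In the PG.h hunks a little earlier, the missing_by_count bookkeeping follows a common shape: look the counter up, assert it exists, decrement it, and erase the entry once it reaches zero. That idiom in isolation, with std::map<int, int> standing in for the real shard/count structures:

#include <cassert>
#include <map>

void dec_count(std::map<int, int> &counts, int key)
{
  auto p = counts.find(key);
  assert(p != counts.end());   // the caller must have incremented this key first
  if (--p->second == 0)
    counts.erase(p);           // drop empty buckets so only live counts remain
}

int main()
{
  std::map<int, int> counts;
  counts[7] = 2;
  dec_count(counts, 7);
  assert(counts.at(7) == 1);
  dec_count(counts, 7);
  assert(counts.count(7) == 0);
  return 0;
}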
@@ -383,7 +383,7 @@ int PGBackend::objects_list_range( vector *ls, vector *gen_obs) { - assert(ls); + ceph_assert(ls); vector objects; int r = store->collection_list( ch, @@ -441,7 +441,7 @@ void PGBackend::rollback_setattrs( map > &old_attrs, ObjectStore::Transaction *t) { map to_set; - assert(!hoid.is_temp()); + ceph_assert(!hoid.is_temp()); for (map >::iterator i = old_attrs.begin(); i != old_attrs.end(); ++i) { @@ -464,7 +464,7 @@ void PGBackend::rollback_append( const hobject_t &hoid, uint64_t old_size, ObjectStore::Transaction *t) { - assert(!hoid.is_temp()); + ceph_assert(!hoid.is_temp()); t->truncate( coll, ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard), @@ -475,7 +475,7 @@ void PGBackend::rollback_stash( const hobject_t &hoid, version_t old_version, ObjectStore::Transaction *t) { - assert(!hoid.is_temp()); + ceph_assert(!hoid.is_temp()); t->remove( coll, ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard)); @@ -490,7 +490,7 @@ void PGBackend::rollback_try_stash( const hobject_t &hoid, version_t old_version, ObjectStore::Transaction *t) { - assert(!hoid.is_temp()); + ceph_assert(!hoid.is_temp()); t->remove( coll, ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard)); @@ -524,7 +524,7 @@ void PGBackend::trim_rollback_object( const hobject_t &hoid, version_t old_version, ObjectStore::Transaction *t) { - assert(!hoid.is_temp()); + ceph_assert(!hoid.is_temp()); t->remove( coll, ghobject_t(hoid, old_version, get_parent()->whoami_shard().shard)); } @@ -552,7 +552,7 @@ PGBackend *PGBackend::build_pg_backend( ec_profile, &ec_impl, &ss); - assert(ec_impl); + ceph_assert(ec_impl); return new ECBackend( l, coll, @@ -573,8 +573,8 @@ int PGBackend::be_scan_list( ScrubMapBuilder &pos) { dout(10) << __func__ << " " << pos << dendl; - assert(!pos.done()); - assert(pos.pos < pos.ls.size()); + ceph_assert(!pos.done()); + ceph_assert(pos.pos < pos.ls.size()); hobject_t& poid = pos.ls[pos.pos]; struct stat st; @@ -587,7 +587,7 @@ int PGBackend::be_scan_list( if (r == 0) { ScrubMap::object &o = map.objects[poid]; o.size = st.st_size; - assert(!o.negative); + ceph_assert(!o.negative); store->getattrs( ch, ghobject_t( @@ -881,7 +881,7 @@ map::const_iterator } // We won't pick an auth copy if the snapset is missing or won't decode. - assert(!obj.is_snapdir()); + ceph_assert(!obj.is_snapdir()); if (obj.is_head()) { k = i->second.attrs.find(SS_ATTR); if (k == i->second.attrs.end()) { @@ -956,7 +956,7 @@ map::const_iterator } // This is automatically corrected in PG::_repair_oinfo_oid() - assert(oi.soid == obj); + ceph_assert(oi.soid == obj); if (i->second.size != be_get_ondisk_size(oi.size)) { shard_info.set_obj_size_info_mismatch(); @@ -1148,7 +1148,7 @@ void PGBackend::be_compare_scrubmaps( if (fix_digest) { boost::optional data_digest, omap_digest; - assert(auth_object.digest_present); + ceph_assert(auth_object.digest_present); data_digest = auth_object.digest; if (auth_object.omap_digest_present) { omap_digest = auth_object.omap_digest; @@ -1176,7 +1176,7 @@ void PGBackend::be_compare_scrubmaps( // recorded digest != actual digest? 
if (auth_oi.is_data_digest() && auth_object.digest_present && auth_oi.data_digest != auth_object.digest) { - assert(shard_map[auth->first].has_data_digest_mismatch_info()); + ceph_assert(shard_map[auth->first].has_data_digest_mismatch_info()); errorstream << pgid << " recorded data digest 0x" << std::hex << auth_oi.data_digest << " != on disk 0x" << auth_object.digest << std::dec << " on " << auth_oi.soid @@ -1186,7 +1186,7 @@ void PGBackend::be_compare_scrubmaps( } if (auth_oi.is_omap_digest() && auth_object.omap_digest_present && auth_oi.omap_digest != auth_object.omap_digest) { - assert(shard_map[auth->first].has_omap_digest_mismatch_info()); + ceph_assert(shard_map[auth->first].has_omap_digest_mismatch_info()); errorstream << pgid << " recorded omap digest 0x" << std::hex << auth_oi.omap_digest << " != on disk 0x" << auth_object.omap_digest << std::dec diff --git a/src/osd/PGBackend.h b/src/osd/PGBackend.h index 71bd6604b759c..e9b9d4441d973 100644 --- a/src/osd/PGBackend.h +++ b/src/osd/PGBackend.h @@ -187,7 +187,7 @@ typedef std::shared_ptr OSDMapRef; } virtual const pg_missing_const_i &get_shard_missing(pg_shard_t peer) const { auto m = maybe_get_shard_missing(peer); - assert(m); + ceph_assert(m); return *m; } @@ -198,7 +198,7 @@ typedef std::shared_ptr OSDMapRef; } else { map::const_iterator i = get_shard_info().find(peer); - assert(i != get_shard_info().end()); + ceph_assert(i != get_shard_info().end()); return i->second; } } diff --git a/src/osd/PGLog.cc b/src/osd/PGLog.cc index eb88dc1db3c52..5d789daf085b8 100644 --- a/src/osd/PGLog.cc +++ b/src/osd/PGLog.cc @@ -50,7 +50,7 @@ void PGLog::IndexedLog::trim( set* trimmed_dups, eversion_t *write_from_dups) { - assert(s <= can_rollback_to); + ceph_assert(s <= can_rollback_to); if (complete_to != log.end()) lgeneric_subdout(cct, osd, 20) << " complete_to " << complete_to->version << dendl; @@ -131,7 +131,7 @@ ostream& PGLog::IndexedLog::print(ostream& out) const out << *p << " " << (logged_object(p->soid) ? "indexed" : "NOT INDEXED") << std::endl; - assert(!p->reqid_is_indexed() || logged_req(p->reqid)); + ceph_assert(!p->reqid_is_indexed() || logged_req(p->reqid)); } for (list::const_iterator p = dups.begin(); @@ -177,7 +177,7 @@ void PGLog::trim( // Don't assert for async_recovery_targets or backfill_targets // or whenever there are missing items if (transaction_applied && !async && (missing.num_missing() == 0)) - assert(trim_to <= info.last_complete); + ceph_assert(trim_to <= info.last_complete); dout(10) << "trim " << log << " to " << trim_to << dendl; log.trim(cct, trim_to, &trimmed, &trimmed_dups, &write_from_dups); @@ -340,9 +340,9 @@ void PGLog::merge_log(pg_info_t &oinfo, pg_log_t &olog, pg_shard_t fromosd, // Check preconditions // If our log is empty, the incoming log needs to have not been trimmed. - assert(!log.null() || olog.tail == eversion_t()); + ceph_assert(!log.null() || olog.tail == eversion_t()); // The logs must overlap. 
- assert(log.head >= olog.tail && olog.head >= log.tail); + ceph_assert(log.head >= olog.tail && olog.head >= log.tail); for (map::const_iterator i = missing.get_items().begin(); i != missing.get_items().end(); @@ -574,11 +574,11 @@ void PGLog::check() { derr << " " << *i << dendl; } } - assert(log.log.size() == log_keys_debug.size()); + ceph_assert(log.log.size() == log_keys_debug.size()); for (list::iterator i = log.log.begin(); i != log.log.end(); ++i) { - assert(log_keys_debug.count(i->get_key_name())); + ceph_assert(log_keys_debug.count(i->get_key_name())); } } @@ -724,7 +724,7 @@ void PGLog::_write_log_and_missing_wo_missing( ++i) { if (i->first[0] == '_') continue; - assert(!log_keys_debug->count(i->first)); + ceph_assert(!log_keys_debug->count(i->first)); log_keys_debug->insert(i->first); } } @@ -806,7 +806,7 @@ void PGLog::_write_log_and_missing( string key = t.get_key_name(); if (log_keys_debug) { auto it = log_keys_debug->find(key); - assert(it != log_keys_debug->end()); + ceph_assert(it != log_keys_debug->end()); log_keys_debug->erase(it); } to_remove.emplace(std::move(key)); @@ -853,7 +853,7 @@ void PGLog::_write_log_and_missing( ++i) { if (i->first[0] == '_') continue; - assert(!log_keys_debug->count(i->first)); + ceph_assert(!log_keys_debug->count(i->first)); log_keys_debug->insert(i->first); } } diff --git a/src/osd/PGLog.h b/src/osd/PGLog.h index 2373cdfac1389..de85388750a38 100644 --- a/src/osd/PGLog.h +++ b/src/osd/PGLog.h @@ -208,8 +208,8 @@ public: /****/ void claim_log_and_clear_rollback_info(const pg_log_t& o) { // we must have already trimmed the old entries - assert(rollback_info_trimmed_to == head); - assert(rollback_info_trimmed_to_riter == log.rbegin()); + ceph_assert(rollback_info_trimmed_to == head); + ceph_assert(rollback_info_trimmed_to_riter == log.rbegin()); *this = IndexedLog(o); @@ -224,8 +224,8 @@ public: void zero() { // we must have already trimmed the old entries - assert(rollback_info_trimmed_to == head); - assert(rollback_info_trimmed_to_riter == log.rbegin()); + ceph_assert(rollback_info_trimmed_to == head); + ceph_assert(rollback_info_trimmed_to_riter == log.rbegin()); unindex(); pg_log_t::clear(); @@ -267,9 +267,9 @@ public: version_t *user_version, int *return_code) const { - assert(version); - assert(user_version); - assert(return_code); + ceph_assert(version); + ceph_assert(user_version); + ceph_assert(return_code); ceph::unordered_map::const_iterator p; if (!(indexed_data & PGLOG_INDEXED_CALLER_OPS)) { index_caller_ops(); @@ -299,7 +299,7 @@ public: return true; } } - assert(0 == "in extra_caller_ops but not extra_reqids"); + ceph_assert(0 == "in extra_caller_ops but not extra_reqids"); } if (!(indexed_data & PGLOG_INDEXED_DUPS)) { @@ -492,7 +492,7 @@ public: // actors void add(const pg_log_entry_t& e, bool applied = true) { if (!applied) { - assert(get_can_rollback_to() == head); + ceph_assert(get_can_rollback_to() == head); } // make sure our buffers don't pin bigger buffers @@ -505,8 +505,8 @@ public: if (rollback_info_trimmed_to_riter == log.rbegin()) ++rollback_info_trimmed_to_riter; - assert(e.version > head); - assert(head.version == 0 || e.version.version > head.version); + ceph_assert(e.version > head); + ceph_assert(head.version == 0 || e.version.version > head.version); head = e.version; // to our index @@ -769,17 +769,17 @@ public: } } - assert(log.get_can_rollback_to() >= v); + ceph_assert(log.get_can_rollback_to() >= v); } void reset_complete_to(pg_info_t *info) { log.complete_to = log.log.begin(); - assert(log.complete_to != 
log.log.end()); + ceph_assert(log.complete_to != log.log.end()); auto oldest_need = missing.get_oldest_need(); if (oldest_need != eversion_t()) { while (log.complete_to->version < oldest_need) { ++log.complete_to; - assert(log.complete_to != log.log.end()); + ceph_assert(log.complete_to != log.log.end()); } } if (!info) @@ -857,7 +857,7 @@ protected: } // entries is non-empty - assert(!orig_entries.empty()); + ceph_assert(!orig_entries.empty()); // strip out and ignore ERROR entries mempool::osd_pglog::list entries; eversion_t last; @@ -866,7 +866,7 @@ protected: i != orig_entries.end(); ++i) { // all entries are on hoid - assert(i->soid == hoid); + ceph_assert(i->soid == hoid); // did not see error entries before this entry and this entry is not error // then this entry is the first non error entry bool first_non_error = ! seen_non_error && ! i->is_error(); @@ -883,9 +883,9 @@ protected: if (i != orig_entries.begin() && i->prior_version != eversion_t() && ! first_non_error) { // in increasing order of version - assert(i->version > last); + ceph_assert(i->version > last); // prior_version correct (unless it is an ERROR entry) - assert(i->prior_version == last || i->is_error()); + ceph_assert(i->prior_version == last || i->is_error()); } if (i->is_error()) { ldpp_dout(dpp, 20) << __func__ << ": ignoring " << *i << dendl; @@ -920,15 +920,15 @@ protected: ldpp_dout(dpp, 10) << __func__ << ": more recent entry found: " << *objiter->second << ", already merged" << dendl; - assert(objiter->second->version > last_divergent_update); + ceph_assert(objiter->second->version > last_divergent_update); // ensure missing has been updated appropriately if (objiter->second->is_update() || (missing.may_include_deletes && objiter->second->is_delete())) { - assert(missing.is_missing(hoid) && + ceph_assert(missing.is_missing(hoid) && missing.get_items().at(hoid).need == objiter->second->version); } else { - assert(!missing.is_missing(hoid)); + ceph_assert(!missing.is_missing(hoid)); } missing.revise_have(hoid, eversion_t()); if (rollbacker) { @@ -1016,7 +1016,7 @@ protected: for (list::const_reverse_iterator i = entries.rbegin(); i != entries.rend(); ++i) { - assert(i->can_rollback() && i->version > olog_can_rollback_to); + ceph_assert(i->can_rollback() && i->version > olog_can_rollback_to); ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid << " rolling back " << *i << dendl; if (rollbacker) @@ -1124,7 +1124,7 @@ public: const DoutPrefixProvider *dpp) { bool invalidate_stats = false; if (log && !entries.empty()) { - assert(log->head < entries.begin()->version); + ceph_assert(log->head < entries.begin()->version); } for (list::const_iterator p = entries.begin(); p != entries.end(); @@ -1292,8 +1292,8 @@ public: // legacy? 
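A few hunks back, PGLog::merge_log() asserts its precondition that the local and incoming logs overlap: log.head >= olog.tail && olog.head >= log.tail. The same check in a standalone sketch, with (epoch, version) pairs standing in for eversion_t (which also orders epoch-first):

#include <cassert>
#include <utility>

using Ver = std::pair<unsigned, unsigned>;   // (epoch, version), compared lexicographically

// Mirrors the merge_log precondition: each head must reach at least as far
// as the other log's tail, otherwise there is a gap between the two logs
// and they cannot be merged safely.
bool logs_overlap(Ver my_tail, Ver my_head, Ver other_tail, Ver other_head)
{
  return my_head >= other_tail && other_head >= my_tail;
}

int main()
{
  assert(logs_overlap({4, 10}, {4, 50}, {4, 40}, {5, 3}));    // overlapping: ok to merge
  assert(!logs_overlap({4, 10}, {4, 20}, {4, 30}, {5, 3}));   // gap: merge_log would assert
  return 0;
}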
struct stat st; int r = store->stat(ch, pgmeta_oid, &st); - assert(r == 0); - assert(st.st_size == 0); + ceph_assert(r == 0); + ceph_assert(st.st_size == 0); // will get overridden below if it had been recorded eversion_t on_disk_can_rollback_to = info.last_update; @@ -1330,14 +1330,14 @@ public: decode(oid, bp); decode(item, bp); if (item.is_delete()) { - assert(missing.may_include_deletes); + ceph_assert(missing.may_include_deletes); } missing.add(oid, item.need, item.have, item.is_delete()); } else if (p->key().substr(0, 4) == string("dup_")) { pg_log_dup_t dup; decode(dup, bp); if (!dups.empty()) { - assert(dups.back().version < dup.version); + ceph_assert(dups.back().version < dup.version); } dups.push_back(dup); } else { @@ -1346,8 +1346,8 @@ public: ldpp_dout(dpp, 20) << "read_log_and_missing " << e << dendl; if (!entries.empty()) { pg_log_entry_t last_e(entries.back()); - assert(last_e.version.version < e.version.version); - assert(last_e.version.epoch <= e.version.epoch); + ceph_assert(last_e.version.version < e.version.version); + ceph_assert(last_e.version.epoch <= e.version.epoch); } entries.push_back(e); if (log_keys_debug) @@ -1401,11 +1401,11 @@ public: << " (have " << oi.version << ")" << dendl; if (debug_verify_stored_missing) { auto miter = missing.get_items().find(i->soid); - assert(miter != missing.get_items().end()); - assert(miter->second.need == i->version); + ceph_assert(miter != missing.get_items().end()); + ceph_assert(miter->second.need == i->version); // the 'have' version is reset if an object is deleted, // then created again - assert(miter->second.have == oi.version || miter->second.have == eversion_t()); + ceph_assert(miter->second.have == oi.version || miter->second.have == eversion_t()); checked.insert(i->soid); } else { missing.add(i->soid, i->version, oi.version, i->is_delete()); @@ -1416,13 +1416,13 @@ public: if (debug_verify_stored_missing) { auto miter = missing.get_items().find(i->soid); if (i->is_delete()) { - assert(miter == missing.get_items().end() || + ceph_assert(miter == missing.get_items().end() || (miter->second.need == i->version && miter->second.have == eversion_t())); } else { - assert(miter != missing.get_items().end()); - assert(miter->second.need == i->version); - assert(miter->second.have == eversion_t()); + ceph_assert(miter != missing.get_items().end()); + ceph_assert(miter->second.need == i->version); + ceph_assert(miter->second.have == eversion_t()); } checked.insert(i->soid); } else { @@ -1440,7 +1440,7 @@ public: << i.first << " " << i.second << " last_backfill = " << info.last_backfill << dendl; - assert(0 == "invalid missing set entry found"); + ceph_assert(0 == "invalid missing set entry found"); } bufferlist bv; int r = store->getattr( @@ -1450,13 +1450,13 @@ public: bv); if (r >= 0) { object_info_t oi(bv); - assert(oi.version == i.second.have || eversion_t() == i.second.have); + ceph_assert(oi.version == i.second.have || eversion_t() == i.second.have); } else { - assert(i.second.is_delete() || eversion_t() == i.second.have); + ceph_assert(i.second.is_delete() || eversion_t() == i.second.have); } } } else { - assert(must_rebuild); + ceph_assert(must_rebuild); for (map::reverse_iterator i = divergent_priors.rbegin(); i != divergent_priors.rend(); @@ -1497,7 +1497,7 @@ public: << "), assuming it is tracker.ceph.com/issues/17916" << dendl; } else { - assert(oi.version == i->first); + ceph_assert(oi.version == i->first); } } else { ldpp_dout(dpp, 15) << "read_log_and_missing missing " << *i << dendl; diff --git 
a/src/osd/PGTransaction.h b/src/osd/PGTransaction.h index 7a15f3bdba3c1..4c1f8307ead5d 100644 --- a/src/osd/PGTransaction.h +++ b/src/osd/PGTransaction.h @@ -223,18 +223,18 @@ public: left, [&](const BufferUpdate::Write &w) -> BufferUpdateType { auto r = boost::get(&right); - assert(r && w.fadvise_flags == r->fadvise_flags); + ceph_assert(r && w.fadvise_flags == r->fadvise_flags); bufferlist bl = w.buffer; bl.append(r->buffer); return BufferUpdate::Write{bl, w.fadvise_flags}; }, [&](const BufferUpdate::Zero &z) -> BufferUpdateType { auto r = boost::get(&right); - assert(r); + ceph_assert(r); return BufferUpdate::Zero{z.len + r->len}; }, [&](const BufferUpdate::CloneRange &c) -> BufferUpdateType { - assert(0 == "violates can_merge condition"); + ceph_assert(0 == "violates can_merge condition"); return left; }); } @@ -250,7 +250,7 @@ public: private: ObjectOperation &get_object_op_for_modify(const hobject_t &hoid) { auto &op = op_map[hoid]; - assert(!op.is_delete()); + ceph_assert(!op.is_delete()); return op; } ObjectOperation &get_object_op(const hobject_t &hoid) { @@ -259,7 +259,7 @@ private: public: void add_obc( ObjectContextRef obc) { - assert(obc); + ceph_assert(obc); obc_map[obc->obs.oi.soid] = obc; } /// Sets up state for new object @@ -267,7 +267,7 @@ public: const hobject_t &hoid ) { auto &op = op_map[hoid]; - assert(op.is_none() || op.is_delete()); + ceph_assert(op.is_none() || op.is_delete()); op.init_type = ObjectOperation::Init::Create(); } @@ -277,7 +277,7 @@ public: const hobject_t &source ///< [in] obj to clone from ) { auto &op = op_map[target]; - assert(op.is_none() || op.is_delete()); + ceph_assert(op.is_none() || op.is_delete()); op.init_type = ObjectOperation::Init::Clone{source}; } @@ -286,10 +286,10 @@ public: const hobject_t &target, ///< [in] to, must not exist, be non-temp const hobject_t &source ///< [in] source (must be a temp object) ) { - assert(source.is_temp()); - assert(!target.is_temp()); + ceph_assert(source.is_temp()); + ceph_assert(!target.is_temp()); auto &op = op_map[target]; - assert(op.is_none() || op.is_delete()); + ceph_assert(op.is_none() || op.is_delete()); bool del_first = op.is_delete(); auto iter = op_map.find(source); @@ -308,11 +308,11 @@ public: ) { auto &op = get_object_op_for_modify(hoid); if (!op.is_fresh_object()) { - assert(!op.updated_snaps); + ceph_assert(!op.updated_snaps); op = ObjectOperation(); op.delete_first = true; } else { - assert(!op.is_rename()); + ceph_assert(!op.is_rename()); op_map.erase(hoid); // make it a noop if it's a fresh object } } @@ -323,9 +323,9 @@ public: const set &new_snaps ///< [in] new snaps value ) { auto &op = get_object_op(hoid); - assert(!op.updated_snaps); - assert(op.buffer_updates.empty()); - assert(!op.truncate); + ceph_assert(!op.updated_snaps); + ceph_assert(op.buffer_updates.empty()); + ceph_assert(!op.truncate); op.updated_snaps = make_pair( old_snaps, new_snaps); @@ -345,7 +345,7 @@ public: uint64_t off ///< [in] offset to truncate to ) { auto &op = get_object_op_for_modify(hoid); - assert(!op.updated_snaps); + ceph_assert(!op.updated_snaps); op.buffer_updates.erase( off, std::numeric_limits::max() - off); @@ -407,9 +407,9 @@ public: uint32_t fadvise_flags = 0 ///< [in] fadvise hint ) { auto &op = get_object_op_for_modify(hoid); - assert(!op.updated_snaps); - assert(len > 0); - assert(len == bl.length()); + ceph_assert(!op.updated_snaps); + ceph_assert(len > 0); + ceph_assert(len == bl.length()); op.buffer_updates.insert( off, len, @@ -423,7 +423,7 @@ public: uint64_t tooff ///< [in] offset ) 
{ auto &op = get_object_op_for_modify(to); - assert(!op.updated_snaps); + ceph_assert(!op.updated_snaps); op.buffer_updates.insert( tooff, len, @@ -435,7 +435,7 @@ public: uint64_t len ///< [in] amount to zero ) { auto &op = get_object_op_for_modify(hoid); - assert(!op.updated_snaps); + ceph_assert(!op.updated_snaps); op.buffer_updates.insert( off, len, @@ -567,7 +567,7 @@ public: /* Internal node: push children onto stack, remove edge, * recurse. When this node is encountered again, it'll * be a leaf */ - assert(!diter->second.empty()); + ceph_assert(!diter->second.empty()); stack.splice(stack.begin(), diter->second); dgraph.erase(diter); } diff --git a/src/osd/PrimaryLogPG.cc b/src/osd/PrimaryLogPG.cc index cfd9e19e3f209..29869c0e19e02 100644 --- a/src/osd/PrimaryLogPG.cc +++ b/src/osd/PrimaryLogPG.cc @@ -274,11 +274,11 @@ void PrimaryLogPG::OpContext::start_async_reads(PrimaryLogPG *pg) } void PrimaryLogPG::OpContext::finish_read(PrimaryLogPG *pg) { - assert(inflightreads > 0); + ceph_assert(inflightreads > 0); --inflightreads; if (async_reads_complete()) { - assert(pg->in_progress_async_reads.size()); - assert(pg->in_progress_async_reads.front().second == this); + ceph_assert(pg->in_progress_async_reads.size()); + ceph_assert(pg->in_progress_async_reads.front().second == this); pg->in_progress_async_reads.pop_front(); // Restart the op context now that all reads have been @@ -374,7 +374,7 @@ void PrimaryLogPG::on_local_recover( } if (!is_delete && pg_log.get_missing().is_missing(recovery_info.soid) && pg_log.get_missing().get_items().find(recovery_info.soid)->second.need > recovery_info.version) { - assert(is_primary()); + ceph_assert(is_primary()); const pg_log_entry_t *latest = pg_log.get_log().objects.find(recovery_info.soid)->second; if (latest->op == pg_log_entry_t::LOST_REVERT && latest->reverting_to == recovery_info.version) { @@ -387,7 +387,7 @@ void PrimaryLogPG::on_local_recover( bufferlist bl; encode(recovery_info.oi, bl, get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr)); - assert(!pool.info.is_erasure()); + ceph_assert(!pool.info.is_erasure()); t->setattr(coll, ghobject_t(recovery_info.soid), OI_ATTR, bl); if (obc) obc->attr_cache[OI_ATTR] = bl; @@ -413,9 +413,9 @@ void PrimaryLogPG::on_local_recover( obc->obs.exists = true; bool got = obc->get_recovery_read(); - assert(got); + ceph_assert(got); - assert(recovering.count(obc->obs.oi.soid)); + ceph_assert(recovering.count(obc->obs.oi.soid)); recovering[obc->obs.oi.soid] = obc; obc->obs.oi = recovery_info.oi; // may have been updated above } @@ -423,7 +423,7 @@ void PrimaryLogPG::on_local_recover( t->register_on_applied(new C_OSD_AppliedRecoveredObject(this, obc)); publish_stats_to_osd(); - assert(missing_loc.needs_recovery(hoid)); + ceph_assert(missing_loc.needs_recovery(hoid)); if (!is_delete) missing_loc.add_location(hoid, pg_whoami); release_backoffs(hoid); @@ -462,12 +462,12 @@ void PrimaryLogPG::on_global_recover( publish_stats_to_osd(); dout(10) << "pushed " << soid << " to all replicas" << dendl; map::iterator i = recovering.find(soid); - assert(i != recovering.end()); + ceph_assert(i != recovering.end()); if (i->second && i->second->rwstate.recovery_read_marker) { // recover missing won't have had an obc, but it gets filled in // during on_local_recover - assert(i->second); + ceph_assert(i->second); list requeue_list; i->second->drop_recovery_read(&requeue_list); requeue_ops(requeue_list); @@ -559,13 +559,13 @@ bool PrimaryLogPG::should_send_op( const hobject_t &hoid) { if (peer == get_primary()) return 
true; - assert(peer_info.count(peer)); + ceph_assert(peer_info.count(peer)); bool should_send = hoid.pool != (int64_t)info.pgid.pool() || hoid <= last_backfill_started || hoid <= peer_info[peer].last_backfill; if (!should_send) { - assert(is_backfill_targets(peer)); + ceph_assert(is_backfill_targets(peer)); dout(10) << __func__ << " issue_repop shipping empty opt to osd." << peer << ", object " << hoid << " beyond std::max(last_backfill_started " @@ -633,7 +633,7 @@ void PrimaryLogPG::maybe_kick_recovery( void PrimaryLogPG::wait_for_unreadable_object( const hobject_t& soid, OpRequestRef op) { - assert(is_unreadable_object(soid)); + ceph_assert(is_unreadable_object(soid)); maybe_kick_recovery(soid); waiting_for_unreadable_object[soid].push_back(op); op->mark_delayed("waiting for missing object"); @@ -649,7 +649,7 @@ bool PrimaryLogPG::is_degraded_or_backfilling_object(const hobject_t& soid) return true; if (pg_log.get_missing().get_items().count(soid)) return true; - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { @@ -696,7 +696,7 @@ bool PrimaryLogPG::is_degraded_on_async_recovery_target(const hobject_t& soid) void PrimaryLogPG::wait_for_degraded_object(const hobject_t& soid, OpRequestRef op) { - assert(is_degraded_or_backfilling_object(soid) || is_degraded_on_async_recovery_target(soid)); + ceph_assert(is_degraded_or_backfilling_object(soid) || is_degraded_on_async_recovery_target(soid)); maybe_kick_recovery(soid); waiting_for_degraded_object[soid].push_back(op); @@ -729,8 +729,8 @@ void PrimaryLogPG::block_write_on_snap_rollback( dout(20) << __func__ << ": blocking object " << oid.get_head() << " on snap promotion " << obc->obs.oi.soid << dendl; // otherwise, we'd have blocked in do_op - assert(oid.is_head()); - assert(objects_blocked_on_snap_promotion.count(oid) == 0); + ceph_assert(oid.is_head()); + ceph_assert(objects_blocked_on_snap_promotion.count(oid) == 0); objects_blocked_on_snap_promotion[oid] = obc; wait_for_blocked_object(obc->obs.oi.soid, op); } @@ -741,7 +741,7 @@ void PrimaryLogPG::block_write_on_degraded_snap( dout(20) << __func__ << ": blocking object " << snap.get_head() << " on degraded snap " << snap << dendl; // otherwise, we'd have blocked in do_op - assert(objects_blocked_on_degraded_snap.count(snap.get_head()) == 0); + ceph_assert(objects_blocked_on_degraded_snap.count(snap.get_head()) == 0); objects_blocked_on_degraded_snap[snap.get_head()] = snap.snap; wait_for_degraded_object(snap, op); } @@ -793,7 +793,7 @@ void PrimaryLogPG::maybe_force_recovery() min_version = pg_log.get_missing().get_rmissing().begin()->first; soid = pg_log.get_missing().get_rmissing().begin()->second; } - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); for (set::iterator it = acting_recovery_backfill.begin(); it != acting_recovery_backfill.end(); ++it) { @@ -947,7 +947,7 @@ int PrimaryLogPG::get_pgls_filter(bufferlist::const_iterator& iter, PGLSFilter * r = -EINVAL; return r; } else { - assert(cls); + ceph_assert(cls); } ClassHandler::ClassFilter *class_filter = cls->get_filter(filter_name); @@ -966,7 +966,7 @@ int PrimaryLogPG::get_pgls_filter(bufferlist::const_iterator& iter, PGLSFilter * } } - assert(filter); + ceph_assert(filter); int r = filter->init(iter); if (r < 0) { derr << "Error initializing filter " << type << ": " @@ -1083,7 +1083,7 @@ int PrimaryLogPG::do_command( ss << "mode must be 
'revert' or 'delete'; mark not yet implemented"; return -EINVAL; } - assert(mode == pg_log_entry_t::LOST_REVERT || + ceph_assert(mode == pg_log_entry_t::LOST_REVERT || mode == pg_log_entry_t::LOST_DELETE); if (!is_primary()) { @@ -1176,7 +1176,7 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) // NOTE: this is non-const because we modify the OSDOp.outdata in // place MOSDOp *m = static_cast(op->get_nonconst_req()); - assert(m->get_type() == CEPH_MSG_OSD_OP); + ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); dout(10) << "do_pg_op " << *m << dendl; op->mark_started(); @@ -1212,7 +1212,7 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) if (result < 0) break; - assert(filter); + ceph_assert(filter); // fall through @@ -1294,11 +1294,11 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) } } else if (mcand < lcand) { candidate = mcand; - assert(!mcand.is_max()); + ceph_assert(!mcand.is_max()); ++missing_iter; } else { candidate = lcand; - assert(!lcand.is_max()); + ceph_assert(!lcand.is_max()); ++ls_iter; } @@ -1384,7 +1384,7 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) if (result < 0) break; - assert(filter); + ceph_assert(filter); // fall through @@ -1428,7 +1428,7 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) break; } - assert(snapid == CEPH_NOSNAP || pg_log.get_missing().get_items().empty()); + ceph_assert(snapid == CEPH_NOSNAP || pg_log.get_missing().get_items().empty()); map::const_iterator missing_iter = pg_log.get_missing().get_items().lower_bound(current); @@ -1453,11 +1453,11 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) } } else if (mcand < lcand) { candidate = mcand; - assert(!mcand.is_max()); + ceph_assert(!mcand.is_max()); ++missing_iter; } else { candidate = lcand; - assert(!lcand.is_max()); + ceph_assert(!lcand.is_max()); ++ls_iter; } @@ -1654,7 +1654,7 @@ void PrimaryLogPG::calc_trim_to() } dout(10) << "calc_trim_to " << pg_trim_to << " -> " << new_trim_to << dendl; pg_trim_to = new_trim_to; - assert(pg_trim_to <= pg_log.get_head()); + ceph_assert(pg_trim_to <= pg_log.get_head()); } } @@ -1776,7 +1776,7 @@ void PrimaryLogPG::do_request( // Delay unless PGBackend says it's ok if (pgbackend->can_handle_while_inactive(op)) { bool handled = pgbackend->handle_message(op); - assert(handled); + ceph_assert(handled); return; } else { waiting_for_peered.push_back(op); @@ -1794,7 +1794,7 @@ void PrimaryLogPG::do_request( return; } - assert(is_peered() && flushes_in_progress == 0); + ceph_assert(is_peered() && flushes_in_progress == 0); if (pgbackend->handle_message(op)) return; @@ -1874,7 +1874,7 @@ void PrimaryLogPG::do_request( break; default: - assert(0 == "bad message type in do_request"); + ceph_assert(0 == "bad message type in do_request"); } } @@ -1886,7 +1886,7 @@ hobject_t PrimaryLogPG::earliest_backfill() const ++i) { pg_shard_t bt = *i; map::const_iterator iter = peer_info.find(bt); - assert(iter != peer_info.end()); + ceph_assert(iter != peer_info.end()); if (iter->second.last_backfill < e) e = iter->second.last_backfill; } @@ -1903,7 +1903,7 @@ void PrimaryLogPG::do_op(OpRequestRef& op) // NOTE: take a non-const pointer here; we must be careful not to // change anything that will break other reads on m (operator<<). 
MOSDOp *m = static_cast(op->get_nonconst_req()); - assert(m->get_type() == CEPH_MSG_OSD_OP); + ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); if (m->finish_decode()) { op->reset_desc(); // for TrackedOp m->clear_payload(); @@ -1921,7 +1921,7 @@ void PrimaryLogPG::do_op(OpRequestRef& op) << std::hex << head.get_hash() << std::dec << dendl; osd->clog->warn() << info.pgid.pgid << " does not contain " << head << " op " << *m; - assert(!cct->_conf->osd_debug_misdirected_ops); + ceph_assert(!cct->_conf->osd_debug_misdirected_ops); return; } @@ -2220,7 +2220,7 @@ void PrimaryLogPG::do_op(OpRequestRef& op) // we have to wait for the object. if (is_primary()) { // missing the specific snap we need; requeue and wait. - assert(!op->may_write()); // only happens on a read/cache + ceph_assert(!op->may_write()); // only happens on a read/cache wait_for_unreadable_object(missing_oid, op); return; } @@ -2403,7 +2403,7 @@ PrimaryLogPG::cache_result_t PrimaryLogPG::maybe_handle_manifest_detail( bool write_ordered, ObjectContextRef obc) { - assert(obc); + ceph_assert(obc); if (static_cast(op->get_req())->get_flags() & CEPH_OSD_FLAG_IGNORE_REDIRECT) { dout(20) << __func__ << ": ignoring redirect due to flag" << dendl; @@ -2454,7 +2454,7 @@ PrimaryLogPG::cache_result_t PrimaryLogPG::maybe_handle_manifest_detail( } MOSDOp *m = static_cast(op->get_nonconst_req()); - assert(m->get_type() == CEPH_MSG_OSD_OP); + ceph_assert(m->get_type() == CEPH_MSG_OSD_OP); hobject_t head = m->get_hobj(); if (is_degraded_or_backfilling_object(head)) { @@ -2491,7 +2491,7 @@ PrimaryLogPG::cache_result_t PrimaryLogPG::maybe_handle_manifest_detail( return cache_result_t::NOOP; } default: - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } return cache_result_t::NOOP; @@ -2540,7 +2540,7 @@ void PrimaryLogPG::handle_manifest_flush(hobject_t oid, ceph_tid_t tid, int r, } if (p->second->chunks == p->second->io_results.size()) { if (last_peering_reset == get_last_peering_reset()) { - assert(p->second->obc); + ceph_assert(p->second->obc); finish_manifest_flush(oid, tid, r, p->second->obc, last_offset); } } @@ -2575,7 +2575,7 @@ int PrimaryLogPG::do_manifest_flush(OpRequestRef op, ObjectContextRef obc, Flush uint64_t max_copy_size = 0, last_offset = 0; map::iterator iter = manifest.chunk_map.find(start_offset); - assert(iter != manifest.chunk_map.end()); + ceph_assert(iter != manifest.chunk_map.end()); for (;iter != manifest.chunk_map.end(); ++iter) { if (iter->second.flags == chunk_info_t::FLAG_DIRTY) { last_offset = iter->first; @@ -2648,7 +2648,7 @@ void PrimaryLogPG::finish_manifest_flush(hobject_t oid, ceph_tid_t tid, int r, } map::iterator iter = obc->obs.oi.manifest.chunk_map.find(last_offset); - assert(iter != obc->obs.oi.manifest.chunk_map.end()); + ceph_assert(iter != obc->obs.oi.manifest.chunk_map.end()); for (;iter != obc->obs.oi.manifest.chunk_map.end(); ++iter) { if (iter->second.flags == chunk_info_t::FLAG_DIRTY && last_offset < iter->first) { do_manifest_flush(p->second->op, obc, p->second, iter->first, p->second->blocking); @@ -2662,7 +2662,7 @@ void PrimaryLogPG::record_write_error(OpRequestRef op, const hobject_t &soid, MOSDOpReply *orig_reply, int r) { dout(20) << __func__ << " r=" << r << dendl; - assert(op->may_write()); + ceph_assert(op->may_write()); const osd_reqid_t &reqid = static_cast(op->get_req())->get_reqid(); mempool::osd_pglog::list entries; entries.push_back(pg_log_entry_t(pg_log_entry_t::ERROR, soid, @@ -2830,7 +2830,7 @@ PrimaryLogPG::cache_result_t 
PrimaryLogPG::maybe_handle_cache_detail( return cache_result_t::HANDLED_PROXY; } - assert(0 == "unreachable"); + ceph_assert(0 == "unreachable"); return cache_result_t::NOOP; case pg_pool_t::CACHEMODE_FORWARD: @@ -2907,7 +2907,7 @@ PrimaryLogPG::cache_result_t PrimaryLogPG::maybe_handle_cache_detail( return cache_result_t::HANDLED_PROXY; default: - assert(0 == "unrecognized cache_mode"); + ceph_assert(0 == "unrecognized cache_mode"); } return cache_result_t::NOOP; } @@ -3040,13 +3040,13 @@ struct C_ProxyChunkRead : public Context { if (last_peering_reset == pg->get_last_peering_reset()) { if (r >= 0) { if (!prdop->ops[op_index].outdata.length()) { - assert(req_total_len); + ceph_assert(req_total_len); bufferlist list; bufferptr bptr(req_total_len); list.push_back(std::move(bptr)); prdop->ops[op_index].outdata.append(list); } - assert(obj_op); + ceph_assert(obj_op); uint64_t copy_offset; if (req_offset >= prdop->ops[op_index].op.extent.offset) { copy_offset = req_offset - prdop->ops[op_index].op.extent.offset; @@ -3082,7 +3082,7 @@ void PrimaryLogPG::do_proxy_read(OpRequestRef op, ObjectContextRef obc) soid = obc->obs.oi.manifest.redirect_target; break; default: - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } } else { /* proxy */ @@ -3168,11 +3168,11 @@ void PrimaryLogPG::finish_proxy_read(hobject_t oid, ceph_tid_t tid, int r) dout(10) << __func__ << " no in_progress_proxy_ops found" << dendl; return; } - assert(q->second.size()); + ceph_assert(q->second.size()); list::iterator it = std::find(q->second.begin(), q->second.end(), prdop->op); - assert(it != q->second.end()); + ceph_assert(it != q->second.end()); OpRequestRef op = *it; q->second.erase(it); if (q->second.size() == 0) { @@ -3297,7 +3297,7 @@ void PrimaryLogPG::do_proxy_write(OpRequestRef op, ObjectContextRef obc) soid = obc->obs.oi.manifest.redirect_target; break; default: - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } } else { /* proxy */ @@ -3510,7 +3510,7 @@ void PrimaryLogPG::do_proxy_chunked_read(OpRequestRef op, ObjectContextRef obc, if (chunk_index <= req_offset) { osd_op.op.extent.offset = manifest->chunk_map[chunk_index].offset + req_offset - chunk_index; } else { - assert(0 == "chunk_index > req_offset"); + ceph_assert(0 == "chunk_index > req_offset"); } osd_op.op.extent.length = req_length; @@ -3593,8 +3593,8 @@ void PrimaryLogPG::finish_proxy_write(hobject_t oid, ceph_tid_t tid, int r) return; } ProxyWriteOpRef pwop = p->second; - assert(tid == pwop->objecter_tid); - assert(oid == pwop->soid); + ceph_assert(tid == pwop->objecter_tid); + ceph_assert(oid == pwop->soid); proxywrite_ops.erase(tid); @@ -3606,11 +3606,11 @@ void PrimaryLogPG::finish_proxy_write(hobject_t oid, ceph_tid_t tid, int r) return; } list& in_progress_op = q->second; - assert(in_progress_op.size()); + ceph_assert(in_progress_op.size()); list::iterator it = std::find(in_progress_op.begin(), in_progress_op.end(), pwop->op); - assert(it != in_progress_op.end()); + ceph_assert(it != in_progress_op.end()); in_progress_op.erase(it); if (in_progress_op.size() == 0) { in_progress_proxy_ops.erase(oid); @@ -3629,7 +3629,7 @@ void PrimaryLogPG::finish_proxy_write(hobject_t oid, ceph_tid_t tid, int r) osd->logger->inc(l_osd_tier_proxy_write); const MOSDOp *m = static_cast(pwop->op->get_req()); - assert(m != NULL); + ceph_assert(m != NULL); if (!pwop->sent_reply) { // send commit. 
@@ -3728,7 +3728,7 @@ struct PromoteFinisher : public PrimaryLogPG::OpFinisher { promote_callback->promote_results.get<1>(), promote_callback->obc); } else { - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } return 0; } @@ -3741,7 +3741,7 @@ void PrimaryLogPG::promote_object(ObjectContextRef obc, ObjectContextRef *promote_obc) { hobject_t hoid = obc ? obc->obs.oi.soid : missing_oid; - assert(hoid != hobject_t()); + ceph_assert(hoid != hobject_t()); if (write_blocked_by_scrub(hoid)) { dout(10) << __func__ << " " << hoid << " blocked by scrub" << dendl; @@ -3757,7 +3757,7 @@ void PrimaryLogPG::promote_object(ObjectContextRef obc, return; } if (!obc) { // we need to create an ObjectContext - assert(missing_oid != hobject_t()); + ceph_assert(missing_oid != hobject_t()); obc = get_object_context(missing_oid, true); } if (promote_obc) @@ -3791,7 +3791,7 @@ void PrimaryLogPG::promote_object(ObjectContextRef obc, src_hoid = obc->obs.oi.manifest.redirect_target; cb = new PromoteCallback(obc, this); } else { - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } } @@ -3803,7 +3803,7 @@ void PrimaryLogPG::promote_object(ObjectContextRef obc, obc->obs.oi.soid.snap == CEPH_NOSNAP, src_fadvise_flags, 0); - assert(obc->is_blocked()); + ceph_assert(obc->is_blocked()); if (op) wait_for_blocked_object(obc->obs.oi.soid, op); @@ -3887,7 +3887,7 @@ void PrimaryLogPG::execute_ctx(OpContext *ctx) if (result == -EINPROGRESS || pending_async_reads) { // come back later. if (pending_async_reads) { - assert(pool.info.is_erasure()); + ceph_assert(pool.info.is_erasure()); in_progress_async_reads.push_back(make_pair(op, ctx)); ctx->start_async_reads(this); } @@ -3931,7 +3931,7 @@ void PrimaryLogPG::execute_ctx(OpContext *ctx) ctx->reply->set_reply_versions(ctx->at_version, ctx->user_at_version); - assert(op->may_write() || op->may_cache()); + ceph_assert(op->may_write() || op->may_cache()); // trim log? calc_trim_to(); @@ -3950,7 +3950,7 @@ void PrimaryLogPG::execute_ctx(OpContext *ctx) dout(20) << " op order client." 
<< n << " tid " << t << " last was " << p->second << dendl; if (p->second > t) { derr << "bad op order, already applied " << p->second << " > this " << t << dendl; - assert(0 == "out of order op"); + ceph_assert(0 == "out of order op"); } p->second = t; } @@ -4101,7 +4101,7 @@ void PrimaryLogPG::do_scan( ThreadPool::TPHandle &handle) { const MOSDPGScan *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_SCAN); + ceph_assert(m->get_type() == MSG_OSD_PG_SCAN); dout(10) << "do_scan " << *m << dendl; op->mark_started(); @@ -4145,7 +4145,7 @@ void PrimaryLogPG::do_scan( pg_shard_t from = m->from; // Check that from is in backfill_targets vector - assert(is_backfill_targets(from)); + ceph_assert(is_backfill_targets(from)); BackfillInterval& bi = peer_backfill_info[from]; bi.begin = m->begin; @@ -4158,7 +4158,7 @@ void PrimaryLogPG::do_scan( if (waiting_on_backfill.erase(from)) { if (waiting_on_backfill.empty()) { - assert(peer_backfill_info.size() == backfill_targets.size()); + ceph_assert(peer_backfill_info.size() == backfill_targets.size()); finish_recovery_op(hobject_t::get_max()); } } else { @@ -4173,7 +4173,7 @@ void PrimaryLogPG::do_scan( void PrimaryLogPG::do_backfill(OpRequestRef op) { const MOSDPGBackfill *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_BACKFILL); + ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL); dout(10) << "do_backfill " << *m << dendl; op->mark_started(); @@ -4181,7 +4181,7 @@ void PrimaryLogPG::do_backfill(OpRequestRef op) switch (m->op) { case MOSDPGBackfill::OP_BACKFILL_FINISH: { - assert(cct->_conf->osd_kill_backfill_at != 1); + ceph_assert(cct->_conf->osd_kill_backfill_at != 1); MOSDPGBackfill *reply = new MOSDPGBackfill( MOSDPGBackfill::OP_BACKFILL_FINISH_ACK, @@ -4201,7 +4201,7 @@ void PrimaryLogPG::do_backfill(OpRequestRef op) case MOSDPGBackfill::OP_BACKFILL_PROGRESS: { - assert(cct->_conf->osd_kill_backfill_at != 2); + ceph_assert(cct->_conf->osd_kill_backfill_at != 2); info.set_last_backfill(m->last_backfill); info.stats = m->stats; @@ -4210,14 +4210,14 @@ void PrimaryLogPG::do_backfill(OpRequestRef op) dirty_info = true; write_if_dirty(t); int tr = osd->store->queue_transaction(ch, std::move(t), NULL); - assert(tr == 0); + ceph_assert(tr == 0); } break; case MOSDPGBackfill::OP_BACKFILL_FINISH_ACK: { - assert(is_primary()); - assert(cct->_conf->osd_kill_backfill_at != 3); + ceph_assert(is_primary()); + ceph_assert(cct->_conf->osd_kill_backfill_at != 3); finish_recovery_op(hobject_t::get_max()); } break; @@ -4228,7 +4228,7 @@ void PrimaryLogPG::do_backfill_remove(OpRequestRef op) { const MOSDPGBackfillRemove *m = static_cast( op->get_req()); - assert(m->get_type() == MSG_OSD_PG_BACKFILL_REMOVE); + ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL_REMOVE); dout(7) << __func__ << " " << m->ls << dendl; op->mark_started(); @@ -4238,7 +4238,7 @@ void PrimaryLogPG::do_backfill_remove(OpRequestRef op) remove_snap_mapped_object(t, p.first); } int r = osd->store->queue_transaction(ch, std::move(t), NULL); - assert(r == 0); + ceph_assert(r == 0); } int PrimaryLogPG::trim_object( @@ -4334,7 +4334,7 @@ int PrimaryLogPG::trim_object( << new_snaps << " ... 
deleting" << dendl; // ...from snapset - assert(p != snapset.clones.end()); + ceph_assert(p != snapset.clones.end()); snapid_t last = coid.snap; ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(last); @@ -4510,8 +4510,8 @@ int PrimaryLogPG::trim_object( void PrimaryLogPG::kick_snap_trim() { - assert(is_active()); - assert(is_primary()); + ceph_assert(is_active()); + ceph_assert(is_primary()); if (is_clean() && !snap_trimq.empty()) { if (get_osdmap()->test_flag(CEPH_OSDMAP_NOSNAPTRIM)) { dout(10) << __func__ << ": nosnaptrim set, not kicking" << dendl; @@ -4525,7 +4525,7 @@ void PrimaryLogPG::kick_snap_trim() void PrimaryLogPG::snap_trimmer_scrub_complete() { if (is_primary() && is_active() && is_clean()) { - assert(!snap_trimq.empty()); + ceph_assert(!snap_trimq.empty()); snap_trimmer_machine.process_event(ScrubComplete()); } } @@ -4536,7 +4536,7 @@ void PrimaryLogPG::snap_trimmer(epoch_t queued) return; } - assert(is_primary()); + ceph_assert(is_primary()); dout(10) << "snap_trimmer posting" << dendl; snap_trimmer_machine.process_event(DoSnapWork()); @@ -4671,7 +4671,7 @@ int PrimaryLogPG::do_tmapup_slow(OpContext *ctx, bufferlist::const_iterator& bp, auto p = bl.cbegin(); decode(header, p); decode(m, p); - assert(p.end()); + ceph_assert(p.end()); } // do the update(s) @@ -4899,7 +4899,7 @@ int PrimaryLogPG::do_tmapup(OpContext *ctx, bufferlist::const_iterator& bp, OSDO decode(h, tp); map d; decode(d, tp); - assert(tp.end()); + ceph_assert(tp.end()); dout(0) << " **** debug sanity check, looks ok ****" << dendl; } @@ -5025,7 +5025,7 @@ void PrimaryLogPG::maybe_create_new_object( if (!obs.exists) { ctx->delta_stats.num_objects++; obs.exists = true; - assert(!obs.oi.is_whiteout()); + ceph_assert(!obs.oi.is_whiteout()); obs.oi.new_object(); if (!ignore_transaction) ctx->op_t->create(obs.oi.soid); @@ -5799,7 +5799,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) ClassHandler::ClassData *cls; result = osd->class_handler->open_class(cname, &cls); - assert(result == 0); // init_op_flags() already verified this works. + ceph_assert(result == 0); // init_op_flags() already verified this works. 
ClassHandler::ClassMethod *method = cls->get_method(mname.c_str()); if (!method) { @@ -5945,7 +5945,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) // Check special return value which has set missing_return if (result == -ENOENT) { dout(10) << __func__ << " CEPH_OSD_OP_CACHE_FLUSH got ENOENT" << dendl; - assert(!missing.is_min()); + ceph_assert(!missing.is_min()); wait_for_unreadable_object(missing, ctx->op); // Error code which is used elsewhere when wait_for_unreadable_object() is used result = -EAGAIN; @@ -6125,8 +6125,8 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) dout(20) << "key cookie=" << oi_iter->first.first << " entity=" << oi_iter->first.second << " " << oi_iter->second << dendl; - assert(oi_iter->first.first == oi_iter->second.cookie); - assert(oi_iter->first.second.is_client()); + ceph_assert(oi_iter->first.first == oi_iter->second.cookie); + ceph_assert(oi_iter->first.second.is_client()); watch_item_t wi(oi_iter->first.second, oi_iter->second.cookie, oi_iter->second.timeout_seconds, oi_iter->second.addr); @@ -6149,7 +6149,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) if (!ssc) { ssc = ctx->obc->ssc = get_snapset_context(soid, false); } - assert(ssc); + ceph_assert(ssc); dout(20) << " snapset " << ssc->snapset << dendl; int clonecount = ssc->snapset.clones.size(); @@ -6212,7 +6212,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) break; } if (!ctx->obc->obs.oi.is_whiteout()) { - assert(obs.exists); + ceph_assert(obs.exists); clone_info ci; ci.cloneid = CEPH_NOSNAP; @@ -6474,7 +6474,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) if (result < 0) break; - assert(op.extent.length); + ceph_assert(op.extent.length); if (obs.exists && !oi.is_whiteout()) { t->zero(soid, op.extent.offset, op.extent.length); interval_set ch; @@ -6541,7 +6541,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) break; if (op.extent.truncate_seq) { - assert(op.extent.offset == op.extent.truncate_size); + ceph_assert(op.extent.offset == op.extent.truncate_size); if (op.extent.truncate_seq <= oi.truncate_seq) { dout(10) << " truncate seq " << op.extent.truncate_seq << " <= current " << oi.truncate_seq << ", no-op" << dendl; @@ -6791,7 +6791,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) // finish if (op_finisher) { result = op_finisher->execute(); - assert(result == 0); + ceph_assert(result == 0); } if (!oi.has_manifest() && !oi.manifest.is_redirect()) @@ -6910,7 +6910,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) } else { if (op_finisher) { result = op_finisher->execute(); - assert(result == 0); + ceph_assert(result == 0); } chunk_info_t chunk_info; @@ -6973,7 +6973,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) src_hoid = obs.oi.manifest.redirect_target; cb = new PromoteManifestCallback(ctx->obc, this, ctx); } else { - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } ctx->op_finishers[ctx->current_osd_subop_num].reset( new PromoteFinisher(cb)); @@ -6990,7 +6990,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) result = -EINPROGRESS; } else { result = op_finisher->execute(); - assert(result == 0); + ceph_assert(result == 0); ctx->op_finishers.erase(ctx->current_osd_subop_num); } } @@ -7038,7 +7038,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) } }); } else { - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } oi.clear_flag(object_info_t::FLAG_MANIFEST); 
@@ -7185,14 +7185,14 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) map m; decode(header, bp); decode(m, bp); - assert(bp.end()); + ceph_assert(bp.end()); bufferlist newbl; encode(header, newbl); encode(m, newbl); newop.indata = newbl; } result = do_osd_ops(ctx, nops); - assert(result == 0); + ceph_assert(result == 0); } break; @@ -7239,7 +7239,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) ObjectMap::ObjectMapIterator iter = osd->store->get_omap_iterator( ch, ghobject_t(soid) ); - assert(iter); + ceph_assert(iter); iter->upper_bound(start_after); for (num = 0; iter->valid(); ++num, iter->next(false)) { if (num >= max_return || @@ -7603,7 +7603,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector& ops) } else { // finish result = op_finisher->execute(); - assert(result == 0); + ceph_assert(result == 0); // COPY_FROM cannot be executed multiple times -- it must restart ctx->op_finishers.erase(ctx->current_osd_subop_num); @@ -7734,7 +7734,7 @@ inline int PrimaryLogPG::_delete_oid( ctx->delta_stats.num_wr++; if (soid.is_snap()) { - assert(ctx->obc->ssc->snapset.clone_overlap.count(soid.snap)); + ceph_assert(ctx->obc->ssc->snapset.clone_overlap.count(soid.snap)); ctx->delta_stats.num_bytes -= ctx->obc->ssc->snapset.get_clone_bytes(soid.snap); } else { ctx->delta_stats.num_bytes -= oi.size; @@ -7801,7 +7801,7 @@ int PrimaryLogPG::_rollback_to(OpContext *ctx, ceph_osd_op& op) &rollback_to, false, false, &missing_oid); if (ret == -EAGAIN) { /* clone must be missing */ - assert(is_degraded_or_backfilling_object(missing_oid) || is_degraded_on_async_recovery_target(missing_oid)); + ceph_assert(is_degraded_or_backfilling_object(missing_oid) || is_degraded_on_async_recovery_target(missing_oid)); dout(20) << "_rollback_to attempted to roll back to a missing or backfilling clone " << missing_oid << " (requested snapid: ) " << snapid << dendl; block_write_on_degraded_snap(missing_oid, ctx->op); @@ -7832,16 +7832,16 @@ int PrimaryLogPG::_rollback_to(OpContext *ctx, ceph_osd_op& op) case cache_result_t::NOOP: break; case cache_result_t::BLOCKED_PROMOTE: - assert(promote_obc); + ceph_assert(promote_obc); block_write_on_snap_rollback(soid, promote_obc, ctx->op); return -EAGAIN; case cache_result_t::BLOCKED_FULL: block_write_on_full_cache(soid, ctx->op); return -EAGAIN; case cache_result_t::REPLIED_WITH_EAGAIN: - assert(0 == "this can't happen, no rollback on replica"); + ceph_assert(0 == "this can't happen, no rollback on replica"); default: - assert(0 == "must promote was set, other values are not valid"); + ceph_assert(0 == "must promote was set, other values are not valid"); return -EAGAIN; } } @@ -7860,7 +7860,7 @@ int PrimaryLogPG::_rollback_to(OpContext *ctx, ceph_osd_op& op) } } else if (ret) { // ummm....huh? It *can't* return anything else at time of writing. - assert(0 == "unexpected error code in _rollback_to"); + ceph_assert(0 == "unexpected error code in _rollback_to"); } else { //we got our context, let's use it to do the rollback! 
hobject_t& rollback_to_sobject = rollback_to->obs.oi.soid; if (is_degraded_or_backfilling_object(rollback_to_sobject) || @@ -7888,7 +7888,7 @@ int PrimaryLogPG::_rollback_to(OpContext *ctx, ceph_osd_op& op) map >::iterator iter = snapset.clone_overlap.lower_bound(snapid); - assert(iter != snapset.clone_overlap.end()); + ceph_assert(iter != snapset.clone_overlap.end()); interval_set overlaps = iter->second; for ( ; iter != snapset.clone_overlap.end(); @@ -7950,7 +7950,7 @@ void PrimaryLogPG::make_writeable(OpContext *ctx) SnapContext& snapc = ctx->snapc; // clone? - assert(soid.snap == CEPH_NOSNAP); + ceph_assert(soid.snap == CEPH_NOSNAP); dout(20) << "make_writeable " << soid << " snapset=" << ctx->new_snapset << " snapc=" << snapc << dendl; @@ -7959,7 +7959,7 @@ void PrimaryLogPG::make_writeable(OpContext *ctx) // we will mark the object dirty if (ctx->undirty && was_dirty) { dout(20) << " clearing DIRTY flag" << dendl; - assert(ctx->new_obs.oi.is_dirty()); + ceph_assert(ctx->new_obs.oi.is_dirty()); ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY); --ctx->delta_stats.num_objects_dirty; osd->logger->inc(l_osd_tier_clean); @@ -8029,7 +8029,7 @@ void PrimaryLogPG::make_writeable(OpContext *ctx) coid, ctx->clone_obc, ctx->op); - assert(got); + ceph_assert(got); dout(20) << " got greedy write on clone_obc " << *ctx->clone_obc << dendl; } else { snap_oi = &static_snap_oi; @@ -8179,7 +8179,7 @@ void PrimaryLogPG::do_osd_op_effects(OpContext *ctx, const ConnectionRef& conn) // disconnects first complete_disconnect_watches(ctx->obc, ctx->watch_disconnects); - assert(conn); + ceph_assert(conn); auto session = conn->get_priv(); if (!session) @@ -8282,7 +8282,7 @@ hobject_t PrimaryLogPG::get_temp_recovery_object( int PrimaryLogPG::prepare_transaction(OpContext *ctx) { - assert(!ctx->ops->empty()); + ceph_assert(!ctx->ops->empty()); // valid snap context? if (!ctx->snapc.is_valid()) { @@ -8471,7 +8471,7 @@ void PrimaryLogPG::apply_stats( void PrimaryLogPG::complete_read_ctx(int result, OpContext *ctx) { const MOSDOp *m = static_cast(ctx->op->get_req()); - assert(ctx->async_reads_complete()); + ceph_assert(ctx->async_reads_complete()); for (vector::iterator p = ctx->ops->begin(); p != ctx->ops->end() && result >= 0; ++p) { @@ -8549,8 +8549,8 @@ struct C_CopyFrom_AsyncReadCb : public Context { return; } - assert(len > 0); - assert(len <= reply_obj.data.length()); + ceph_assert(len > 0); + ceph_assert(len <= reply_obj.data.length()); bufferlist bl; bl.substr_of(reply_obj.data, 0, len); reply_obj.data.swap(bl); @@ -8611,10 +8611,10 @@ int PrimaryLogPG::do_copy_get(OpContext *ctx, bufferlist::const_iterator& bp, // size, mtime reply_obj.size = oi.size; reply_obj.mtime = oi.mtime; - assert(obc->ssc); + ceph_assert(obc->ssc); if (soid.snap < CEPH_NOSNAP) { auto p = obc->ssc->snapset.clone_snaps.find(soid.snap); - assert(p != obc->ssc->snapset.clone_snaps.end()); // warn? + ceph_assert(p != obc->ssc->snapset.clone_snaps.end()); // warn? 
reply_obj.snaps = p->second; } else { reply_obj.snap_seq = obc->ssc->snapset.seq; @@ -8679,7 +8679,7 @@ int PrimaryLogPG::do_copy_get(OpContext *ctx, bufferlist::const_iterator& bp, cursor.data_complete = true; dout(20) << " got data" << dendl; } - assert(cursor.data_offset <= oi.size); + ceph_assert(cursor.data_offset <= oi.size); } // omap @@ -8688,7 +8688,7 @@ int PrimaryLogPG::do_copy_get(OpContext *ctx, bufferlist::const_iterator& bp, cursor.omap_complete = true; } else { if (left > 0 && !cursor.omap_complete) { - assert(cursor.data_complete); + ceph_assert(cursor.data_complete); if (cursor.omap_offset.empty()) { osd->store->omap_get_header(ch, ghobject_t(oi.soid), &reply_obj.omap_header); @@ -8696,7 +8696,7 @@ int PrimaryLogPG::do_copy_get(OpContext *ctx, bufferlist::const_iterator& bp, bufferlist omap_data; ObjectMap::ObjectMapIterator iter = osd->store->get_omap_iterator(ch, ghobject_t(oi.soid)); - assert(iter); + ceph_assert(iter); iter->upper_bound(cursor.omap_offset); for (; iter->valid(); iter->next(false)) { ++omap_keys; @@ -8783,7 +8783,7 @@ void PrimaryLogPG::start_copy(CopyCallback *cb, ObjectContextRef obc, << (mirror_snapset ? " mirror_snapset" : "") << dendl; - assert(!mirror_snapset || src.snap == CEPH_NOSNAP); + ceph_assert(!mirror_snapset || src.snap == CEPH_NOSNAP); // cancel a previous in-progress copy? if (copy_ops.count(dest)) { @@ -8810,7 +8810,7 @@ void PrimaryLogPG::start_copy(CopyCallback *cb, ObjectContextRef obc, auto p = obc->obs.oi.manifest.chunk_map.begin(); _copy_some_manifest(obc, cop, p->first); } else { - assert(0 == "unrecognized manifest type"); + ceph_assert(0 == "unrecognized manifest type"); } } } @@ -8835,7 +8835,7 @@ void PrimaryLogPG::_copy_some(ObjectContextRef obc, CopyOpRef cop) if (cop->cursor.is_initial() && cop->mirror_snapset) { // list snaps too. - assert(cop->src.snap == CEPH_NOSNAP); + ceph_assert(cop->src.snap == CEPH_NOSNAP); ObjectOperation op; op.list_snaps(&cop->results.snapset, NULL); ceph_tid_t tid = osd->objecter->read(cop->src.oid, cop->oloc, op, @@ -8850,7 +8850,7 @@ void PrimaryLogPG::_copy_some(ObjectContextRef obc, CopyOpRef cop) } else { // we should learn the version after the first chunk, if we didn't know // it already! - assert(cop->cursor.is_initial()); + ceph_assert(cop->cursor.is_initial()); } op.copy_get(&cop->cursor, get_copy_chunk_size(), &cop->results.object_size, &cop->results.mtime, @@ -8949,7 +8949,7 @@ void PrimaryLogPG::_copy_some_manifest(ObjectContextRef obc, CopyOpRef cop, uint } else { // we should learn the version after the first chunk, if we didn't know // it already! 
- assert(cop->cursor.is_initial()); + ceph_assert(cop->cursor.is_initial()); } op.set_last_op_flags(cop->src_obj_fadvise_flags); @@ -9002,7 +9002,7 @@ void PrimaryLogPG::process_copy_chunk(hobject_t oid, ceph_tid_t tid, int r) if (r < 0) goto out; - assert(cop->rval >= 0); + ceph_assert(cop->rval >= 0); if (oid.snap < CEPH_NOSNAP && !cop->results.snaps.empty()) { // verify snap hasn't been deleted @@ -9027,7 +9027,7 @@ void PrimaryLogPG::process_copy_chunk(hobject_t oid, ceph_tid_t tid, int r) } } - assert(cop->rval >= 0); + ceph_assert(cop->rval >= 0); if (!cop->temp_cursor.data_complete) { cop->results.data_digest = cop->data.crc32c(cop->results.data_digest); @@ -9056,7 +9056,7 @@ void PrimaryLogPG::process_copy_chunk(hobject_t oid, ceph_tid_t tid, int r) if (!cop->cursor.is_complete()) { // write out what we have so far if (cop->temp_cursor.is_initial()) { - assert(!cop->results.started_temp_obj); + ceph_assert(!cop->results.started_temp_obj); cop->results.started_temp_obj = true; cop->results.temp_oid = generate_temp_object(oid); dout(20) << __func__ << " using temp " << cop->results.temp_oid << dendl; @@ -9311,7 +9311,7 @@ void PrimaryLogPG::_write_copy_chunk(CopyOpRef cop, PGTransaction *t) t->create(cop->results.temp_oid); } if (!cop->temp_cursor.data_complete) { - assert(cop->data.length() + cop->temp_cursor.data_offset == + ceph_assert(cop->data.length() + cop->temp_cursor.data_offset == cop->cursor.data_offset); if (pool.info.required_alignment() && !cop->cursor.data_complete) { @@ -9319,7 +9319,7 @@ void PrimaryLogPG::_write_copy_chunk(CopyOpRef cop, PGTransaction *t) * Trim off the unaligned bit at the end, we'll adjust cursor.data_offset * to pick it up on the next pass. */ - assert(cop->temp_cursor.data_offset % + ceph_assert(cop->temp_cursor.data_offset % pool.info.required_alignment() == 0); if (cop->data.length() % pool.info.required_alignment() != 0) { uint64_t to_trim = @@ -9328,7 +9328,7 @@ void PrimaryLogPG::_write_copy_chunk(CopyOpRef cop, PGTransaction *t) bl.substr_of(cop->data, 0, cop->data.length() - to_trim); cop->data.swap(bl); cop->cursor.data_offset -= to_trim; - assert(cop->data.length() + cop->temp_cursor.data_offset == + ceph_assert(cop->data.length() + cop->temp_cursor.data_offset == cop->cursor.data_offset); } } @@ -9359,8 +9359,8 @@ void PrimaryLogPG::_write_copy_chunk(CopyOpRef cop, PGTransaction *t) } } } else { - assert(cop->omap_header.length() == 0); - assert(cop->omap_data.length() == 0); + ceph_assert(cop->omap_header.length() == 0); + ceph_assert(cop->omap_data.length() == 0); } cop->temp_cursor = cop->cursor; } @@ -9476,7 +9476,7 @@ void PrimaryLogPG::finish_promote(int r, CopyResults *results, if (r < 0 && results->started_temp_obj) { dout(10) << __func__ << " abort; will clean up partial work" << dendl; ObjectContextRef tempobc = get_object_context(results->temp_oid, false); - assert(tempobc); + ceph_assert(tempobc); OpContextUPtr ctx = simple_opc_create(tempobc); ctx->op_t->remove(results->temp_oid); simple_opc_submit(std::move(ctx)); @@ -9490,7 +9490,7 @@ void PrimaryLogPG::finish_promote(int r, CopyResults *results, << dendl; hobject_t head(soid.get_head()); ObjectContextRef obc = get_object_context(head, false); - assert(obc); + ceph_assert(obc); OpContextUPtr tctx = simple_opc_create(obc); tctx->at_version = get_next_version(); @@ -9517,7 +9517,7 @@ void PrimaryLogPG::finish_promote(int r, CopyResults *results, if (!tctx->lock_manager.take_write_lock( head, obc)) { - assert(0 == "problem!"); + ceph_assert(0 == "problem!"); } dout(20) << 
__func__ << " took lock on obc, " << obc->rwstate << dendl; @@ -9529,7 +9529,7 @@ void PrimaryLogPG::finish_promote(int r, CopyResults *results, bool whiteout = false; if (r == -ENOENT) { - assert(soid.snap == CEPH_NOSNAP); // snap case is above + ceph_assert(soid.snap == CEPH_NOSNAP); // snap case is above dout(10) << __func__ << " whiteout " << soid << dendl; whiteout = true; } @@ -9599,11 +9599,11 @@ void PrimaryLogPG::finish_promote(int r, CopyResults *results, tctx->new_obs.oi.truncate_size = results->truncate_size; if (soid.snap != CEPH_NOSNAP) { - assert(obc->ssc->snapset.clone_snaps.count(soid.snap)); - assert(obc->ssc->snapset.clone_size.count(soid.snap)); - assert(obc->ssc->snapset.clone_size[soid.snap] == + ceph_assert(obc->ssc->snapset.clone_snaps.count(soid.snap)); + ceph_assert(obc->ssc->snapset.clone_size.count(soid.snap)); + ceph_assert(obc->ssc->snapset.clone_size[soid.snap] == results->object_size); - assert(obc->ssc->snapset.clone_overlap.count(soid.snap)); + ceph_assert(obc->ssc->snapset.clone_overlap.count(soid.snap)); tctx->delta_stats.num_bytes += obc->ssc->snapset.get_clone_bytes(soid.snap); } else { @@ -9612,7 +9612,7 @@ void PrimaryLogPG::finish_promote(int r, CopyResults *results, } if (results->mirror_snapset) { - assert(tctx->new_obs.oi.soid.snap == CEPH_NOSNAP); + ceph_assert(tctx->new_obs.oi.soid.snap == CEPH_NOSNAP); tctx->new_snapset.from_snap_set( results->snapset, get_osdmap()->require_osd_release < CEPH_RELEASE_LUMINOUS); @@ -9623,7 +9623,7 @@ void PrimaryLogPG::finish_promote(int r, CopyResults *results, if (!tctx->lock_manager.take_write_lock( obc->obs.oi.soid, obc)) { - assert(0 == "problem!"); + ceph_assert(0 == "problem!"); } dout(20) << __func__ << " took lock on obc, " << obc->rwstate << dendl; @@ -9792,7 +9792,7 @@ int PrimaryLogPG::start_flush( if (p != snapset.clones.rend()) { hobject_t next = soid; next.snap = *p; - assert(next.snap < soid.snap); + ceph_assert(next.snap < soid.snap); if (pg_log.get_missing().is_missing(next)) { dout(10) << __func__ << " missing clone is " << next << dendl; if (pmissing) @@ -9889,7 +9889,7 @@ int PrimaryLogPG::start_flush( } else { snapid_t min_included_snap; auto p = snapset.clone_snaps.find(soid.snap); - assert(p != snapset.clone_snaps.end()); + ceph_assert(p != snapset.clone_snaps.end()); min_included_snap = p->second.back(); snapc = snapset.get_ssc_as_of(min_included_snap - 1); } @@ -10096,7 +10096,7 @@ int PrimaryLogPG::try_flush_mark_clean(FlushOpRef fop) oid, obc, op); - assert(!locked); + ceph_assert(!locked); } close_op_ctx(ctx.release()); return -EAGAIN; // will retry @@ -10121,7 +10121,7 @@ int PrimaryLogPG::try_flush_mark_clean(FlushOpRef fop) ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY); --ctx->delta_stats.num_objects_dirty; if (fop->obc->obs.oi.has_manifest()) { - assert(obc->obs.oi.manifest.is_chunked()); + ceph_assert(obc->obs.oi.manifest.is_chunked()); PGTransaction* t = ctx->op_t.get(); uint64_t chunks_size = 0; for (auto &p : ctx->new_obs.oi.manifest.chunk_map) { @@ -10261,8 +10261,8 @@ void PrimaryLogPG::repop_all_committed(RepGather *repop) void PrimaryLogPG::op_applied(const eversion_t &applied_version) { dout(10) << "op_applied version " << applied_version << dendl; - assert(applied_version != eversion_t()); - assert(applied_version <= info.last_update); + ceph_assert(applied_version != eversion_t()); + ceph_assert(applied_version <= info.last_update); last_update_applied = applied_version; if (is_primary()) { if (scrubber.active) { @@ -10270,7 +10270,7 @@ void 
PrimaryLogPG::op_applied(const eversion_t &applied_version) requeue_scrub(ops_blocked_by_scrub()); } } else { - assert(scrubber.start == scrubber.end); + ceph_assert(scrubber.start == scrubber.end); } } } @@ -10297,7 +10297,7 @@ void PrimaryLogPG::eval_repop(RepGather *repop) // send dup commits, in order auto it = waiting_for_ondisk.find(repop->v); if (it != waiting_for_ondisk.end()) { - assert(waiting_for_ondisk.begin()->first == repop->v); + ceph_assert(waiting_for_ondisk.begin()->first == repop->v); for (list >::iterator i = it->second.begin(); i != it->second.end(); @@ -10312,7 +10312,7 @@ void PrimaryLogPG::eval_repop(RepGather *repop) calc_min_last_complete_ondisk(); dout(10) << " removing " << *repop << dendl; - assert(!repop_queue.empty()); + ceph_assert(!repop_queue.empty()); dout(20) << " q front is " << *repop_queue.front() << dendl; if (repop_queue.front() == repop) { RepGather *to_remove = nullptr; @@ -10362,7 +10362,7 @@ void PrimaryLogPG::issue_repop(RepGather *repop, OpContext *ctx) Context *on_all_commit = new C_OSD_RepopCommit(this, repop); if (!(ctx->log.empty())) { - assert(ctx->at_version >= projected_last_update); + ceph_assert(ctx->at_version >= projected_last_update); projected_last_update = ctx->at_version; } for (auto &&entry: ctx->log) { @@ -10518,11 +10518,11 @@ void PrimaryLogPG::submit_log_entries( int r) { dout(10) << __func__ << " " << entries << dendl; - assert(is_primary()); + ceph_assert(is_primary()); eversion_t version; if (!entries.empty()) { - assert(entries.rbegin()->version >= projected_last_update); + ceph_assert(entries.rbegin()->version >= projected_last_update); version = projected_last_update = entries.rbegin()->version; } @@ -10552,10 +10552,10 @@ void PrimaryLogPG::submit_log_entries( ++i) { pg_shard_t peer(*i); if (peer == pg_whoami) continue; - assert(peer_missing.count(peer)); - assert(peer_info.count(peer)); + ceph_assert(peer_missing.count(peer)); + ceph_assert(peer_info.count(peer)); if (get_osdmap()->require_osd_release >= CEPH_RELEASE_JEWEL) { - assert(repop); + ceph_assert(repop); MOSDPGUpdateLogMissing *m = new MOSDPGUpdateLogMissing( entries, spg_t(info.pgid.pgid, i->shard), @@ -10600,9 +10600,9 @@ void PrimaryLogPG::submit_log_entries( pg->lock(); if (!pg->pg_has_reset_since(epoch)) { auto it = pg->log_entry_update_waiting_on.find(rep_tid); - assert(it != pg->log_entry_update_waiting_on.end()); + ceph_assert(it != pg->log_entry_update_waiting_on.end()); auto it2 = it->second.waiting_on.find(pg->pg_whoami); - assert(it2 != it->second.waiting_on.end()); + ceph_assert(it2 != it->second.waiting_on.end()); it->second.waiting_on.erase(it2); if (it->second.waiting_on.empty()) { pg->repop_all_committed(it->second.repop.get()); @@ -10615,7 +10615,7 @@ void PrimaryLogPG::submit_log_entries( t.register_on_commit( new OnComplete{this, rep_tid, get_osdmap()->get_epoch()}); int r = osd->store->queue_transaction(ch, std::move(t), NULL); - assert(r == 0); + ceph_assert(r == 0); op_applied(info.last_update); }); @@ -10685,7 +10685,7 @@ void PrimaryLogPG::check_blacklisted_obc_watchers(ObjectContextRef obc) dout(30) << "watch: Check entity_addr_t " << ea << dendl; if (get_osdmap()->is_blacklisted(ea)) { dout(10) << "watch: Found blacklisted watcher for " << ea << dendl; - assert(j->second->get_pg() == this); + ceph_assert(j->second->get_pg() == this); j->second->unregister_cb(); handle_watch_timeout(j->second); } @@ -10694,9 +10694,9 @@ void PrimaryLogPG::check_blacklisted_obc_watchers(ObjectContextRef obc) void 
PrimaryLogPG::populate_obc_watchers(ObjectContextRef obc) { - assert(is_active()); + ceph_assert(is_active()); auto it_objects = pg_log.get_log().objects.find(obc->obs.oi.soid); - assert((recovering.count(obc->obs.oi.soid) || + ceph_assert((recovering.count(obc->obs.oi.soid) || !is_missing_object(obc->obs.oi.soid)) || (it_objects != pg_log.get_log().objects.end() && // or this is a revert... see recover_primary() it_objects->second->op == @@ -10705,7 +10705,7 @@ void PrimaryLogPG::populate_obc_watchers(ObjectContextRef obc) obc->obs.oi.version)); dout(10) << "populate_obc_watchers " << obc->obs.oi.soid << dendl; - assert(obc->watchers.empty()); + ceph_assert(obc->watchers.empty()); // populate unconnected_watchers for (map, watch_info_t>::iterator p = obc->obs.oi.watchers.begin(); @@ -10797,7 +10797,7 @@ ObjectContextRef PrimaryLogPG::create_object_context(const object_info_t& oi, SnapSetContext *ssc) { ObjectContextRef obc(object_contexts.lookup_or_create(oi.soid)); - assert(obc->destructor_callback == NULL); + ceph_assert(obc->destructor_callback == NULL); obc->destructor_callback = new C_PG_ObjectContext(this, obc.get()); obc->obs.oi = oi; obc->obs.exists = false; @@ -10816,7 +10816,7 @@ ObjectContextRef PrimaryLogPG::get_object_context( const map *attrs) { auto it_objects = pg_log.get_log().objects.find(soid); - assert( + ceph_assert( attrs || !pg_log.get_missing().is_missing(soid) || // or this is a revert... see recover_primary() (it_objects != pg_log.get_log().objects.end() && @@ -10834,7 +10834,7 @@ ObjectContextRef PrimaryLogPG::get_object_context( bufferlist bv; if (attrs) { auto it_oi = attrs->find(OI_ATTR); - assert(it_oi != attrs->end()); + ceph_assert(it_oi != attrs->end()); bv = it_oi->second; } else { int r = pgbackend->objects_get_attr(soid, OI_ATTR, &bv); @@ -10853,7 +10853,7 @@ ObjectContextRef PrimaryLogPG::get_object_context( object_info_t oi(soid); SnapSetContext *ssc = get_snapset_context( soid, true, 0, false); - assert(ssc); + ceph_assert(ssc); obc = create_object_context(oi, ssc); dout(10) << __func__ << ": " << obc << " " << soid << " " << obc->rwstate @@ -10873,7 +10873,7 @@ ObjectContextRef PrimaryLogPG::get_object_context( return ObjectContextRef(); // -ENOENT! } - assert(oi.soid.pool == (int64_t)info.pgid.pool()); + ceph_assert(oi.soid.pool == (int64_t)info.pgid.pool()); obc = object_contexts.lookup_or_create(oi.soid); obc->destructor_callback = new C_PG_ObjectContext(this, obc.get()); @@ -10894,7 +10894,7 @@ ObjectContextRef PrimaryLogPG::get_object_context( int r = pgbackend->objects_get_attrs( soid, &obc->attr_cache); - assert(r == 0); + ceph_assert(r == 0); } } @@ -10951,7 +10951,7 @@ int PrimaryLogPG::find_object_context(const hobject_t& oid, hobject_t *pmissing) { FUNCTRACE(cct); - assert(oid.pool == static_cast(info.pgid.pool())); + ceph_assert(oid.pool == static_cast(info.pgid.pool())); // want the head? 
if (oid.snap == CEPH_NOSNAP) { ObjectContextRef obc = get_object_context(oid, can_create); @@ -11060,7 +11060,7 @@ int PrimaryLogPG::find_object_context(const hobject_t& oid, if (!obc->ssc) obc->ssc = ssc; else { - assert(ssc == obc->ssc); + ceph_assert(ssc == obc->ssc); put_snapset_context(ssc); } *pobc = obc; @@ -11110,7 +11110,7 @@ int PrimaryLogPG::find_object_context(const hobject_t& oid, if (!obc->ssc) { obc->ssc = ssc; } else { - assert(obc->ssc == ssc); + ceph_assert(obc->ssc == ssc); put_snapset_context(ssc); } ssc = 0; @@ -11121,10 +11121,10 @@ int PrimaryLogPG::find_object_context(const hobject_t& oid, << dendl; snapid_t first, last; auto p = obc->ssc->snapset.clone_snaps.find(soid.snap); - assert(p != obc->ssc->snapset.clone_snaps.end()); + ceph_assert(p != obc->ssc->snapset.clone_snaps.end()); if (p->second.empty()) { dout(1) << __func__ << " " << soid << " empty snapset -- DNE" << dendl; - assert(!cct->_conf->osd_debug_verify_snaps); + ceph_assert(!cct->_conf->osd_debug_verify_snaps); return -ENOENT; } first = p->second.back(); @@ -11152,7 +11152,7 @@ void PrimaryLogPG::add_object_context_to_pg_stat(ObjectContextRef obc, pg_stat_t object_info_t& oi = obc->obs.oi; dout(10) << __func__ << " " << oi.soid << dendl; - assert(!oi.soid.is_snapdir()); + ceph_assert(!oi.soid.is_snapdir()); object_stat_sum_t stat; stat.num_objects++; @@ -11172,7 +11172,7 @@ void PrimaryLogPG::add_object_context_to_pg_stat(ObjectContextRef obc, pg_stat_t if (!obc->ssc) obc->ssc = get_snapset_context(oi.soid, false); - assert(obc->ssc); + ceph_assert(obc->ssc); stat.num_bytes += obc->ssc->snapset.get_clone_bytes(oi.soid.snap); } else { stat.num_bytes += oi.size; @@ -11201,7 +11201,7 @@ void PrimaryLogPG::kick_object_context_blocked(ObjectContextRef obc) map::iterator i = objects_blocked_on_snap_promotion.find(obc->obs.oi.soid.get_head()); if (i != objects_blocked_on_snap_promotion.end()) { - assert(i->second == obc); + ceph_assert(i->second == obc); objects_blocked_on_snap_promotion.erase(i); } @@ -11238,7 +11238,7 @@ SnapSetContext *PrimaryLogPG::get_snapset_context( return NULL; } else { auto it_ss = attrs->find(SS_ATTR); - assert(it_ss != attrs->end()); + ceph_assert(it_ss != attrs->end()); bv = it_ss->second; } ssc = new SnapSetContext(oid.get_snapdir()); @@ -11256,7 +11256,7 @@ SnapSetContext *PrimaryLogPG::get_snapset_context( ssc->exists = false; } } - assert(ssc); + ceph_assert(ssc); ssc->ref++; return ssc; } @@ -11294,7 +11294,7 @@ int PrimaryLogPG::recover_missing( if (missing_loc.is_deleted(soid)) { start_recovery_op(soid); - assert(!recovering.count(soid)); + ceph_assert(!recovering.count(soid)); recovering.insert(make_pair(soid, ObjectContextRef())); epoch_t cur_epoch = get_osdmap()->get_epoch(); remove_missing_object(soid, v, new FunctionContext( @@ -11349,10 +11349,10 @@ int PrimaryLogPG::recover_missing( head, false, 0); - assert(head_obc); + ceph_assert(head_obc); } start_recovery_op(soid); - assert(!recovering.count(soid)); + ceph_assert(!recovering.count(soid)); recovering.insert(make_pair(soid, obc)); int r = pgbackend->recover_object( soid, @@ -11361,7 +11361,7 @@ int PrimaryLogPG::recover_missing( obc, h); // This is only a pull which shouldn't return an error - assert(r >= 0); + ceph_assert(r >= 0); return PULL_YES; } @@ -11369,7 +11369,7 @@ void PrimaryLogPG::remove_missing_object(const hobject_t &soid, eversion_t v, Context *on_complete) { dout(20) << __func__ << " " << soid << " " << v << dendl; - assert(on_complete != nullptr); + ceph_assert(on_complete != nullptr); // delete 
locally ObjectStore::Transaction t; remove_snap_mapped_object(t, soid); @@ -11387,7 +11387,7 @@ void PrimaryLogPG::remove_missing_object(const hobject_t &soid, on_local_recover(soid, recovery_info, ObjectContextRef(), true, &t2); t2.register_on_complete(on_complete); int r = osd->store->queue_transaction(ch, std::move(t2), nullptr); - assert(r == 0); + ceph_assert(r == 0); unlock(); } else { unlock(); @@ -11395,7 +11395,7 @@ void PrimaryLogPG::remove_missing_object(const hobject_t &soid, } })); int r = osd->store->queue_transaction(ch, std::move(t), nullptr); - assert(r == 0); + ceph_assert(r == 0); } void PrimaryLogPG::finish_degraded_object(const hobject_t& oid) @@ -11455,7 +11455,7 @@ void PrimaryLogPG::_applied_recovered_object(ObjectContextRef obc) if (obc) { dout(20) << "obc = " << *obc << dendl; } - assert(active_pushes >= 1); + ceph_assert(active_pushes >= 1); --active_pushes; // requeue an active chunky scrub waiting on recovery ops @@ -11468,7 +11468,7 @@ void PrimaryLogPG::_applied_recovered_object(ObjectContextRef obc) void PrimaryLogPG::_applied_recovered_object_replica() { dout(20) << __func__ << dendl; - assert(active_pushes >= 1); + ceph_assert(active_pushes >= 1); --active_pushes; // requeue an active chunky scrub waiting on recovery ops @@ -11503,7 +11503,7 @@ void PrimaryLogPG::recover_got(hobject_t oid, eversion_t v) << " log.complete_to at end" << dendl; //below is not true in the repair case. //assert(missing.num_missing() == 0); // otherwise, complete_to was wrong. - assert(info.last_complete == info.last_update); + ceph_assert(info.last_complete == info.last_update); } } @@ -11516,7 +11516,7 @@ void PrimaryLogPG::primary_failed(const hobject_t &soid) void PrimaryLogPG::failed_push(const list &from, const hobject_t &soid) { dout(20) << __func__ << ": " << soid << dendl; - assert(recovering.count(soid)); + ceph_assert(recovering.count(soid)); auto obc = recovering[soid]; if (obc) { list blocked_ops; @@ -11537,11 +11537,11 @@ eversion_t PrimaryLogPG::pick_newest_available(const hobject_t& oid) eversion_t v; pg_missing_item pmi; bool is_missing = pg_log.get_missing().is_missing(oid, &pmi); - assert(is_missing); + ceph_assert(is_missing); v = pmi.have; dout(10) << "pick_newest_available " << oid << " " << v << " on osd." 
<< osd->whoami << " (local)" << dendl; - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { @@ -11564,7 +11564,7 @@ void PrimaryLogPG::do_update_log_missing(OpRequestRef &op) { const MOSDPGUpdateLogMissing *m = static_cast( op->get_req()); - assert(m->get_type() == MSG_OSD_PG_UPDATE_LOG_MISSING); + ceph_assert(m->get_type() == MSG_OSD_PG_UPDATE_LOG_MISSING); ObjectStore::Transaction t; boost::optional op_trim_to, op_roll_forward_to; if (m->pg_trim_to != eversion_t()) @@ -11616,7 +11616,7 @@ void PrimaryLogPG::do_update_log_missing(OpRequestRef &op) ch, std::move(t), nullptr); - assert(tr == 0); + ceph_assert(tr == 0); op_applied(info.last_update); } @@ -11692,7 +11692,7 @@ void PrimaryLogPG::mark_all_unfound_lost( switch (what) { case pg_log_entry_t::LOST_MARK: - assert(0 == "actually, not implemented yet!"); + ceph_assert(0 == "actually, not implemented yet!"); break; case pg_log_entry_t::LOST_REVERT: @@ -11796,7 +11796,7 @@ void PrimaryLogPG::mark_all_unfound_lost( void PrimaryLogPG::_split_into(pg_t child_pgid, PG *child, unsigned split_bits) { - assert(repop_queue.empty()); + ceph_assert(repop_queue.empty()); } /* @@ -11841,7 +11841,7 @@ void PrimaryLogPG::apply_and_flush_repops(bool requeue) remove_repop(repop); } - assert(repop_queue.empty()); + ceph_assert(repop_queue.empty()); if (requeue) { requeue_ops(rq); @@ -11858,7 +11858,7 @@ void PrimaryLogPG::apply_and_flush_repops(bool requeue) << i->first << dendl; } } - assert(waiting_for_ondisk.empty()); + ceph_assert(waiting_for_ondisk.empty()); } } @@ -11867,7 +11867,7 @@ void PrimaryLogPG::apply_and_flush_repops(bool requeue) void PrimaryLogPG::on_flushed() { - assert(flushes_in_progress > 0); + ceph_assert(flushes_in_progress > 0); flushes_in_progress--; if (flushes_in_progress == 0) { requeue_ops(waiting_for_flush); @@ -11877,7 +11877,7 @@ void PrimaryLogPG::on_flushed() while (object_contexts.get_next(i.first, &i)) { derr << __func__ << ": object " << i.first << " obc still alive" << dendl; } - assert(object_contexts.empty()); + ceph_assert(object_contexts.empty()); } } @@ -11988,7 +11988,7 @@ void PrimaryLogPG::on_activate() if (!backfill_targets.empty()) { last_backfill_started = earliest_backfill(); new_backfill = true; - assert(!last_backfill_started.is_max()); + ceph_assert(!last_backfill_started.is_max()); dout(5) << __func__ << ": bft=" << backfill_targets << " from " << last_backfill_started << dendl; for (set::iterator i = backfill_targets.begin(); @@ -12011,7 +12011,7 @@ void PrimaryLogPG::_on_new_interval() get_osdmap()->test_flag(CEPH_OSDMAP_RECOVERY_DELETES)) { pg_log.rebuild_missing_set_with_deletes(osd->store, ch, info); } - assert(pg_log.get_missing().may_include_deletes == get_osdmap()->test_flag(CEPH_OSDMAP_RECOVERY_DELETES)); + ceph_assert(pg_log.get_missing().may_include_deletes == get_osdmap()->test_flag(CEPH_OSDMAP_RECOVERY_DELETES)); } void PrimaryLogPG::on_change(ObjectStore::Transaction *t) @@ -12078,7 +12078,7 @@ void PrimaryLogPG::on_change(ObjectStore::Transaction *t) ) { finish_degraded_object((i++)->first); } - assert(callbacks_for_degraded_object.empty()); + ceph_assert(callbacks_for_degraded_object.empty()); if (is_primary()) { requeue_ops(waiting_for_cache_not_full); @@ -12121,7 +12121,7 @@ void PrimaryLogPG::on_change(ObjectStore::Transaction *t) object_contexts.clear(); // should have been cleared above by finishing all of the degraded objects - 
assert(objects_blocked_on_degraded_snap.empty()); + ceph_assert(objects_blocked_on_degraded_snap.empty()); } void PrimaryLogPG::on_role_change() @@ -12162,7 +12162,7 @@ void PrimaryLogPG::_clear_recovery_state() last_backfill_started = hobject_t(); set::iterator i = backfills_in_flight.begin(); while (i != backfills_in_flight.end()) { - assert(recovering.count(*i)); + ceph_assert(recovering.count(*i)); backfills_in_flight.erase(i++); } @@ -12175,16 +12175,16 @@ void PrimaryLogPG::_clear_recovery_state() requeue_ops(blocked_ops); } } - assert(backfills_in_flight.empty()); + ceph_assert(backfills_in_flight.empty()); pending_backfill_updates.clear(); - assert(recovering.empty()); + ceph_assert(recovering.empty()); pgbackend->clear_recovery_state(); } void PrimaryLogPG::cancel_pull(const hobject_t &soid) { dout(20) << __func__ << ": " << soid << dendl; - assert(recovering.count(soid)); + ceph_assert(recovering.count(soid)); ObjectContextRef obc = recovering[soid]; if (obc) { list blocked_ops; @@ -12250,11 +12250,11 @@ bool PrimaryLogPG::start_recovery_ops( started = 0; bool work_in_progress = false; bool recovery_started = false; - assert(is_primary()); - assert(is_peered()); - assert(!is_deleting()); + ceph_assert(is_primary()); + ceph_assert(is_peered()); + ceph_assert(!is_deleting()); - assert(recovery_queued); + ceph_assert(recovery_queued); recovery_queued = false; if (!state_test(PG_STATE_RECOVERING) && @@ -12329,8 +12329,8 @@ bool PrimaryLogPG::start_recovery_ops( work_in_progress || recovery_ops_active > 0 || deferred_backfill) return !work_in_progress && have_unfound(); - assert(recovering.empty()); - assert(recovery_ops_active == 0); + ceph_assert(recovering.empty()); + ceph_assert(recovery_ops_active == 0); dout(10) << __func__ << " needs_recovery: " << missing_loc.get_needs_recovery() @@ -12404,7 +12404,7 @@ bool PrimaryLogPG::start_recovery_ops( */ uint64_t PrimaryLogPG::recover_primary(uint64_t max, ThreadPool::TPHandle &handle) { - assert(is_primary()); + ceph_assert(is_primary()); const auto &missing = pg_log.get_missing(); @@ -12430,7 +12430,7 @@ uint64_t PrimaryLogPG::recover_primary(uint64_t max, ThreadPool::TPHandle &handl auto it_objects = pg_log.get_log().objects.find(p->second); if (it_objects != pg_log.get_log().objects.end()) { latest = it_objects->second; - assert(latest->is_update() || latest->is_delete()); + ceph_assert(latest->is_update() || latest->is_delete()); soid = latest->soid; } else { latest = 0; @@ -12478,7 +12478,7 @@ uint64_t PrimaryLogPG::recover_primary(uint64_t max, ThreadPool::TPHandle &handl obc->obs.oi.encode( b2, get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr)); - assert(!pool.info.require_rollback()); + ceph_assert(!pool.info.require_rollback()); t.setattr(coll, ghobject_t(soid), OI_ATTR, b2); recover_got(soid, latest->version); @@ -12564,7 +12564,7 @@ bool PrimaryLogPG::primary_error( pg_log.set_last_requested(0); missing_loc.remove_location(soid, pg_whoami); bool uhoh = true; - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); for (set::iterator i = acting_recovery_backfill.begin(); i != acting_recovery_backfill.end(); ++i) { @@ -12590,7 +12590,7 @@ int PrimaryLogPG::prep_object_replica_deletes( PGBackend::RecoveryHandle *h, bool *work_started) { - assert(is_primary()); + ceph_assert(is_primary()); dout(10) << __func__ << ": on " << soid << dendl; ObjectContextRef obc = get_object_context(soid, false); @@ -12607,7 +12607,7 @@ int PrimaryLogPG::prep_object_replica_deletes( } 
start_recovery_op(soid); - assert(!recovering.count(soid)); + ceph_assert(!recovering.count(soid)); if (!obc) recovering.insert(make_pair(soid, ObjectContextRef())); else @@ -12622,7 +12622,7 @@ int PrimaryLogPG::prep_object_replica_pushes( PGBackend::RecoveryHandle *h, bool *work_started) { - assert(is_primary()); + ceph_assert(is_primary()); dout(10) << __func__ << ": on " << soid << dendl; // NOTE: we know we will get a valid oloc off of disk here. @@ -12643,7 +12643,7 @@ int PrimaryLogPG::prep_object_replica_pushes( } start_recovery_op(soid); - assert(!recovering.count(soid)); + ceph_assert(!recovering.count(soid)); recovering.insert(make_pair(soid, obc)); /* We need this in case there is an in progress write on the object. In fact, @@ -12675,7 +12675,7 @@ uint64_t PrimaryLogPG::recover_replicas(uint64_t max, ThreadPool::TPHandle &hand PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op(); // this is FAR from an optimal recovery order. pretty lame, really. - assert(!acting_recovery_backfill.empty()); + ceph_assert(!acting_recovery_backfill.empty()); // choose replicas to recover, replica has the shortest missing list first // so we can bring it back to normal ASAP std::vector> replicas_by_num_missing, @@ -12686,7 +12686,7 @@ uint64_t PrimaryLogPG::recover_replicas(uint64_t max, ThreadPool::TPHandle &hand continue; } auto pm = peer_missing.find(p); - assert(pm != peer_missing.end()); + ceph_assert(pm != peer_missing.end()); auto nm = pm->second.num_missing(); if (nm != 0) { if (async_recovery_targets.count(p)) { @@ -12709,11 +12709,11 @@ uint64_t PrimaryLogPG::recover_replicas(uint64_t max, ThreadPool::TPHandle &hand async_by_num_missing.begin(), async_by_num_missing.end()); for (auto &replica: replicas_by_num_missing) { pg_shard_t &peer = replica.second; - assert(peer != get_primary()); + ceph_assert(peer != get_primary()); map::const_iterator pm = peer_missing.find(peer); - assert(pm != peer_missing.end()); + ceph_assert(pm != peer_missing.end()); map::const_iterator pi = peer_info.find(peer); - assert(pi != peer_info.end()); + ceph_assert(pi != peer_info.end()); size_t m_sz = pm->second.num_missing(); dout(10) << " peer osd." << peer << " missing " << m_sz << " objects." << dendl; @@ -12784,7 +12784,7 @@ hobject_t PrimaryLogPG::earliest_peer_backfill() const pg_shard_t peer = *i; map::const_iterator iter = peer_backfill_info.find(peer); - assert(iter != peer_backfill_info.end()); + ceph_assert(iter != peer_backfill_info.end()); if (iter->second.begin < e) e = iter->second.begin; } @@ -12794,7 +12794,7 @@ hobject_t PrimaryLogPG::earliest_peer_backfill() const bool PrimaryLogPG::all_peer_done() const { // Primary hasn't got any more objects - assert(backfill_info.empty()); + ceph_assert(backfill_info.empty()); for (set::const_iterator i = backfill_targets.begin(); i != backfill_targets.end(); @@ -12802,7 +12802,7 @@ bool PrimaryLogPG::all_peer_done() const pg_shard_t bt = *i; map::const_iterator piter = peer_backfill_info.find(bt); - assert(piter != peer_backfill_info.end()); + ceph_assert(piter != peer_backfill_info.end()); const BackfillInterval& pbi = piter->second; // See if peer has more to process if (!pbi.extends_to_end() || !pbi.empty()) @@ -12848,12 +12848,12 @@ uint64_t PrimaryLogPG::recover_backfill( << " last_backfill_started " << last_backfill_started << (new_backfill ? 
" new_backfill":"") << dendl; - assert(!backfill_targets.empty()); + ceph_assert(!backfill_targets.empty()); // Initialize from prior backfill state if (new_backfill) { // on_activate() was called prior to getting here - assert(last_backfill_started == earliest_backfill()); + ceph_assert(last_backfill_started == earliest_backfill()); new_backfill = false; // initialize BackfillIntervals @@ -12925,7 +12925,7 @@ uint64_t PrimaryLogPG::recover_backfill( spg_t(info.pgid.pgid, bt.shard), pbi.end, hobject_t()); osd->send_message_osd_cluster(bt.osd, m, get_osdmap()->get_epoch()); - assert(waiting_on_backfill.find(bt) == waiting_on_backfill.end()); + ceph_assert(waiting_on_backfill.find(bt) == waiting_on_backfill.end()); waiting_on_backfill.insert(bt); sent_scan = true; } @@ -12958,7 +12958,7 @@ uint64_t PrimaryLogPG::recover_backfill( if (pbi.begin == check) check_targets.insert(bt); } - assert(!check_targets.empty()); + ceph_assert(!check_targets.empty()); dout(20) << " BACKFILL removing " << check << " from peers " << check_targets << dendl; @@ -12967,7 +12967,7 @@ uint64_t PrimaryLogPG::recover_backfill( ++i) { pg_shard_t bt = *i; BackfillInterval& pbi = peer_backfill_info[bt]; - assert(pbi.begin == check); + ceph_assert(pbi.begin == check); to_remove.push_back(boost::make_tuple(check, pbi.objects.begin()->second, bt)); pbi.pop_front(); @@ -13017,7 +13017,7 @@ uint64_t PrimaryLogPG::recover_backfill( } if (!need_ver_targs.empty() || !missing_targs.empty()) { ObjectContextRef obc = get_object_context(backfill_info.begin, false); - assert(obc); + ceph_assert(obc); if (obc->get_recovery_read()) { if (!need_ver_targs.empty()) { dout(20) << " BACKFILL replacing " << check @@ -13075,7 +13075,7 @@ uint64_t PrimaryLogPG::recover_backfill( i != add_to_stat.end(); ++i) { ObjectContextRef obc = get_object_context(*i, false); - assert(obc); + ceph_assert(obc); pg_stat_t stat; add_object_context_to_pg_stat(obc, &stat); pending_backfill_updates[*i] = stat; @@ -13124,7 +13124,7 @@ uint64_t PrimaryLogPG::recover_backfill( i->first < next_backfill_to_complete; pending_backfill_updates.erase(i++)) { dout(20) << " pending_backfill_update " << i->first << dendl; - assert(i->first > new_last_backfill); + ceph_assert(i->first > new_last_backfill); for (set::iterator j = backfill_targets.begin(); j != backfill_targets.end(); ++j) { @@ -13138,11 +13138,11 @@ uint64_t PrimaryLogPG::recover_backfill( } dout(10) << "possible new_last_backfill at " << new_last_backfill << dendl; - assert(!pending_backfill_updates.empty() || + ceph_assert(!pending_backfill_updates.empty() || new_last_backfill == last_backfill_started); if (pending_backfill_updates.empty() && backfill_pos.is_max()) { - assert(backfills_in_flight.empty()); + ceph_assert(backfills_in_flight.empty()); new_last_backfill = backfill_pos; last_backfill_started = backfill_pos; } @@ -13202,16 +13202,16 @@ int PrimaryLogPG::prep_backfill_object_push( PGBackend::RecoveryHandle *h) { dout(10) << __func__ << " " << oid << " v " << v << " to peers " << peers << dendl; - assert(!peers.empty()); + ceph_assert(!peers.empty()); backfills_in_flight.insert(oid); for (unsigned int i = 0 ; i < peers.size(); ++i) { map::iterator bpm = peer_missing.find(peers[i]); - assert(bpm != peer_missing.end()); + ceph_assert(bpm != peer_missing.end()); bpm->second.add(oid, eversion_t(), eversion_t(), false); } - assert(!recovering.count(oid)); + ceph_assert(!recovering.count(oid)); start_recovery_op(oid); recovering.insert(make_pair(oid, obc)); @@ -13249,7 +13249,7 @@ void 
PrimaryLogPG::update_range( if (bi->version >= projected_last_update) { dout(10) << __func__<< ": bi is current " << dendl; - assert(bi->version == projected_last_update); + ceph_assert(bi->version == projected_last_update); } else if (bi->version >= info.log_tail) { if (pg_log.get_log().empty() && projected_log.empty()) { /* Because we don't move log_tail on split, the log might be @@ -13258,7 +13258,7 @@ void PrimaryLogPG::update_range( * eversion_t(), because otherwise the entry which changed * last_update since the last scan would have to be present. */ - assert(bi->version == eversion_t()); + ceph_assert(bi->version == eversion_t()); return; } @@ -13292,7 +13292,7 @@ void PrimaryLogPG::update_range( projected_log.scan_log_after(bi->version, func); bi->version = projected_last_update; } else { - assert(0 == "scan_range should have raised bi->version past log_tail"); + ceph_assert(0 == "scan_range should have raised bi->version past log_tail"); } } @@ -13300,14 +13300,14 @@ void PrimaryLogPG::scan_range( int min, int max, BackfillInterval *bi, ThreadPool::TPHandle &handle) { - assert(is_locked()); + ceph_assert(is_locked()); dout(10) << "scan_range from " << bi->begin << dendl; bi->clear_objects(); vector ls; ls.reserve(max); int r = pgbackend->objects_list_partial(bi->begin, min, max, &ls, &bi->end); - assert(r >= 0); + ceph_assert(r >= 0); dout(10) << " got " << ls.size() << " items, next " << bi->end << dendl; dout(20) << ls << dendl; @@ -13330,7 +13330,7 @@ void PrimaryLogPG::scan_range( if (r == -ENOENT) continue; - assert(r >= 0); + ceph_assert(r >= 0); object_info_t oi(bl); bi->objects[*p] = oi.version; dout(20) << " " << *p << " " << oi.version << dendl; @@ -13347,7 +13347,7 @@ void PrimaryLogPG::check_local() { dout(10) << __func__ << dendl; - assert(info.last_update >= pg_log.get_tail()); // otherwise we need some help! + ceph_assert(info.last_update >= pg_log.get_tail()); // otherwise we need some help! 
if (!cct->_conf->osd_debug_verify_stray_on_activate) return; @@ -13372,7 +13372,7 @@ void PrimaryLogPG::check_local() if (r != -ENOENT) { derr << __func__ << " " << p->soid << " exists, but should have been " << "deleted" << dendl; - assert(0 == "erroneously present object"); + ceph_assert(0 == "erroneously present object"); } } else { // ignore old(+missing) objects @@ -13467,11 +13467,11 @@ void PrimaryLogPG::hit_set_remove_all() if (!info.hit_set.history.empty()) { list::reverse_iterator p = info.hit_set.history.rbegin(); - assert(p != info.hit_set.history.rend()); + ceph_assert(p != info.hit_set.history.rend()); hobject_t oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt); - assert(!is_degraded_or_backfilling_object(oid)); + ceph_assert(!is_degraded_or_backfilling_object(oid)); ObjectContextRef obc = get_object_context(oid, false); - assert(obc); + ceph_assert(obc); OpContextUPtr ctx = simple_opc_create(obc); ctx->at_version = get_next_version(); @@ -13593,7 +13593,7 @@ void PrimaryLogPG::hit_set_persist() for (set::iterator p = backfill_targets.begin(); p != backfill_targets.end(); ++p) { - assert(peer_info.count(*p)); + ceph_assert(peer_info.count(*p)); const pg_info_t& pi = peer_info[*p]; if (pi.last_backfill == hobject_t() || pi.last_backfill.get_hash() == info.pgid.ps()) { @@ -13693,15 +13693,15 @@ void PrimaryLogPG::hit_set_persist() void PrimaryLogPG::hit_set_trim(OpContextUPtr &ctx, unsigned max) { - assert(ctx->updated_hset_history); + ceph_assert(ctx->updated_hset_history); pg_hit_set_history_t &updated_hit_set_hist = *(ctx->updated_hset_history); for (unsigned num = updated_hit_set_hist.history.size(); num > max; --num) { list::iterator p = updated_hit_set_hist.history.begin(); - assert(p != updated_hit_set_hist.history.end()); + ceph_assert(p != updated_hit_set_hist.history.end()); hobject_t oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt); - assert(!is_degraded_or_backfilling_object(oid)); + ceph_assert(!is_degraded_or_backfilling_object(oid)); dout(20) << __func__ << " removing " << oid << dendl; ++ctx->at_version.version; @@ -13719,7 +13719,7 @@ void PrimaryLogPG::hit_set_trim(OpContextUPtr &ctx, unsigned max) updated_hit_set_hist.history.pop_front(); ObjectContextRef obc = get_object_context(oid, false); - assert(obc); + ceph_assert(obc); --ctx->delta_stats.num_objects; --ctx->delta_stats.num_objects_hit_set_archive; ctx->delta_stats.num_bytes -= obc->obs.oi.size; @@ -13740,7 +13740,7 @@ void PrimaryLogPG::hit_set_in_memory_trim(uint32_t max_in_memory) void PrimaryLogPG::agent_setup() { - assert(is_locked()); + ceph_assert(is_locked()); if (!is_active() || !is_primary() || pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE || @@ -13789,7 +13789,7 @@ bool PrimaryLogPG::agent_work(int start_max, int agent_flush_quota) return true; } - assert(!deleting); + ceph_assert(!deleting); if (agent_state->is_idle()) { dout(10) << __func__ << " idle, stopping" << dendl; @@ -13805,13 +13805,13 @@ bool PrimaryLogPG::agent_work(int start_max, int agent_flush_quota) << ", evict " << agent_state->get_evict_mode_name() << ", pos " << agent_state->position << dendl; - assert(is_primary()); - assert(is_active()); + ceph_assert(is_primary()); + ceph_assert(is_active()); agent_load_hit_sets(); const pg_pool_t *base_pool = get_osdmap()->get_pg_pool(pool.info.tier_of); - assert(base_pool); + ceph_assert(base_pool); int ls_min = 1; int ls_max = cct->_conf->osd_pool_default_cache_max_evict_check_size; @@ -13825,7 +13825,7 @@ bool PrimaryLogPG::agent_work(int 
start_max, int agent_flush_quota) hobject_t next; int r = pgbackend->objects_list_partial(agent_state->position, ls_min, ls_max, &ls, &next); - assert(r >= 0); + ceph_assert(r >= 0); dout(20) << __func__ << " got " << ls.size() << " objects" << dendl; int started = 0; for (vector::iterator p = ls.begin(); @@ -13937,7 +13937,7 @@ bool PrimaryLogPG::agent_work(int start_max, int agent_flush_quota) hit_set_in_memory_trim(pool.info.hit_set_count); if (need_delay) { - assert(agent_state->delaying == false); + ceph_assert(agent_state->delaying == false); agent_delay(); unlock(); return false; @@ -13981,7 +13981,7 @@ void PrimaryLogPG::agent_load_hit_sets() bufferlist bl; { int r = osd->store->read(ch, ghobject_t(oid), 0, 0, bl); - assert(r >= 0); + ceph_assert(r >= 0); } HitSetRef hs(new HitSet); bufferlist::const_iterator pbl = bl.begin(); @@ -14144,7 +14144,7 @@ bool PrimaryLogPG::agent_maybe_evict(ObjectContextRef& obc, bool after_flush) }); ctx->at_version = get_next_version(); - assert(ctx->new_obs.exists); + ceph_assert(ctx->new_obs.exists); int r = _delete_oid(ctx.get(), true, false); if (obc->obs.oi.is_omap()) ctx->delta_stats.num_objects_omap--; @@ -14152,7 +14152,7 @@ bool PrimaryLogPG::agent_maybe_evict(ObjectContextRef& obc, bool after_flush) ctx->delta_stats.num_evict_kb += shift_round_up(obc->obs.oi.size, 10); if (obc->obs.oi.is_dirty()) --ctx->delta_stats.num_objects_dirty; - assert(r == 0); + ceph_assert(r == 0); finish_ctx(ctx.get(), pg_log_entry_t::DELETE); simple_opc_submit(std::move(ctx)); osd->logger->inc(l_osd_tier_evict); @@ -14174,7 +14174,7 @@ void PrimaryLogPG::agent_delay() { dout(20) << __func__ << dendl; if (agent_state && !agent_state->is_idle()) { - assert(agent_state->delaying == false); + ceph_assert(agent_state->delaying == false); agent_state->delaying = true; osd->agent_disable_pg(this, agent_state->evict_effort); } @@ -14212,7 +14212,7 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) { uint64_t divisor = pool.info.get_pg_num_divisor(info.pgid.pgid); - assert(divisor > 0); + ceph_assert(divisor > 0); // adjust (effective) user objects down based on the number // of HitSet objects, which should not count toward our total since @@ -14221,7 +14221,7 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) // also exclude omap objects if ec backing pool const pg_pool_t *base_pool = get_osdmap()->get_pg_pool(pool.info.tier_of); - assert(base_pool); + ceph_assert(base_pool); if (!base_pool->supports_omap()) unflushable += info.stats.stats.sum.num_objects_omap; @@ -14332,12 +14332,12 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) // quantize effort to avoid too much reordering in the agent_queue. 
uint64_t inc = cct->_conf->osd_agent_quantize_effort * 1000000; - assert(inc > 0); + ceph_assert(inc > 0); uint64_t was = evict_effort; evict_effort -= evict_effort % inc; if (evict_effort < inc) evict_effort = inc; - assert(evict_effort >= inc && evict_effort <= 1000000); + ceph_assert(evict_effort >= inc && evict_effort <= 1000000); dout(30) << __func__ << " evict_effort " << was << " quantized by " << inc << " to " << evict_effort << dendl; } } @@ -14422,8 +14422,8 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) void PrimaryLogPG::agent_estimate_temp(const hobject_t& oid, int *temp) { - assert(hit_set); - assert(temp); + ceph_assert(hit_set); + ceph_assert(temp); *temp = 0; if (hit_set->contains(oid)) *temp = 1000000; @@ -14530,7 +14530,7 @@ void PrimaryLogPG::log_missing(unsigned missing, const char *mode, bool allow_incomplete_clones) { - assert(head); + ceph_assert(head); if (allow_incomplete_clones) { dout(20) << func << " " << mode << " " << pgid << " " << head.get() << " skipped " << missing << " clone(s) in cache tier" << dendl; @@ -14550,8 +14550,8 @@ unsigned PrimaryLogPG::process_clones_to(const boost::optional &head, vector::reverse_iterator *curclone, inconsistent_snapset_wrapper &e) { - assert(head); - assert(snapset); + ceph_assert(head); + ceph_assert(snapset); unsigned missing = 0; // NOTE: clones are in descending order, thus **curclone > target test here @@ -14623,7 +14623,7 @@ void PrimaryLogPG::scrub_snapshot_metadata( for (map::reverse_iterator p = scrubmap.objects.rbegin(); p != scrubmap.objects.rend(); ++p) { const hobject_t& soid = p->first; - assert(!soid.is_snapdir()); + ceph_assert(!soid.is_snapdir()); soid_error = inconsistent_snapset_wrapper{soid}; object_stat_sum_t stat; boost::optional oi; @@ -14705,7 +14705,7 @@ void PrimaryLogPG::scrub_snapshot_metadata( target = all_clones; } else { - assert(soid.is_snap()); + ceph_assert(soid.is_snap()); target = soid.snap; } @@ -14720,7 +14720,7 @@ void PrimaryLogPG::scrub_snapshot_metadata( if (doing_clones(snapset, curclone)) { // A head would have processed all clones above // or all greater than *curclone. 
- assert(soid.is_snap() && *curclone <= soid.snap); + ceph_assert(soid.is_snap() && *curclone <= soid.snap); // After processing above clone snap should match the expected curclone expected = (*curclone == soid.snap); @@ -14803,10 +14803,10 @@ void PrimaryLogPG::scrub_snapshot_metadata( } } } else { - assert(soid.is_snap()); - assert(head); - assert(snapset); - assert(soid.snap == *curclone); + ceph_assert(soid.is_snap()); + ceph_assert(head); + ceph_assert(snapset); + ceph_assert(soid.snap == *curclone); dout(20) << __func__ << " " << mode << " matched clone " << soid << dendl; @@ -14887,7 +14887,7 @@ void PrimaryLogPG::scrub_snapshot_metadata( scrubber.store->add_snap_error(pool.id, head_error); for (auto p = missing_digest.begin(); p != missing_digest.end(); ++p) { - assert(!p->first.is_snapdir()); + ceph_assert(!p->first.is_snapdir()); dout(10) << __func__ << " recording digests for " << p->first << dendl; ObjectContextRef obc = get_object_context(p->first, false); if (!obc) { @@ -15020,8 +15020,8 @@ int PrimaryLogPG::rep_repair_primary_object(const hobject_t& soid, OpContext *ct { OpRequestRef op = ctx->op; // Only supports replicated pools - assert(!pool.info.is_erasure()); - assert(is_primary()); + ceph_assert(!pool.info.is_erasure()); + ceph_assert(is_primary()); dout(10) << __func__ << " " << soid << " peers osd.{" << acting_recovery_backfill << "}" << dendl; @@ -15031,7 +15031,7 @@ int PrimaryLogPG::rep_repair_primary_object(const hobject_t& soid, OpContext *ct return -EAGAIN; } - assert(!pg_log.get_missing().is_missing(soid)); + ceph_assert(!pg_log.get_missing().is_missing(soid)); auto& oi = ctx->new_obs.oi; eversion_t v = oi.version; @@ -15052,7 +15052,7 @@ int PrimaryLogPG::rep_repair_primary_object(const hobject_t& soid, OpContext *ct if (!eio_errors_to_process) { eio_errors_to_process = true; - assert(is_clean()); + ceph_assert(is_clean()); queue_peering_event( PGPeeringEventRef( std::make_shared( @@ -15159,9 +15159,9 @@ boost::statechart::result PrimaryLogPG::AwaitAsyncWork::react(const DoSnapWork&) PrimaryLogPGRef pg = context< SnapTrimmer >().pg; snapid_t snap_to_trim = context().snap_to_trim; auto &in_flight = context().in_flight; - assert(in_flight.empty()); + ceph_assert(in_flight.empty()); - assert(pg->is_primary() && pg->is_active()); + ceph_assert(pg->is_primary() && pg->is_active()); if (!context< SnapTrimmer >().can_trim()) { ldout(pg->cct, 10) << "something changed, reverting to NotTrimming" << dendl; post_event(KickTrim()); @@ -15180,7 +15180,7 @@ boost::statechart::result PrimaryLogPG::AwaitAsyncWork::react(const DoSnapWork&) if (r != 0 && r != -ENOENT) { lderr(pg->cct) << "get_next_objects_to_trim returned " << cpp_strerror(r) << dendl; - assert(0 == "get_next_objects_to_trim returned an invalid code"); + ceph_assert(0 == "get_next_objects_to_trim returned an invalid code"); } else if (r == -ENOENT) { // Done! 
ldout(pg->cct, 10) << "got ENOENT" << dendl; @@ -15198,13 +15198,13 @@ boost::statechart::result PrimaryLogPG::AwaitAsyncWork::react(const DoSnapWork&) pg->dirty_big_info = true; pg->write_if_dirty(t); int tr = pg->osd->store->queue_transaction(pg->ch, std::move(t), NULL); - assert(tr == 0); + ceph_assert(tr == 0); pg->share_pg_info(); post_event(KickTrim()); return transit< NotTrimming >(); } - assert(!to_trim.empty()); + ceph_assert(!to_trim.empty()); for (auto &&object: to_trim) { // Get next @@ -15235,7 +15235,7 @@ boost::statechart::result PrimaryLogPG::AwaitAsyncWork::react(const DoSnapWork&) in_flight.insert(object); ctx->register_on_success( [pg, object, &in_flight]() { - assert(in_flight.find(object) != in_flight.end()); + ceph_assert(in_flight.find(object) != in_flight.end()); in_flight.erase(object); if (in_flight.empty()) { if (pg->state_test(PG_STATE_SNAPTRIM_ERROR)) { @@ -15300,7 +15300,7 @@ int PrimaryLogPG::getattrs_maybe_cache( map *out) { int r = 0; - assert(out); + ceph_assert(out); if (pool.info.is_erasure()) { *out = obc->attr_cache; } else { diff --git a/src/osd/PrimaryLogPG.h b/src/osd/PrimaryLogPG.h index 431bcb28397f9..0afdbaf2d8c23 100644 --- a/src/osd/PrimaryLogPG.h +++ b/src/osd/PrimaryLogPG.h @@ -251,7 +251,7 @@ public: FlushOp() : flushed_version(0), objecter_tid(0), rval(0), blocking(false), removal(false), chunks(0) {} - ~FlushOp() { assert(!on_flush); } + ~FlushOp() { ceph_assert(!on_flush); } }; typedef std::shared_ptr FlushOpRef; @@ -664,7 +664,7 @@ public: } } ~OpContext() { - assert(!op_t); + ceph_assert(!op_t); if (reply) reply->put(); for (list, @@ -755,7 +755,7 @@ public: return this; } void put() { - assert(nref > 0); + ceph_assert(nref > 0); if (--nref == 0) { delete this; //generic_dout(0) << "deleting " << this << dendl; @@ -783,12 +783,12 @@ protected: } else if (write_ordered) { ctx->lock_type = ObjectContext::RWState::RWWRITE; } else { - assert(ctx->op->may_read()); + ceph_assert(ctx->op->may_read()); ctx->lock_type = ObjectContext::RWState::RWREAD; } if (ctx->head_obc) { - assert(!ctx->obc->obs.exists); + ceph_assert(!ctx->obc->obs.exists); if (!ctx->lock_manager.get_lock_type( ctx->lock_type, ctx->head_obc->obs.oi.soid, @@ -805,7 +805,7 @@ protected: ctx->op)) { return true; } else { - assert(!ctx->head_obc); + ceph_assert(!ctx->head_obc); ctx->lock_type = ObjectContext::RWState::RWNONE; return false; } @@ -1005,9 +1005,9 @@ protected: _register_snapset_context(ssc); } void _register_snapset_context(SnapSetContext *ssc) { - assert(snapset_contexts_lock.is_locked()); + ceph_assert(snapset_contexts_lock.is_locked()); if (!ssc->registered) { - assert(snapset_contexts.count(ssc->oid) == 0); + ceph_assert(snapset_contexts.count(ssc->oid) == 0); ssc->registered = true; snapset_contexts[ssc->oid] = ssc; } @@ -1570,8 +1570,8 @@ private: : my_base(ctx), NamedState(context< SnapTrimmer >().pg, "Trimming") { context< SnapTrimmer >().log_enter(state_name); - assert(context< SnapTrimmer >().can_trim()); - assert(in_flight.empty()); + ceph_assert(context< SnapTrimmer >().can_trim()); + ceph_assert(in_flight.empty()); } void exit() { context< SnapTrimmer >().log_exit(state_name, enter_time); @@ -1595,7 +1595,7 @@ private: : my_base(ctx), NamedState(context< SnapTrimmer >().pg, "Trimming/WaitTrimTimer") { context< SnapTrimmer >().log_enter(state_name); - assert(context().in_flight.empty()); + ceph_assert(context().in_flight.empty()); struct OnTimer : Context { PrimaryLogPGRef pg; epoch_t epoch; @@ -1645,7 +1645,7 @@ private: : my_base(ctx), 
NamedState(context< SnapTrimmer >().pg, "Trimming/WaitRWLock") { context< SnapTrimmer >().log_enter(state_name); - assert(context().in_flight.empty()); + ceph_assert(context().in_flight.empty()); } void exit() { context< SnapTrimmer >().log_exit(state_name, enter_time); @@ -1668,7 +1668,7 @@ private: : my_base(ctx), NamedState(context< SnapTrimmer >().pg, "Trimming/WaitRepops") { context< SnapTrimmer >().log_enter(state_name); - assert(!context().in_flight.empty()); + ceph_assert(!context().in_flight.empty()); } void exit() { context< SnapTrimmer >().log_exit(state_name, enter_time); @@ -1711,8 +1711,8 @@ private: pg->unlock(); } void cancel() { - assert(pg->is_locked()); - assert(!canceled); + ceph_assert(pg->is_locked()); + ceph_assert(!canceled); canceled = true; } }; @@ -1722,7 +1722,7 @@ private: : my_base(ctx), NamedState(context< SnapTrimmer >().pg, "Trimming/WaitReservation") { context< SnapTrimmer >().log_enter(state_name); - assert(context().in_flight.empty()); + ceph_assert(context().in_flight.empty()); auto *pg = context< SnapTrimmer >().pg; pending = new ReservationCB(pg); pg->osd->snap_reserver.request_reservation( diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc index 845ebc0b32968..0b20d9ca5e037 100644 --- a/src/osd/ReplicatedBackend.cc +++ b/src/osd/ReplicatedBackend.cc @@ -93,7 +93,7 @@ static void log_subop_stats( logger->inc(l_osd_sop_push_inb, inb); logger->tinc(l_osd_sop_push_lat, latency); } else - assert("no support subop" == 0); + ceph_assert("no support subop" == 0); } else { logger->tinc(l_osd_sop_pull_lat, latency); } @@ -129,7 +129,7 @@ int ReplicatedBackend::recover_object( dout(10) << __func__ << ": " << hoid << dendl; RPGHandle *h = static_cast(_h); if (get_parent()->get_local_missing().is_missing(hoid)) { - assert(!obc); + ceph_assert(!obc); // pull prepare_pull( v, @@ -137,7 +137,7 @@ int ReplicatedBackend::recover_object( head, h); } else { - assert(obc); + ceph_assert(obc); int started = start_pushes( hoid, obc, @@ -261,7 +261,7 @@ void ReplicatedBackend::objects_read_async( Context *on_complete, bool fast_read) { - assert(0 == "async read is not used by replica pool"); + ceph_assert(0 == "async read is not used by replica pool"); } class C_OSD_OnOpCommit : public Context { @@ -283,9 +283,9 @@ void generate_transaction( set *added, set *removed) { - assert(t); - assert(added); - assert(removed); + ceph_assert(t); + ceph_assert(added); + ceph_assert(removed); for (auto &&le: log_entries) { le.mark_unrollbackable(); @@ -332,7 +332,7 @@ void generate_transaction( goid); }, [&](const PGTransaction::ObjectOperation::Init::Rename &op) { - assert(op.source.is_temp()); + ceph_assert(op.source.is_temp()); t->collection_move_rename( coll, ghobject_t( @@ -407,7 +407,7 @@ void generate_transaction( extent.get_len()); }, [&](const BufferUpdate::CloneRange &op) { - assert(op.len == extent.get_len()); + ceph_assert(op.len == extent.get_len()); t->clone_range( coll, ghobject_t(op.from, ghobject_t::NO_GEN, shard_id_t::NO_SHARD), @@ -449,8 +449,8 @@ void ReplicatedBackend::submit_transaction( &op_t, &added, &removed); - assert(added.size() <= 1); - assert(removed.size() <= 1); + ceph_assert(added.size() <= 1); + ceph_assert(removed.size() <= 1); auto insert_res = in_progress_ops.insert( make_pair( @@ -460,7 +460,7 @@ void ReplicatedBackend::submit_transaction( orig_op, at_version) ) ); - assert(insert_res.second); + ceph_assert(insert_res.second); InProgressOp &op = *insert_res.first->second; op.waiting_for_commit.insert( @@ -526,7 +526,7 @@ void 
ReplicatedBackend::op_commit( if (op->waiting_for_commit.empty()) { op->on_commit->complete(0); op->on_commit = 0; - assert(!op->on_commit); + ceph_assert(!op->on_commit); in_progress_ops.erase(op->tid); } } @@ -535,7 +535,7 @@ void ReplicatedBackend::do_repop_reply(OpRequestRef op) { static_cast(op->get_nonconst_req())->finish_decode(); const MOSDRepOpReply *r = static_cast(op->get_req()); - assert(r->get_header().type == MSG_OSD_REPOPREPLY); + ceph_assert(r->get_header().type == MSG_OSD_REPOPREPLY); op->mark_started(); @@ -564,7 +564,7 @@ void ReplicatedBackend::do_repop_reply(OpRequestRef op) // oh, good. if (r->ack_type & CEPH_OSD_FLAG_ONDISK) { - assert(ip_op.waiting_for_commit.count(from)); + ceph_assert(ip_op.waiting_for_commit.count(from)); ip_op.waiting_for_commit.erase(from); if (ip_op.op) { ostringstream ss; @@ -607,7 +607,7 @@ int ReplicatedBackend::be_deep_scrub( sleeptime.sleep(); } - assert(poid == pos.ls[pos.pos]); + ceph_assert(poid == pos.ls[pos.pos]); if (!pos.data_done()) { if (pos.data_pos == 0) { pos.data_hash = bufferhash(-1); @@ -672,7 +672,7 @@ int ReplicatedBackend::be_deep_scrub( ch, ghobject_t( poid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard)); - assert(iter); + ceph_assert(iter); if (pos.omap_pos.length()) { iter->lower_bound(pos.omap_pos); } else { @@ -728,7 +728,7 @@ int ReplicatedBackend::be_deep_scrub( void ReplicatedBackend::_do_push(OpRequestRef op) { const MOSDPGPush *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_PUSH); + ceph_assert(m->get_type() == MSG_OSD_PG_PUSH); pg_shard_t from = m->from; op->mark_started(); @@ -773,7 +773,7 @@ struct C_ReplicatedBackend_OnPullComplete : GenContext { ReplicatedBackend::RPGHandle *h = bc->_open_recovery_op(); for (auto &&i: to_continue) { auto j = bc->pulling.find(i.hoid); - assert(j != bc->pulling.end()); + ceph_assert(j != bc->pulling.end()); ObjectContextRef obc = j->second.obc; bc->clear_pull(j, false /* already did it */); int started = bc->start_pushes(i.hoid, obc, h); @@ -794,7 +794,7 @@ struct C_ReplicatedBackend_OnPullComplete : GenContext { void ReplicatedBackend::_do_pull_response(OpRequestRef op) { const MOSDPGPush *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_PUSH); + ceph_assert(m->get_type() == MSG_OSD_PG_PUSH); pg_shard_t from = m->from; op->mark_started(); @@ -848,7 +848,7 @@ void ReplicatedBackend::_do_pull_response(OpRequestRef op) void ReplicatedBackend::do_pull(OpRequestRef op) { MOSDPGPull *m = static_cast(op->get_nonconst_req()); - assert(m->get_type() == MSG_OSD_PG_PULL); + ceph_assert(m->get_type() == MSG_OSD_PG_PULL); pg_shard_t from = m->from; map > replies; @@ -864,7 +864,7 @@ void ReplicatedBackend::do_pull(OpRequestRef op) void ReplicatedBackend::do_push_reply(OpRequestRef op) { const MOSDPGPushReply *m = static_cast(op->get_req()); - assert(m->get_type() == MSG_OSD_PG_PUSH_REPLY); + ceph_assert(m->get_type() == MSG_OSD_PG_PUSH_REPLY); pg_shard_t from = m->from; vector replies(1); @@ -993,7 +993,7 @@ void ReplicatedBackend::do_repop(OpRequestRef op) static_cast(op->get_nonconst_req())->finish_decode(); const MOSDRepOp *m = static_cast(op->get_req()); int msg_type = m->get_type(); - assert(MSG_OSD_REPOP == msg_type); + ceph_assert(MSG_OSD_REPOP == msg_type); const hobject_t& soid = m->poid; @@ -1004,7 +1004,7 @@ void ReplicatedBackend::do_repop(OpRequestRef op) << dendl; // sanity checks - assert(m->map_epoch >= get_info().history.same_interval_since); + ceph_assert(m->map_epoch >= get_info().history.same_interval_since); 
dout(30) << __func__ << " missing before " << get_parent()->get_log().get_missing().get_items() << dendl; parent->maybe_preempt_replica_scrub(soid); @@ -1019,7 +1019,7 @@ void ReplicatedBackend::do_repop(OpRequestRef op) rm->last_complete = get_info().last_complete; rm->epoch_started = get_osdmap()->get_epoch(); - assert(m->logbl.length()); + ceph_assert(m->logbl.length()); // shipped transaction and log entries vector log; @@ -1096,11 +1096,11 @@ void ReplicatedBackend::repop_commit(RepModifyRef rm) // send commit. const MOSDRepOp *m = static_cast(rm->op->get_req()); - assert(m->get_type() == MSG_OSD_REPOP); + ceph_assert(m->get_type() == MSG_OSD_REPOP); dout(10) << __func__ << " on op " << *m << ", sending commit to osd." << rm->ackerosd << dendl; - assert(get_osdmap()->is_up(rm->ackerosd)); + ceph_assert(get_osdmap()->is_up(rm->ackerosd)); get_parent()->update_last_complete_ondisk(rm->last_complete); @@ -1279,24 +1279,24 @@ void ReplicatedBackend::prepare_pull( ObjectContextRef headctx, RPGHandle *h) { - assert(get_parent()->get_local_missing().get_items().count(soid)); + ceph_assert(get_parent()->get_local_missing().get_items().count(soid)); eversion_t _v = get_parent()->get_local_missing().get_items().find( soid)->second.need; - assert(_v == v); + ceph_assert(_v == v); const map> &missing_loc( get_parent()->get_missing_loc_shards()); const map &peer_missing( get_parent()->get_shard_missing()); map>::const_iterator q = missing_loc.find(soid); - assert(q != missing_loc.end()); - assert(!q->second.empty()); + ceph_assert(q != missing_loc.end()); + ceph_assert(!q->second.empty()); // pick a pullee auto p = q->second.begin(); std::advance(p, util::generate_random_number(0, q->second.size() - 1)); - assert(get_osdmap()->is_up(p->osd)); + ceph_assert(get_osdmap()->is_up(p->osd)); pg_shard_t fromshard = *p; dout(7) << "pull " << soid @@ -1305,15 +1305,15 @@ void ReplicatedBackend::prepare_pull( << " from osd." << fromshard << dendl; - assert(peer_missing.count(fromshard)); + ceph_assert(peer_missing.count(fromshard)); const pg_missing_t &pmissing = peer_missing.find(fromshard)->second; if (pmissing.is_missing(soid, v)) { - assert(pmissing.get_items().find(soid)->second.have != v); + ceph_assert(pmissing.get_items().find(soid)->second.have != v); dout(10) << "pulling soid " << soid << " from osd " << fromshard << " at version " << pmissing.get_items().find(soid)->second.have << " rather than at version " << v << dendl; v = pmissing.get_items().find(soid)->second.have; - assert(get_parent()->get_log().get_log().objects.count(soid) && + ceph_assert(get_parent()->get_log().get_log().objects.count(soid) && (get_parent()->get_log().get_log().objects.find(soid)->second->op == pg_log_entry_t::LOST_REVERT) && (get_parent()->get_log().get_log().objects.find( @@ -1325,11 +1325,11 @@ void ReplicatedBackend::prepare_pull( ObcLockManager lock_manager; if (soid.is_snap()) { - assert(!get_parent()->get_local_missing().is_missing(soid.get_head())); - assert(headctx); + ceph_assert(!get_parent()->get_local_missing().is_missing(soid.get_head())); + ceph_assert(headctx); // check snapset SnapSetContext *ssc = headctx->ssc; - assert(ssc); + ceph_assert(ssc); dout(10) << " snapset " << ssc->snapset << dendl; recovery_info.ss = ssc->snapset; calc_clone_subsets( @@ -1341,7 +1341,7 @@ void ReplicatedBackend::prepare_pull( // FIXME: this may overestimate if we are pulling multiple clones in parallel... 
dout(10) << " pulling " << recovery_info << dendl; - assert(ssc->snapset.clone_size.count(soid.snap)); + ceph_assert(ssc->snapset.clone_size.count(soid.snap)); recovery_info.size = ssc->snapset.clone_size[soid.snap]; } else { // pulling head or unversioned object. @@ -1362,7 +1362,7 @@ void ReplicatedBackend::prepare_pull( op.recovery_progress.data_recovered_to = 0; op.recovery_progress.first = true; - assert(!pulling.count(soid)); + ceph_assert(!pulling.count(soid)); pull_from_peer[fromshard].insert(soid); PullInfo &pi = pulling[soid]; pi.from = fromshard; @@ -1405,15 +1405,15 @@ int ReplicatedBackend::prep_push_to_replica( } SnapSetContext *ssc = obc->ssc; - assert(ssc); + ceph_assert(ssc); dout(15) << "push_to_replica snapset is " << ssc->snapset << dendl; pop->recovery_info.ss = ssc->snapset; map::const_iterator pm = get_parent()->get_shard_missing().find(peer); - assert(pm != get_parent()->get_shard_missing().end()); + ceph_assert(pm != get_parent()->get_shard_missing().end()); map::const_iterator pi = get_parent()->get_shard_info().find(peer); - assert(pi != get_parent()->get_shard_info().end()); + ceph_assert(pi != get_parent()->get_shard_info().end()); calc_clone_subsets( ssc->snapset, soid, pm->second, @@ -1424,7 +1424,7 @@ int ReplicatedBackend::prep_push_to_replica( // pushing head or unversioned object. // base this on partially on replica's clones? SnapSetContext *ssc = obc->ssc; - assert(ssc); + ceph_assert(ssc); dout(15) << "push_to_replica snapset is " << ssc->snapset << dendl; calc_head_subsets( obc, @@ -1596,7 +1596,7 @@ ObjectRecoveryInfo ReplicatedBackend::recalc_subsets( ObjectRecoveryInfo new_info = recovery_info; new_info.copy_subset.clear(); new_info.clone_subset.clear(); - assert(ssc); + ceph_assert(ssc); get_parent()->release_locks(manager); // might already have locks calc_clone_subsets( ssc->snapset, new_info.soid, get_parent()->get_local_missing(), @@ -1627,7 +1627,7 @@ bool ReplicatedBackend::handle_pull_response( } const hobject_t &hoid = pop.soid; - assert((data_included.empty() && data.length() == 0) || + ceph_assert((data_included.empty() && data.length() == 0) || (!data_included.empty() && data.length() > 0)); auto piter = pulling.find(hoid); @@ -1867,14 +1867,14 @@ int ReplicatedBackend::build_push_op(const ObjectRecoveryInfo &recovery_info, } // Once we provide the version subsequent requests will have it, so // at this point it must be known. - assert(v != eversion_t()); + ceph_assert(v != eversion_t()); uint64_t available = cct->_conf->osd_recovery_max_chunk; if (!progress.omap_complete) { ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, ghobject_t(recovery_info.soid)); - assert(iter); + ceph_assert(iter); for (iter->lower_bound(progress.omap_recovered_to); iter->valid(); iter->next(false)) { @@ -2084,7 +2084,7 @@ void ReplicatedBackend::handle_pull(pg_shard_t peer, PullOp &op, PushOp *reply) recovery_info.copy_subset.clear(); if (st.st_size) recovery_info.copy_subset.insert(0, st.st_size); - assert(recovery_info.clone_subset.empty()); + ceph_assert(recovery_info.clone_subset.empty()); } r = build_push_op(recovery_info, progress, 0, reply); @@ -2175,7 +2175,7 @@ int ReplicatedBackend::start_pushes( dout(20) << __func__ << " soid " << soid << dendl; // who needs it? 
- assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0); + ceph_assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0); for (set::iterator i = get_parent()->get_acting_recovery_backfill_shards().begin(); i != get_parent()->get_acting_recovery_backfill_shards().end(); @@ -2184,7 +2184,7 @@ int ReplicatedBackend::start_pushes( pg_shard_t peer = *i; map::const_iterator j = get_parent()->get_shard_missing().find(peer); - assert(j != get_parent()->get_shard_missing().end()); + ceph_assert(j != get_parent()->get_shard_missing().end()); if (j->second.is_missing(soid)) { shards.push_back(j); } diff --git a/src/osd/ReplicatedBackend.h b/src/osd/ReplicatedBackend.h index 6dbba861a9a62..35feef16bce7b 100644 --- a/src/osd/ReplicatedBackend.h +++ b/src/osd/ReplicatedBackend.h @@ -102,7 +102,7 @@ public: j != i->second.end(); ++j) { f->open_object_section("pull_info"); - assert(pulling.count(*j)); + ceph_assert(pulling.count(*j)); pulling.find(*j)->second.dump(f); f->close_section(); } diff --git a/src/osd/ScrubStore.cc b/src/osd/ScrubStore.cc index 08dcae85e2ff3..036f4a88e7393 100644 --- a/src/osd/ScrubStore.cc +++ b/src/osd/ScrubStore.cc @@ -99,8 +99,8 @@ Store::create(ObjectStore* store, const spg_t& pgid, const coll_t& coll) { - assert(store); - assert(t); + ceph_assert(store); + ceph_assert(t); ghobject_t oid = make_scrub_object(pgid); t->touch(coll, oid); return new Store{coll, oid, store}; @@ -115,7 +115,7 @@ Store::Store(const coll_t& coll, const ghobject_t& oid, ObjectStore* store) Store::~Store() { - assert(results.empty()); + ceph_assert(results.empty()); } void Store::add_object_error(int64_t pool, const inconsistent_obj_wrapper& e) diff --git a/src/osd/Session.cc b/src/osd/Session.cc index f24a7d072fc3d..82f10e2d0abbd 100644 --- a/src/osd/Session.cc +++ b/src/osd/Session.cc @@ -22,14 +22,14 @@ void Session::clear_backoffs() for (auto& b : p.second) { Mutex::Locker l(b->lock); if (b->pg) { - assert(b->session == this); - assert(b->is_new() || b->is_acked()); + ceph_assert(b->session == this); + ceph_assert(b->is_new() || b->is_acked()); b->pg->rm_backoff(b); b->pg.reset(); b->session.reset(); } else if (b->session) { - assert(b->session == this); - assert(b->is_deleting()); + ceph_assert(b->session == this); + ceph_assert(b->is_deleting()); b->session.reset(); } } @@ -79,7 +79,7 @@ void Session::ack_backoff( backoffs.erase(p); } } - assert(!backoff_count == backoffs.empty()); + ceph_assert(!backoff_count == backoffs.empty()); } bool Session::check_backoff( @@ -89,7 +89,7 @@ bool Session::check_backoff( if (b) { dout(10) << __func__ << " session " << this << " has backoff " << *b << " for " << *m << dendl; - assert(!b->is_acked() || !g_conf()->osd_debug_crash_on_ignored_backoff); + ceph_assert(!b->is_acked() || !g_conf()->osd_debug_crash_on_ignored_backoff); return true; } // we may race with ms_handle_reset. 
it clears session->con before removing diff --git a/src/osd/Session.h b/src/osd/Session.h index 2ced429080452..8b0e897f31a21 100644 --- a/src/osd/Session.h +++ b/src/osd/Session.h @@ -176,7 +176,7 @@ struct Session : public RefCountedObject { return nullptr; } Mutex::Locker l(backoff_lock); - assert(!backoff_count == backoffs.empty()); + ceph_assert(!backoff_count == backoffs.empty()); auto i = backoffs.find(pgid); if (i == backoffs.end()) { return nullptr; @@ -204,7 +204,7 @@ struct Session : public RefCountedObject { void add_backoff(BackoffRef b) { Mutex::Locker l(backoff_lock); - assert(!backoff_count == backoffs.empty()); + ceph_assert(!backoff_count == backoffs.empty()); backoffs[b->pgid][b->begin].insert(b); ++backoff_count; } @@ -212,8 +212,8 @@ struct Session : public RefCountedObject { // called by PG::release_*_backoffs and PG::clear_backoffs() void rm_backoff(BackoffRef b) { Mutex::Locker l(backoff_lock); - assert(b->lock.is_locked_by_me()); - assert(b->session == this); + ceph_assert(b->lock.is_locked_by_me()); + ceph_assert(b->session == this); auto i = backoffs.find(b->pgid); if (i != backoffs.end()) { // may race with clear_backoffs() @@ -232,7 +232,7 @@ struct Session : public RefCountedObject { } } } - assert(!backoff_count == backoffs.empty()); + ceph_assert(!backoff_count == backoffs.empty()); } void clear_backoffs(); }; diff --git a/src/osd/SnapMapper.cc b/src/osd/SnapMapper.cc index 125dd66336b8d..3e4b1abf1b827 100644 --- a/src/osd/SnapMapper.cc +++ b/src/osd/SnapMapper.cc @@ -149,7 +149,7 @@ int SnapMapper::get_snaps( const hobject_t &oid, object_snaps *out) { - assert(check(oid)); + ceph_assert(check(oid)); set keys; map got; keys.insert(to_object_key(oid)); @@ -168,7 +168,7 @@ int SnapMapper::get_snaps( dout(20) << __func__ << " " << oid << " " << out->snaps << dendl; if (out->snaps.empty()) { dout(1) << __func__ << " " << oid << " empty snapset" << dendl; - assert(!cct->_conf->osd_debug_verify_snaps); + ceph_assert(!cct->_conf->osd_debug_verify_snaps); } } else { dout(20) << __func__ << " " << oid << " (out == NULL)" << dendl; @@ -181,7 +181,7 @@ void SnapMapper::clear_snaps( MapCacher::Transaction *t) { dout(20) << __func__ << " " << oid << dendl; - assert(check(oid)); + ceph_assert(check(oid)); set to_remove; to_remove.insert(to_object_key(oid)); if (g_conf()->subsys.should_gather()) { @@ -197,7 +197,7 @@ void SnapMapper::set_snaps( const object_snaps &in, MapCacher::Transaction *t) { - assert(check(oid)); + ceph_assert(check(oid)); map to_set; bufferlist bl; encode(in, bl); @@ -220,7 +220,7 @@ int SnapMapper::update_snaps( dout(20) << __func__ << " " << oid << " " << new_snaps << " was " << (old_snaps_check ? 
*old_snaps_check : set()) << dendl; - assert(check(oid)); + ceph_assert(check(oid)); if (new_snaps.empty()) return remove_oid(oid, t); @@ -229,7 +229,7 @@ int SnapMapper::update_snaps( if (r < 0) return r; if (old_snaps_check) - assert(out.snaps == *old_snaps_check); + ceph_assert(out.snaps == *old_snaps_check); object_snaps in(oid, new_snaps); set_snaps(oid, in, t); @@ -257,15 +257,15 @@ void SnapMapper::add_oid( MapCacher::Transaction *t) { dout(20) << __func__ << " " << oid << " " << snaps << dendl; - assert(!snaps.empty()); - assert(check(oid)); + ceph_assert(!snaps.empty()); + ceph_assert(check(oid)); { object_snaps out; int r = get_snaps(oid, &out); if (r != -ENOENT) { derr << __func__ << " found existing snaps mapped on " << oid << ", removing" << dendl; - assert(!cct->_conf->osd_debug_verify_snaps); + ceph_assert(!cct->_conf->osd_debug_verify_snaps); remove_oid(oid, t); } } @@ -292,8 +292,8 @@ int SnapMapper::get_next_objects_to_trim( unsigned max, vector *out) { - assert(out); - assert(out->empty()); + ceph_assert(out); + ceph_assert(out->empty()); int r = 0; for (set::iterator i = prefixes.begin(); i != prefixes.end() && out->size() < max && r == 0; @@ -314,12 +314,12 @@ int SnapMapper::get_next_objects_to_trim( break; // Done with this prefix } - assert(is_mapping(next.first)); + ceph_assert(is_mapping(next.first)); dout(20) << __func__ << " " << next.first << dendl; pair next_decoded(from_raw(next)); - assert(next_decoded.first == snap); - assert(check(next_decoded.second)); + ceph_assert(next_decoded.first == snap); + ceph_assert(check(next_decoded.second)); out->push_back(next_decoded.second); pos = next.first; @@ -338,7 +338,7 @@ int SnapMapper::remove_oid( MapCacher::Transaction *t) { dout(20) << __func__ << " " << oid << dendl; - assert(check(oid)); + ceph_assert(check(oid)); return _remove_oid(oid, t); } @@ -373,7 +373,7 @@ int SnapMapper::get_snaps( const hobject_t &oid, std::set *snaps) { - assert(check(oid)); + ceph_assert(check(oid)); object_snaps out; int r = get_snaps(oid, &out); if (r < 0) diff --git a/src/osd/SnapMapper.h b/src/osd/SnapMapper.h index f553ba45d37b5..26e90d60c0639 100644 --- a/src/osd/SnapMapper.h +++ b/src/osd/SnapMapper.h @@ -157,7 +157,7 @@ public: return string(); char buf[20]; int r = snprintf(buf, sizeof(buf), ".%x", (int)shard); - assert(r < (int)sizeof(buf)); + ceph_assert(r < (int)sizeof(buf)); return string(buf, r) + '_'; } uint32_t mask_bits; @@ -184,7 +184,7 @@ public: void update_bits( uint32_t new_bits ///< [in] new split bits ) { - assert(new_bits >= mask_bits); + ceph_assert(new_bits >= mask_bits); mask_bits = new_bits; set _prefixes = hobject_t::get_prefixes( mask_bits, diff --git a/src/osd/TierAgentState.h b/src/osd/TierAgentState.h index e1665e6418688..5372685b78e83 100644 --- a/src/osd/TierAgentState.h +++ b/src/osd/TierAgentState.h @@ -42,7 +42,7 @@ struct TierAgentState { case FLUSH_MODE_IDLE: return "idle"; case FLUSH_MODE_LOW: return "low"; case FLUSH_MODE_HIGH: return "high"; - default: assert(0 == "bad flush mode"); + default: ceph_assert(0 == "bad flush mode"); } } const char *get_flush_mode_name() const { @@ -59,7 +59,7 @@ struct TierAgentState { case EVICT_MODE_IDLE: return "idle"; case EVICT_MODE_SOME: return "some"; case EVICT_MODE_FULL: return "full"; - default: assert(0 == "bad evict mode"); + default: ceph_assert(0 == "bad evict mode"); } } const char *get_evict_mode_name() const { diff --git a/src/osd/Watch.cc b/src/osd/Watch.cc index d07af3faf0c22..29d325aeb9e82 100644 --- a/src/osd/Watch.cc +++ b/src/osd/Watch.cc 
@@ -84,14 +84,14 @@ public: notif->osd->watch_lock.Lock(); } void cancel() override { - assert(notif->lock.is_locked_by_me()); + ceph_assert(notif->lock.is_locked_by_me()); canceled = true; } }; void Notify::do_timeout() { - assert(lock.is_locked_by_me()); + ceph_assert(lock.is_locked_by_me()); dout(10) << "timeout" << dendl; cb = nullptr; if (is_discarded()) { @@ -101,7 +101,7 @@ void Notify::do_timeout() timed_out = true; // we will send the client an error code maybe_complete_notify(); - assert(complete); + ceph_assert(complete); set _watchers; _watchers.swap(watchers); lock.Unlock(); @@ -120,7 +120,7 @@ void Notify::do_timeout() void Notify::register_cb() { - assert(lock.is_locked_by_me()); + ceph_assert(lock.is_locked_by_me()); { osd->watch_lock.Lock(); cb = new NotifyTimeoutCB(self.lock()); @@ -133,7 +133,7 @@ void Notify::register_cb() void Notify::unregister_cb() { - assert(lock.is_locked_by_me()); + ceph_assert(lock.is_locked_by_me()); if (!cb) return; cb->cancel(); @@ -158,7 +158,7 @@ void Notify::complete_watcher(WatchRef watch, bufferlist& reply_bl) dout(10) << "complete_watcher" << dendl; if (is_discarded()) return; - assert(watchers.count(watch)); + ceph_assert(watchers.count(watch)); watchers.erase(watch); notify_replies.insert(make_pair(make_pair(watch->get_watcher_gid(), watch->get_cookie()), @@ -172,7 +172,7 @@ void Notify::complete_watcher_remove(WatchRef watch) dout(10) << __func__ << dendl; if (is_discarded()) return; - assert(watchers.count(watch)); + ceph_assert(watchers.count(watch)); watchers.erase(watch); maybe_complete_notify(); } @@ -267,7 +267,7 @@ public: void finish(int) override { OSDService *osd(watch->osd); dout(10) << "HandleWatchTimeoutDelayed" << dendl; - assert(watch->pg->is_locked()); + ceph_assert(watch->pg->is_locked()); watch->cb = nullptr; if (!watch->is_discarded() && !canceled) watch->pg->handle_watch_timeout(watch); @@ -307,15 +307,15 @@ Watch::Watch( Watch::~Watch() { dout(10) << "~Watch" << dendl; // users must have called remove() or discard() prior to this point - assert(!obc); - assert(!conn); + ceph_assert(!obc); + ceph_assert(!conn); } bool Watch::connected() { return !!conn; } Context *Watch::get_delayed_cb() { - assert(!cb); + ceph_assert(!cb); cb = new HandleDelayedWatchTimeout(self.lock()); return cb; } @@ -407,9 +407,9 @@ void Watch::discard() void Watch::discard_state() { - assert(pg->is_locked()); - assert(!discarded); - assert(obc); + ceph_assert(pg->is_locked()); + ceph_assert(!discarded); + ceph_assert(obc); in_progress_notifies.clear(); unregister_cb(); discarded = true; @@ -447,7 +447,7 @@ void Watch::remove(bool send_disconnect) void Watch::start_notify(NotifyRef notif) { - assert(in_progress_notifies.find(notif->notify_id) == + ceph_assert(in_progress_notifies.find(notif->notify_id) == in_progress_notifies.end()); if (will_ping) { utime_t cutoff = ceph_clock_now(); diff --git a/src/osd/mClockOpClassSupport.cc b/src/osd/mClockOpClassSupport.cc index 5ff4fd76d113f..83bea1c46f21f 100644 --- a/src/osd/mClockOpClassSupport.cc +++ b/src/osd/mClockOpClassSupport.cc @@ -75,7 +75,7 @@ namespace ceph { } void OpClassClientInfoMgr::add_rep_op_msg(int message_code) { - assert(message_code >= 0 && message_code < int(rep_op_msg_bitset_size)); + ceph_assert(message_code >= 0 && message_code < int(rep_op_msg_bitset_size)); rep_op_msg_bitset.set(message_code); } @@ -89,7 +89,7 @@ namespace ceph { // stores type as unsigned little endian, so be sure to // convert to CPU byte ordering boost::optional op_ref_maybe = op.maybe_get_op(); - 
assert(op_ref_maybe); + ceph_assert(op_ref_maybe); __le16 mtype_le = (*op_ref_maybe)->get_req()->get_header().type; __u16 mtype = le16_to_cpu(mtype_le); if (rep_op_msg_bitset.test(mtype)) { diff --git a/src/osd/osd_internal_types.h b/src/osd/osd_internal_types.h index 79ef12b9f594c..775cdf7a646ce 100644 --- a/src/osd/osd_internal_types.h +++ b/src/osd/osd_internal_types.h @@ -105,7 +105,7 @@ public: } switch (state) { case RWNONE: - assert(count == 0); + ceph_assert(count == 0); state = RWREAD; // fall through case RWREAD: @@ -116,7 +116,7 @@ public: case RWEXCL: return false; default: - assert(0 == "unhandled case"); + ceph_assert(0 == "unhandled case"); return false; } } @@ -139,7 +139,7 @@ public: } switch (state) { case RWNONE: - assert(count == 0); + ceph_assert(count == 0); state = RWWRITE; // fall through case RWWRITE: @@ -150,14 +150,14 @@ public: case RWEXCL: return false; default: - assert(0 == "unhandled case"); + ceph_assert(0 == "unhandled case"); return false; } } bool get_excl_lock() { switch (state) { case RWNONE: - assert(count == 0); + ceph_assert(count == 0); state = RWEXCL; count = 1; return true; @@ -168,7 +168,7 @@ public: case RWEXCL: return false; default: - assert(0 == "unhandled case"); + ceph_assert(0 == "unhandled case"); return false; } } @@ -189,8 +189,8 @@ public: return get_write_lock(); } void dec(list *requeue) { - assert(count > 0); - assert(requeue); + ceph_assert(count > 0); + ceph_assert(requeue); count--; if (count == 0) { state = RWNONE; @@ -198,15 +198,15 @@ public: } } void put_read(list *requeue) { - assert(state == RWREAD); + ceph_assert(state == RWREAD); dec(requeue); } void put_write(list *requeue) { - assert(state == RWWRITE); + ceph_assert(state == RWWRITE); dec(requeue); } void put_excl(list *requeue) { - assert(state == RWEXCL); + ceph_assert(state == RWEXCL); dec(requeue); } bool empty() const { return state == RWNONE; } @@ -230,7 +230,7 @@ public: case RWState::RWEXCL: return get_excl(op); default: - assert(0 == "invalid lock type"); + ceph_assert(0 == "invalid lock type"); return true; } } @@ -257,7 +257,7 @@ public: return rwstate.get_read_lock(); } void drop_recovery_read(list *ls) { - assert(rwstate.recovery_read_marker); + ceph_assert(rwstate.recovery_read_marker); rwstate.put_read(ls); rwstate.recovery_read_marker = false; } @@ -277,7 +277,7 @@ public: rwstate.put_excl(to_wake); break; default: - assert(0 == "invalid lock type"); + ceph_assert(0 == "invalid lock type"); } if (rwstate.empty() && rwstate.recovery_read_marker) { rwstate.recovery_read_marker = false; @@ -298,17 +298,17 @@ public: blocked(false), requeue_scrub_on_unblock(false) {} ~ObjectContext() { - assert(rwstate.empty()); + ceph_assert(rwstate.empty()); if (destructor_callback) destructor_callback->complete(0); } void start_block() { - assert(!blocked); + ceph_assert(!blocked); blocked = true; } void stop_block() { - assert(blocked); + ceph_assert(blocked); blocked = false; } bool is_blocked() const { @@ -365,7 +365,7 @@ public: const hobject_t &hoid, ObjectContextRef& obc, OpRequestRef& op) { - assert(locks.find(hoid) == locks.end()); + ceph_assert(locks.find(hoid) == locks.end()); if (obc->get_lock_type(op, type)) { locks.insert(make_pair(hoid, ObjectLockState(obc, type))); return true; @@ -377,7 +377,7 @@ public: bool take_write_lock( const hobject_t &hoid, ObjectContextRef obc) { - assert(locks.find(hoid) == locks.end()); + ceph_assert(locks.find(hoid) == locks.end()); if (obc->rwstate.take_write_lock()) { locks.insert( make_pair( @@ -392,7 +392,7 @@ public: 
const hobject_t &hoid, ObjectContextRef obc, bool mark_if_unsuccessful) { - assert(locks.find(hoid) == locks.end()); + ceph_assert(locks.find(hoid) == locks.end()); if (obc->get_snaptrimmer_write(mark_if_unsuccessful)) { locks.insert( make_pair( @@ -407,7 +407,7 @@ public: const hobject_t &hoid, ObjectContextRef obc, OpRequestRef op) { - assert(locks.find(hoid) == locks.end()); + ceph_assert(locks.find(hoid) == locks.end()); if (obc->get_write_greedy(op)) { locks.insert( make_pair( @@ -422,7 +422,7 @@ public: bool try_get_read_lock( const hobject_t &hoid, ObjectContextRef obc) { - assert(locks.find(hoid) == locks.end()); + ceph_assert(locks.find(hoid) == locks.end()); if (obc->try_get_read_lock()) { locks.insert( make_pair( @@ -455,7 +455,7 @@ public: locks.clear(); } ~ObcLockManager() { - assert(locks.empty()); + ceph_assert(locks.empty()); } }; diff --git a/src/osd/osd_types.cc b/src/osd/osd_types.cc index 6e343e967f4db..93aa37f4e448b 100644 --- a/src/osd/osd_types.cc +++ b/src/osd/osd_types.cc @@ -180,7 +180,7 @@ void osd_reqid_t::generate_test_instances(list& o) void object_locator_t::encode(bufferlist& bl) const { // verify that nobody's corrupted the locator - assert(hash == -1 || key.empty()); + ceph_assert(hash == -1 || key.empty()); __u8 encode_compat = 3; ENCODE_START(6, encode_compat, bl); encode(pool, bl); @@ -217,7 +217,7 @@ void object_locator_t::decode(bufferlist::const_iterator& p) hash = -1; DECODE_FINISH(p); // verify that nobody's corrupted the locator - assert(hash == -1 || key.empty()); + ceph_assert(hash == -1 || key.empty()); } void object_locator_t::dump(Formatter *f) const @@ -501,7 +501,7 @@ pg_t pg_t::get_ancestor(unsigned old_pg_num) const bool pg_t::is_split(unsigned old_pg_num, unsigned new_pg_num, set *children) const { - assert(m_seed < old_pg_num); + ceph_assert(m_seed < old_pg_num); if (new_pg_num <= old_pg_num) return false; @@ -542,11 +542,11 @@ bool pg_t::is_split(unsigned old_pg_num, unsigned new_pg_num, set *childre unsigned pg_t::get_split_bits(unsigned pg_num) const { if (pg_num == 1) return 0; - assert(pg_num > 1); + ceph_assert(pg_num > 1); // Find unique p such that pg_num \in [2^(p-1), 2^p) unsigned p = cbits(pg_num); - assert(p); // silence coverity #751330 + ceph_assert(p); // silence coverity #751330 if ((m_seed % (1<<(p-1))) < (pg_num % (1<<(p-1)))) return p; @@ -557,7 +557,7 @@ unsigned pg_t::get_split_bits(unsigned pg_num) const { pg_t pg_t::get_parent() const { unsigned bits = cbits(m_seed); - assert(bits); + ceph_assert(bits); pg_t retval = *this; retval.m_seed &= ~((~0)<<(bits - 1)); return retval; @@ -578,7 +578,7 @@ hobject_t pg_t::get_hobj_end(unsigned pg_num) const uint64_t rev_start = hobject_t::_reverse_bits(m_seed); uint64_t rev_end = (rev_start | (0xffffffff >> bits)) + 1; if (rev_end >= 0x100000000) { - assert(rev_end == 0x100000000); + ceph_assert(rev_end == 0x100000000); return hobject_t::get_max(); } else { return hobject_t(object_t(), string(), CEPH_NOSNAP, @@ -640,7 +640,7 @@ void coll_t::calc_str() _str = pgid.calc_name(_str_buff + spg_t::calc_name_buf_size - 1, "PMET_"); break; default: - assert(0 == "unknown collection type"); + ceph_assert(0 == "unknown collection type"); } } @@ -651,7 +651,7 @@ bool coll_t::parse(const std::string& s) pgid = spg_t(); removal_seq = 0; calc_str(); - assert(s == _str); + ceph_assert(s == _str); return true; } if (s.find("_head") == s.length() - 5 && @@ -659,7 +659,7 @@ bool coll_t::parse(const std::string& s) type = TYPE_PG; removal_seq = 0; calc_str(); - assert(s == _str); + 
     return true;
   }
   if (s.find("_TEMP") == s.length() - 5 &&
@@ -667,7 +667,7 @@ bool coll_t::parse(const std::string& s)
     type = TYPE_PG_TEMP;
     removal_seq = 0;
     calc_str();
-    assert(s == _str);
+    ceph_assert(s == _str);
     return true;
   }
   return false;
@@ -1034,7 +1034,7 @@ bool pool_opts_t::is_opt_name(const std::string& name) {
 pool_opts_t::opt_desc_t pool_opts_t::get_opt_desc(const std::string& name) {
   opt_mapping_t::iterator i = opt_mapping.find(name);
-  assert(i != opt_mapping.end());
+  ceph_assert(i != opt_mapping.end());
   return i->second;
 }
@@ -1044,7 +1044,7 @@ bool pool_opts_t::is_set(pool_opts_t::key_t key) const {
 const pool_opts_t::value_t& pool_opts_t::get(pool_opts_t::key_t key) const {
   opts_t::const_iterator i = opts.find(key);
-  assert(i != opts.end());
+  ceph_assert(i != opts.end());
   return i->second;
 }
@@ -1152,7 +1152,7 @@ void pool_opts_t::decode(bufferlist::const_iterator& bl) {
       decode(d, bl);
       opts[static_cast(k)] = d;
     } else {
-      assert(!"invalid type");
+      ceph_assert(!"invalid type");
     }
   }
   DECODE_FINISH(bl);
@@ -1350,7 +1350,7 @@ snapid_t pg_pool_t::snap_exists(const char *s) const
 void pg_pool_t::add_snap(const char *n, utime_t stamp)
 {
-  assert(!is_unmanaged_snaps_mode());
+  ceph_assert(!is_unmanaged_snaps_mode());
   flags |= FLAG_POOL_SNAPS;
   snapid_t s = get_snap_seq() + 1;
   snap_seq = s;
@@ -1361,7 +1361,7 @@ void pg_pool_t::add_snap(const char *n, utime_t stamp)
 void pg_pool_t::add_unmanaged_snap(uint64_t& snapid)
 {
-  assert(!is_pool_snaps_mode());
+  ceph_assert(!is_pool_snaps_mode());
   if (snap_seq == 0) {
     // kludge for pre-mimic tracking of pool vs selfmanaged snaps. after
     // mimic this field is not decoded but our flag is set; pre-mimic, we
@@ -1375,14 +1375,14 @@ void pg_pool_t::add_unmanaged_snap(uint64_t& snapid)
 void pg_pool_t::remove_snap(snapid_t s)
 {
-  assert(snaps.count(s));
+  ceph_assert(snaps.count(s));
   snaps.erase(s);
   snap_seq = snap_seq + 1;
 }
 void pg_pool_t::remove_unmanaged_snap(snapid_t s)
 {
-  assert(is_unmanaged_snaps_mode());
+  ceph_assert(is_unmanaged_snaps_mode());
   removed_snaps.insert(s);
   snap_seq = snap_seq + 1;
   // try to add in the new seq, just to try to keep the interval_set contiguous
@@ -3170,7 +3170,7 @@ public:
     bool ec_pool, const PastIntervals::pg_interval_t &interval) override {
     if (first == 0)
       first = interval.first;
-    assert(interval.last > last);
+    ceph_assert(interval.last > last);
     last = interval.last;
     set acting;
     for (unsigned i = 0; i < interval.acting.size(); ++i) {
@@ -3322,7 +3322,7 @@ void PastIntervals::decode(bufferlist::const_iterator &bl)
   case 0:
     break;
   case 1:
-    assert(0 == "pi_simple_rep support removed post-luminous");
+    ceph_assert(0 == "pi_simple_rep support removed post-luminous");
     break;
   case 2:
     past_intervals.reset(new pi_compact_rep);
@@ -3482,8 +3482,8 @@ bool PastIntervals::check_new_interval(
   // NOTE: a change in the up set primary triggers an interval
   // change, even though the interval members in the pg_interval_t
   // do not change.
-  assert(past_intervals);
-  assert(past_intervals->past_intervals);
+  ceph_assert(past_intervals);
+  ceph_assert(past_intervals->past_intervals);
   if (is_new_interval(
         old_acting_primary,
         new_acting_primary,
@@ -3499,7 +3499,7 @@ bool PastIntervals::check_new_interval(
     pg_interval_t i;
     i.first = same_interval_since;
     i.last = osdmap->get_epoch() - 1;
-    assert(i.first <= i.last);
+    ceph_assert(i.first <= i.last);
     i.acting = old_acting;
     i.up = old_up;
     i.primary = old_acting_primary;
@@ -3511,7 +3511,7 @@ bool PastIntervals::check_new_interval(
       if (*p != CRUSH_ITEM_NONE)
         ++num_acting;
-    assert(lastmap->get_pools().count(pgid.pool()));
+    ceph_assert(lastmap->get_pools().count(pgid.pool()));
     const pg_pool_t& old_pg_pool = lastmap->get_pools().find(pgid.pool())->second;
     set old_acting_shards;
     old_pg_pool.convert_to_pg_shards(old_acting, &old_acting_shards);
@@ -3745,12 +3745,12 @@ void ObjectModDesc::visit(Visitor *visitor) const
         break;
       }
       default:
-        assert(0 == "Invalid rollback code");
+        ceph_assert(0 == "Invalid rollback code");
       }
       DECODE_FINISH(bp);
     }
   } catch (...) {
-    assert(0 == "Invalid encoding");
+    ceph_assert(0 == "Invalid encoding");
   }
 }
@@ -4246,7 +4246,7 @@ void pg_log_t::copy_after(const pg_log_t &other, eversion_t v)
   for (list::const_reverse_iterator i = other.log.rbegin();
        i != other.log.rend();
        ++i) {
-    assert(i->version > other.tail);
+    ceph_assert(i->version > other.tail);
     if (i->version <= v) {
       // make tail accurate.
       tail = i->version;
@@ -4260,12 +4260,12 @@ void pg_log_t::copy_range(const pg_log_t &other, eversion_t from, eversion_t to)
 {
   can_rollback_to = other.can_rollback_to;
   list::const_reverse_iterator i = other.log.rbegin();
-  assert(i != other.log.rend());
+  ceph_assert(i != other.log.rend());
   while (i->version > to) {
     ++i;
-    assert(i != other.log.rend());
+    ceph_assert(i != other.log.rend());
   }
-  assert(i->version == to);
+  ceph_assert(i->version == to);
   head = to;
   for ( ; i != other.log.rend(); ++i) {
     if (i->version <= from) {
@@ -4873,11 +4873,11 @@ void SnapSet::from_snap_set(const librados::snap_set_t& ss, bool legacy)
 uint64_t SnapSet::get_clone_bytes(snapid_t clone) const
 {
-  assert(clone_size.count(clone));
+  ceph_assert(clone_size.count(clone));
   uint64_t size = clone_size.find(clone)->second;
-  assert(clone_overlap.count(clone));
+  ceph_assert(clone_overlap.count(clone));
   const interval_set &overlap = clone_overlap.find(clone)->second;
-  assert(size >= (uint64_t)overlap.size());
+  ceph_assert(size >= (uint64_t)overlap.size());
   return size - overlap.size();
 }
@@ -5664,7 +5664,7 @@ uint64_t PushOp::cost(CephContext *cct) const
 void ScrubMap::merge_incr(const ScrubMap &l)
 {
-  assert(valid_through == l.incr_since);
+  ceph_assert(valid_through == l.incr_since);
   valid_through = l.valid_through;
   for (map::const_iterator p = l.objects.begin();
diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h
index fb84455c732e4..4156718b86ff7 100644
--- a/src/osd/osd_types.h
+++ b/src/osd/osd_types.h
@@ -380,7 +380,7 @@ struct pg_t {
   old_pg_t get_old_pg() const {
     old_pg_t o;
-    assert(m_pool < 0xffffffffull);
+    ceph_assert(m_pool < 0xffffffffull);
     o.v.pool = m_pool;
     o.v.ps = m_seed;
     o.v.preferred = (__s16)-1;
@@ -711,7 +711,7 @@ public:
   // get a TEMP collection that corresponds to the current collection,
   // which we presume is a pg collection.
   coll_t get_temp() const {
-    assert(type == TYPE_PG);
+    ceph_assert(type == TYPE_PG);
     return coll_t(TYPE_PG_TEMP, pgid, 0);
   }
@@ -1305,7 +1305,7 @@ struct pg_pool_t {
     case CACHEMODE_READPROXY:
       return true;
     default:
-      assert(0 == "implement me");
+      ceph_assert(0 == "implement me");
     }
   }
@@ -1532,7 +1532,7 @@ public:
     case TYPE_ERASURE:
       return false;
     default:
-      assert(0 == "unhandled pool type");
+      ceph_assert(0 == "unhandled pool type");
     }
   }
@@ -2614,7 +2614,7 @@ struct pg_notify_t {
     : query_epoch(query_epoch),
       epoch_sent(epoch_sent),
       info(info), to(to), from(from) {
-    assert(from == info.pgid.shard);
+    ceph_assert(from == info.pgid.shard);
   }
   void encode(bufferlist &bl) const;
   void decode(bufferlist::const_iterator &p);
@@ -2692,8 +2692,8 @@ public:
   virtual bool has_full_intervals() const { return false; }
   virtual void iterate_all_intervals(
     std::function &&f) const {
-    assert(!has_full_intervals());
-    assert(0 == "not valid for this implementation");
+    ceph_assert(!has_full_intervals());
+    ceph_assert(0 == "not valid for this implementation");
   }
   virtual ~interval_rep() {}
@@ -2707,7 +2707,7 @@ private:
 public:
   void add_interval(bool ec_pool, const pg_interval_t &interval) {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     return past_intervals->add_interval(ec_pool, interval);
   }
@@ -2726,7 +2726,7 @@ public:
   void decode(bufferlist::const_iterator &bl);
   void dump(Formatter *f) const {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     past_intervals->dump(f);
   }
   static void generate_test_instances(list & o);
@@ -2802,11 +2802,11 @@ public:
   void iterate_mayberw_back_to(
     epoch_t les,
     F &&f) const {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     past_intervals->iterate_mayberw_back_to(les, std::forward(f));
   }
   void clear() {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     past_intervals->clear();
   }
@@ -2815,12 +2815,12 @@ public:
    * of state contained
    */
   size_t size() const {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     return past_intervals->size();
   }
   bool empty() const {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     return past_intervals->empty();
   }
@@ -2836,7 +2836,7 @@ public:
   set get_might_have_unfound(
     pg_shard_t pg_whoami,
     bool ec_pool) const {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     auto ret = past_intervals->get_all_participants(ec_pool);
     ret.erase(pg_whoami);
     return ret;
@@ -2847,7 +2847,7 @@ public:
    */
   set get_all_probe(
     bool ec_pool) const {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     return past_intervals->get_all_participants(ec_pool);
   }
@@ -2855,7 +2855,7 @@ public:
    * past_interval set.
    */
   pair get_bounds() const {
-    assert(past_intervals);
+    ceph_assert(past_intervals);
     return past_intervals->get_bounds();
   }
@@ -3068,7 +3068,7 @@ PastIntervals::PriorSet::PriorSet(
       // fixme: how do we identify a "clean" shutdown anyway?
       ldpp_dout(dpp, 10) << "build_prior possibly went active+rw,"
                          << " insufficient up; including down osds" << dendl;
-      assert(!candidate_blocked_by.empty());
+      ceph_assert(!candidate_blocked_by.empty());
       pg_down = true;
       blocked_by.insert(
         candidate_blocked_by.begin(),
@@ -3124,7 +3124,7 @@ struct pg_query_t {
       history(h),
       epoch_sent(epoch_sent),
       to(to), from(from) {
-    assert(t != LOG);
+    ceph_assert(t != LOG);
   }
   pg_query_t(
     int t,
@@ -3135,7 +3135,7 @@ struct pg_query_t {
     epoch_t epoch_sent)
     : type(t), since(s), history(h),
       epoch_sent(epoch_sent), to(to), from(from) {
-    assert(t == LOG);
+    ceph_assert(t == LOG);
   }
   void encode(bufferlist &bl, uint64_t features) const;
@@ -3281,8 +3281,8 @@ public:
   }
   void rollback_extents(
     version_t gen, const vector > &extents) {
-    assert(can_local_rollback);
-    assert(!rollback_info_completed);
+    ceph_assert(can_local_rollback);
+    ceph_assert(!rollback_info_completed);
     if (max_required_version < 2)
       max_required_version = 2;
     ENCODE_START(2, 2, bl);
@@ -3602,7 +3602,7 @@ public:
   }
   mempool::osd_pglog::list rewind_from_head(eversion_t newhead) {
-    assert(newhead >= tail);
+    ceph_assert(newhead >= tail);
     mempool::osd_pglog::list::iterator p = log.end();
     mempool::osd_pglog::list divergent;
@@ -3627,7 +3627,7 @@ public:
         divergent.splice(divergent.begin(), log, p, log.end());
         break;
       }
-      assert(p->version > newhead);
+      ceph_assert(p->version > newhead);
     }
     head = newhead;
@@ -3897,7 +3897,7 @@ public:
       return eversion_t();
     }
     auto it = missing.find(rmissing.begin()->second);
-    assert(it != missing.end());
+    ceph_assert(it != missing.end());
     return it->second.need;
   }
@@ -3929,7 +3929,7 @@ public:
       missing_it->second.set_delete(e.is_delete());
     } else {
       // not missing, we must have prior_version (if any)
-      assert(!is_missing_divergent_item);
+      ceph_assert(!is_missing_divergent_item);
       missing[e.soid] = item(e.version, e.prior_version, e.is_delete());
     }
     rmissing[e.version.version] = e.soid;
@@ -3977,8 +3977,8 @@ public:
   void got(const hobject_t& oid, eversion_t v) {
     std::map::iterator p = missing.find(oid);
-    assert(p != missing.end());
-    assert(p->second.need <= v || p->second.is_delete());
+    ceph_assert(p != missing.end());
+    ceph_assert(p->second.need <= v || p->second.is_delete());
     got(p);
   }
@@ -4257,7 +4257,7 @@ struct pg_ls_response_t {
     using ceph::decode;
     __u8 v;
     decode(v, bl);
-    assert(v == 1);
+    ceph_assert(v == 1);
     decode(handle, bl);
     decode(entries, bl);
   }
-- 
2.39.5
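
Note (not part of the patch): a minimal, self-contained sketch of why an
always-on assertion macro matters, which is the behavioral point behind this
conversion. This is not Ceph's actual ceph_assert definition; the names
my_always_assert and fail_handler below are hypothetical stand-ins. Standard
assert() compiles to a no-op when NDEBUG is defined, whereas a
ceph_assert-style macro keeps checking in release builds.

// toy_always_assert.cc -- illustrative only; not Ceph's real ceph_assert.
#include <cassert>
#include <cstdio>
#include <cstdlib>

// Hypothetical always-on assert handler: prints the failed expression and
// its location, then aborts. Unlike assert(), the check below is never
// removed by -DNDEBUG.
[[noreturn]] static void fail_handler(const char *expr,
                                      const char *file, int line)
{
  std::fprintf(stderr, "assertion failed: %s at %s:%d\n", expr, file, line);
  std::abort();
}

#define my_always_assert(expr) \
  ((expr) ? (void)0 : fail_handler(#expr, __FILE__, __LINE__))

int main()
{
  int count = 0;
  assert(count == 0);            // disappears entirely when built with -DNDEBUG
  my_always_assert(count == 0);  // still checked in -DNDEBUG builds
  std::puts("both checks passed");
  return 0;
}

Building once with and once without -DNDEBUG shows the difference: only the
my_always_assert() check survives in the NDEBUG build.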