void ClassHandler::add_embedded_class(const string& cname)
{
- assert(mutex.is_locked());
+ ceph_assert(mutex.is_locked());
ClassData *cls = _get_class(cname, false);
- assert(cls->status == ClassData::CLASS_UNKNOWN);
+ ceph_assert(cls->status == ClassData::CLASS_UNKNOWN);
cls->status = ClassData::CLASS_INITIALIZING;
}
ClassHandler::ClassData *ClassHandler::register_class(const char *cname)
{
- assert(mutex.is_locked());
+ ceph_assert(mutex.is_locked());
ClassData *cls = _get_class(cname, false);
ldout(cct, 10) << "register_class " << cname << " status " << cls->status << dendl;
case ECBackend::pipeline_state_t::CACHE_INVALID:
return lhs << "CACHE_INVALID";
default:
- assert(0 == "invalid pipeline state");
+ ceph_assert(0 == "invalid pipeline state");
}
return lhs; // unreachable
}
: PGBackend(cct, pg, store, coll, ch),
ec_impl(ec_impl),
sinfo(ec_impl->get_data_chunk_count(), stripe_width) {
- assert((ec_impl->get_data_chunk_count() *
+ ceph_assert((ec_impl->get_data_chunk_count() *
ec_impl->get_chunk_size(stripe_width)) == stripe_width);
}
<< res.r << " errors=" << res.errors << dendl;
dout(10) << __func__ << ": canceling recovery op for obj " << hoid
<< dendl;
- assert(recovery_ops.count(hoid));
+ ceph_assert(recovery_ops.count(hoid));
eversion_t v = recovery_ops[hoid].v;
recovery_ops.erase(hoid);
pg->_failed_push(hoid, in);
return;
}
- assert(res.returned.size() == 1);
+ ceph_assert(res.returned.size() == 1);
pg->handle_recovery_read_complete(
hoid,
res.returned.back(),
bool attrs) {
list<boost::tuple<uint64_t, uint64_t, uint32_t> > to_read;
to_read.push_back(boost::make_tuple(off, len, 0));
- assert(!reads.count(hoid));
+ ceph_assert(!reads.count(hoid));
want_to_read.insert(make_pair(hoid, std::move(_want_to_read)));
reads.insert(
make_pair(
if (!op.data_included.empty()) {
uint64_t start = op.data_included.range_start();
uint64_t end = op.data_included.range_end();
- assert(op.data.length() == (end - start));
+ ceph_assert(op.data.length() == (end - start));
m->t.write(
coll,
op.data.length(),
op.data);
} else {
- assert(op.data.length() == 0);
+ ceph_assert(op.data.length() == 0);
}
if (op.before_progress.first) {
- assert(op.attrset.count(string("_")));
+ ceph_assert(op.attrset.count(string("_")));
m->t.setattrs(
coll,
tobj,
}
if (op.after_progress.data_complete) {
if ((get_parent()->pgb_is_primary())) {
- assert(recovery_ops.count(op.soid));
- assert(recovery_ops[op.soid].obc);
+ ceph_assert(recovery_ops.count(op.soid));
+ ceph_assert(recovery_ops[op.soid].obc);
get_parent()->on_local_recover(
op.soid,
op.recovery_info,
if (!recovery_ops.count(op.soid))
return;
RecoveryOp &rop = recovery_ops[op.soid];
- assert(rop.waiting_on_pushes.count(from));
+ ceph_assert(rop.waiting_on_pushes.count(from));
rop.waiting_on_pushes.erase(from);
continue_recovery_op(rop, m);
}
<< ", " << to_read.get<2>()
<< ")"
<< dendl;
- assert(recovery_ops.count(hoid));
+ ceph_assert(recovery_ops.count(hoid));
RecoveryOp &op = recovery_ops[hoid];
- assert(op.returned_data.empty());
+ ceph_assert(op.returned_data.empty());
map<int, bufferlist*> target;
for (set<shard_id_t>::iterator i = op.missing_on_shards.begin();
i != op.missing_on_shards.end();
dout(10) << __func__ << ": " << from << dendl;
int r;
r = ECUtil::decode(sinfo, ec_impl, from, target);
- assert(r == 0);
+ ceph_assert(r == 0);
if (attrs) {
op.xattrs.swap(*attrs);
map<string, bufferlist> sanitized_attrs(op.xattrs);
sanitized_attrs.erase(ECUtil::get_hinfo_key());
op.obc = get_parent()->get_obc(hoid, sanitized_attrs);
- assert(op.obc);
+ ceph_assert(op.obc);
op.recovery_info.size = op.obc->obs.oi.size;
op.recovery_info.oi = op.obc->obs.oi;
}
ECUtil::HashInfo hinfo(ec_impl->get_chunk_count());
if (op.obc->obs.oi.size > 0) {
- assert(op.xattrs.count(ECUtil::get_hinfo_key()));
+ ceph_assert(op.xattrs.count(ECUtil::get_hinfo_key()));
auto bp = op.xattrs[ECUtil::get_hinfo_key()].cbegin();
decode(hinfo, bp);
}
op.hinfo = unstable_hashinfo_registry.lookup_or_create(hoid, hinfo);
}
- assert(op.xattrs.size());
- assert(op.obc);
+ ceph_assert(op.xattrs.size());
+ ceph_assert(op.obc);
continue_recovery_op(op, m);
}
case RecoveryOp::IDLE: {
// start read
op.state = RecoveryOp::READING;
- assert(!op.recovery_progress.data_complete);
+ ceph_assert(!op.recovery_progress.data_complete);
set<int> want(op.missing_on_shards.begin(), op.missing_on_shards.end());
uint64_t from = op.recovery_progress.data_recovered_to;
uint64_t amount = get_recovery_chunk_size();
if (op.recovery_progress.first && op.obc) {
/* We've got the attrs and the hinfo, might as well use them */
op.hinfo = get_hash_info(op.hoid);
- assert(op.hinfo);
+ ceph_assert(op.hinfo);
op.xattrs = op.obc->attr_cache;
encode(*(op.hinfo), op.xattrs[ECUtil::get_hinfo_key()]);
}
op.hoid, want, true, false, &to_read);
if (r != 0) {
// we must have lost a recovery source
- assert(!op.recovery_progress.first);
+ ceph_assert(!op.recovery_progress.first);
dout(10) << __func__ << ": canceling recovery op for obj " << op.hoid
<< dendl;
get_parent()->cancel_pull(op.hoid);
}
case RecoveryOp::READING: {
// read completed, start write
- assert(op.xattrs.size());
- assert(op.returned_data.size());
+ ceph_assert(op.xattrs.size());
+ ceph_assert(op.returned_data.size());
op.state = RecoveryOp::WRITING;
ObjectRecoveryProgress after_progress = op.recovery_progress;
after_progress.data_recovered_to += op.extent_requested.second;
for (set<pg_shard_t>::iterator mi = op.missing_on.begin();
mi != op.missing_on.end();
++mi) {
- assert(op.returned_data.count(mi->shard));
+ ceph_assert(op.returned_data.count(mi->shard));
m->pushes[*mi].push_back(PushOp());
PushOp &pop = m->pushes[*mi].back();
pop.soid = op.hoid;
<< ", after_progress=" << after_progress
<< ", pop.data.length()=" << pop.data.length()
<< ", size=" << op.obc->obs.oi.size << dendl;
- assert(
+ ceph_assert(
pop.data.length() ==
sinfo.aligned_logical_offset_to_chunk_offset(
after_progress.data_recovered_to -
i != h->ops.end();
++i) {
dout(10) << __func__ << ": starting " << *i << dendl;
- assert(!recovery_ops.count(i->hoid));
+ ceph_assert(!recovery_ops.count(i->hoid));
RecoveryOp &op = recovery_ops.insert(make_pair(i->hoid, *i)).first->second;
continue_recovery_op(op, &m);
}
}
if (hoid.is_snap()) {
if (obc) {
- assert(obc->ssc);
+ ceph_assert(obc->ssc);
h->ops.back().recovery_info.ss = obc->ssc->snapset;
} else if (head) {
- assert(head->ssc);
+ ceph_assert(head->ssc);
h->ops.back().recovery_info.ss = head->ssc->snapset;
} else {
- assert(0 == "neither obc nor head set for a snap object");
+ ceph_assert(0 == "neither obc nor head set for a snap object");
}
}
h->ops.back().recovery_progress.omap_complete = true;
// are read in sections, so the digest check won't be done here.
// Do NOT check osd_read_eio_on_bad_digest here. We need to report
// the state of our chunk in case other chunks could substitute.
- assert(hinfo->has_chunk_hash());
+ ceph_assert(hinfo->has_chunk_hash());
if ((bl.length() == hinfo->get_total_chunk_size()) &&
(j->get<0>() == 0)) {
dout(20) << __func__ << ": Checking hash of " << i->first << dendl;
const ZTracer::Trace &trace)
{
map<ceph_tid_t, Op>::iterator i = tid_to_op_map.find(op.tid);
- assert(i != tid_to_op_map.end());
+ ceph_assert(i != tid_to_op_map.end());
if (op.committed) {
trace.event("sub write committed");
- assert(i->second.pending_commit.count(from));
+ ceph_assert(i->second.pending_commit.count(from));
i->second.pending_commit.erase(from);
if (from != get_parent()->whoami_shard()) {
get_parent()->update_peer_last_complete_ondisk(from, op.last_complete);
}
if (op.applied) {
trace.event("sub write applied");
- assert(i->second.pending_apply.count(from));
+ ceph_assert(i->second.pending_apply.count(from));
i->second.pending_apply.erase(from);
}
for (auto i = op.buffers_read.begin();
i != op.buffers_read.end();
++i) {
- assert(!op.errors.count(i->first)); // If attribute error we better not have sent a buffer
+ ceph_assert(!op.errors.count(i->first)); // If attribute error we better not have sent a buffer
if (!rop.to_read.count(i->first)) {
// We canceled this read! @see filter_read_op
dout(20) << __func__ << " to_read skipping" << dendl;
for (list<pair<uint64_t, bufferlist> >::iterator j = i->second.begin();
j != i->second.end();
++j, ++req_iter, ++riter) {
- assert(req_iter != rop.to_read.find(i->first)->second.to_read.end());
- assert(riter != rop.complete[i->first].returned.end());
+ ceph_assert(req_iter != rop.to_read.find(i->first)->second.to_read.end());
+ ceph_assert(riter != rop.complete[i->first].returned.end());
pair<uint64_t, uint64_t> adjusted =
sinfo.aligned_offset_len_to_chunk(
make_pair(req_iter->get<0>(), req_iter->get<1>()));
- assert(adjusted.first == j->first);
+ ceph_assert(adjusted.first == j->first);
riter->get<2>()[from].claim(j->second);
}
}
for (auto i = op.attrs_read.begin();
i != op.attrs_read.end();
++i) {
- assert(!op.errors.count(i->first)); // if read error better not have sent an attribute
+ ceph_assert(!op.errors.count(i->first)); // if read error better not have sent an attribute
if (!rop.to_read.count(i->first)) {
// We canceled this read! @see filter_read_op
dout(20) << __func__ << " to_read skipping" << dendl;
map<pg_shard_t, set<ceph_tid_t> >::iterator siter =
shard_to_read_map.find(from);
- assert(siter != shard_to_read_map.end());
- assert(siter->second.count(op.tid));
+ ceph_assert(siter != shard_to_read_map.end());
+ ceph_assert(siter->second.count(op.tid));
siter->second.erase(op.tid);
- assert(rop.in_progress.count(from));
+ ceph_assert(rop.in_progress.count(from));
rop.in_progress.erase(from);
unsigned is_complete = 0;
// For redundant reads check for completion as each shard comes in,
++is_complete;
}
} else {
- assert(rop.complete[iter->first].r == 0);
+ ceph_assert(rop.complete[iter->first].r == 0);
if (!rop.complete[iter->first].errors.empty()) {
if (cct->_conf->osd_read_ec_check_for_errors) {
dout(10) << __func__ << ": Not ignoring errors, use one shard err=" << err << dendl;
rop.to_read.begin();
map<hobject_t, read_result_t>::iterator resiter =
rop.complete.begin();
- assert(rop.to_read.size() == rop.complete.size());
+ ceph_assert(rop.to_read.size() == rop.complete.size());
for (; reqiter != rop.to_read.end(); ++reqiter, ++resiter) {
if (reqiter->second.cb) {
pair<RecoveryMessages *, read_result_t &> arg(
FinishReadOp(ECBackend *ec, ceph_tid_t tid) : ec(ec), tid(tid) {}
void finish(ThreadPool::TPHandle &handle) override {
auto ropiter = ec->tid_to_read_map.find(tid);
- assert(ropiter != ec->tid_to_read_map.end());
+ ceph_assert(ropiter != ec->tid_to_read_map.end());
int priority = ropiter->second.priority;
RecoveryMessages rm;
ec->complete_read_op(ropiter->second, &rm);
if (i->second.empty()) {
op.source_to_obj.erase(i++);
} else {
- assert(!osdmap->is_down(i->first.osd));
+ ceph_assert(!osdmap->is_down(i->first.osd));
++i;
}
}
++i) {
get_parent()->cancel_pull(*i);
- assert(op.to_read.count(*i));
+ ceph_assert(op.to_read.count(*i));
read_request_t &req = op.to_read.find(*i)->second;
dout(10) << __func__ << ": canceling " << req
<< " for obj " << *i << dendl;
- assert(req.cb);
+ ceph_assert(req.cb);
delete req.cb;
req.cb = nullptr;
i != tids_to_filter.end();
++i) {
map<ceph_tid_t, ReadOp>::iterator j = tid_to_read_map.find(*i);
- assert(j != tid_to_read_map.end());
+ ceph_assert(j != tid_to_read_map.end());
filter_read_op(osdmap, j->second);
}
}
OpRequestRef client_op
)
{
- assert(!tid_to_op_map.count(tid));
+ ceph_assert(!tid_to_op_map.count(tid));
Op *op = &(tid_to_op_map[tid]);
op->hoid = hoid;
op->delta_stats = delta_stats;
if (error_shards.find(*i) != error_shards.end())
continue;
if (!missing.is_missing(hoid)) {
- assert(!have.count(i->shard));
+ ceph_assert(!have.count(i->shard));
have.insert(i->shard);
- assert(!shards.count(i->shard));
+ ceph_assert(!shards.count(i->shard));
shards.insert(make_pair(i->shard, *i));
}
}
if (error_shards.find(*i) != error_shards.end())
continue;
if (have.count(i->shard)) {
- assert(shards.count(i->shard));
+ ceph_assert(shards.count(i->shard));
continue;
}
dout(10) << __func__ << ": checking backfill " << *i << dendl;
- assert(!shards.count(i->shard));
+ ceph_assert(!shards.count(i->shard));
const pg_info_t &info = get_parent()->get_shard_info(*i);
const pg_missing_t &missing = get_parent()->get_shard_missing(*i);
if (hoid < info.last_backfill &&
dout(10) << __func__ << ": checking missing_loc " << *i << dendl;
auto m = get_parent()->maybe_get_shard_missing(*i);
if (m) {
- assert(!(*m).is_missing(hoid));
+ ceph_assert(!(*m).is_missing(hoid));
}
if (error_shards.find(*i) != error_shards.end())
continue;
map<pg_shard_t, vector<pair<int, int>>> *to_read)
{
// Make sure we don't do redundant reads for recovery
- assert(!for_recovery || !do_redundant_reads);
+ ceph_assert(!for_recovery || !do_redundant_reads);
set<int> have;
map<shard_id_t, pg_shard_t> shards;
return 0;
for (auto &&i:need) {
- assert(shards.count(shard_id_t(i.first)));
+ ceph_assert(shards.count(shard_id_t(i.first)));
to_read->insert(make_pair(shards[shard_id_t(i.first)], i.second));
}
return 0;
map<pg_shard_t, vector<pair<int, int>>> *to_read,
bool for_recovery)
{
- assert(to_read);
+ ceph_assert(to_read);
set<int> have;
map<shard_id_t, pg_shard_t> shards;
for (set<int>::iterator i = shards_left.begin();
i != shards_left.end();
++i) {
- assert(shards.count(shard_id_t(*i)));
- assert(avail.find(*i) == avail.end());
+ ceph_assert(shards.count(shard_id_t(*i)));
+ ceph_assert(avail.find(*i) == avail.end());
to_read->insert(make_pair(shards[shard_id_t(*i)], subchunks));
}
return 0;
bool for_recovery)
{
ceph_tid_t tid = get_parent()->get_tid();
- assert(!tid_to_read_map.count(tid));
+ ceph_assert(!tid_to_read_map.count(tid));
auto &op = tid_to_read_map.emplace(
tid,
ReadOp(
chunk_off_len.second,
j->get<2>()));
}
- assert(!need_attrs);
+ ceph_assert(!need_attrs);
}
}
void ECBackend::start_rmw(Op *op, PGTransactionUPtr &&t)
{
- assert(op);
+ ceph_assert(op);
op->plan = ECTransaction::get_write_plan(
sinfo,
Op *op = &(waiting_state.front());
if (op->requires_rmw() && pipeline_state.cache_invalid()) {
- assert(get_parent()->get_pool().allows_ecoverwrites());
+ ceph_assert(get_parent()->get_pool().allows_ecoverwrites());
dout(20) << __func__ << ": blocking " << *op
<< " because it requires an rmw and the cache is invalid "
<< pipeline_state
dout(10) << __func__ << ": " << *op << dendl;
if (!op->remote_read.empty()) {
- assert(get_parent()->get_pool().allows_ecoverwrites());
+ ceph_assert(get_parent()->get_pool().allows_ecoverwrites());
objects_read_async_no_cache(
op->remote_read,
[this, op](map<hobject_t,pair<int, extent_map> > &&results) {
}
op->pending_read.clear();
} else {
- assert(op->pending_read.empty());
+ ceph_assert(op->pending_read.empty());
}
map<shard_id_t, ObjectStore::Transaction> trans;
written_set[i.first] = i.second.get_interval_set();
}
dout(20) << __func__ << ": written_set: " << written_set << dendl;
- assert(written_set == op->plan.will_write);
+ ceph_assert(written_set == op->plan.will_write);
if (op->using_cache) {
for (auto &&hpair: written) {
op->pending_commit.insert(*i);
map<shard_id_t, ObjectStore::Transaction>::iterator iter =
trans.find(i->shard);
- assert(iter != trans.end());
+ ceph_assert(iter != trans.end());
bool should_send = get_parent()->should_send_op(*i, op->hoid);
const pg_stat_t &stats =
(should_send || !backfill_shards.count(*i)) ?
if (r == 0)
r = got.first;
} else {
- assert(read.second.first);
+ ceph_assert(read.second.first);
uint64_t offset = read.first.get<0>();
uint64_t length = read.first.get<1>();
auto range = got.second.get_containing_range(offset, length);
- assert(range.first != range.second);
- assert(range.first.get_off() <= offset);
- assert(
+ ceph_assert(range.first != range.second);
+ ceph_assert(range.first.get_off() <= offset);
+ ceph_assert(
(offset + length) <=
(range.first.get_off() + range.first.get_len()));
read.second.first->substr_of(
extent_map result;
if (res.r != 0)
goto out;
- assert(res.returned.size() == to_read.size());
- assert(res.errors.empty());
+ ceph_assert(res.returned.size() == to_read.size());
+ ceph_assert(res.errors.empty());
for (auto &&read: to_read) {
pair<uint64_t, uint64_t> adjusted =
ec->sinfo.offset_len_to_stripe_bounds(
make_pair(read.get<0>(), read.get<1>()));
- assert(res.returned.front().get<0>() == adjusted.first &&
+ ceph_assert(res.returned.front().get<0>() == adjusted.first &&
res.returned.front().get<1>() == adjusted.second);
map<int, bufferlist> to_decode;
bufferlist bl;
false,
fast_read,
&shards);
- assert(r == 0);
+ ceph_assert(r == 0);
CallClientContexts *c = new CallClientContexts(
to_read.first,
uint64_t old_size,
ObjectStore::Transaction *t)
{
- assert(old_size % sinfo.get_stripe_width() == 0);
+ ceph_assert(old_size % sinfo.get_stripe_width() == 0);
t->truncate(
coll,
ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard),
return 0;
} else {
if (!get_parent()->get_pool().allows_ecoverwrites()) {
- assert(hinfo->has_chunk_hash());
+ ceph_assert(hinfo->has_chunk_hash());
if (hinfo->get_total_chunk_size() != (unsigned)pos.data_pos) {
dout(0) << "_scan_list " << poid << " got incorrect size on read 0x"
<< std::hex << pos
const hobject_t &hoid,
int err,
extent_map &&buffers) {
- assert(objects_to_read);
+ ceph_assert(objects_to_read);
--objects_to_read;
- assert(!results.count(hoid));
+ ceph_assert(!results.count(hoid));
results.emplace(hoid, make_pair(err, std::move(buffers)));
}
bool is_complete() const {
map<shard_id_t, ObjectStore::Transaction> *transactions,
DoutPrefixProvider *dpp) {
const uint64_t before_size = hinfo->get_total_logical_size(sinfo);
- assert(sinfo.logical_offset_is_stripe_aligned(offset));
- assert(sinfo.logical_offset_is_stripe_aligned(bl.length()));
- assert(bl.length());
+ ceph_assert(sinfo.logical_offset_is_stripe_aligned(offset));
+ ceph_assert(sinfo.logical_offset_is_stripe_aligned(bl.length()));
+ ceph_assert(bl.length());
map<int, bufferlist> buffers;
int r = ECUtil::encode(
sinfo, ecimpl, bl, want, &buffers);
- assert(r == 0);
+ ceph_assert(r == 0);
written.insert(offset, bl.length(), bl);
<< dendl;
if (offset >= before_size) {
- assert(offset == before_size);
+ ceph_assert(offset == before_size);
hinfo->append(
sinfo.aligned_logical_offset_to_chunk_offset(offset),
buffers);
}
for (auto &&i : *transactions) {
- assert(buffers.count(i.first));
+ ceph_assert(buffers.count(i.first));
bufferlist &enc_bl = buffers[i.first];
if (offset >= before_size) {
i.second.set_alloc_hint(
set<hobject_t> *temp_removed,
DoutPrefixProvider *dpp)
{
- assert(written_map);
- assert(transactions);
- assert(temp_added);
- assert(temp_removed);
- assert(plan.t);
+ ceph_assert(written_map);
+ ceph_assert(transactions);
+ ceph_assert(temp_added);
+ ceph_assert(temp_removed);
+ ceph_assert(plan.t);
auto &t = *(plan.t);
auto &hash_infos = plan.hash_infos;
obc = obiter->second;
}
if (entry) {
- assert(obc);
+ ceph_assert(obc);
} else {
- assert(oid.is_temp());
+ ceph_assert(oid.is_temp());
}
ECUtil::HashInfoRef hinfo;
{
auto iter = hash_infos.find(oid);
- assert(iter != hash_infos.end());
+ ceph_assert(iter != hash_infos.end());
hinfo = iter->second;
}
}
map<string, boost::optional<bufferlist> > xattr_rollback;
- assert(hinfo);
+ ceph_assert(hinfo);
bufferlist old_hinfo;
encode(*hinfo, old_hinfo);
xattr_rollback[ECUtil::get_hinfo_key()] = old_hinfo;
if (op.is_none() && op.truncate && op.truncate->first == 0) {
- assert(op.truncate->first == 0);
- assert(op.truncate->first ==
+ ceph_assert(op.truncate->first == 0);
+ ceph_assert(op.truncate->first ==
op.truncate->second);
- assert(entry);
- assert(obc);
+ ceph_assert(entry);
+ ceph_assert(obc);
if (op.truncate->first != op.truncate->second) {
op.truncate->first = op.truncate->second;
}
auto siter = hash_infos.find(op.source);
- assert(siter != hash_infos.end());
+ ceph_assert(siter != hash_infos.end());
hinfo->update_to(*(siter->second));
if (obc) {
auto cobciter = obc_map.find(op.source);
- assert(cobciter != obc_map.end());
+ ceph_assert(cobciter != obc_map.end());
obc->attr_cache = cobciter->second->attr_cache;
}
},
[&](const PGTransaction::ObjectOperation::Init::Rename &op) {
- assert(op.source.is_temp());
+ ceph_assert(op.source.is_temp());
for (auto &&st: *transactions) {
st.second.collection_move_rename(
coll_t(spg_t(pgid, st.first)),
ghobject_t(oid, ghobject_t::NO_GEN, st.first));
}
auto siter = hash_infos.find(op.source);
- assert(siter != hash_infos.end());
+ ceph_assert(siter != hash_infos.end());
hinfo->update_to(*(siter->second));
if (obc) {
auto cobciter = obc_map.find(op.source);
- assert(cobciter == obc_map.end());
+ ceph_assert(cobciter == obc_map.end());
obc->attr_cache.clear();
}
});
// omap not supported (except 0, handled above)
- assert(!(op.clear_omap));
- assert(!(op.omap_header));
- assert(op.omap_updates.empty());
+ ceph_assert(!(op.clear_omap));
+ ceph_assert(!(op.omap_header));
+ ceph_assert(op.omap_updates.empty());
if (!op.attr_updates.empty()) {
map<string, bufferlist> to_set;
obc->attr_cache.erase(citer);
}
} else {
- assert(!entry);
+ ceph_assert(!entry);
}
}
for (auto &&st : *transactions) {
ghobject_t(oid, ghobject_t::NO_GEN, st.first),
to_set);
}
- assert(!xattr_rollback.empty());
+ ceph_assert(!xattr_rollback.empty());
}
if (entry && !xattr_rollback.empty()) {
entry->mod_desc.setattrs(xattr_rollback);
uint64_t append_after = new_size;
ldpp_dout(dpp, 20) << __func__ << ": new_size start " << new_size << dendl;
if (op.truncate && op.truncate->first < new_size) {
- assert(!op.is_fresh_object());
+ ceph_assert(!op.is_fresh_object());
new_size = sinfo.logical_to_next_stripe_offset(
op.truncate->first);
ldpp_dout(dpp, 20) << __func__ << ": new_size truncate down "
uint64_t restore_len = sinfo.aligned_logical_offset_to_chunk_offset(
orig_size -
sinfo.logical_to_prev_stripe_offset(op.truncate->first));
- assert(rollback_extents.empty());
+ ceph_assert(rollback_extents.empty());
ldpp_dout(dpp, 20) << __func__ << ": saving extent "
<< make_pair(restore_from, restore_len)
bl.append_zero(extent.get_len());
},
[&](const BufferUpdate::CloneRange &) {
- assert(
+ ceph_assert(
0 ==
"CloneRange is not allowed, do_op should have returned ENOTSUPP");
});
ldpp_dout(dpp, 20) << __func__ << ": adding buffer_update "
<< make_pair(off, len)
<< dendl;
- assert(len > 0);
+ ceph_assert(len > 0);
if (off > new_size) {
- assert(off > append_after);
+ ceph_assert(off > append_after);
bl.prepend_zero(off - new_size);
len += off - new_size;
ldpp_dout(dpp, 20) << __func__ << ": prepending zeroes to align "
if (op.truncate &&
op.truncate->second > new_size) {
- assert(op.truncate->second > append_after);
+ ceph_assert(op.truncate->second > append_after);
uint64_t truncate_to =
sinfo.logical_to_next_stripe_offset(
op.truncate->second);
<< to_overwrite
<< dendl;
for (auto &&extent: to_overwrite) {
- assert(extent.get_off() + extent.get_len() <= append_after);
- assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off()));
- assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len()));
+ ceph_assert(extent.get_off() + extent.get_len() <= append_after);
+ ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off()));
+ ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len()));
if (entry) {
uint64_t restore_from = sinfo.aligned_logical_offset_to_chunk_offset(
extent.get_off());
<< to_append
<< dendl;
for (auto &&extent: to_append) {
- assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off()));
- assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len()));
+ ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_off()));
+ ceph_assert(sinfo.logical_offset_is_stripe_aligned(extent.get_len()));
ldpp_dout(dpp, 20) << __func__ << ": appending "
<< extent.get_off() << "~" << extent.get_len()
<< dendl;
hinfo->set_total_chunk_size_clear_hash(
sinfo.aligned_logical_offset_to_chunk_offset(new_size));
} else {
- assert(hinfo->get_total_logical_size(sinfo) == new_size);
+ ceph_assert(hinfo->get_total_logical_size(sinfo) == new_size);
}
if (entry && !to_append.empty()) {
for (auto &&extent: i.second.buffer_updates) {
using BufferUpdate = PGTransaction::ObjectOperation::BufferUpdate;
if (boost::get<BufferUpdate::CloneRange>(&(extent.get_val()))) {
- assert(
+ ceph_assert(
0 ==
"CloneRange is not allowed, do_op should have returned ENOTSUPP");
}
}
if (head_start != head_finish &&
head_start < orig_size) {
- assert(head_finish <= orig_size);
- assert(head_finish - head_start == sinfo.get_stripe_width());
+ ceph_assert(head_finish <= orig_size);
+ ceph_assert(head_finish - head_start == sinfo.get_stripe_width());
ldpp_dout(dpp, 20) << __func__ << ": reading partial head stripe "
<< head_start << "~" << sinfo.get_stripe_width()
<< dendl;
if (tail_start != tail_finish &&
(head_start == head_finish || tail_start != head_start) &&
tail_start < orig_size) {
- assert(tail_finish <= orig_size);
- assert(tail_finish - tail_start == sinfo.get_stripe_width());
+ ceph_assert(tail_finish <= orig_size);
+ ceph_assert(tail_finish - tail_start == sinfo.get_stripe_width());
ldpp_dout(dpp, 20) << __func__ << ": reading partial tail stripe "
<< tail_start << "~" << sinfo.get_stripe_width()
<< dendl;
}
if (head_start != tail_finish) {
- assert(
+ ceph_assert(
sinfo.logical_offset_is_stripe_aligned(
tail_finish - head_start)
);
if (tail_finish > projected_size)
projected_size = tail_finish;
} else {
- assert(tail_finish <= projected_size);
+ ceph_assert(tail_finish <= projected_size);
}
}
* to_read should have an entry for i.first iff it isn't empty
* and if we are reading from i.first, we can't be renaming or
* cloning it */
- assert(plan.to_read.count(i.first) == 0 ||
+ ceph_assert(plan.to_read.count(i.first) == 0 ||
(!plan.to_read.at(i.first).empty() &&
!i.second.has_source()));
});
ErasureCodeInterfaceRef &ec_impl,
map<int, bufferlist> &to_decode,
bufferlist *out) {
- assert(to_decode.size());
+ ceph_assert(to_decode.size());
uint64_t total_data_size = to_decode.begin()->second.length();
- assert(total_data_size % sinfo.get_chunk_size() == 0);
+ ceph_assert(total_data_size % sinfo.get_chunk_size() == 0);
- assert(out);
- assert(out->length() == 0);
+ ceph_assert(out);
+ ceph_assert(out->length() == 0);
for (map<int, bufferlist>::iterator i = to_decode.begin();
i != to_decode.end();
++i) {
- assert(i->second.length() == total_data_size);
+ ceph_assert(i->second.length() == total_data_size);
}
if (total_data_size == 0)
}
bufferlist bl;
int r = ec_impl->decode_concat(chunks, &bl);
- assert(r == 0);
- assert(bl.length() == sinfo.get_stripe_width());
+ ceph_assert(r == 0);
+ ceph_assert(bl.length() == sinfo.get_stripe_width());
out->claim_append(bl);
}
return 0;
map<int, bufferlist> &to_decode,
map<int, bufferlist*> &out) {
- assert(to_decode.size());
+ ceph_assert(to_decode.size());
for (auto &&i : to_decode) {
if(i.second.length() == 0)
for (map<int, bufferlist*>::iterator i = out.begin();
i != out.end();
++i) {
- assert(i->second);
- assert(i->second->length() == 0);
+ ceph_assert(i->second);
+ ceph_assert(i->second->length() == 0);
need.insert(i->first);
}
set<int> avail;
for (auto &&i : to_decode) {
- assert(i.second.length() != 0);
+ ceph_assert(i.second.length() != 0);
avail.insert(i.first);
}
map<int, vector<pair<int, int>>> min;
int r = ec_impl->minimum_to_decode(need, avail, &min);
- assert(r == 0);
+ ceph_assert(r == 0);
int chunks_count = 0;
int repair_data_per_chunk = 0;
}
map<int, bufferlist> out_bls;
r = ec_impl->decode(need, chunks, &out_bls, sinfo.get_chunk_size());
- assert(r == 0);
+ ceph_assert(r == 0);
for (auto j = out.begin(); j != out.end(); ++j) {
- assert(out_bls.count(j->first));
- assert(out_bls[j->first].length() == sinfo.get_chunk_size());
+ ceph_assert(out_bls.count(j->first));
+ ceph_assert(out_bls[j->first].length() == sinfo.get_chunk_size());
j->second->claim_append(out_bls[j->first]);
}
}
for (auto &&i : out) {
- assert(i.second->length() == chunks_count * sinfo.get_chunk_size());
+ ceph_assert(i.second->length() == chunks_count * sinfo.get_chunk_size());
}
return 0;
}
uint64_t logical_size = in.length();
- assert(logical_size % sinfo.get_stripe_width() == 0);
- assert(out);
- assert(out->empty());
+ ceph_assert(logical_size % sinfo.get_stripe_width() == 0);
+ ceph_assert(out);
+ ceph_assert(out->empty());
if (logical_size == 0)
return 0;
bufferlist buf;
buf.substr_of(in, i, sinfo.get_stripe_width());
int r = ec_impl->encode(want, buf, &encoded);
- assert(r == 0);
+ ceph_assert(r == 0);
for (map<int, bufferlist>::iterator i = encoded.begin();
i != encoded.end();
++i) {
- assert(i->second.length() == sinfo.get_chunk_size());
+ ceph_assert(i->second.length() == sinfo.get_chunk_size());
(*out)[i->first].claim_append(i->second);
}
}
for (map<int, bufferlist>::iterator i = out->begin();
i != out->end();
++i) {
- assert(i->second.length() % sinfo.get_chunk_size() == 0);
- assert(
+ ceph_assert(i->second.length() % sinfo.get_chunk_size() == 0);
+ ceph_assert(
sinfo.aligned_chunk_offset_to_logical_offset(i->second.length()) ==
logical_size);
}
void ECUtil::HashInfo::append(uint64_t old_size,
map<int, bufferlist> &to_append) {
- assert(old_size == total_chunk_size);
+ ceph_assert(old_size == total_chunk_size);
uint64_t size_to_append = to_append.begin()->second.length();
if (has_chunk_hash()) {
- assert(to_append.size() == cumulative_shard_hashes.size());
+ ceph_assert(to_append.size() == cumulative_shard_hashes.size());
for (map<int, bufferlist>::iterator i = to_append.begin();
i != to_append.end();
++i) {
- assert(size_to_append == i->second.length());
- assert((unsigned)i->first < cumulative_shard_hashes.size());
+ ceph_assert(size_to_append == i->second.length());
+ ceph_assert((unsigned)i->first < cumulative_shard_hashes.size());
uint32_t new_hash = i->second.crc32c(cumulative_shard_hashes[i->first]);
cumulative_shard_hashes[i->first] = new_hash;
}
stripe_info_t(uint64_t stripe_size, uint64_t stripe_width)
: stripe_width(stripe_width),
chunk_size(stripe_width / stripe_size) {
- assert(stripe_width % stripe_size == 0);
+ ceph_assert(stripe_width % stripe_size == 0);
}
bool logical_offset_is_stripe_aligned(uint64_t logical) const {
return (logical % stripe_width) == 0;
offset);
}
uint64_t aligned_logical_offset_to_chunk_offset(uint64_t offset) const {
- assert(offset % stripe_width == 0);
+ ceph_assert(offset % stripe_width == 0);
return (offset / stripe_width) * chunk_size;
}
uint64_t aligned_chunk_offset_to_logical_offset(uint64_t offset) const {
- assert(offset % chunk_size == 0);
+ ceph_assert(offset % chunk_size == 0);
return (offset / chunk_size) * stripe_width;
}
std::pair<uint64_t, uint64_t> aligned_offset_len_to_chunk(
void dump(Formatter *f) const;
static void generate_test_instances(std::list<HashInfo*>& o);
uint32_t get_chunk_hash(int shard) const {
- assert((unsigned)shard < cumulative_shard_hashes.size());
+ ceph_assert((unsigned)shard < cumulative_shard_hashes.size());
return cumulative_shard_hashes[shard];
}
uint64_t get_total_chunk_size() const {
void set_projected_total_logical_size(
const stripe_info_t &sinfo,
uint64_t logical_size) {
- assert(sinfo.logical_offset_is_stripe_aligned(logical_size));
+ ceph_assert(sinfo.logical_offset_is_stripe_aligned(logical_size));
projected_total_chunk_size = sinfo.aligned_logical_offset_to_chunk_offset(
logical_size);
}
void ExtentCache::extent::_link_pin_state(pin_state &pin_state)
{
- assert(parent_extent_set);
- assert(!parent_pin_state);
+ ceph_assert(parent_extent_set);
+ ceph_assert(!parent_pin_state);
parent_pin_state = &pin_state;
pin_state.pin_list.push_back(*this);
}
void ExtentCache::extent::_unlink_pin_state()
{
- assert(parent_extent_set);
- assert(parent_pin_state);
+ ceph_assert(parent_extent_set);
+ ceph_assert(parent_pin_state);
auto liter = pin_state::list::s_iterator_to(*this);
parent_pin_state->pin_list.erase(liter);
parent_pin_state = nullptr;
void ExtentCache::extent::unlink()
{
- assert(parent_extent_set);
- assert(parent_pin_state);
+ ceph_assert(parent_extent_set);
+ ceph_assert(parent_pin_state);
_unlink_pin_state();
{
auto siter = object_extent_set::set::s_iterator_to(*this);
auto &set = object_extent_set::set::container_from_iterator(siter);
- assert(&set == &(parent_extent_set->extent_set));
+ ceph_assert(&set == &(parent_extent_set->extent_set));
set.erase(siter);
}
parent_extent_set = nullptr;
- assert(!parent_pin_state);
+ ceph_assert(!parent_pin_state);
}
void ExtentCache::extent::link(
object_extent_set &extent_set,
pin_state &pin_state)
{
- assert(!parent_extent_set);
+ ceph_assert(!parent_extent_set);
parent_extent_set = &extent_set;
extent_set.extent_set.insert(*this);
if (eset.extent_set.empty()) {
auto siter = cache_set::s_iterator_to(eset);
auto &set = cache_set::container_from_iterator(siter);
- assert(&set == &per_object_caches);
+ ceph_assert(&set == &per_object_caches);
// per_object_caches owns eset
per_object_caches.erase(eset);
res.second,
[&](uint64_t off, uint64_t len,
extent *ext, object_extent_set::update_action *action) {
- assert(off == cur);
+ ceph_assert(off == cur);
cur = off + len;
action->action = object_extent_set::update_action::NONE;
- assert(ext && ext->bl && ext->pinned_by_write());
+ ceph_assert(ext && ext->bl && ext->pinned_by_write());
bl.substr_of(
*(ext->bl),
off - ext->offset,
[&](uint64_t off, uint64_t len,
extent *ext, object_extent_set::update_action *action) {
action->action = object_extent_set::update_action::NONE;
- assert(ext && ext->pinned_by_write());
+ ceph_assert(ext && ext->pinned_by_write());
action->bl = bufferlist();
action->bl->substr_of(
res.get_val(),
}
bool pinned_by_write() const {
- assert(parent_pin_state);
+ ceph_assert(parent_pin_state);
return parent_pin_state->is_write();
}
uint64_t pin_tid() const {
- assert(parent_pin_state);
+ ceph_assert(parent_pin_state);
return parent_pin_state->tid;
}
update_action action;
f(offset, extlen, nullptr, &action);
- assert(!action.bl || action.bl->length() == extlen);
+ ceph_assert(!action.bl || action.bl->length() == extlen);
if (action.action == update_action::UPDATE_PIN) {
extent *ext = action.bl ?
new extent(offset, *action.bl) :
new extent(offset, extlen);
ext->link(*this, pin);
} else {
- assert(!action.bl);
+ ceph_assert(!action.bl);
}
}
update_action action;
f(extoff, extlen, ext, &action);
- assert(!action.bl || action.bl->length() == extlen);
+ ceph_assert(!action.bl || action.bl->length() == extlen);
extent *final_extent = nullptr;
if (action.action == update_action::NONE) {
final_extent = ext;
}
if (action.bl) {
- assert(final_extent);
- assert(final_extent->length == action.bl->length());
+ ceph_assert(final_extent);
+ ceph_assert(final_extent->length == action.bl->length());
final_extent->bl = *(action.bl);
}
update_action action;
f(tailoff, taillen, nullptr, &action);
- assert(!action.bl || action.bl->length() == taillen);
+ ceph_assert(!action.bl || action.bl->length() == taillen);
if (action.action == update_action::UPDATE_PIN) {
extent *ext = action.bl ?
new extent(tailoff, *action.bl) :
new extent(tailoff, taillen);
ext->link(*this, pin);
} else {
- assert(!action.bl);
+ ceph_assert(!action.bl);
}
}
}
using list = boost::intrusive::list<extent, list_member_options>;
list pin_list;
~pin_state() {
- assert(pin_list.empty());
- assert(tid == 0);
- assert(pin_type == NONE);
+ ceph_assert(pin_list.empty());
+ ceph_assert(tid == 0);
+ ceph_assert(pin_type == NONE);
}
void _open(uint64_t in_tid, pin_type_t in_type) {
- assert(pin_type == NONE);
- assert(in_tid > 0);
+ ceph_assert(pin_type == NONE);
+ ceph_assert(in_tid > 0);
tid = in_tid;
pin_type = in_type;
}
for (auto iter = p.pin_list.begin(); iter != p.pin_list.end(); ) {
unique_ptr<extent> extent(&*iter); // we now own this
iter++; // unlink will invalidate
- assert(extent->parent_extent_set);
+ ceph_assert(extent->parent_extent_set);
auto &eset = *(extent->parent_extent_set);
extent->unlink();
remove_and_destroy_if_empty(eset);
return impl->approx_unique_insert_count();
}
void seal() {
- assert(!sealed);
+ ceph_assert(!sealed);
sealed = true;
impl->seal();
}
void OSDService::remove_pgid(spg_t pgid, PG *pg)
{
Mutex::Locker l(pgid_lock);
- assert(pgid_tracker.count(pgid));
- assert(pgid_tracker[pgid] > 0);
+ ceph_assert(pgid_tracker.count(pgid));
+ ceph_assert(pgid_tracker[pgid] > 0);
pgid_tracker[pgid]--;
if (pgid_tracker[pgid] == 0) {
pgid_tracker.erase(pgid);
Mutex::Locker l(agent_lock);
// By this time all ops should be cancelled
- assert(agent_ops == 0);
+ ceph_assert(agent_ops == 0);
// By this time all PGs are shutdown and dequeued
if (!agent_queue.empty()) {
set<PGRef>& top = agent_queue.rbegin()->second;
derr << "agent queue not empty, for example " << (*top.begin())->get_pgid() << dendl;
- assert(0 == "agent queue not empty");
+ ceph_assert(0 == "agent queue not empty");
}
agent_stop_flag = true;
{
OSDMapRef next_map = get_nextmap_reserved();
// service map is always newer/newest
- assert(from_epoch <= next_map->get_epoch());
+ ceph_assert(from_epoch <= next_map->get_epoch());
if (next_map->is_down(peer) ||
next_map->get_info(peer).up_from > from_epoch) {
{
OSDMapRef next_map = get_nextmap_reserved();
// service map is always newer/newest
- assert(from_epoch <= next_map->get_epoch());
+ ceph_assert(from_epoch <= next_map->get_epoch());
if (next_map->is_down(peer) ||
next_map->get_info(peer).up_from > from_epoch) {
{
OSDMapRef next_map = get_nextmap_reserved();
// service map is always newer/newest
- assert(from_epoch <= next_map->get_epoch());
+ ceph_assert(from_epoch <= next_map->get_epoch());
pair<ConnectionRef,ConnectionRef> ret;
if (next_map->is_down(peer) ||
dout(20) << "dec_scrubs_pending " << scrubs_pending << " -> " << (scrubs_pending-1)
<< " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
--scrubs_pending;
- assert(scrubs_pending >= 0);
+ ceph_assert(scrubs_pending >= 0);
sched_scrub_lock.Unlock();
}
dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active
<< " (max " << cct->_conf->osd_max_scrubs
<< ", pending " << (scrubs_pending+1) << " -> " << scrubs_pending << ")" << dendl;
- assert(scrubs_pending >= 0);
+ ceph_assert(scrubs_pending >= 0);
} else {
dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active
<< " (max " << cct->_conf->osd_max_scrubs
dout(20) << "dec_scrubs_active " << scrubs_active << " -> " << (scrubs_active-1)
<< " (max " << cct->_conf->osd_max_scrubs << ", pending " << scrubs_pending << ")" << dendl;
--scrubs_active;
- assert(scrubs_active >= 0);
+ ceph_assert(scrubs_active >= 0);
sched_scrub_lock.Unlock();
}
{
Mutex::Locker l(epoch_lock);
if (_boot_epoch) {
- assert(*_boot_epoch == 0 || *_boot_epoch >= boot_epoch);
+ ceph_assert(*_boot_epoch == 0 || *_boot_epoch >= boot_epoch);
boot_epoch = *_boot_epoch;
}
if (_up_epoch) {
- assert(*_up_epoch == 0 || *_up_epoch >= up_epoch);
+ ceph_assert(*_up_epoch == 0 || *_up_epoch >= up_epoch);
up_epoch = *_up_epoch;
}
if (_bind_epoch) {
- assert(*_bind_epoch == 0 || *_bind_epoch >= bind_epoch);
+ ceph_assert(*_bind_epoch == 0 || *_bind_epoch >= bind_epoch);
bind_epoch = *_bind_epoch;
}
}
version_t uv)
{
const MOSDOp *m = static_cast<const MOSDOp*>(op->get_req());
- assert(m->get_type() == CEPH_MSG_OSD_OP);
+ ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
int flags;
flags = m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK);
}
const MOSDOp *m = static_cast<const MOSDOp*>(op->get_req());
- assert(m->get_type() == CEPH_MSG_OSD_OP);
+ ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
- assert(m->get_map_epoch() >= pg->get_history().same_primary_since);
+ ceph_assert(m->get_map_epoch() >= pg->get_history().same_primary_since);
if (pg->is_ec_pg()) {
/**
* splitting. The simplest thing is to detect such cases here and drop
* them without an error (the client will resend anyway).
*/
- assert(m->get_map_epoch() <= superblock.newest_map);
+ ceph_assert(m->get_map_epoch() <= superblock.newest_map);
OSDMapRef opmap = try_get_map(m->get_map_epoch());
if (!opmap) {
dout(7) << __func__ << ": " << *pg << " no longer have map for "
std::pair<epoch_t, PGRef> p,
uint64_t reserved_pushes)
{
- assert(recovery_lock.is_locked_by_me());
+ ceph_assert(recovery_lock.is_locked_by_me());
enqueue_back(
OpQueueItem(
unique_ptr<OpQueueItem::OpQueueable>(
void OSD::handle_signal(int signum)
{
- assert(signum == SIGINT || signum == SIGTERM);
+ ceph_assert(signum == SIGINT || signum == SIGTERM);
derr << "*** Got signal " << sig_str(signum) << " ***" << dendl;
shutdown();
}
}
f->close_section();
} else {
- assert(0 == "broken asok registration");
+ ceph_assert(0 == "broken asok registration");
}
f->flush(ss);
delete f;
<< " (looks like " << (store_is_rotational ? "hdd" : "ssd") << ")"
<< dendl;
dout(2) << "journal " << journal_path << dendl;
- assert(store); // call pre_init() first!
+ ceph_assert(store); // call pre_init() first!
store->set_cache_shards(get_num_op_shards());
{
struct store_statfs_t stbuf;
int r = store->statfs(&stbuf);
- assert(r == 0);
+ ceph_assert(r == 0);
service.set_statfs(stbuf);
}
asok_hook = new OSDSocketHook(this);
int r = admin_socket->register_command("status", "status", asok_hook,
"high-level status of OSD");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("flush_journal", "flush_journal",
asok_hook,
"flush the journal to permanent store");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_ops_in_flight",
"dump_ops_in_flight " \
"name=filterstr,type=CephString,n=N,req=false",
asok_hook,
"show the ops currently in flight");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("ops",
"ops " \
"name=filterstr,type=CephString,n=N,req=false",
asok_hook,
"show the ops currently in flight");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_blocked_ops",
"dump_blocked_ops " \
"name=filterstr,type=CephString,n=N,req=false",
asok_hook,
"show the blocked ops currently in flight");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_historic_ops",
"dump_historic_ops " \
"name=filterstr,type=CephString,n=N,req=false",
asok_hook,
"show recent ops");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_historic_slow_ops",
"dump_historic_slow_ops " \
"name=filterstr,type=CephString,n=N,req=false",
asok_hook,
"show slowest recent ops");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_historic_ops_by_duration",
"dump_historic_ops_by_duration " \
"name=filterstr,type=CephString,n=N,req=false",
asok_hook,
"show slowest recent ops, sorted by duration");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_op_pq_state", "dump_op_pq_state",
asok_hook,
"dump op priority queue state");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_blacklist", "dump_blacklist",
asok_hook,
"dump blacklisted clients and times");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_watchers", "dump_watchers",
asok_hook,
"show clients which have active watches,"
" and on which objects");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_reservations", "dump_reservations",
asok_hook,
"show recovery reservations");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("get_latest_osdmap", "get_latest_osdmap",
asok_hook,
"force osd to update the latest map from "
"the mon");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command( "heap",
"heap " \
asok_hook,
"show heap usage info (available only if "
"compiled with tcmalloc)");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("set_heap_property",
"set_heap_property " \
"name=value,type=CephInt",
asok_hook,
"update malloc extension heap property");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("get_heap_property",
"get_heap_property " \
"name=property,type=CephString",
asok_hook,
"get malloc extension heap property");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_objectstore_kv_stats",
"dump_objectstore_kv_stats",
asok_hook,
"print statistics of kvdb which used by bluestore");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_scrubs",
"dump_scrubs",
asok_hook,
"print scheduled scrubs");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("calc_objectstore_db_histogram",
"calc_objectstore_db_histogram",
asok_hook,
"Generate key value histogram of kvdb(rocksdb) which used by bluestore");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("flush_store_cache",
"flush_store_cache",
asok_hook,
"Flush bluestore internal cache");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_pgstate_history", "dump_pgstate_history",
asok_hook,
"show recent state history");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("compact", "compact",
asok_hook,
"Commpact object store's omap."
" WARNING: Compaction probably slows your requests");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("get_mapped_pools", "get_mapped_pools",
asok_hook,
"dump pools whose PG(s) are mapped to this OSD.");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("smart", "smart name=devid,type=CephString,req=False",
asok_hook,
"probe OSD devices for SMART data.");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("list_devices", "list_devices",
asok_hook,
"name=val,type=CephString",
test_ops_hook,
"set omap key");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"rmomapkey",
"rmomapkey " \
"name=key,type=CephString",
test_ops_hook,
"remove omap key");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"setomapheader",
"setomapheader " \
"name=header,type=CephString",
test_ops_hook,
"set omap header");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"getomap",
"name=objname,type=CephObjectname",
test_ops_hook,
"output entire object map");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"truncobj",
"name=len,type=CephInt",
test_ops_hook,
"truncate object to length");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"injectdataerr",
"name=shardid,type=CephInt,req=false,range=0|255",
test_ops_hook,
"inject data error to an object");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"injectmdataerr",
"name=shardid,type=CephInt,req=false,range=0|255",
test_ops_hook,
"inject metadata error to an object");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"set_recovery_delay",
"set_recovery_delay " \
"name=utime,type=CephInt,req=false",
test_ops_hook,
"Delay osd recovery by specified seconds");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"trigger_scrub",
"trigger_scrub " \
"name=pgid,type=CephString ",
test_ops_hook,
"Trigger a scheduled scrub ");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command(
"injectfull",
"injectfull " \
"name=count,type=CephInt,req=false ",
test_ops_hook,
"Inject a full disk (optional count times)");
- assert(r == 0);
+ ceph_assert(r == 0);
}
void OSD::create_logger()
while (1) {
vector<ghobject_t> objects;
auto ch = store->open_collection(*p);
- assert(ch);
+ ceph_assert(ch);
store->collection_list(ch, next, ghobject_t::get_max(),
store->get_ideal_list_max(),
&objects, &next);
t.remove(tmp, p);
}
int r = store->queue_transaction(ch, std::move(t));
- assert(r == 0);
+ ceph_assert(r == 0);
t = ObjectStore::Transaction();
}
t.remove_collection(tmp);
int r = store->queue_transaction(ch, std::move(t));
- assert(r == 0);
+ ceph_assert(r == 0);
C_SaferCond waiter;
if (!ch->flush_commit(&waiter)) {
auto sdata = shards[shard_index];
Mutex::Locker l(sdata->shard_lock);
auto r = sdata->pg_slots.emplace(pgid, make_unique<OSDShardPGSlot>());
- assert(r.second);
+ ceph_assert(r.second);
auto *slot = r.first->second.get();
dout(20) << __func__ << " " << pgid << " " << pg << dendl;
sdata->_attach_pg(slot, pg.get());
void OSD::unregister_pg(PG *pg)
{
auto sdata = pg->osd_shard;
- assert(sdata);
+ ceph_assert(sdata);
Mutex::Locker l(sdata->shard_lock);
auto p = sdata->pg_slots.find(pg->pg_id);
if (p != sdata->pg_slots.end() &&
void OSD::load_pgs()
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
dout(0) << "load_pgs" << dendl;
vector<coll_t> ls;
derr << __func__ << ": have pgid " << pgid << " at epoch "
<< map_epoch << ", but missing map. Crashing."
<< dendl;
- assert(0 == "Missing map in load_pgs");
+ ceph_assert(0 == "Missing map in load_pgs");
}
}
pg = _make_pg(pgosdmap, pgid);
for (auto shard : shards) {
shard->prime_splits(osdmap, &new_children);
}
- assert(new_children.empty());
+ ceph_assert(new_children.empty());
}
pg->reg_next_scrub();
pg->lock(true);
// we are holding the shard lock
- assert(!pg->is_deleted());
+ ceph_assert(!pg->is_deleted());
pg->init(
role,
dout(15) << __func__ << ": found map gap, returning false" << dendl;
return false;
}
- assert(oldmap->have_pg_pool(pgid.pool()));
+ ceph_assert(oldmap->have_pg_pool(pgid.pool()));
int upprimary, actingprimary;
vector<int> up, acting;
void OSD::_remove_heartbeat_peer(int n)
{
map<int,HeartbeatInfo>::iterator q = heartbeat_peers.find(n);
- assert(q != heartbeat_peers.end());
+ ceph_assert(q != heartbeat_peers.end());
dout(20) << " removing heartbeat peer osd." << n
<< " " << q->second.con_back->get_peer_addr()
<< " " << (q->second.con_front ? q->second.con_front->get_peer_addr() : entity_addr_t())
void OSD::maybe_update_heartbeat_peers()
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
if (is_waiting_for_healthy() || is_active()) {
utime_t now = ceph_clock_now();
void OSD::reset_heartbeat_peers()
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
dout(10) << "reset_heartbeat_peers" << dendl;
Mutex::Locker l(heartbeat_lock);
while (!heartbeat_peers.empty()) {
<< " last_rx_front " << i->second.last_rx_front
<< dendl;
i->second.last_rx_back = now;
- assert(unacknowledged > 0);
+ ceph_assert(unacknowledged > 0);
--unacknowledged;
// if there is no front con, set both stamps.
if (i->second.con_front == NULL) {
i->second.last_rx_front = now;
- assert(unacknowledged > 0);
+ ceph_assert(unacknowledged > 0);
--unacknowledged;
}
} else if (m->get_connection() == i->second.con_front) {
<< " last_rx_front " << i->second.last_rx_front << " -> " << now
<< dendl;
i->second.last_rx_front = now;
- assert(unacknowledged > 0);
+ ceph_assert(unacknowledged > 0);
--unacknowledged;
}
void OSD::heartbeat_check()
{
- assert(heartbeat_lock.is_locked());
+ ceph_assert(heartbeat_lock.is_locked());
utime_t now = ceph_clock_now();
// check for incoming heartbeats (move me elsewhere?)
auto new_stat = service.set_osd_stat(hb_peers, get_num_pgs());
dout(5) << __func__ << " " << new_stat << dendl;
- assert(new_stat.kb);
+ ceph_assert(new_stat.kb);
float ratio = ((float)new_stat.kb_used) / ((float)new_stat.kb);
service.check_full_status(ratio);
void OSD::tick()
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
dout(10) << "tick" << dendl;
if (is_active() || is_waiting_for_healthy()) {
void OSD::tick_without_osd_lock()
{
- assert(tick_timer_lock.is_locked());
+ ceph_assert(tick_timer_lock.is_locked());
dout(10) << "tick_without_osd_lock" << dendl;
logger->set(l_osd_buf, buffer::get_total_alloc());
// refresh osd stats
struct store_statfs_t stbuf;
int r = store->statfs(&stbuf);
- assert(r == 0);
+ ceph_assert(r == 0);
service.set_statfs(stbuf);
// osd_lock is not being held, which means the OSD state
dout(10) << " new session (outgoing) " << s << " con=" << s->con
<< " addr=" << s->con->get_peer_addr() << dendl;
// we don't connect to clients
- assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD);
+ ceph_assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD);
s->entity_name.set_type(CEPH_ENTITY_TYPE_OSD);
}
}
dout(10) << "new session (incoming)" << s << " con=" << con
<< " addr=" << con->get_peer_addr()
<< " must have raced with connect" << dendl;
- assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD);
+ ceph_assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD);
s->entity_name.set_type(CEPH_ENTITY_TYPE_OSD);
}
}
void OSD::_preboot(epoch_t oldest, epoch_t newest)
{
- assert(is_preboot());
+ ceph_assert(is_preboot());
dout(10) << __func__ << " _preboot mon has osdmaps "
<< oldest << ".." << newest << dendl;
void OSD::send_alive()
{
- assert(mon_report_lock.is_locked());
+ ceph_assert(mon_report_lock.is_locked());
if (!osdmap->exists(whoami))
return;
epoch_t up_thru = osdmap->get_up_thru(whoami);
dout(10) << __func__ << " " << first << ".." << last
<< ", previously requested "
<< requested_full_first << ".." << requested_full_last << dendl;
- assert(osd_lock.is_locked());
- assert(first > 0 && last > 0);
- assert(first <= last);
- assert(first >= requested_full_first); // we shouldn't ever ask for older maps
+ ceph_assert(osd_lock.is_locked());
+ ceph_assert(first > 0 && last > 0);
+ ceph_assert(first <= last);
+ ceph_assert(first >= requested_full_first); // we shouldn't ever ask for older maps
if (requested_full_first == 0) {
// first request
requested_full_first = first;
void OSD::got_full_map(epoch_t e)
{
- assert(requested_full_first <= requested_full_last);
- assert(osd_lock.is_locked());
+ ceph_assert(requested_full_first <= requested_full_last);
+ ceph_assert(osd_lock.is_locked());
if (requested_full_first == 0) {
dout(20) << __func__ << " " << e << ", nothing requested" << dendl;
return;
void OSD::send_failures()
{
- assert(map_lock.is_locked());
- assert(mon_report_lock.is_locked());
+ ceph_assert(map_lock.is_locked());
+ ceph_assert(mon_report_lock.is_locked());
Mutex::Locker l(heartbeat_lock);
utime_t now = ceph_clock_now();
while (!failure_queue.empty()) {
void OSD::dispatch_session_waiting(SessionRef session, OSDMapRef osdmap)
{
- assert(session->session_dispatch_lock.is_locked());
+ ceph_assert(session->session_dispatch_lock.is_locked());
auto i = session->waiting_on_map.begin();
while (i != session->waiting_on_map.end()) {
OpRequestRef op = &(*i);
- assert(ms_can_fast_dispatch(op->get_req()));
+ ceph_assert(ms_can_fast_dispatch(op->get_req()));
const MOSDFastDispatchOp *m = static_cast<const MOSDFastDispatchOp*>(
op->get_req());
if (m->get_min_epoch() > osdmap->get_epoch()) {
// note sender epoch, min req's epoch
op->sent_epoch = static_cast<MOSDFastDispatchOp*>(m)->get_map_epoch();
op->min_epoch = static_cast<MOSDFastDispatchOp*>(m)->get_min_epoch();
- assert(op->min_epoch <= op->sent_epoch); // sanity check!
+ ceph_assert(op->min_epoch <= op->sent_epoch); // sanity check!
service.maybe_inject_dispatch_delay();
void OSD::do_waiters()
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
dout(10) << "do_waiters -- start" << dendl;
while (!finished.empty()) {
void OSD::_dispatch(Message *m)
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
dout(20) << "_dispatch " << m << " " << *m << dendl;
switch (m->get_type()) {
void OSD::note_down_osd(int peer)
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
cluster_messenger->mark_down_addrs(osdmap->get_cluster_addrs(peer));
heartbeat_lock.Lock();
service.publish_superblock(superblock);
write_superblock(t);
int tr = store->queue_transaction(service.meta_ch, std::move(t), nullptr);
- assert(tr == 0);
+ ceph_assert(tr == 0);
num = 0;
if (!skip_maps) {
// skip_maps leaves us with a range of old maps if we fail to remove all
service.publish_superblock(superblock);
write_superblock(t);
int tr = store->queue_transaction(service.meta_ch, std::move(t), nullptr);
- assert(tr == 0);
+ ceph_assert(tr == 0);
}
// we should not remove the cached maps
- assert(min <= service.map_cache.cached_key_lower_bound());
+ ceph_assert(min <= service.map_cache.cached_key_lower_bound());
}
void OSD::handle_osd_map(MOSDMap *m)
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
// Keep a ref in the list until we get the newly received map written
// onto disk. This is important because as long as the refs are alive,
// the OSDMaps will be pinned in the cache and we won't try to read it
logger->inc(l_osd_mape_dup, superblock.newest_map - first + 1);
if (service.max_oldest_map < m->oldest_map) {
service.max_oldest_map = m->oldest_map;
- assert(service.max_oldest_map >= superblock.oldest_map);
+ ceph_assert(service.max_oldest_map >= superblock.oldest_map);
}
// make sure there is something new, here, before we bother flushing
// up with.
epoch_t max_lag = cct->_conf->osd_map_cache_size *
m_osd_pg_epoch_max_lag_factor;
- assert(max_lag > 0);
+ ceph_assert(max_lag > 0);
if (osdmap->get_epoch() > max_lag) {
epoch_t need = osdmap->get_epoch() - max_lag;
for (auto shard : shards) {
for (epoch_t e = start; e <= last; e++) {
if (txn_size >= t.get_num_bytes()) {
derr << __func__ << " transaction size overflowed" << dendl;
- assert(txn_size < t.get_num_bytes());
+ ceph_assert(txn_size < t.get_num_bytes());
}
txn_size = t.get_num_bytes();
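// Editor's note on the guard above: t.get_num_bytes() should only grow as
// successive maps are appended, so seeing the previous txn_size at or above
// the current byte count means the counter wrapped. The derr line records
// the diagnosis, and the (necessarily false) ceph_assert then aborts.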
map<epoch_t,bufferlist>::iterator p;
bool got = get_map_bl(e - 1, obl);
if (!got) {
auto p = added_maps_bl.find(e - 1);
- assert(p != added_maps_bl.end());
+ ceph_assert(p != added_maps_bl.end());
obl = p->second;
}
o->decode(obl);
inc.decode(p);
if (o->apply_incremental(inc) < 0) {
derr << "ERROR: bad fsid? i have " << osdmap->get_fsid() << " and inc has " << inc.fsid << dendl;
- assert(0 == "bad fsid");
+ ceph_assert(0 == "bad fsid");
}
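// Editor's note: `ceph_assert(0 == "bad fsid")` is the always-false idiom
// used throughout this change. A string literal decays to a non-null
// pointer, so the comparison can never succeed, and the message text shows
// up verbatim in the abort output. A minimal self-contained illustration:
#include <cassert>
int main() {
  assert(0 == "unreachable branch taken");  // always fires (unless NDEBUG)
  return 0;
}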
bufferlist fbl;
continue;
}
- assert(0 == "MOSDMap lied about what maps it had?");
+ ceph_assert(0 == "MOSDMap lied about what maps it had?");
}
// even if this map isn't from a mon, we may have satisfied our subscription
continue;
}
}
- assert(lastmap->get_epoch() + 1 == i.second->get_epoch());
+ ceph_assert(lastmap->get_epoch() + 1 == i.second->get_epoch());
for (auto& j : lastmap->get_pools()) {
if (!i.second->have_pg_pool(j.first)) {
dout(10) << __func__ << " recording final pg_pool_t for pool "
<< ")" << dendl;
OSDMapRef newmap = get_map(cur);
- assert(newmap); // we just cached it above!
+ ceph_assert(newmap); // we just cached it above!
// start blacklisting messages sent to peers that go down.
service.pre_publish_map(newmap);
ObjectStore::Transaction t;
write_superblock(t);
int err = store->queue_transaction(service.meta_ch, std::move(t), NULL);
- assert(err == 0);
+ ceph_assert(err == 0);
}
}
}
ThreadPool::TPHandle &handle,
PG::RecoveryCtx *rctx)
{
- assert(pg->is_locked());
+ ceph_assert(pg->is_locked());
OSDMapRef lastmap = pg->get_osdmap();
- assert(lastmap->get_epoch() < osd_epoch);
+ ceph_assert(lastmap->get_epoch() < osd_epoch);
set<PGRef> new_pgs; // any split children
for (epoch_t next_epoch = pg->get_osdmap_epoch() + 1;
next_epoch <= osd_epoch;
void OSD::consume_map()
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
dout(7) << "consume_map version " << osdmap->get_epoch() << dendl;
/** make sure the cluster is speaking in SORTBITWISE, because we don't
for (auto& shard : shards) {
shard->prime_splits(osdmap, &newly_split);
}
- assert(newly_split.empty());
+ ceph_assert(newly_split.empty());
}
unsigned pushes_to_free = 0;
void OSD::activate_map()
{
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
dout(7) << "activate_map version " << osdmap->get_epoch() << dendl;
dout(15) << "require_same_or_newer_map " << epoch
<< " (i am " << osdmap->get_epoch() << ") " << m << dendl;
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
// do they have a newer map?
if (epoch > osdmap->get_epoch()) {
for (set<spg_t>::const_iterator i = childpgids.begin();
i != childpgids.end();
++i, ++stat_iter) {
- assert(stat_iter != updated_stats.end());
+ ceph_assert(stat_iter != updated_stats.end());
dout(10) << __func__ << " splitting " << *parent << " into " << *i << dendl;
PG* child = _make_pg(nextmap, *i);
child->lock(true);
child->finish_split_stats(*stat_iter, rctx->transaction);
child->unlock();
}
- assert(stat_iter != updated_stats.end());
+ ceph_assert(stat_iter != updated_stats.end());
parent->finish_split_stats(*stat_iter, rctx->transaction);
}
void OSD::handle_pg_create(OpRequestRef op)
{
const MOSDPGCreate *m = static_cast<const MOSDPGCreate*>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_CREATE);
+ ceph_assert(m->get_type() == MSG_OSD_PG_CREATE);
dout(10) << "handle_pg_create " << *m << dendl;
for (map<pg_t,pg_create_t>::const_iterator p = m->mkpg.begin();
p != m->mkpg.end();
++p, ++ci) {
- assert(ci != m->ctimes.end() && ci->first == p->first);
+ ceph_assert(ci != m->ctimes.end() && ci->first == p->first);
epoch_t created = p->second.created;
if (p->second.split_bits) // Skip split pgs
continue;
spg_t pgid;
bool mapped = osdmap->get_primary_shard(on, &pgid);
- assert(mapped);
+ ceph_assert(mapped);
PastIntervals pi;
pg_history_t history;
int tr = store->queue_transaction(
pg->ch,
std::move(*ctx.transaction), TrackedOpRef(), handle);
- assert(tr == 0);
+ ceph_assert(tr == 0);
delete (ctx.transaction);
ctx.transaction = new ObjectStore::Transaction;
}
pg->ch,
std::move(*ctx.transaction), TrackedOpRef(),
handle);
- assert(tr == 0);
+ ceph_assert(tr == 0);
}
delete ctx.notify_list;
delete ctx.query_map;
// RECOVERY
void OSDService::_maybe_queue_recovery() {
- assert(recovery_lock.is_locked_by_me());
+ ceph_assert(recovery_lock.is_locked_by_me());
uint64_t available_pushes;
while (!awaiting_throttle.empty() &&
_recover_now(&available_pushes)) {
}
out:
- assert(started <= reserved_pushes);
+ ceph_assert(started <= reserved_pushes);
service.release_reserved_pushes(reserved_pushes);
}
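// Editor's note: the accounting here is deliberately conservative. A PG may
// start fewer recovery ops than it reserved, so the remainder of
// reserved_pushes is handed back to the throttle; starting *more* than was
// reserved would be a bookkeeping bug, which the ceph_assert makes fatal.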
#ifdef DEBUG_RECOVERY_OIDS
dout(20) << " active was " << recovery_oids[pg->pg_id] << dendl;
- assert(recovery_oids[pg->pg_id].count(soid) == 0);
+ ceph_assert(recovery_oids[pg->pg_id].count(soid) == 0);
recovery_oids[pg->pg_id].insert(soid);
#endif
}
<< dendl;
// adjust count
- assert(recovery_ops_active > 0);
+ ceph_assert(recovery_ops_active > 0);
recovery_ops_active--;
#ifdef DEBUG_RECOVERY_OIDS
dout(20) << " active oids was " << recovery_oids[pg->pg_id] << dendl;
- assert(recovery_oids[pg->pg_id].count(soid));
+ ceph_assert(recovery_oids[pg->pg_id].count(soid));
recovery_oids[pg->pg_id].erase(soid);
#endif
dout(10) << __func__ << "(" << pushes << "), recovery_ops_reserved "
<< recovery_ops_reserved << " -> " << (recovery_ops_reserved-pushes)
<< dendl;
- assert(recovery_ops_reserved >= pushes);
+ ceph_assert(recovery_ops_reserved >= pushes);
recovery_ops_reserved -= pushes;
_maybe_queue_recovery();
}
r.first->second->waiting_for_split = true;
} else {
auto q = r.first;
- assert(q != pg_slots.end());
+ ceph_assert(q != pg_slots.end());
if (q->second->waiting_for_split) {
dout(10) << "slot " << *p << " already primed" << dendl;
} else {
Mutex::Locker l(shard_lock);
dout(10) << pg->pg_id << " " << pg << dendl;
auto p = pg_slots.find(pg->pg_id);
- assert(p != pg_slots.end());
+ ceph_assert(p != pg_slots.end());
auto *slot = p->second.get();
- assert(!slot->pg);
- assert(slot->waiting_for_split);
+ ceph_assert(!slot->pg);
+ ceph_assert(slot->waiting_for_split);
_attach_pg(slot, pg);
_wake_pg_slot(pg->pg_id, slot);
}
{
uint32_t shard_index = thread_index % osd->num_shards;
auto& sdata = osd->shards[shard_index];
- assert(sdata);
+ ceph_assert(sdata);
// peek at spg_t
sdata->shard_lock.Lock();
if (sdata->pqueue->empty()) {
for (auto shard : osd->shards) {
shard->prime_splits(osdmap, &new_children);
}
- assert(new_children.empty());
+ ceph_assert(new_children.empty());
}
// osd_opwq_process marks the point at which an operation has been dequeued
{
auto shard_index = item.get_ordering_token().hash_to_shard(osd->shards.size());
auto& sdata = osd->shards[shard_index];
- assert(sdata);
+ ceph_assert(sdata);
sdata->shard_lock.Lock();
auto p = sdata->pg_slots.find(item.get_ordering_token());
if (p != sdata->pg_slots.end() &&
Mutex::Locker l(pre_publish_lock);
map<epoch_t, unsigned>::iterator i =
map_reservations.find(osdmap->get_epoch());
- assert(i != map_reservations.end());
- assert(i->second > 0);
+ ceph_assert(i != map_reservations.end());
+ ceph_assert(i->second > 0);
if (--(i->second) == 0) {
map_reservations.erase(i);
}
/// blocks until there are no reserved maps prior to next_osdmap
void await_reserved_maps() {
Mutex::Locker l(pre_publish_lock);
- assert(next_osdmap);
+ ceph_assert(next_osdmap);
while (true) {
map<epoch_t, unsigned>::const_iterator i = map_reservations.cbegin();
if (i == map_reservations.cend() || i->first >= next_osdmap->get_epoch()) {
void unreg_pg_scrub(spg_t pgid, utime_t t) {
Mutex::Locker l(sched_scrub_lock);
size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
- assert(removed);
+ ceph_assert(removed);
}
bool first_scrub_stamp(ScrubJob *out) {
Mutex::Locker l(sched_scrub_lock);
}
void dumps_scrub(Formatter *f) {
- assert(f != nullptr);
+ ceph_assert(f != nullptr);
Mutex::Locker l(sched_scrub_lock);
f->open_array_section("scrubs");
void _dequeue(PG *pg, uint64_t old_priority) {
set<PGRef>& oq = agent_queue[old_priority];
set<PGRef>::iterator p = oq.find(pg);
- assert(p != oq.end());
+ ceph_assert(p != oq.end());
if (p == agent_queue_pos)
++agent_queue_pos;
oq.erase(p);
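// Editor's note: the iterator nudge above keeps agent_queue_pos valid.
// Erasing the element the cursor currently points at would invalidate it,
// so the cursor is advanced past the doomed element first.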
/// adjust priority for an enqueued pg
void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) {
Mutex::Locker l(agent_lock);
- assert(new_priority != old_priority);
+ ceph_assert(new_priority != old_priority);
_enqueue(pg, new_priority);
_dequeue(pg, old_priority);
}
/// note finish or cancellation of an async (evict) op
void agent_finish_evict_op() {
Mutex::Locker l(agent_lock);
- assert(agent_ops > 0);
+ ceph_assert(agent_ops > 0);
--agent_ops;
agent_cond.Signal();
}
void agent_start_op(const hobject_t& oid) {
Mutex::Locker l(agent_lock);
++agent_ops;
- assert(agent_oids.count(oid) == 0);
+ ceph_assert(agent_oids.count(oid) == 0);
agent_oids.insert(oid);
}
/// note finish or cancellation of an async (flush) op
void agent_finish_op(const hobject_t& oid) {
Mutex::Locker l(agent_lock);
- assert(agent_ops > 0);
+ ceph_assert(agent_ops > 0);
--agent_ops;
- assert(agent_oids.count(oid) == 1);
+ ceph_assert(agent_oids.count(oid) == 1);
agent_oids.erase(oid);
agent_cond.Signal();
}
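// Editor's note: agent_ops counts in-flight flush/evict operations and
// agent_oids records which objects they touch, so start/finish calls must
// stay strictly paired. The ceph_asserts turn a double-finish, or a second
// start on an oid already in flight, into an immediate abort instead of a
// silent accounting drift.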
OSDMapRef try_get_map(epoch_t e);
OSDMapRef get_map(epoch_t e) {
OSDMapRef ret(try_get_map(e));
- assert(ret);
+ ceph_assert(ret);
return ret;
}
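// Hedged usage sketch (caller code is hypothetical): try_get_map() yields a
// null OSDMapRef on a cache miss, while get_map() promotes a miss into a
// ceph_assert failure, so the latter is only for epochs known to be pinned.
//
//   OSDMapRef m = service.try_get_map(e);
//   if (!m) {
//     // not cached: fall back to requesting the map rather than asserting
//   }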
OSDMapRef add_map(OSDMap *o) {
list<OpRequestRef> finished;
void take_waiters(list<OpRequestRef>& ls) {
- assert(osd_lock.is_locked());
+ ceph_assert(osd_lock.is_locked());
finished.splice(finished.end(), ls);
}
void do_waiters();
char queue_name[32] = {0};
snprintf(queue_name, sizeof(queue_name), "%s%" PRIu32, "OSD:ShardedOpWQ:", i);
- assert(NULL != sdata);
+ ceph_assert(NULL != sdata);
sdata->shard_lock.Lock();
f->open_object_section(queue_name);
bool is_shard_empty(uint32_t thread_index) override {
uint32_t shard_index = thread_index % osd->num_shards;
auto &&sdata = osd->shards[shard_index];
- assert(sdata);
+ ceph_assert(sdata);
Mutex::Locker l(sdata->shard_lock);
return sdata->pqueue->empty();
}
int OSDMap::Incremental::propagate_snaps_to_tiers(CephContext *cct,
const OSDMap& osdmap)
{
- assert(epoch == osdmap.get_epoch() + 1);
+ ceph_assert(epoch == osdmap.get_epoch() + 1);
for (auto &new_pool : new_pools) {
if (!new_pool.second.tiers.empty()) {
} else {
type = crush->get_bucket_type(current);
}
- assert(type >= 0);
+ ceph_assert(type >= 0);
if (!subtree_is_down(current, down_cache)) {
ldout(cct, 30) << "containing_subtree_is_down(" << id << ") = false" << dendl;
// OSDMaps. others should be passing around the canonical encoded
// buffers from on high. select out those callers by passing in an
// "impossible" feature bit.
- assert(features & CEPH_FEATURE_RESERVED);
+ ceph_assert(features & CEPH_FEATURE_RESERVED);
features &= ~CEPH_FEATURE_RESERVED;
size_t start_offset = bl.length();
set<int64_t> *backfillfull,
set<int64_t> *nearfull) const
{
- assert(full);
- assert(backfillfull);
- assert(nearfull);
+ ceph_assert(full);
+ ceph_assert(backfillfull);
+ ceph_assert(nearfull);
full->clear();
backfillfull->clear();
nearfull->clear();
else if (inc.fsid != fsid)
return -EINVAL;
- assert(inc.epoch == epoch+1);
+ ceph_assert(inc.epoch == epoch+1);
epoch++;
modified = inc.modified;
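// Hedged sketch of how the epoch check above is exercised (the loop below
// is hypothetical caller code): incrementals form a chain, each one valid
// only against the map exactly one epoch behind it, so they must be applied
// strictly in order.
//
//   for (auto& inc : pending_incrementals) {  // sorted by epoch
//     int r = osdmap.apply_incremental(inc);  // enforces epoch continuity
//     if (r < 0)
//       break;  // e.g. an fsid mismatch returns -EINVAL, as above
//   }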
p != new_purged_snaps.end();
++p) {
auto q = removed_snaps_queue.find(p->first);
- assert(q != removed_snaps_queue.end());
+ ceph_assert(q != removed_snaps_queue.end());
q->second.subtract(p->second);
if (q->second.empty()) {
removed_snaps_queue.erase(q);
// OSDMaps. others should be passing around the canonical encoded
// buffers from on high. select out those callers by passing in an
// "impossible" feature bit.
- assert(features & CEPH_FEATURE_RESERVED);
+ ceph_assert(features & CEPH_FEATURE_RESERVED);
features &= ~CEPH_FEATURE_RESERVED;
size_t start_offset = bl.length();
encode(pg_upmap, bl);
encode(pg_upmap_items, bl);
} else {
- assert(pg_upmap.empty());
- assert(pg_upmap_items.empty());
+ ceph_assert(pg_upmap.empty());
+ ceph_assert(pg_upmap_items.empty());
}
if (v >= 6) {
encode(crush_version, bl);
if (f) {
OSDTreeFormattingDumper(crush.get(), this, filter).dump(f, bucket);
} else {
- assert(out);
+ ceph_assert(out);
TextTable tbl;
OSDTreePlainDumper(crush.get(), this, filter).dump(&tbl, bucket);
*out << tbl;
r = build_simple_crush_map(cct, *crush, nosd, &ss);
else
r = build_simple_crush_map_from_conf(cct, *crush, &ss);
- assert(r == 0);
+ ceph_assert(r == 0);
int poolbase = get_max_osd() ? get_max_osd() : 1;
const int default_replicated_rule = crush->get_osd_pool_default_crush_replicated_ruleset(cct);
- assert(default_replicated_rule >= 0);
+ ceph_assert(default_replicated_rule >= 0);
if (default_pool) {
// pgp_num <= pg_num
int rootid;
int r = crush.add_bucket(0, 0, CRUSH_HASH_DEFAULT,
root_type, 0, NULL, NULL, &rootid);
- assert(r == 0);
+ ceph_assert(r == 0);
crush.set_item_name(rootid, "default");
for (int o=0; o<nosd; o++) {
int r = crush.add_bucket(0, 0,
CRUSH_HASH_DEFAULT,
root_type, 0, NULL, NULL, &rootid);
- assert(r == 0);
+ ceph_assert(r == 0);
crush.set_item_name(rootid, "default");
// add osds
}
}
} else {
- assert(0 == "unhandled pool type");
+ ceph_assert(0 == "unhandled pool type");
}
}
}
set<int> overfull;
for (auto& i : pgs_by_osd) {
// make sure osd is still there (belongs to this crush-tree)
- assert(osd_weight.count(i.first));
+ ceph_assert(osd_weight.count(i.first));
float target = osd_weight[i.first] * pgs_per_weight;
float deviation = (float)i.second.size() - target;
ldout(cct, 20) << " osd." << i.first
int osd = p->second;
float deviation = p->first;
float target = osd_weight[osd] * pgs_per_weight;
- assert(target > 0);
+ ceph_assert(target > 0);
if (deviation/target < max_deviation_ratio) {
ldout(cct, 10) << " osd." << osd
<< " target " << target
if (orig.size() != out.size()) {
continue;
}
- assert(orig != out);
+ ceph_assert(orig != out);
auto& rmi = tmp.pg_upmap_items[pg];
for (unsigned i = 0; i < out.size(); ++i) {
if (orig[i] != out[i]) {
int osd,
set<int64_t> *pool_ids) const
{
- assert(pool_ids);
+ ceph_assert(pool_ids);
set<int> raw_rules;
int r = crush->get_rules_by_osd(osd, &raw_rules);
if (r < 0) {
lderr(cct) << __func__ << " get_rules_by_osd failed: " << cpp_strerror(r)
<< dendl;
- assert(r >= 0);
+ ceph_assert(r >= 0);
}
set<int> rules;
for (auto &i: raw_rules) {
}
}
num_down_in_osds = down_in_osds.size();
- assert(num_down_in_osds <= num_in_osds);
+ ceph_assert(num_down_in_osds <= num_in_osds);
if (num_down_in_osds > 0) {
// summary of down subtree types and osds
for (int type = max_type; type > 0; type--) {
void init_current() {
if (it != end) {
current.first = it->first;
- assert(it->second);
+ ceph_assert(it->second);
current.second.resize(*it->second);
int32_t *p = it->second + 1;
for (int n = 0; n < *it->second; ++n, ++p) {
/// filter out osds with any pending state changing
size_t get_pending_state_osds(vector<int> *osds) {
- assert(osds);
+ ceph_assert(osds);
osds->clear();
for (auto &p : new_state) {
static void calc_state_set(int state, set<string>& st);
int get_state(int o) const {
- assert(o < max_osd);
+ ceph_assert(o < max_osd);
return osd_state[o];
}
int get_state(int o, set<string>& st) const {
- assert(o < max_osd);
+ ceph_assert(o < max_osd);
unsigned t = osd_state[o];
calc_state_set(t, st);
return osd_state[o];
}
void set_state(int o, unsigned s) {
- assert(o < max_osd);
+ ceph_assert(o < max_osd);
osd_state[o] = s;
}
void set_weight(int o, unsigned w) {
- assert(o < max_osd);
+ ceph_assert(o < max_osd);
osd_weight[o] = w;
if (w)
osd_state[o] |= CEPH_OSD_EXISTS;
}
unsigned get_weight(int o) const {
- assert(o < max_osd);
+ ceph_assert(o < max_osd);
return osd_weight[o];
}
float get_weightf(int o) const {
void adjust_osd_weights(const map<int,double>& weights, Incremental& inc) const;
void set_primary_affinity(int o, int w) {
- assert(o < max_osd);
+ ceph_assert(o < max_osd);
if (!osd_primary_affinity)
osd_primary_affinity.reset(
new mempool::osdmap::vector<__u32>(
(*osd_primary_affinity)[o] = w;
}
unsigned get_primary_affinity(int o) const {
- assert(o < max_osd);
+ ceph_assert(o < max_osd);
if (!osd_primary_affinity)
return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
return (*osd_primary_affinity)[o];
}
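// Editor's sketch of the lazy-allocation pattern used for primary affinity
// (names below are hypothetical): the per-OSD vector is materialized only
// on the first write, and readers fall back to the default while it is null.
//
//   std::unique_ptr<std::vector<uint32_t>> affinity;  // starts out null
//   uint32_t get(int o) const {
//     return affinity ? (*affinity)[o] : DEFAULT_AFFINITY;
//   }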
void get_noup_osds(vector<int> *osds) const {
- assert(osds);
+ ceph_assert(osds);
osds->clear();
for (int i = 0; i < max_osd; i++) {
}
void get_nodown_osds(vector<int> *osds) const {
- assert(osds);
+ ceph_assert(osds);
osds->clear();
for (int i = 0; i < max_osd; i++) {
}
void get_noin_osds(vector<int> *osds) const {
- assert(osds);
+ ceph_assert(osds);
osds->clear();
for (int i = 0; i < max_osd; i++) {
}
void get_noout_osds(vector<int> *osds) const {
- assert(osds);
+ ceph_assert(osds);
osds->clear();
for (int i = 0; i < max_osd; i++) {
int find_osd_on_ip(const entity_addr_t& ip) const;
const entity_addrvec_t& get_addrs(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return osd_addrs->client_addrs[osd] ?
*osd_addrs->client_addrs[osd] : _blank_addrvec;
}
return get_addrs(osd);
}
const entity_addrvec_t &get_cluster_addrs(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return osd_addrs->cluster_addrs[osd] ?
*osd_addrs->cluster_addrs[osd] : _blank_addrvec;
}
const entity_addrvec_t &get_hb_back_addrs(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return osd_addrs->hb_back_addrs[osd] ?
*osd_addrs->hb_back_addrs[osd] : _blank_addrvec;
}
const entity_addrvec_t &get_hb_front_addrs(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return osd_addrs->hb_front_addrs[osd] ?
*osd_addrs->hb_front_addrs[osd] : _blank_addrvec;
}
const uuid_d& get_uuid(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return (*osd_uuid)[osd];
}
const epoch_t& get_up_from(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return osd_info[osd].up_from;
}
const epoch_t& get_up_thru(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return osd_info[osd].up_thru;
}
const epoch_t& get_down_at(int osd) const {
- assert(exists(osd));
+ ceph_assert(exists(osd));
return osd_info[osd].down_at;
}
const osd_info_t& get_info(int osd) const {
- assert(osd < max_osd);
+ ceph_assert(osd < max_osd);
return osd_info[osd];
}
const osd_xinfo_t& get_xinfo(int osd) const {
- assert(osd < max_osd);
+ ceph_assert(osd < max_osd);
return osd_xinfo[osd];
}
const object_locator_t& loc) const {
pg_t pg;
int ret = object_locator_to_pg(oid, loc, pg);
- assert(ret == 0);
+ ceph_assert(ret == 0);
return pg;
}
int get_pg_num(int pg_pool) const
{
const pg_pool_t *pool = get_pg_pool(pg_pool);
- assert(NULL != pool);
+ ceph_assert(NULL != pool);
return pool->get_pg_num();
}
return -ENOENT;
}
const pg_pool_t *p = get_pg_pool(pgid.pool());
- assert(p);
+ ceph_assert(p);
return p->get_min_size();
}
return -ENOENT;
}
const pg_pool_t *p = get_pg_pool(pgid.pool());
- assert(p);
+ ceph_assert(p);
return p->get_size();
}
return -ENOENT;
}
const pg_pool_t *p = get_pg_pool(pgid.pool());
- assert(p);
+ ceph_assert(p);
return p->get_crush_rule();
}
}
bool pg_is_ec(pg_t pg) const {
auto i = pools.find(pg.pool());
- assert(i != pools.end());
+ ceph_assert(i != pools.end());
return i->second.is_erasure();
}
bool get_primary_shard(const pg_t& pgid, spg_t *out) const {
return pools;
}
void get_pool_ids_by_rule(int rule_id, set<int64_t> *pool_ids) const {
- assert(pool_ids);
+ ceph_assert(pool_ids);
for (auto &p: pools) {
if (p.second.get_crush_rule() == rule_id) {
pool_ids->insert(p.first);
set<int64_t> *pool_ids) const;
const string& get_pool_name(int64_t p) const {
auto i = pool_name.find(p);
- assert(i != pool_name.end());
+ ceph_assert(i != pool_name.end());
return i->second;
}
const mempool::osdmap::map<int64_t,string>& get_pool_names() const {
}
unsigned get_pg_size(pg_t pg) const {
auto p = pools.find(pg.pool());
- assert(p != pools.end());
+ ceph_assert(p != pools.end());
return p->second.get_size();
}
int get_pg_type(pg_t pg) const {
auto p = pools.find(pg.pool());
- assert(p != pools.end());
+ ceph_assert(p != pools.end());
return p->second.get_type();
}
pg_t raw_pg_to_pg(pg_t pg) const {
auto p = pools.find(pg.pool());
- assert(p != pools.end());
+ ceph_assert(p != pools.end());
return p->second.raw_pg_to_pg(pg);
}
p.second.is_erasure()));
}
pools.erase(q, pools.end());
- assert(pools.size() == osdmap.get_pools().size());
+ ceph_assert(pools.size() == osdmap.get_pools().size());
}
void OSDMapMapping::update(const OSDMap& osdmap)
unsigned pg_end)
{
auto i = pools.find(pool);
- assert(i != pools.end());
- assert(pg_begin <= pg_end);
- assert(pg_end <= i->second.pg_num);
+ ceph_assert(i != pools.end());
+ ceph_assert(pg_begin <= pg_end);
+ ceph_assert(pg_end <= i->second.pg_num);
for (unsigned ps = pg_begin; ps < pg_end; ++ps) {
vector<int> up, acting;
int up_primary, acting_primary;
any = true;
}
}
- assert(any);
+ ceph_assert(any);
}
Job(const OSDMap *om) : start(ceph_clock_now()), osdmap(om) {}
virtual ~Job() {
- assert(shards == 0);
+ ceph_assert(shards == 0);
}
// child must implement this
void _process(Item *i, ThreadPool::TPHandle &h) override;
void _clear() override {
- assert(_empty());
+ ceph_assert(_empty());
}
bool _empty() override {
std::vector<int> *acting,
int *acting_primary) const {
auto p = pools.find(pgid.pool());
- assert(p != pools.end());
- assert(pgid.ps() < p->second.pg_num);
+ ceph_assert(p != pools.end());
+ ceph_assert(pgid.ps() < p->second.pg_num);
p->second.get(pgid.ps(), up, up_primary, acting, acting_primary);
}
int *acting_primary,
spg_t *spgid) {
auto p = pools.find(pgid.pool());
- assert(p != pools.end());
- assert(pgid.ps() < p->second.pg_num);
+ ceph_assert(p != pools.end());
+ ceph_assert(pgid.ps() < p->second.pg_num);
vector<int> acting;
p->second.get(pgid.ps(), nullptr, nullptr, &acting, acting_primary);
if (p->second.erasure) {
}
const mempool::osdmap_mapping::vector<pg_t>& get_osd_acting_pgs(unsigned osd) {
- assert(osd < acting_rmap.size());
+ ceph_assert(osd < acting_rmap.size());
return acting_rmap[osd];
}
}
bool OpRequest::check_rmw(int flag) const {
- assert(rmw_flags != 0);
+ ceph_assert(rmw_flags != 0);
return rmw_flags & flag;
}
bool OpRequest::may_read() const {
{
Mutex::Locker l(_ref_id_lock);
auto tag_counts_entry = _tag_counts.find(tag);
- assert(tag_counts_entry != _tag_counts.end());
+ ceph_assert(tag_counts_entry != _tag_counts.end());
--tag_counts_entry->second;
if (tag_counts_entry->second == 0) {
_tag_counts.erase(tag_counts_entry);
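// Editor's note: _tag_counts is a per-tag reference counter whose entries
// exist only while the count is non-zero. The ceph_assert flags a decrement
// for a tag that was never (or is no longer) registered, and the erase
// keeps the map from accumulating dead zero-count entries.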
stringstream ss;
bt.print(ss);
dout(20) << __func__ << ": " << info.pgid << " got id " << id << " (new) ref==" << ref << dendl;
- assert(!_live_ids.count(id));
+ ceph_assert(!_live_ids.count(id));
_live_ids.insert(make_pair(id, ss.str()));
return id;
}
dout(20) << __func__ << ": " << info.pgid << " put id " << id << " (current) ref==" << ref << dendl;
{
Mutex::Locker l(_ref_id_lock);
- assert(_live_ids.count(id));
+ ceph_assert(_live_ids.count(id));
_live_ids.erase(id);
}
if (--ref == 0)
<< " pool.cached_removed_snaps " << cached_removed_snaps
<< dendl;
}
- assert(actual_removed_snaps == cached_removed_snaps);
+ ceph_assert(actual_removed_snaps == cached_removed_snaps);
}
}
if (info.is_pool_snaps_mode() && updated) {
{
_lock.Lock(no_lockdep);
// if we have unrecorded dirty state with the lock dropped, there is a bug
- assert(!dirty_info);
- assert(!dirty_big_info);
+ ceph_assert(!dirty_info);
+ ceph_assert(!dirty_big_info);
dout(30) << "lock" << dendl;
}
{
dout(10) << "proc_master_log for osd." << from << ": "
<< olog << " " << omissing << dendl;
- assert(!is_peered() && is_primary());
+ ceph_assert(!is_peered() && is_primary());
// merge log into our own log to build master log. no need to
// make any adjustments to their missing map; we are taking their
dirty_info = true;
}
update_history(oinfo.history);
- assert(cct->_conf->osd_find_best_info_ignore_history_les ||
+ ceph_assert(cct->_conf->osd_find_best_info_ignore_history_les ||
info.last_epoch_started >= info.history.last_epoch_started);
peer_missing[from].claim(omissing);
}
dout(10) << " got osd." << from << " " << oinfo << dendl;
- assert(is_primary());
+ ceph_assert(is_primary());
peer_info[from] = oinfo;
might_have_unfound.insert(from);
ObjectStore::Transaction *t, const hobject_t &soid, const set<snapid_t> &snaps)
{
OSDriver::OSTransaction _t(osdriver.get_transaction(t));
- assert(soid.snap < CEPH_MAXSNAP);
+ ceph_assert(soid.snap < CEPH_MAXSNAP);
int r = snap_mapper.remove_oid(
soid,
&_t);
/******* PG ***********/
bool PG::needs_recovery() const
{
- assert(is_primary());
+ ceph_assert(is_primary());
auto &missing = pg_log.get_missing();
return true;
}
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
set<pg_shard_t>::const_iterator end = acting_recovery_backfill.end();
set<pg_shard_t>::const_iterator a = acting_recovery_backfill.begin();
for (; a != end; ++a) {
bool PG::needs_backfill() const
{
- assert(is_primary());
+ ceph_assert(is_primary());
// We can assume that the only OSDs that may need backfill
// are those on the backfill_targets vector.
derr << info.pgid << " required past_interval bounds are"
<< " not empty [" << rpib << ") but past_intervals "
<< past_intervals << " is empty" << dendl;
- assert(!past_intervals.empty());
+ ceph_assert(!past_intervals.empty());
}
auto apib = past_intervals.get_bounds();
derr << info.pgid << " past_intervals [" << apib
<< ") start interval does not contain the required"
<< " bound [" << rpib << ") start" << dendl;
- assert(0 == "past_interval start interval mismatch");
+ ceph_assert(0 == "past_interval start interval mismatch");
}
if (apib.second != rpib.second) {
osd->clog->error() << info.pgid << " past_interval bound [" << apib
derr << info.pgid << " past_interval bound [" << apib
<< ") end does not match required [" << rpib
<< ") end" << dendl;
- assert(0 == "past_interval end mismatch");
+ ceph_assert(0 == "past_interval end mismatch");
}
}
}
*/
bool PG::all_unfound_are_queried_or_lost(const OSDMapRef osdmap) const
{
- assert(is_primary());
+ ceph_assert(is_primary());
set<pg_shard_t>::const_iterator peer = might_have_unfound.begin();
set<pg_shard_t>::const_iterator mend = might_have_unfound.end();
for (map<pg_shard_t,pg_info_t>::iterator it = peer_info.begin();
it != peer_info.end();
++it) {
- assert(info.history.last_epoch_started >= it->second.history.last_epoch_started);
+ ceph_assert(info.history.last_epoch_started >= it->second.history.last_epoch_started);
}
}
bool restrict_to_up_acting,
bool *history_les_bound) const
{
- assert(history_les_bound);
+ ceph_assert(history_les_bound);
/* See doc/dev/osd_internals/last_epoch_started.rst before attempting
* to make changes to this process. Also, make sure to update it
* when you find bugs! */
for (set<pg_shard_t>::iterator j = all_info_by_shard[shard_id_t(i)].begin();
j != all_info_by_shard[shard_id_t(i)].end();
++j) {
- assert(j->shard == i);
+ ceph_assert(j->shard == i);
if (!all_info.find(*j)->second.is_incomplete() &&
all_info.find(*j)->second.last_update >=
auth_log_shard->second.log_tail) {
auth_log_shard->second.log_tail) {
ss << "up_primary: " << up_primary << ") selected as primary" << std::endl;
} else {
- assert(!auth_log_shard->second.is_incomplete());
+ ceph_assert(!auth_log_shard->second.is_incomplete());
ss << "up[0] needs backfill, osd." << auth_log_shard_id
<< " selected as primary instead" << std::endl;
primary = auth_log_shard;
);
for (auto &p: candidate_by_last_update) {
- assert(usable < size);
+ ceph_assert(usable < size);
want->push_back(p.second);
pg_shard_t s = pg_shard_t(p.second, shard_id_t::NO_SHARD);
acting_backfill->insert(s);
);
for (auto &p: candidate_by_last_update) {
- assert(usable < size);
+ ceph_assert(usable < size);
want->push_back(p.second);
pg_shard_t s = pg_shard_t(p.second, shard_id_t::NO_SHARD);
acting_backfill->insert(s);
osd->queue_want_pg_temp(info.pgid.pgid, empty);
} else {
dout(10) << __func__ << " failed" << dendl;
- assert(want_acting.empty());
+ ceph_assert(want_acting.empty());
}
return false;
}
- assert(!auth_log_shard->second.is_incomplete());
+ ceph_assert(!auth_log_shard->second.is_incomplete());
auth_log_shard_id = auth_log_shard->first;
set<pg_shard_t> want_backfill, want_acting_backfill;
if (want_acting == up) {
// There can't be any pending backfill if
// want is the same as crush map up OSDs.
- assert(want_backfill.empty());
+ ceph_assert(want_backfill.empty());
vector<int> empty;
osd->queue_want_pg_temp(info.pgid.pgid, empty);
} else
want_acting.clear();
acting_recovery_backfill = want_acting_backfill;
dout(10) << "acting_recovery_backfill is " << acting_recovery_backfill << dendl;
- assert(backfill_targets.empty() || backfill_targets == want_backfill);
+ ceph_assert(backfill_targets.empty() || backfill_targets == want_backfill);
if (backfill_targets.empty()) {
// Caller is GetInfo
backfill_targets = want_backfill;
}
// Adding !needs_recovery() to let the async_recovery_targets reset after recovery is complete
- assert(async_recovery_targets.empty() || async_recovery_targets == want_async_recovery || !needs_recovery());
+ ceph_assert(async_recovery_targets.empty() || async_recovery_targets == want_async_recovery || !needs_recovery());
if (async_recovery_targets.empty() || !needs_recovery()) {
async_recovery_targets = want_async_recovery;
}
for (set<pg_shard_t>::iterator i = want_backfill.begin();
i != want_backfill.end();
++i) {
- assert(stray_set.find(*i) == stray_set.end());
+ ceph_assert(stray_set.find(*i) == stray_set.end());
}
dout(10) << "choose_acting want=" << want << " backfill_targets="
<< want_backfill << " async_recovery_targets="
*/
void PG::build_might_have_unfound()
{
- assert(might_have_unfound.empty());
- assert(is_primary());
+ ceph_assert(might_have_unfound.empty());
+ ceph_assert(is_primary());
dout(10) << __func__ << dendl;
PastIntervals> > > *activator_map,
RecoveryCtx *ctx)
{
- assert(!is_peered());
- assert(scrubber.callbacks.empty());
- assert(callbacks_for_degraded_object.empty());
+ ceph_assert(!is_peered());
+ ceph_assert(scrubber.callbacks.empty());
+ ceph_assert(callbacks_for_degraded_object.empty());
// twiddle pg state
state_clear(PG_STATE_DOWN);
if (is_primary()) {
// only update primary last_epoch_started if we will go active
if (acting.size() >= pool.info.min_size) {
- assert(cct->_conf->osd_find_best_info_ignore_history_les ||
+ ceph_assert(cct->_conf->osd_find_best_info_ignore_history_les ||
info.last_epoch_started <= activation_epoch);
info.last_epoch_started = activation_epoch;
info.last_interval_started = info.history.same_interval_since;
// if primary..
if (is_primary()) {
- assert(ctx);
+ ceph_assert(ctx);
// start up replicas
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
if (*i == pg_whoami) continue;
pg_shard_t peer = *i;
- assert(peer_info.count(peer));
+ ceph_assert(peer_info.count(peer));
pg_info_t& pi = peer_info[peer];
dout(10) << "activate peer osd." << peer << " " << pi << dendl;
MOSDPGLog *m = 0;
- assert(peer_missing.count(peer));
+ ceph_assert(peer_missing.count(peer));
pg_missing_t& pm = peer_missing[peer];
bool needs_past_intervals = pi.dne();
pm.clear();
} else {
// catch up
- assert(pg_log.get_tail() <= pi.last_update);
+ ceph_assert(pg_log.get_tail() <= pi.last_update);
m = new MOSDPGLog(
i->shard, pg_whoami.shard,
get_osdmap()->get_epoch(), info,
complete_shards.insert(*i);
} else {
auto peer_missing_entry = peer_missing.find(*i);
- assert(peer_missing_entry != peer_missing.end());
+ ceph_assert(peer_missing_entry != peer_missing.end());
missing_loc.add_active_missing(peer_missing_entry->second);
if (!peer_missing_entry->second.have_missing() &&
peer_info[*i].last_backfill.is_max())
++i) {
if (*i == pg_whoami) continue;
dout(10) << __func__ << ": adding " << *i << " as a source" << dendl;
- assert(peer_missing.count(*i));
- assert(peer_info.count(*i));
+ ceph_assert(peer_missing.count(*i));
+ ceph_assert(peer_info.count(*i));
missing_loc.add_source_info(
*i,
peer_info[*i],
++i) {
if (is_acting_recovery_backfill(i->first))
continue;
- assert(peer_info.count(i->first));
+ ceph_assert(peer_info.count(i->first));
search_for_missing(
peer_info[i->first],
i->second,
dout(10) << "_activate_committed " << epoch
<< ", that was an old interval" << dendl;
} else if (is_primary()) {
- assert(!peer_activated.count(pg_whoami));
+ ceph_assert(!peer_activated.count(pg_whoami));
peer_activated.insert(pg_whoami);
dout(10) << "_activate_committed " << epoch
<< " peer_activated now " << peer_activated
<< " last_interval_started " << info.history.last_interval_started
<< " last_epoch_started " << info.history.last_epoch_started
<< " same_interval_since " << info.history.same_interval_since << dendl;
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
if (peer_activated.size() == acting_recovery_backfill.size())
all_activated_and_committed();
} else {
dout(10) << __func__ << " flushes in progress, moving "
<< waiting_for_peered.size() << " items to waiting_for_flush"
<< dendl;
- assert(waiting_for_flush.empty());
+ ceph_assert(waiting_for_flush.empty());
waiting_for_flush.swap(waiting_for_peered);
}
}
- assert(!dirty_info);
+ ceph_assert(!dirty_info);
unlock();
}
void PG::all_activated_and_committed()
{
dout(10) << "all_activated_and_committed" << dendl;
- assert(is_primary());
- assert(peer_activated.size() == acting_recovery_backfill.size());
- assert(!acting_recovery_backfill.empty());
- assert(blocked_by.empty());
+ ceph_assert(is_primary());
+ ceph_assert(peer_activated.size() == acting_recovery_backfill.size());
+ ceph_assert(!acting_recovery_backfill.empty());
+ ceph_assert(blocked_by.empty());
// Degraded?
_update_calc_stats();
bool PG::requeue_scrub(bool high_priority)
{
- assert(is_locked());
+ ceph_assert(is_locked());
if (scrub_queued) {
dout(10) << __func__ << ": already queued" << dendl;
return false;
{
if (!is_primary() || !is_peered()) {
dout(10) << "queue_recovery -- not primary or not peered " << dendl;
- assert(!recovery_queued);
+ ceph_assert(!recovery_queued);
} else if (recovery_queued) {
dout(10) << "queue_recovery -- already queued" << dendl;
} else {
bool PG::queue_scrub()
{
- assert(is_locked());
+ ceph_assert(is_locked());
if (is_scrubbing()) {
return false;
}
} else if (is_undersized()) {
// undersized: OSD_BACKFILL_DEGRADED_PRIORITY_BASE + num missing replicas
- assert(pool.info.size > actingset.size());
+ ceph_assert(pool.info.size > actingset.size());
ret = OSD_BACKFILL_DEGRADED_PRIORITY_BASE + (pool.info.size - actingset.size());
} else if (is_degraded()) {
Context *PG::finish_recovery()
{
dout(10) << "finish_recovery" << dendl;
- assert(info.last_complete == info.last_update);
+ ceph_assert(info.last_complete == info.last_update);
clear_recovery_state();
<< " (" << recovering_oids << ")"
#endif
<< dendl;
- assert(recovery_ops_active >= 0);
+ ceph_assert(recovery_ops_active >= 0);
recovery_ops_active++;
#ifdef DEBUG_RECOVERY_OIDS
recovering_oids.insert(soid);
<< " (" << recovering_oids << ")"
#endif
<< dendl;
- assert(recovery_ops_active > 0);
+ ceph_assert(recovery_ops_active > 0);
recovery_ops_active--;
#ifdef DEBUG_RECOVERY_OIDS
- assert(recovering_oids.count(soid));
+ ceph_assert(recovering_oids.count(soid));
recovering_oids.erase(recovering_oids.find(soid));
#endif
osd->finish_recovery_op(this, soid, dequeue);
Mutex::Locker l(b->lock);
dout(10) << __func__ << " " << *b << dendl;
if (b->session) {
- assert(b->pg == this);
+ ceph_assert(b->pg == this);
ConnectionRef con = b->session->con;
if (con) { // OSD::ms_handle_reset clears s->con without a lock
con->send_message(
Mutex::Locker l(b->lock);
dout(10) << __func__ << " " << *b << dendl;
if (b->session) {
- assert(b->pg == this);
+ ceph_assert(b->pg == this);
if (b->is_new()) {
b->state = Backoff::STATE_DELETING;
} else {
{
dout(10) << __func__ << " " << *b << dendl;
Mutex::Locker l(backoff_lock);
- assert(b->lock.is_locked_by_me());
- assert(b->pg == this);
+ ceph_assert(b->lock.is_locked_by_me());
+ ceph_assert(b->pg == this);
auto p = backoffs.find(b->begin);
// may race with release_backoffs()
if (p != backoffs.end()) {
for (set<pg_shard_t>::iterator p = stray_set.begin();
p != stray_set.end();
++p) {
- assert(!is_acting_recovery_backfill(*p));
+ ceph_assert(!is_acting_recovery_backfill(*p));
if (get_osdmap()->is_up(p->osd)) {
dout(10) << "sending PGRemove to osd." << *p << dendl;
vector<spg_t> to_remove;
void PG::update_heartbeat_peers()
{
- assert(is_locked());
+ ceph_assert(is_locked());
if (!is_primary())
return;
<< upset << " acting_recovery_backfill " << acting_recovery_backfill << dendl;
dout(20) << __func__ << " acting " << acting << " up " << up << dendl;
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
bool estimate = false;
// Primary first
missing = pg_log.get_missing().num_missing();
- assert(acting_recovery_backfill.count(pg_whoami));
+ ceph_assert(acting_recovery_backfill.count(pg_whoami));
if (upset.count(pg_whoami)) {
missing_target_objects.insert(make_pair(missing, pg_whoami));
} else {
// Copies on other osds but limited to the possible degraded
int more_osds = std::min(missing_shards, ml.first.other);
int omisplaced = ml.second * more_osds;
- assert(omisplaced <= odegraded);
+ ceph_assert(omisplaced <= odegraded);
odegraded -= omisplaced;
misplaced += omisplaced;
// Handle undersized case
if (pool.info.is_replicated()) {
// Add degraded for missing targets (num_objects missing)
- assert(target >= upset.size());
+ ceph_assert(target >= upset.size());
unsigned needed = target - upset.size();
degraded += num_objects * needed;
} else {
// Handle undersized case
if (pool.info.is_replicated()) {
// Add to missing_target_objects
- assert(target >= missing_target_objects.size());
+ ceph_assert(target >= missing_target_objects.size());
unsigned needed = target - missing_target_objects.size();
if (needed)
missing_target_objects.insert(make_pair(num_objects * needed, pg_shard_t(pg_shard_t::NO_OSD)));
{
dout(0) << __func__ << " " << info_struct_v << " -> " << latest_struct_v
<< dendl;
- assert(info_struct_v <= 10);
+ ceph_assert(info_struct_v <= 10);
ObjectStore::Transaction t;
// <do upgrade steps here>
// finished upgrade!
- assert(info_struct_v == 10);
+ ceph_assert(info_struct_v == 10);
// update infover_key
if (info_struct_v < latest_struct_v) {
<< cpp_strerror(r) << dendl;
ceph_abort();
}
- assert(r == 0);
+ ceph_assert(r == 0);
C_SaferCond waiter;
if (!ch->flush_commit(&waiter)) {
pg_fast_info_t fast;
fast.populate_from(info);
bool did = fast.try_apply_to(&last_written_info);
- assert(did); // we verified last_update increased above
+ ceph_assert(did); // we verified last_update increased above
if (info == last_written_info) {
encode(fast, (*km)[fastinfo_key]);
if (logger)
dirty_big_info, need_update_epoch,
cct->_conf->osd_fast_info,
osd->logger);
- assert(ret == 0);
+ ceph_assert(ret == 0);
if (need_update_epoch)
last_epoch = get_osdmap()->get_epoch();
last_persisted_osdmap = last_epoch;
keys.insert("_remove");
map<string,bufferlist> values;
auto ch = store->open_collection(coll);
- assert(ch);
+ ceph_assert(ch);
if (store->omap_get_values(ch, pgmeta_oid, keys, &values) == 0 &&
values.size() == 1)
return true;
epoch_t cur_epoch = 0;
// validate collection name
- assert(coll.is_pg());
+ ceph_assert(coll.is_pg());
// try for v8
set<string> keys;
keys.insert(epoch_key);
map<string,bufferlist> values;
auto ch = store->open_collection(coll);
- assert(ch);
+ ceph_assert(ch);
int r = store->omap_get_values(ch, pgmeta_oid, keys, &values);
if (r == 0) {
- assert(values.size() == 2);
+ ceph_assert(values.size() == 2);
// sanity check version
auto bp = values[infover_key].cbegin();
__u8 struct_v = 0;
decode(struct_v, bp);
- assert(struct_v >= 8);
+ ceph_assert(struct_v >= 8);
// get epoch
bp = values[epoch_key].begin();
info.last_complete = e.version;
// raise last_update.
- assert(e.version > info.last_update);
+ ceph_assert(e.version > info.last_update);
info.last_update = e.version;
// raise user_version, if it increased (it may not have been bumped
ghobject_t pgmeta_oid(pgid.make_pgmeta_oid());
map<string,bufferlist> values;
auto ch = store->open_collection(coll);
- assert(ch);
+ ceph_assert(ch);
int r = store->omap_get_values(ch, pgmeta_oid, keys, &values);
- assert(r == 0);
- assert(values.size() == 3 ||
+ ceph_assert(r == 0);
+ ceph_assert(values.size() == 3 ||
values.size() == 4);
auto p = values[infover_key].cbegin();
decode(struct_v, p);
- assert(struct_v >= 10);
+ ceph_assert(struct_v >= 10);
p = values[info_key].begin();
decode(info, p);
{
int r = read_info(store, pg_id, coll, info, past_intervals,
info_struct_v);
- assert(r >= 0);
+ ceph_assert(r >= 0);
if (info_struct_v < compat_struct_v) {
derr << "PG needs upgrade, but on-disk data is too old; upgrade to"
<< " an older version first." << dendl;
- assert(0 == "PG too old to upgrade");
+ ceph_assert(0 == "PG too old to upgrade");
}
last_written_info = info;
int r = snap_mapper.remove_oid(
i->soid,
&_t);
- assert(r == 0);
+ ceph_assert(r == 0);
} else if (i->is_update()) {
- assert(i->snaps.length() > 0);
+ ceph_assert(i->snaps.length() > 0);
vector<snapid_t> snaps;
bufferlist snapbl = i->snaps;
auto p = snapbl.cbegin();
_snaps,
0,
&_t);
- assert(r == 0);
+ ceph_assert(r == 0);
} else {
- assert(i->is_clean());
+ ceph_assert(i->is_clean());
}
}
}
bool PG::sched_scrub()
{
bool nodeep_scrub = false;
- assert(is_locked());
+ ceph_assert(is_locked());
if (!(is_primary() && is_active() && is_clean() && !is_scrubbing())) {
return false;
}
}
if (!scrubber.must_scrub) {
- assert(!scrubber.must_deep_scrub);
+ ceph_assert(!scrubber.must_deep_scrub);
//NOSCRUB so skip regular scrubs
if ((osd->osd->get_osdmap()->test_flag(CEPH_OSDMAP_NOSCRUB) ||
bool ret = true;
if (!scrubber.reserved) {
- assert(scrubber.reserved_peers.empty());
+ ceph_assert(scrubber.reserved_peers.empty());
if ((cct->_conf->osd_scrub_during_recovery || !osd->is_recovery_active()) &&
osd->inc_scrubs_pending()) {
dout(20) << __func__ << ": reserved locally, reserving replicas" << dendl;
double scrub_min_interval = 0, scrub_max_interval = 0;
pool.info.opts.get(pool_opts_t::SCRUB_MIN_INTERVAL, &scrub_min_interval);
pool.info.opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &scrub_max_interval);
- assert(scrubber.scrub_reg_stamp == utime_t());
+ ceph_assert(scrubber.scrub_reg_stamp == utime_t());
scrubber.scrub_reg_stamp = osd->reg_pg_scrub(info.pgid,
reg_stamp,
scrub_min_interval,
dout(10) << __func__ << " waiting_on_whom was " << scrubber.waiting_on_whom
<< dendl;
- assert(scrubber.waiting_on_whom.count(m->from));
+ ceph_assert(scrubber.waiting_on_whom.count(m->from));
scrubber.waiting_on_whom.erase(m->from);
if (m->preempted) {
dout(10) << __func__ << " replica was preempted, setting flag" << dendl;
bool deep,
bool allow_preemption)
{
- assert(replica != pg_whoami);
+ ceph_assert(replica != pg_whoami);
dout(10) << "scrub requesting scrubmap from osd." << replica
<< " deep " << (int)deep << dendl;
MOSDRepScrub *repscrubop = new MOSDRepScrub(
void PG::scrub_reserve_replicas()
{
- assert(backfill_targets.empty());
+ ceph_assert(backfill_targets.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
void PG::scrub_unreserve_replicas()
{
- assert(backfill_targets.empty());
+ ceph_assert(backfill_targets.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
dout(20) << __func__ << " " << hoid << dendl;
- assert(!hoid.is_snapdir());
+ ceph_assert(!hoid.is_snapdir());
if (hoid.is_head()) {
// parse the SnapSet
bufferlist bl;
// finish
dout(20) << __func__ << " finishing" << dendl;
- assert(pos.done());
+ ceph_assert(pos.done());
_repair_oinfo_oid(map);
if (!is_primary()) {
ScrubMap for_meta_scrub;
};
store->cleanup(t);
t->register_on_complete(new OnComplete(std::move(store)));
- assert(!store);
+ ceph_assert(!store);
}
void PG::repair_object(
decode(oi, bliter);
} catch (...) {
dout(0) << __func__ << ": Need version of replica, bad object_info_t: " << soid << dendl;
- assert(0);
+ ceph_assert(0);
}
if (bad_peer != primary) {
peer_missing[bad_peer].add(soid, oi.version, eversion_t(), false);
} else {
// We should only be scrubbing if the PG is clean.
- assert(waiting_for_unreadable_object.empty());
+ ceph_assert(waiting_for_unreadable_object.empty());
pg_log.missing_add(soid, oi.version, eversion_t());
ThreadPool::TPHandle &handle)
{
const MOSDRepScrub *msg = static_cast<const MOSDRepScrub *>(op->get_req());
- assert(!scrubber.active_rep_scrub);
+ ceph_assert(!scrubber.active_rep_scrub);
dout(7) << "replica_scrub" << dendl;
if (msg->map_epoch < info.history.same_interval_since) {
return;
}
- assert(msg->chunky);
+ ceph_assert(msg->chunky);
if (active_pushes > 0) {
dout(10) << "waiting for active pushes to finish" << dendl;
scrubber.active_rep_scrub = op;
if (pg_has_reset_since(queued)) {
return;
}
- assert(scrub_queued);
+ ceph_assert(scrub_queued);
scrub_queued = false;
scrubber.needs_sleep = true;
}
if (!scrubber.active) {
- assert(backfill_targets.empty());
+ ceph_assert(backfill_targets.empty());
scrubber.deep = state_test(PG_STATE_DEEP_SCRUB);
switch (scrubber.state) {
case PG::Scrubber::INACTIVE:
dout(10) << "scrub start" << dendl;
- assert(is_primary());
+ ceph_assert(is_primary());
publish_stats_to_osd();
scrubber.epoch_start = info.history.same_interval_since;
max,
&objects,
&candidate_end);
- assert(ret >= 0);
+ ceph_assert(ret >= 0);
if (!objects.empty()) {
hobject_t back = objects.back();
candidate_end = back;
objects.pop_back();
if (objects.empty()) {
- assert(0 ==
+ ceph_assert(0 ==
"Somehow we got more than 2 objects which"
"have the same head but are not clones");
}
back = objects.back();
}
if (candidate_end.is_head()) {
- assert(candidate_end != back.get_head());
+ ceph_assert(candidate_end != back.get_head());
candidate_end = candidate_end.get_object_boundary();
}
} else {
- assert(candidate_end.is_max());
+ ceph_assert(candidate_end.is_max());
}
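// Editor's note (hedged): a scrub chunk must not split a head object from
// its clones, or the two halves would be compared against different points
// in time. When the last listed object is a head, the chunk end is pulled
// back to its object boundary so the head and all of its clones fall into
// the same chunk on the next pass.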
if (!_range_available_for_scrub(scrubber.start, candidate_end)) {
break;
case PG::Scrubber::BUILD_MAP:
- assert(last_update_applied >= scrubber.subset_last_update);
+ ceph_assert(last_update_applied >= scrubber.subset_last_update);
// build my own scrub map
if (scrub_preempted) {
}
dout(10) << __func__ << " waiting_on_whom was "
<< scrubber.waiting_on_whom << dendl;
- assert(scrubber.waiting_on_whom.count(pg_whoami));
+ ceph_assert(scrubber.waiting_on_whom.count(pg_whoami));
scrubber.waiting_on_whom.erase(pg_whoami);
scrubber.state = PG::Scrubber::WAIT_REPLICAS;
break;
case PG::Scrubber::COMPARE_MAPS:
- assert(last_update_applied >= scrubber.subset_last_update);
- assert(scrubber.waiting_on_whom.empty());
+ ceph_assert(last_update_applied >= scrubber.subset_last_update);
+ ceph_assert(scrubber.waiting_on_whom.empty());
scrub_compare_maps();
scrubber.start = scrubber.end;
void PG::scrub_clear_state()
{
- assert(is_locked());
+ ceph_assert(is_locked());
state_clear(PG_STATE_SCRUBBING);
state_clear(PG_STATE_REPAIR);
state_clear(PG_STATE_DEEP_SCRUB);
// when every one has been fixed.
if (repair) {
if (scrubber.fixed == scrubber.shallow_errors + scrubber.deep_errors) {
- assert(deep_scrub);
+ ceph_assert(deep_scrub);
scrubber.shallow_errors = scrubber.deep_errors = 0;
} else {
// Deep scrub in order to get corrected error counts
dirty_info = true;
write_if_dirty(t);
int tr = osd->store->queue_transaction(ch, std::move(t), NULL);
- assert(tr == 0);
+ ceph_assert(tr == 0);
}
dout(10) << "share_pg_info" << dendl;
// share new pg_info_t with replicas
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
ObjectStore::Transaction &t, boost::optional<eversion_t> trim_to,
boost::optional<eversion_t> roll_forward_to)
{
- assert(!entries.empty());
- assert(entries.begin()->version > info.last_update);
+ ceph_assert(!entries.empty());
+ ceph_assert(entries.begin()->version > info.last_update);
PGLogEntryHandler rollbacker{this, &t};
bool invalidate_stats =
boost::optional<eversion_t> roll_forward_to)
{
dout(10) << __func__ << " " << entries << dendl;
- assert(is_primary());
+ ceph_assert(is_primary());
bool rebuild_missing = append_log_entries_update_missing(entries, t, trim_to, roll_forward_to);
for (set<pg_shard_t>::const_iterator i = acting_recovery_backfill.begin();
++i) {
pg_shard_t peer(*i);
if (peer == pg_whoami) continue;
- assert(peer_missing.count(peer));
- assert(peer_info.count(peer));
+ ceph_assert(peer_missing.count(peer));
+ ceph_assert(peer_info.count(peer));
pg_missing_t& pmissing(peer_missing[peer]);
dout(20) << __func__ << " peer_missing for " << peer << " = " << pmissing << dendl;
pg_info_t& pinfo(peer_info[peer]);
pg_shard_t from, const pg_query_t &query,
pair<pg_shard_t, pg_info_t> ¬ify_info)
{
- assert(from == primary);
- assert(query.type == pg_query_t::INFO);
+ ceph_assert(from == primary);
+ ceph_assert(query.type == pg_query_t::INFO);
// info
dout(10) << "sending info" << dendl;
pg_shard_t from, const pg_query_t &query, epoch_t query_epoch)
{
dout(10) << "log request from " << from << dendl;
- assert(from == primary);
- assert(query.type != pg_query_t::INFO);
+ ceph_assert(from == primary);
+ ceph_assert(query.type != pg_query_t::INFO);
ConnectionRef con = osd->get_con_osd_cluster(
from.osd, get_osdmap()->get_epoch());
if (!con) return;
info.history.same_interval_since = osdmap->get_epoch();
} else {
std::stringstream debug;
- assert(info.history.same_interval_since != 0);
+ ceph_assert(info.history.same_interval_since != 0);
boost::scoped_ptr<IsPGRecoverablePredicate> recoverable(
get_is_recoverable_predicate());
bool new_interval = PastIntervals::check_new_interval(
projected_last_update = eversion_t();
- assert(!deleting);
+ ceph_assert(!deleting);
// should we tell the primary we are here?
send_notify = !is_primary();
void PG::proc_primary_info(ObjectStore::Transaction &t, const pg_info_t &oinfo)
{
- assert(!is_primary());
+ ceph_assert(!is_primary());
update_history(oinfo.history);
if (!info.stats.stats_invalid && info.stats.stats.sum.num_scrub_errors) {
bool PG::can_discard_replica_op(OpRequestRef& op)
{
const T *m = static_cast<const T *>(op->get_req());
- assert(m->get_type() == MSGTYPE);
+ ceph_assert(m->get_type() == MSGTYPE);
int from = m->get_source().num();
bool PG::can_discard_scan(OpRequestRef op)
{
const MOSDPGScan *m = static_cast<const MOSDPGScan *>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_SCAN);
+ ceph_assert(m->get_type() == MSG_OSD_PG_SCAN);
if (old_peering_msg(m->map_epoch, m->query_epoch)) {
dout(10) << " got old scan, ignoring" << dendl;
bool PG::can_discard_backfill(OpRequestRef op)
{
const MOSDPGBackfill *m = static_cast<const MOSDPGBackfill *>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_BACKFILL);
+ ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL);
if (old_peering_msg(m->map_epoch, m->query_epoch)) {
dout(10) << " got old backfill, ignoring" << dendl;
void PG::do_peering_event(PGPeeringEventRef evt, RecoveryCtx *rctx)
{
dout(10) << __func__ << ": " << evt->get_desc() << dendl;
- assert(have_same_or_newer_map(evt->get_epoch_sent()));
+ ceph_assert(have_same_or_newer_map(evt->get_epoch_sent()));
if (old_peering_evt(evt)) {
dout(10) << "discard old " << evt->get_desc() << dendl;
} else {
vector<int>& newacting, int acting_primary,
RecoveryCtx *rctx)
{
- assert(lastmap->get_epoch() == osdmap_ref->get_epoch());
- assert(lastmap == osdmap_ref);
+ ceph_assert(lastmap->get_epoch() == osdmap_ref->get_epoch());
+ ceph_assert(lastmap == osdmap_ref);
dout(10) << "handle_advance_map "
<< newup << "/" << newacting
<< " -- " << up_primary << "/" << acting_primary
ceph_abort();
}
void complete(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
pg->lock();
if (!pg->pg_has_reset_since(epoch)) {
pg->osd->queue_for_pg_delete(pg->get_pgid(), epoch);
NamedState(context< RecoveryMachine >().pg, "Crashed")
{
context< RecoveryMachine >().log_enter(state_name);
- assert(0 == "we got a bad state machine event");
+ ceph_assert(0 == "we got a bad state machine event");
}
boost::statechart::result PG::RecoveryState::Initial::react(const MInfoRec& i)
{
PG *pg = context< RecoveryMachine >().pg;
- assert(!pg->is_primary());
+ ceph_assert(!pg->is_primary());
post_event(i);
return transit< Stray >();
}
boost::statechart::result PG::RecoveryState::Initial::react(const MLogRec& i)
{
PG *pg = context< RecoveryMachine >().pg;
- assert(!pg->is_primary());
+ ceph_assert(!pg->is_primary());
post_event(i);
return transit< Stray >();
}
{
context< RecoveryMachine >().log_enter(state_name);
PG *pg = context< RecoveryMachine >().pg;
- assert(pg->want_acting.empty());
+ ceph_assert(pg->want_acting.empty());
// set CREATING bit until we have peered for the first time.
if (pg->info.history.last_epoch_started == 0) {
context< RecoveryMachine >().log_enter(state_name);
PG *pg = context< RecoveryMachine >().pg;
- assert(!pg->is_peered());
- assert(!pg->is_peering());
- assert(pg->is_primary());
+ ceph_assert(!pg->is_peered());
+ ceph_assert(!pg->is_peering());
+ ceph_assert(pg->is_primary());
pg->state_set(PG_STATE_PEERING);
}
for (set<pg_shard_t>::iterator it = pg->backfill_targets.begin();
it != pg->backfill_targets.end();
++it) {
- assert(*it != pg->pg_whoami);
+ ceph_assert(*it != pg->pg_whoami);
ConnectionRef con = pg->osd->get_con_osd_cluster(
it->osd, pg->get_osdmap()->get_epoch());
if (con) {
if (backfill_osd_it != context< Active >().remote_shards_to_reserve_backfill.end()) {
//The primary never backfills itself
- assert(*backfill_osd_it != pg->pg_whoami);
+ ceph_assert(*backfill_osd_it != pg->pg_whoami);
ConnectionRef con = pg->osd->get_con_osd_cluster(
backfill_osd_it->osd, pg->get_osdmap()->get_epoch());
if (con) {
set<pg_shard_t>::const_iterator it, begin, end;
begin = context< Active >().remote_shards_to_reserve_backfill.begin();
end = context< Active >().remote_shards_to_reserve_backfill.end();
- assert(begin != end);
+ ceph_assert(begin != end);
for (it = begin; it != backfill_osd_it; ++it) {
//The primary never backfills itself
- assert(*it != pg->pg_whoami);
+ ceph_assert(*it != pg->pg_whoami);
ConnectionRef con = pg->osd->get_con_osd_cluster(
it->osd, pg->get_osdmap()->get_epoch());
if (con) {
PG *pg = context< RecoveryMachine >().pg;
if (remote_recovery_reservation_it != context< Active >().remote_shards_to_reserve_recovery.end()) {
- assert(*remote_recovery_reservation_it != pg->pg_whoami);
+ ceph_assert(*remote_recovery_reservation_it != pg->pg_whoami);
ConnectionRef con = pg->osd->get_con_osd_cluster(
remote_recovery_reservation_it->osd, pg->get_osdmap()->get_epoch());
if (con) {
pg->state_clear(PG_STATE_RECOVERY_WAIT);
pg->state_clear(PG_STATE_RECOVERY_TOOFULL);
pg->state_set(PG_STATE_RECOVERING);
- assert(!pg->state_test(PG_STATE_ACTIVATING));
+ ceph_assert(!pg->state_test(PG_STATE_ACTIVATING));
pg->publish_stats_to_osd();
pg->queue_recovery();
}
void PG::RecoveryState::Recovering::release_reservations(bool cancel)
{
PG *pg = context< RecoveryMachine >().pg;
- assert(cancel || !pg->pg_log.get_missing().have_missing());
+ ceph_assert(cancel || !pg->pg_log.get_missing().have_missing());
// release remote reservations
for (set<pg_shard_t>::const_iterator i =
PG *pg = context< RecoveryMachine >().pg;
- assert(!pg->needs_recovery());
+ ceph_assert(!pg->needs_recovery());
// if we finished backfill, all acting are active; recheck if
// DEGRADED | UNDERSIZED is appropriate.
- assert(!pg->acting_recovery_backfill.empty());
+ ceph_assert(!pg->acting_recovery_backfill.empty());
if (pg->get_osdmap()->get_pg_size(pg->info.pgid.pgid) <=
pg->acting_recovery_backfill.size()) {
pg->state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY);
bool history_les_bound = false;
if (pg->acting != pg->up && !pg->choose_acting(auth_log_shard,
true, &history_les_bound)) {
- assert(pg->want_acting.size());
+ ceph_assert(pg->want_acting.size());
} else if (!pg->async_recovery_targets.empty()) {
pg->choose_acting(auth_log_shard, true, &history_les_bound);
}
PG *pg = context< RecoveryMachine >().pg;
- assert(!pg->backfill_reserving);
- assert(!pg->backfill_reserved);
- assert(pg->is_primary());
+ ceph_assert(!pg->backfill_reserving);
+ ceph_assert(!pg->backfill_reserved);
+ ceph_assert(pg->is_primary());
ldout(pg->cct, 10) << "In Active, about to call activate" << dendl;
pg->start_flush(context< RecoveryMachine >().get_cur_transaction());
pg->activate(*context< RecoveryMachine >().get_cur_transaction(),
}
ldout(pg->cct,10) << __func__ << " new removed_snaps " << i->second
<< ", snap_trimq now " << pg->snap_trimq << dendl;
- assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps);
+ ceph_assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps);
pg->dirty_info = true;
pg->dirty_big_info = true;
}
}
ldout(pg->cct,10) << __func__ << " new purged_snaps " << j->second
<< ", now " << pg->info.purged_snaps << dendl;
- assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps);
+ ceph_assert(!bad || !pg->cct->_conf->osd_debug_verify_cached_snaps);
pg->dirty_info = true;
pg->dirty_big_info = true;
}
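// Descriptive note on the two asserts above: `bad` records a mismatch
// between the cached snap sets and the authoritative values, so the
// assert only trips when a mismatch is found while
// osd_debug_verify_cached_snaps is enabled; otherwise the cached copy
// is refreshed and persisted via dirty_info/dirty_big_info.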
int osd = pg->want_acting[i];
if (!advmap.osdmap->is_up(osd)) {
pg_shard_t osd_with_shard(osd, shard_id_t(i));
- assert(pg->is_acting(osd_with_shard) || pg->is_up(osd_with_shard));
+ ceph_assert(pg->is_acting(osd_with_shard) || pg->is_up(osd_with_shard));
}
}
{
PG *pg = context< RecoveryMachine >().pg;
ldout(pg->cct, 10) << "Active: handling ActMap" << dendl;
- assert(pg->is_primary());
+ ceph_assert(pg->is_primary());
if (pg->have_unfound()) {
// object may have become unfound
boost::statechart::result PG::RecoveryState::Active::react(const MNotifyRec& notevt)
{
PG *pg = context< RecoveryMachine >().pg;
- assert(pg->is_primary());
+ ceph_assert(pg->is_primary());
if (pg->peer_info.count(notevt.from)) {
ldout(pg->cct, 10) << "Active: got notify from " << notevt.from
<< ", already have info from that osd, ignoring"
boost::statechart::result PG::RecoveryState::Active::react(const MTrim& trim)
{
PG *pg = context< RecoveryMachine >().pg;
- assert(pg->is_primary());
+ ceph_assert(pg->is_primary());
// peer is informing us of their last_complete_ondisk
ldout(pg->cct,10) << " replica osd." << trim.from << " lcod " << trim.trim_to << dendl;
boost::statechart::result PG::RecoveryState::Active::react(const MInfoRec& infoevt)
{
PG *pg = context< RecoveryMachine >().pg;
- assert(pg->is_primary());
+ ceph_assert(pg->is_primary());
- assert(!pg->acting_recovery_backfill.empty());
+ ceph_assert(!pg->acting_recovery_backfill.empty());
// don't update history (yet) if we are active and primary; the replica
// may be telling us they have activated (and committed) but we can't
// share that until _everyone_ does the same.
<< pg->waiting_for_peered.size()
<< " items to waiting_for_flush"
<< dendl;
- assert(pg->waiting_for_flush.empty());
+ ceph_assert(pg->waiting_for_flush.empty());
pg->waiting_for_flush.swap(pg->waiting_for_peered);
}
ldout(pg->cct, 10) << "received log from " << logevt.from << dendl;
ObjectStore::Transaction* t = context<RecoveryMachine>().get_cur_transaction();
pg->merge_log(*t, logevt.msg->info, logevt.msg->log, logevt.from);
- assert(pg->pg_log.get_head() == pg->info.last_update);
+ ceph_assert(pg->pg_log.get_head() == pg->info.last_update);
return discard_event();
}
context< RecoveryMachine >().log_enter(state_name);
PG *pg = context< RecoveryMachine >().pg;
- assert(!pg->is_peered());
- assert(!pg->is_peering());
- assert(!pg->is_primary());
+ ceph_assert(!pg->is_peered());
+ ceph_assert(!pg->is_peering());
+ ceph_assert(!pg->is_primary());
if (!pg->get_osdmap()->have_pg_pool(pg->get_pgid().pool())) {
ldout(pg->cct,10) << __func__ << " pool is deleted" << dendl;
pg->merge_log(*t, msg->info, msg->log, logevt.from);
}
- assert(pg->pg_log.get_head() == pg->info.last_update);
+ ceph_assert(pg->pg_log.get_head() == pg->info.last_update);
post_event(Activate(logevt.msg->info.last_epoch_started));
return transit<ReplicaActive>();
pg->info.hit_set = infoevt.info.hit_set;
}
- assert(infoevt.info.last_update == pg->info.last_update);
- assert(pg->pg_log.get_head() == pg->info.last_update);
+ ceph_assert(infoevt.info.last_update == pg->info.last_update);
+ ceph_assert(pg->pg_log.get_head() == pg->info.last_update);
post_event(Activate(infoevt.info.last_epoch_started));
return transit<ReplicaActive>();
pg->check_past_interval_bounds();
PastIntervals::PriorSet &prior_set = context< Peering >().prior_set;
- assert(pg->blocked_by.empty());
+ ceph_assert(pg->blocked_by.empty());
prior_set = pg->build_prior();
// how much log to request?
eversion_t request_log_from = pg->info.last_update;
- assert(!pg->acting_recovery_backfill.empty());
+ ceph_assert(!pg->acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator p = pg->acting_recovery_backfill.begin();
p != pg->acting_recovery_backfill.end();
++p) {
request_log_from, pg->info.history,
pg->get_osdmap()->get_epoch()));
- assert(pg->blocked_by.empty());
+ ceph_assert(pg->blocked_by.empty());
pg->blocked_by.insert(auth_log_shard.osd);
pg->publish_stats_to_osd();
}
boost::statechart::result PG::RecoveryState::GetLog::react(const MLogRec& logevt)
{
PG *pg = context< RecoveryMachine >().pg;
- assert(!msg);
+ ceph_assert(!msg);
if (logevt.from != auth_log_shard) {
ldout(pg->cct, 10) << "GetLog: discarding log from "
<< "non-auth_log_shard osd." << logevt.from << dendl;
pg->state_set(PG_STATE_DOWN);
auto &prior_set = context< Peering >().prior_set;
- assert(pg->blocked_by.empty());
+ ceph_assert(pg->blocked_by.empty());
pg->blocked_by.insert(prior_set.down.begin(), prior_set.down.end());
pg->publish_stats_to_osd();
}
{
PG *pg = context< RecoveryMachine >().pg;
- assert(pg->is_primary());
+ ceph_assert(pg->is_primary());
epoch_t old_start = pg->info.history.last_epoch_started;
if (!pg->peer_info.count(infoevt.from) &&
pg->get_osdmap()->has_been_up_since(infoevt.from.osd, infoevt.notify.epoch_sent)) {
pg->state_set(PG_STATE_INCOMPLETE);
PastIntervals::PriorSet &prior_set = context< Peering >().prior_set;
- assert(pg->blocked_by.empty());
+ ceph_assert(pg->blocked_by.empty());
pg->blocked_by.insert(prior_set.down.begin(), prior_set.down.end());
pg->publish_stats_to_osd();
}
context< RecoveryMachine >().log_enter(state_name);
PG *pg = context< RecoveryMachine >().pg;
- assert(!pg->acting_recovery_backfill.empty());
+ ceph_assert(!pg->acting_recovery_backfill.empty());
eversion_t since;
for (set<pg_shard_t>::iterator i = pg->acting_recovery_backfill.begin();
i != pg->acting_recovery_backfill.end();
// We pull the log from the peer's last_epoch_started to ensure we
// get enough log to detect divergent updates.
since.epoch = pi.last_epoch_started;
- assert(pi.last_update >= pg->info.log_tail); // or else choose_acting() did a bad thing
+ ceph_assert(pi.last_update >= pg->info.log_tail); // or else choose_acting() did a bad thing
if (pi.log_tail <= since) {
ldout(pg->cct, 10) << " requesting log+missing since " << since << " from osd." << *i << dendl;
context< RecoveryMachine >().send_query(
#define dout_prefix ((debug_pg ? debug_pg->gen_prefix(*_dout) : *_dout) << " PriorSet: ")
void PG::RecoveryState::start_handle(RecoveryCtx *new_ctx) {
- assert(!rctx);
- assert(!orig_ctx);
+ ceph_assert(!rctx);
+ ceph_assert(!orig_ctx);
orig_ctx = new_ctx;
if (new_ctx) {
if (messages_pending_flush) {
}
void PG::RecoveryState::begin_block_outgoing() {
- assert(!messages_pending_flush);
- assert(orig_ctx);
- assert(rctx);
+ ceph_assert(!messages_pending_flush);
+ ceph_assert(orig_ctx);
+ ceph_assert(rctx);
messages_pending_flush = BufferedRecoveryMessages();
rctx = RecoveryCtx(*messages_pending_flush, *orig_ctx);
}
void PG::RecoveryState::clear_blocked_outgoing() {
- assert(orig_ctx);
- assert(rctx);
+ ceph_assert(orig_ctx);
+ ceph_assert(rctx);
messages_pending_flush = boost::optional<BufferedRecoveryMessages>();
}
void PG::RecoveryState::end_block_outgoing() {
- assert(messages_pending_flush);
- assert(orig_ctx);
- assert(rctx);
+ ceph_assert(messages_pending_flush);
+ ceph_assert(orig_ctx);
+ ceph_assert(rctx);
rctx = RecoveryCtx(*orig_ctx);
rctx->accept_buffered_messages(*messages_pending_flush);
}
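// Summary of the buffering protocol above (descriptive only):
// begin_block_outgoing() swaps a fresh BufferedRecoveryMessages in
// front of the caller's RecoveryCtx, clear_blocked_outgoing() drops
// any buffered messages, and end_block_outgoing() rebuilds rctx from
// orig_ctx and replays the buffered messages into it.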
OSDMapRef get_osdmap() const {
- assert(is_locked());
- assert(osdmap_ref);
+ ceph_assert(is_locked());
+ ceph_assert(osdmap_ref);
return osdmap_ref;
}
epoch_t get_osdmap_epoch() const {
void lock(bool no_lockdep = false) const;
void unlock() const {
//generic_dout(0) << this << " " << info.pgid << " unlock" << dendl;
- assert(!dirty_info);
- assert(!dirty_big_info);
+ ceph_assert(!dirty_info);
+ ceph_assert(!dirty_big_info);
_lock.Unlock();
}
bool is_locked() const {
return pg_whoami == primary;
}
bool pg_has_reset_since(epoch_t e) {
- assert(is_locked());
+ ceph_assert(is_locked());
return deleted || e < get_last_peering_reset();
}
void requeue_map_waiters();
void update_osdmap_ref(OSDMapRef newmap) {
- assert(_lock.is_locked_by_me());
+ ceph_assert(_lock.is_locked_by_me());
osdmap_ref = std::move(newmap);
}
(l.other < r.other)));
}
friend ostream& operator<<(ostream& out, const loc_count_t& l) {
- assert(l.up >= 0);
- assert(l.other >= 0);
+ ceph_assert(l.up >= 0);
+ ceph_assert(l.other >= 0);
return out << "(" << l.up << "+" << l.other << ")";
}
};
pgs_by_shard_id(s, pgsbs);
for (auto shard: pgsbs) {
auto p = missing_by_count[shard.first].find(_get_count(shard.second));
- assert(p != missing_by_count[shard.first].end());
+ ceph_assert(p != missing_by_count[shard.first].end());
if (--p->second == 0) {
missing_by_count[shard.first].erase(p);
}
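// Descriptive note: missing_by_count acts as a per-shard histogram
// keyed by loc_count_t; once the tally for a given (up+other) count
// drops to zero, the bucket itself is erased so the map never
// accumulates empty entries.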
lgeneric_dout(pg->cct, 0) << this << " " << pg->info.pgid << " unexpected need for "
<< i->first << " have " << j->second
<< " tried to add " << i->second << dendl;
- assert(i->second.need == j->second.need);
+ ceph_assert(i->second.need == j->second.need);
}
}
}
}
void revise_need(const hobject_t &hoid, eversion_t need) {
auto it = needs_recovery_map.find(hoid);
- assert(it != needs_recovery_map.end());
+ ceph_assert(it != needs_recovery_map.end());
it->second.need = need;
}
if (i == self)
continue;
auto pmiter = pmissing.find(i);
- assert(pmiter != pmissing.end());
+ ceph_assert(pmiter != pmissing.end());
miter = pmiter->second.get_items().find(hoid);
if (miter != pmiter->second.get_items().end()) {
item = miter->second;
return;
auto mliter =
missing_loc.insert(make_pair(hoid, set<pg_shard_t>())).first;
- assert(info.last_backfill.is_max());
- assert(info.last_update >= item->need);
+ ceph_assert(info.last_backfill.is_max());
+ ceph_assert(info.last_update >= item->need);
if (!missing.is_missing(hoid))
mliter->second.insert(self);
for (auto &&i: pmissing) {
if (i.first == self)
continue;
auto pinfoiter = pinfo.find(i.first);
- assert(pinfoiter != pinfo.end());
+ ceph_assert(pinfoiter != pinfo.end());
if (item->need <= pinfoiter->second.last_update &&
hoid <= pinfoiter->second.last_backfill &&
!i.second.is_missing(hoid))
handle(rctx.handle) {}
void accept_buffered_messages(BufferedRecoveryMessages &m) {
- assert(query_map);
- assert(info_map);
- assert(notify_list);
+ ceph_assert(query_map);
+ ceph_assert(info_map);
+ ceph_assert(notify_list);
for (map<int, map<spg_t, pg_query_t> >::iterator i = m.query_map.begin();
i != m.query_map.end();
++i) {
void send_notify(pg_shard_t to,
const pg_notify_t &info, const PastIntervals &pi) {
- assert(notify_list);
+ ceph_assert(notify_list);
(*notify_list)[to.osd].push_back(make_pair(info, pi));
}
};
/// drop first entry, and adjust @begin accordingly
void pop_front() {
- assert(!objects.empty());
+ ceph_assert(!objects.empty());
objects.erase(objects.begin());
trim();
}
void calc_min_last_complete_ondisk() {
eversion_t min = last_complete_ondisk;
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
pg->get_pgbackend()->try_stash(hoid, v, t);
}
void rollback(const pg_log_entry_t &entry) override {
- assert(entry.can_rollback());
+ ceph_assert(entry.can_rollback());
pg->get_pgbackend()->rollback(entry, t);
}
void rollforward(const pg_log_entry_t &entry) override {
/* Accessor functions for state methods */
ObjectStore::Transaction* get_cur_transaction() {
- assert(state->rctx);
- assert(state->rctx->transaction);
+ ceph_assert(state->rctx);
+ ceph_assert(state->rctx->transaction);
return state->rctx->transaction;
}
void send_query(pg_shard_t to, const pg_query_t &query) {
- assert(state->rctx);
- assert(state->rctx->query_map);
+ ceph_assert(state->rctx);
+ ceph_assert(state->rctx->query_map);
(*state->rctx->query_map)[to.osd][spg_t(pg->info.pgid.pgid, to.shard)] =
query;
}
map<int, map<spg_t, pg_query_t> > *get_query_map() {
- assert(state->rctx);
- assert(state->rctx->query_map);
+ ceph_assert(state->rctx);
+ ceph_assert(state->rctx->query_map);
return state->rctx->query_map;
}
map<int, vector<pair<pg_notify_t, PastIntervals> > > *get_info_map() {
- assert(state->rctx);
- assert(state->rctx->info_map);
+ ceph_assert(state->rctx);
+ ceph_assert(state->rctx->info_map);
return state->rctx->info_map;
}
void send_notify(pg_shard_t to,
const pg_notify_t &info, const PastIntervals &pi) {
- assert(state->rctx);
+ ceph_assert(state->rctx);
state->rctx->send_notify(to, info, pi);
}
};
break;
}
}
- assert(up_primary.osd == new_up_primary);
- assert(primary.osd == new_acting_primary);
+ ceph_assert(up_primary.osd == new_up_primary);
+ ceph_assert(primary.osd == new_acting_primary);
}
void set_role(int r) {
eversion_t at_version(
get_osdmap()->get_epoch(),
projected_last_update.version+1);
- assert(at_version > info.last_update);
- assert(at_version > pg_log.get_head());
- assert(at_version > projected_last_update);
+ ceph_assert(at_version > info.last_update);
+ ceph_assert(at_version > pg_log.get_head());
+ ceph_assert(at_version > projected_last_update);
return at_version;
}
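// Illustrative example (assumed eversion values): with epoch 40 and
// projected_last_update = 40'11, at_version becomes 40'12, which the
// asserts require to sort strictly after info.last_update, the log
// head, and the projected head -- new ops always get a fresh,
// monotonically increasing version.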
void PGBackend::recover_delete_object(const hobject_t &oid, eversion_t v,
RecoveryHandle *h)
{
- assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0);
+ ceph_assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0);
for (const auto& shard : get_parent()->get_acting_recovery_backfill_shards()) {
if (shard == get_parent()->whoami_shard())
continue;
void PGBackend::handle_recovery_delete(OpRequestRef op)
{
const MOSDPGRecoveryDelete *m = static_cast<const MOSDPGRecoveryDelete *>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE);
+ ceph_assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE);
dout(20) << __func__ << " " << op << dendl;
op->mark_started();
void PGBackend::handle_recovery_delete_reply(OpRequestRef op)
{
const MOSDPGRecoveryDeleteReply *m = static_cast<const MOSDPGRecoveryDeleteReply *>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE_REPLY);
+ ceph_assert(m->get_type() == MSG_OSD_PG_RECOVERY_DELETE_REPLY);
dout(20) << __func__ << " " << op << dendl;
for (const auto &p : m->objects) {
}
};
- assert(entry.mod_desc.can_rollback());
+ ceph_assert(entry.mod_desc.can_rollback());
RollbackVisitor vis(entry.soid, this);
entry.mod_desc.visit(&vis);
t->append(vis.t);
void PGBackend::remove(
const hobject_t &hoid,
ObjectStore::Transaction *t) {
- assert(!hoid.is_temp());
+ ceph_assert(!hoid.is_temp());
t->remove(
coll,
ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard));
vector<hobject_t> *ls,
hobject_t *next)
{
- assert(ls);
+ ceph_assert(ls);
// Starts with the smallest generation to make sure the result list
// has the marker object (it might have multiple generations
// though, which would be filtered).
vector<hobject_t> *ls,
vector<ghobject_t> *gen_obs)
{
- assert(ls);
+ ceph_assert(ls);
vector<ghobject_t> objects;
int r = store->collection_list(
ch,
map<string, boost::optional<bufferlist> > &old_attrs,
ObjectStore::Transaction *t) {
map<string, bufferlist> to_set;
- assert(!hoid.is_temp());
+ ceph_assert(!hoid.is_temp());
for (map<string, boost::optional<bufferlist> >::iterator i = old_attrs.begin();
i != old_attrs.end();
++i) {
const hobject_t &hoid,
uint64_t old_size,
ObjectStore::Transaction *t) {
- assert(!hoid.is_temp());
+ ceph_assert(!hoid.is_temp());
t->truncate(
coll,
ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard),
const hobject_t &hoid,
version_t old_version,
ObjectStore::Transaction *t) {
- assert(!hoid.is_temp());
+ ceph_assert(!hoid.is_temp());
t->remove(
coll,
ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard));
const hobject_t &hoid,
version_t old_version,
ObjectStore::Transaction *t) {
- assert(!hoid.is_temp());
+ ceph_assert(!hoid.is_temp());
t->remove(
coll,
ghobject_t(hoid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard));
const hobject_t &hoid,
version_t old_version,
ObjectStore::Transaction *t) {
- assert(!hoid.is_temp());
+ ceph_assert(!hoid.is_temp());
t->remove(
coll, ghobject_t(hoid, old_version, get_parent()->whoami_shard().shard));
}
ec_profile,
&ec_impl,
&ss);
- assert(ec_impl);
+ ceph_assert(ec_impl);
return new ECBackend(
l,
coll,
ScrubMapBuilder &pos)
{
dout(10) << __func__ << " " << pos << dendl;
- assert(!pos.done());
- assert(pos.pos < pos.ls.size());
+ ceph_assert(!pos.done());
+ ceph_assert(pos.pos < pos.ls.size());
hobject_t& poid = pos.ls[pos.pos];
struct stat st;
if (r == 0) {
ScrubMap::object &o = map.objects[poid];
o.size = st.st_size;
- assert(!o.negative);
+ ceph_assert(!o.negative);
store->getattrs(
ch,
ghobject_t(
}
// We won't pick an auth copy if the snapset is missing or won't decode.
- assert(!obj.is_snapdir());
+ ceph_assert(!obj.is_snapdir());
if (obj.is_head()) {
k = i->second.attrs.find(SS_ATTR);
if (k == i->second.attrs.end()) {
}
// This is automatically corrected in PG::_repair_oinfo_oid()
- assert(oi.soid == obj);
+ ceph_assert(oi.soid == obj);
if (i->second.size != be_get_ondisk_size(oi.size)) {
shard_info.set_obj_size_info_mismatch();
if (fix_digest) {
boost::optional<uint32_t> data_digest, omap_digest;
- assert(auth_object.digest_present);
+ ceph_assert(auth_object.digest_present);
data_digest = auth_object.digest;
if (auth_object.omap_digest_present) {
omap_digest = auth_object.omap_digest;
// recorded digest != actual digest?
if (auth_oi.is_data_digest() && auth_object.digest_present &&
auth_oi.data_digest != auth_object.digest) {
- assert(shard_map[auth->first].has_data_digest_mismatch_info());
+ ceph_assert(shard_map[auth->first].has_data_digest_mismatch_info());
errorstream << pgid << " recorded data digest 0x"
<< std::hex << auth_oi.data_digest << " != on disk 0x"
<< auth_object.digest << std::dec << " on " << auth_oi.soid
}
if (auth_oi.is_omap_digest() && auth_object.omap_digest_present &&
auth_oi.omap_digest != auth_object.omap_digest) {
- assert(shard_map[auth->first].has_omap_digest_mismatch_info());
+ ceph_assert(shard_map[auth->first].has_omap_digest_mismatch_info());
errorstream << pgid << " recorded omap digest 0x"
<< std::hex << auth_oi.omap_digest << " != on disk 0x"
<< auth_object.omap_digest << std::dec
}
virtual const pg_missing_const_i &get_shard_missing(pg_shard_t peer) const {
auto m = maybe_get_shard_missing(peer);
- assert(m);
+ ceph_assert(m);
return *m;
}
} else {
map<pg_shard_t, pg_info_t>::const_iterator i =
get_shard_info().find(peer);
- assert(i != get_shard_info().end());
+ ceph_assert(i != get_shard_info().end());
return i->second;
}
}
set<string>* trimmed_dups,
eversion_t *write_from_dups)
{
- assert(s <= can_rollback_to);
+ ceph_assert(s <= can_rollback_to);
if (complete_to != log.end())
lgeneric_subdout(cct, osd, 20) << " complete_to " << complete_to->version << dendl;
out << *p << " " <<
(logged_object(p->soid) ? "indexed" : "NOT INDEXED") <<
std::endl;
- assert(!p->reqid_is_indexed() || logged_req(p->reqid));
+ ceph_assert(!p->reqid_is_indexed() || logged_req(p->reqid));
}
for (list<pg_log_dup_t>::const_iterator p = dups.begin();
// Don't assert for async_recovery_targets or backfill_targets
// or whenever there are missing items
if (transaction_applied && !async && (missing.num_missing() == 0))
- assert(trim_to <= info.last_complete);
+ ceph_assert(trim_to <= info.last_complete);
dout(10) << "trim " << log << " to " << trim_to << dendl;
log.trim(cct, trim_to, &trimmed, &trimmed_dups, &write_from_dups);
// Check preconditions
// If our log is empty, the incoming log needs to have not been trimmed.
- assert(!log.null() || olog.tail == eversion_t());
+ ceph_assert(!log.null() || olog.tail == eversion_t());
// The logs must overlap.
- assert(log.head >= olog.tail && olog.head >= log.tail);
+ ceph_assert(log.head >= olog.tail && olog.head >= log.tail);
for (map<hobject_t, pg_missing_item>::const_iterator i = missing.get_items().begin();
i != missing.get_items().end();
derr << " " << *i << dendl;
}
}
- assert(log.log.size() == log_keys_debug.size());
+ ceph_assert(log.log.size() == log_keys_debug.size());
for (list<pg_log_entry_t>::iterator i = log.log.begin();
i != log.log.end();
++i) {
- assert(log_keys_debug.count(i->get_key_name()));
+ ceph_assert(log_keys_debug.count(i->get_key_name()));
}
}
++i) {
if (i->first[0] == '_')
continue;
- assert(!log_keys_debug->count(i->first));
+ ceph_assert(!log_keys_debug->count(i->first));
log_keys_debug->insert(i->first);
}
}
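// Descriptive note (an assumption about the on-disk layout): omap
// keys beginning with '_' in this keyspace hold pg metadata rather
// than log entries, which is why they are skipped when rebuilding
// log_keys_debug.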
string key = t.get_key_name();
if (log_keys_debug) {
auto it = log_keys_debug->find(key);
- assert(it != log_keys_debug->end());
+ ceph_assert(it != log_keys_debug->end());
log_keys_debug->erase(it);
}
to_remove.emplace(std::move(key));
++i) {
if (i->first[0] == '_')
continue;
- assert(!log_keys_debug->count(i->first));
+ ceph_assert(!log_keys_debug->count(i->first));
log_keys_debug->insert(i->first);
}
}
/****/
void claim_log_and_clear_rollback_info(const pg_log_t& o) {
// we must have already trimmed the old entries
- assert(rollback_info_trimmed_to == head);
- assert(rollback_info_trimmed_to_riter == log.rbegin());
+ ceph_assert(rollback_info_trimmed_to == head);
+ ceph_assert(rollback_info_trimmed_to_riter == log.rbegin());
*this = IndexedLog(o);
void zero() {
// we must have already trimmed the old entries
- assert(rollback_info_trimmed_to == head);
- assert(rollback_info_trimmed_to_riter == log.rbegin());
+ ceph_assert(rollback_info_trimmed_to == head);
+ ceph_assert(rollback_info_trimmed_to_riter == log.rbegin());
unindex();
pg_log_t::clear();
version_t *user_version,
int *return_code) const
{
- assert(version);
- assert(user_version);
- assert(return_code);
+ ceph_assert(version);
+ ceph_assert(user_version);
+ ceph_assert(return_code);
ceph::unordered_map<osd_reqid_t,pg_log_entry_t*>::const_iterator p;
if (!(indexed_data & PGLOG_INDEXED_CALLER_OPS)) {
index_caller_ops();
return true;
}
}
- assert(0 == "in extra_caller_ops but not extra_reqids");
+ ceph_assert(0 == "in extra_caller_ops but not extra_reqids");
}
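// Note on the ceph_assert(0 == "...") idiom used above: comparing 0
// against a string literal's address is always false, so the assert
// unconditionally fires and the message text is carried into the
// failure output -- an abort-with-reason.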
if (!(indexed_data & PGLOG_INDEXED_DUPS)) {
// actors
void add(const pg_log_entry_t& e, bool applied = true) {
if (!applied) {
- assert(get_can_rollback_to() == head);
+ ceph_assert(get_can_rollback_to() == head);
}
// make sure our buffers don't pin bigger buffers
if (rollback_info_trimmed_to_riter == log.rbegin())
++rollback_info_trimmed_to_riter;
- assert(e.version > head);
- assert(head.version == 0 || e.version.version > head.version);
+ ceph_assert(e.version > head);
+ ceph_assert(head.version == 0 || e.version.version > head.version);
head = e.version;
// to our index
}
}
- assert(log.get_can_rollback_to() >= v);
+ ceph_assert(log.get_can_rollback_to() >= v);
}
void reset_complete_to(pg_info_t *info) {
log.complete_to = log.log.begin();
- assert(log.complete_to != log.log.end());
+ ceph_assert(log.complete_to != log.log.end());
auto oldest_need = missing.get_oldest_need();
if (oldest_need != eversion_t()) {
while (log.complete_to->version < oldest_need) {
++log.complete_to;
- assert(log.complete_to != log.log.end());
+ ceph_assert(log.complete_to != log.log.end());
}
}
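// Illustrative example (assumed versions): if the oldest missing
// object needs 5'12, complete_to is advanced past every entry with
// version < 5'12, ending up at the first log entry not yet recovered
// locally.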
if (!info)
}
// entries is non-empty
- assert(!orig_entries.empty());
+ ceph_assert(!orig_entries.empty());
// strip out and ignore ERROR entries
mempool::osd_pglog::list<pg_log_entry_t> entries;
eversion_t last;
i != orig_entries.end();
++i) {
// all entries are on hoid
- assert(i->soid == hoid);
+ ceph_assert(i->soid == hoid);
// if we have not yet seen a non-error entry and this entry is not an
// error, then this is the first non-error entry
bool first_non_error = ! seen_non_error && ! i->is_error();
if (i != orig_entries.begin() && i->prior_version != eversion_t() &&
! first_non_error) {
// in increasing order of version
- assert(i->version > last);
+ ceph_assert(i->version > last);
// prior_version correct (unless it is an ERROR entry)
- assert(i->prior_version == last || i->is_error());
+ ceph_assert(i->prior_version == last || i->is_error());
}
if (i->is_error()) {
ldpp_dout(dpp, 20) << __func__ << ": ignoring " << *i << dendl;
ldpp_dout(dpp, 10) << __func__ << ": more recent entry found: "
<< *objiter->second << ", already merged" << dendl;
- assert(objiter->second->version > last_divergent_update);
+ ceph_assert(objiter->second->version > last_divergent_update);
// ensure missing has been updated appropriately
if (objiter->second->is_update() ||
(missing.may_include_deletes && objiter->second->is_delete())) {
- assert(missing.is_missing(hoid) &&
+ ceph_assert(missing.is_missing(hoid) &&
missing.get_items().at(hoid).need == objiter->second->version);
} else {
- assert(!missing.is_missing(hoid));
+ ceph_assert(!missing.is_missing(hoid));
}
missing.revise_have(hoid, eversion_t());
if (rollbacker) {
for (list<pg_log_entry_t>::const_reverse_iterator i = entries.rbegin();
i != entries.rend();
++i) {
- assert(i->can_rollback() && i->version > olog_can_rollback_to);
+ ceph_assert(i->can_rollback() && i->version > olog_can_rollback_to);
ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
<< " rolling back " << *i << dendl;
if (rollbacker)
const DoutPrefixProvider *dpp) {
bool invalidate_stats = false;
if (log && !entries.empty()) {
- assert(log->head < entries.begin()->version);
+ ceph_assert(log->head < entries.begin()->version);
}
for (list<pg_log_entry_t>::const_iterator p = entries.begin();
p != entries.end();
// legacy?
struct stat st;
int r = store->stat(ch, pgmeta_oid, &st);
- assert(r == 0);
- assert(st.st_size == 0);
+ ceph_assert(r == 0);
+ ceph_assert(st.st_size == 0);
// will get overridden below if it had been recorded
eversion_t on_disk_can_rollback_to = info.last_update;
decode(oid, bp);
decode(item, bp);
if (item.is_delete()) {
- assert(missing.may_include_deletes);
+ ceph_assert(missing.may_include_deletes);
}
missing.add(oid, item.need, item.have, item.is_delete());
} else if (p->key().substr(0, 4) == string("dup_")) {
pg_log_dup_t dup;
decode(dup, bp);
if (!dups.empty()) {
- assert(dups.back().version < dup.version);
+ ceph_assert(dups.back().version < dup.version);
}
dups.push_back(dup);
} else {
ldpp_dout(dpp, 20) << "read_log_and_missing " << e << dendl;
if (!entries.empty()) {
pg_log_entry_t last_e(entries.back());
- assert(last_e.version.version < e.version.version);
- assert(last_e.version.epoch <= e.version.epoch);
+ ceph_assert(last_e.version.version < e.version.version);
+ ceph_assert(last_e.version.epoch <= e.version.epoch);
}
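// Illustrative ordering check (assumed eversions): reading 4'7 then
// 5'8 passes (version strictly increases, epoch may rise); reading
// 5'8 then 5'8, or 5'9 then 4'10, would trip one of the two asserts,
// catching corrupt or mis-sorted log keys at load time.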
entries.push_back(e);
if (log_keys_debug)
<< " (have " << oi.version << ")" << dendl;
if (debug_verify_stored_missing) {
auto miter = missing.get_items().find(i->soid);
- assert(miter != missing.get_items().end());
- assert(miter->second.need == i->version);
+ ceph_assert(miter != missing.get_items().end());
+ ceph_assert(miter->second.need == i->version);
// the 'have' version is reset if an object is deleted,
// then created again
- assert(miter->second.have == oi.version || miter->second.have == eversion_t());
+ ceph_assert(miter->second.have == oi.version || miter->second.have == eversion_t());
checked.insert(i->soid);
} else {
missing.add(i->soid, i->version, oi.version, i->is_delete());
if (debug_verify_stored_missing) {
auto miter = missing.get_items().find(i->soid);
if (i->is_delete()) {
- assert(miter == missing.get_items().end() ||
+ ceph_assert(miter == missing.get_items().end() ||
(miter->second.need == i->version &&
miter->second.have == eversion_t()));
} else {
- assert(miter != missing.get_items().end());
- assert(miter->second.need == i->version);
- assert(miter->second.have == eversion_t());
+ ceph_assert(miter != missing.get_items().end());
+ ceph_assert(miter->second.need == i->version);
+ ceph_assert(miter->second.have == eversion_t());
}
checked.insert(i->soid);
} else {
<< i.first << " " << i.second
<< " last_backfill = " << info.last_backfill
<< dendl;
- assert(0 == "invalid missing set entry found");
+ ceph_assert(0 == "invalid missing set entry found");
}
bufferlist bv;
int r = store->getattr(
bv);
if (r >= 0) {
object_info_t oi(bv);
- assert(oi.version == i.second.have || eversion_t() == i.second.have);
+ ceph_assert(oi.version == i.second.have || eversion_t() == i.second.have);
} else {
- assert(i.second.is_delete() || eversion_t() == i.second.have);
+ ceph_assert(i.second.is_delete() || eversion_t() == i.second.have);
}
}
} else {
- assert(must_rebuild);
+ ceph_assert(must_rebuild);
for (map<eversion_t, hobject_t>::reverse_iterator i =
divergent_priors.rbegin();
i != divergent_priors.rend();
<< "), assuming it is tracker.ceph.com/issues/17916"
<< dendl;
} else {
- assert(oi.version == i->first);
+ ceph_assert(oi.version == i->first);
}
} else {
ldpp_dout(dpp, 15) << "read_log_and_missing missing " << *i << dendl;
left,
[&](const BufferUpdate::Write &w) -> BufferUpdateType {
auto r = boost::get<BufferUpdate::Write>(&right);
- assert(r && w.fadvise_flags == r->fadvise_flags);
+ ceph_assert(r && w.fadvise_flags == r->fadvise_flags);
bufferlist bl = w.buffer;
bl.append(r->buffer);
return BufferUpdate::Write{bl, w.fadvise_flags};
},
[&](const BufferUpdate::Zero &z) -> BufferUpdateType {
auto r = boost::get<BufferUpdate::Zero>(&right);
- assert(r);
+ ceph_assert(r);
return BufferUpdate::Zero{z.len + r->len};
},
[&](const BufferUpdate::CloneRange &c) -> BufferUpdateType {
- assert(0 == "violates can_merge condition");
+ ceph_assert(0 == "violates can_merge condition");
return left;
});
}
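// Summary of the merge rules above (descriptive only): two adjacent
// Writes concatenate their buffers (the visitor assumes can_merge
// already verified matching fadvise_flags), two Zeros sum their
// lengths, and CloneRange never merges -- reaching that branch
// violates the can_merge precondition and aborts.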
private:
ObjectOperation &get_object_op_for_modify(const hobject_t &hoid) {
auto &op = op_map[hoid];
- assert(!op.is_delete());
+ ceph_assert(!op.is_delete());
return op;
}
ObjectOperation &get_object_op(const hobject_t &hoid) {
public:
void add_obc(
ObjectContextRef obc) {
- assert(obc);
+ ceph_assert(obc);
obc_map[obc->obs.oi.soid] = obc;
}
/// Sets up state for new object
const hobject_t &hoid
) {
auto &op = op_map[hoid];
- assert(op.is_none() || op.is_delete());
+ ceph_assert(op.is_none() || op.is_delete());
op.init_type = ObjectOperation::Init::Create();
}
const hobject_t &source ///< [in] obj to clone from
) {
auto &op = op_map[target];
- assert(op.is_none() || op.is_delete());
+ ceph_assert(op.is_none() || op.is_delete());
op.init_type = ObjectOperation::Init::Clone{source};
}
const hobject_t &target, ///< [in] to, must not exist, be non-temp
const hobject_t &source ///< [in] source (must be a temp object)
) {
- assert(source.is_temp());
- assert(!target.is_temp());
+ ceph_assert(source.is_temp());
+ ceph_assert(!target.is_temp());
auto &op = op_map[target];
- assert(op.is_none() || op.is_delete());
+ ceph_assert(op.is_none() || op.is_delete());
bool del_first = op.is_delete();
auto iter = op_map.find(source);
) {
auto &op = get_object_op_for_modify(hoid);
if (!op.is_fresh_object()) {
- assert(!op.updated_snaps);
+ ceph_assert(!op.updated_snaps);
op = ObjectOperation();
op.delete_first = true;
} else {
- assert(!op.is_rename());
+ ceph_assert(!op.is_rename());
op_map.erase(hoid); // make it a noop if it's a fresh object
}
}
const set<snapid_t> &new_snaps ///< [in] new snaps value
) {
auto &op = get_object_op(hoid);
- assert(!op.updated_snaps);
- assert(op.buffer_updates.empty());
- assert(!op.truncate);
+ ceph_assert(!op.updated_snaps);
+ ceph_assert(op.buffer_updates.empty());
+ ceph_assert(!op.truncate);
op.updated_snaps = make_pair(
old_snaps,
new_snaps);
uint64_t off ///< [in] offset to truncate to
) {
auto &op = get_object_op_for_modify(hoid);
- assert(!op.updated_snaps);
+ ceph_assert(!op.updated_snaps);
op.buffer_updates.erase(
off,
std::numeric_limits<uint64_t>::max() - off);
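// Descriptive note: truncation is expressed on the buffer-update map
// as erasing the interval [off, UINT64_MAX), i.e. dropping every
// buffered update at or beyond the new size.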
uint32_t fadvise_flags = 0 ///< [in] fadvise hint
) {
auto &op = get_object_op_for_modify(hoid);
- assert(!op.updated_snaps);
- assert(len > 0);
- assert(len == bl.length());
+ ceph_assert(!op.updated_snaps);
+ ceph_assert(len > 0);
+ ceph_assert(len == bl.length());
op.buffer_updates.insert(
off,
len,
uint64_t tooff ///< [in] offset
) {
auto &op = get_object_op_for_modify(to);
- assert(!op.updated_snaps);
+ ceph_assert(!op.updated_snaps);
op.buffer_updates.insert(
tooff,
len,
uint64_t len ///< [in] amount to zero
) {
auto &op = get_object_op_for_modify(hoid);
- assert(!op.updated_snaps);
+ ceph_assert(!op.updated_snaps);
op.buffer_updates.insert(
off,
len,
/* Internal node: push children onto stack, remove edge,
* recurse. When this node is encountered again, it'll
* be a leaf */
- assert(!diter->second.empty());
+ ceph_assert(!diter->second.empty());
stack.splice(stack.begin(), diter->second);
dgraph.erase(diter);
}
}
void PrimaryLogPG::OpContext::finish_read(PrimaryLogPG *pg)
{
- assert(inflightreads > 0);
+ ceph_assert(inflightreads > 0);
--inflightreads;
if (async_reads_complete()) {
- assert(pg->in_progress_async_reads.size());
- assert(pg->in_progress_async_reads.front().second == this);
+ ceph_assert(pg->in_progress_async_reads.size());
+ ceph_assert(pg->in_progress_async_reads.front().second == this);
pg->in_progress_async_reads.pop_front();
// Restart the op context now that all reads have been completed
}
if (!is_delete && pg_log.get_missing().is_missing(recovery_info.soid) &&
pg_log.get_missing().get_items().find(recovery_info.soid)->second.need > recovery_info.version) {
- assert(is_primary());
+ ceph_assert(is_primary());
const pg_log_entry_t *latest = pg_log.get_log().objects.find(recovery_info.soid)->second;
if (latest->op == pg_log_entry_t::LOST_REVERT &&
latest->reverting_to == recovery_info.version) {
bufferlist bl;
encode(recovery_info.oi, bl,
get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
- assert(!pool.info.is_erasure());
+ ceph_assert(!pool.info.is_erasure());
t->setattr(coll, ghobject_t(recovery_info.soid), OI_ATTR, bl);
if (obc)
obc->attr_cache[OI_ATTR] = bl;
obc->obs.exists = true;
bool got = obc->get_recovery_read();
- assert(got);
+ ceph_assert(got);
- assert(recovering.count(obc->obs.oi.soid));
+ ceph_assert(recovering.count(obc->obs.oi.soid));
recovering[obc->obs.oi.soid] = obc;
obc->obs.oi = recovery_info.oi; // may have been updated above
}
t->register_on_applied(new C_OSD_AppliedRecoveredObject(this, obc));
publish_stats_to_osd();
- assert(missing_loc.needs_recovery(hoid));
+ ceph_assert(missing_loc.needs_recovery(hoid));
if (!is_delete)
missing_loc.add_location(hoid, pg_whoami);
release_backoffs(hoid);
publish_stats_to_osd();
dout(10) << "pushed " << soid << " to all replicas" << dendl;
map<hobject_t, ObjectContextRef>::iterator i = recovering.find(soid);
- assert(i != recovering.end());
+ ceph_assert(i != recovering.end());
if (i->second && i->second->rwstate.recovery_read_marker) {
// recover missing won't have had an obc, but it gets filled in
// during on_local_recover
- assert(i->second);
+ ceph_assert(i->second);
list<OpRequestRef> requeue_list;
i->second->drop_recovery_read(&requeue_list);
requeue_ops(requeue_list);
const hobject_t &hoid) {
if (peer == get_primary())
return true;
- assert(peer_info.count(peer));
+ ceph_assert(peer_info.count(peer));
bool should_send =
hoid.pool != (int64_t)info.pgid.pool() ||
hoid <= last_backfill_started ||
hoid <= peer_info[peer].last_backfill;
if (!should_send) {
- assert(is_backfill_targets(peer));
+ ceph_assert(is_backfill_targets(peer));
dout(10) << __func__ << " issue_repop shipping empty opt to osd." << peer
<< ", object " << hoid
<< " beyond std::max(last_backfill_started "
void PrimaryLogPG::wait_for_unreadable_object(
const hobject_t& soid, OpRequestRef op)
{
- assert(is_unreadable_object(soid));
+ ceph_assert(is_unreadable_object(soid));
maybe_kick_recovery(soid);
waiting_for_unreadable_object[soid].push_back(op);
op->mark_delayed("waiting for missing object");
return true;
if (pg_log.get_missing().get_items().count(soid))
return true;
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
void PrimaryLogPG::wait_for_degraded_object(const hobject_t& soid, OpRequestRef op)
{
- assert(is_degraded_or_backfilling_object(soid) || is_degraded_on_async_recovery_target(soid));
+ ceph_assert(is_degraded_or_backfilling_object(soid) || is_degraded_on_async_recovery_target(soid));
maybe_kick_recovery(soid);
waiting_for_degraded_object[soid].push_back(op);
dout(20) << __func__ << ": blocking object " << oid.get_head()
<< " on snap promotion " << obc->obs.oi.soid << dendl;
// otherwise, we'd have blocked in do_op
- assert(oid.is_head());
- assert(objects_blocked_on_snap_promotion.count(oid) == 0);
+ ceph_assert(oid.is_head());
+ ceph_assert(objects_blocked_on_snap_promotion.count(oid) == 0);
objects_blocked_on_snap_promotion[oid] = obc;
wait_for_blocked_object(obc->obs.oi.soid, op);
}
dout(20) << __func__ << ": blocking object " << snap.get_head()
<< " on degraded snap " << snap << dendl;
// otherwise, we'd have blocked in do_op
- assert(objects_blocked_on_degraded_snap.count(snap.get_head()) == 0);
+ ceph_assert(objects_blocked_on_degraded_snap.count(snap.get_head()) == 0);
objects_blocked_on_degraded_snap[snap.get_head()] = snap.snap;
wait_for_degraded_object(snap, op);
}
min_version = pg_log.get_missing().get_rmissing().begin()->first;
soid = pg_log.get_missing().get_rmissing().begin()->second;
}
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator it = acting_recovery_backfill.begin();
it != acting_recovery_backfill.end();
++it) {
r = -EINVAL;
return r;
} else {
- assert(cls);
+ ceph_assert(cls);
}
ClassHandler::ClassFilter *class_filter = cls->get_filter(filter_name);
}
}
- assert(filter);
+ ceph_assert(filter);
int r = filter->init(iter);
if (r < 0) {
derr << "Error initializing filter " << type << ": "
ss << "mode must be 'revert' or 'delete'; mark not yet implemented";
return -EINVAL;
}
- assert(mode == pg_log_entry_t::LOST_REVERT ||
+ ceph_assert(mode == pg_log_entry_t::LOST_REVERT ||
mode == pg_log_entry_t::LOST_DELETE);
if (!is_primary()) {
// NOTE: this is non-const because we modify the OSDOp.outdata in
// place
MOSDOp *m = static_cast<MOSDOp *>(op->get_nonconst_req());
- assert(m->get_type() == CEPH_MSG_OSD_OP);
+ ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
dout(10) << "do_pg_op " << *m << dendl;
op->mark_started();
if (result < 0)
break;
- assert(filter);
+ ceph_assert(filter);
// fall through
}
} else if (mcand < lcand) {
candidate = mcand;
- assert(!mcand.is_max());
+ ceph_assert(!mcand.is_max());
++missing_iter;
} else {
candidate = lcand;
- assert(!lcand.is_max());
+ ceph_assert(!lcand.is_max());
++ls_iter;
}
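// Descriptive note: the listing above merge-sorts two ordered
// streams -- objects present on disk (ls_iter) and objects known only
// from the missing set (missing_iter) -- always taking the smaller
// hobject so the combined result stays sorted; the asserts guarantee
// neither stream hands back the max sentinel as a real candidate.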
if (result < 0)
break;
- assert(filter);
+ ceph_assert(filter);
// fall through
break;
}
- assert(snapid == CEPH_NOSNAP || pg_log.get_missing().get_items().empty());
+ ceph_assert(snapid == CEPH_NOSNAP || pg_log.get_missing().get_items().empty());
map<hobject_t, pg_missing_item>::const_iterator missing_iter =
pg_log.get_missing().get_items().lower_bound(current);
}
} else if (mcand < lcand) {
candidate = mcand;
- assert(!mcand.is_max());
+ ceph_assert(!mcand.is_max());
++missing_iter;
} else {
candidate = lcand;
- assert(!lcand.is_max());
+ ceph_assert(!lcand.is_max());
++ls_iter;
}
}
dout(10) << "calc_trim_to " << pg_trim_to << " -> " << new_trim_to << dendl;
pg_trim_to = new_trim_to;
- assert(pg_trim_to <= pg_log.get_head());
+ ceph_assert(pg_trim_to <= pg_log.get_head());
}
}
// Delay unless PGBackend says it's ok
if (pgbackend->can_handle_while_inactive(op)) {
bool handled = pgbackend->handle_message(op);
- assert(handled);
+ ceph_assert(handled);
return;
} else {
waiting_for_peered.push_back(op);
return;
}
- assert(is_peered() && flushes_in_progress == 0);
+ ceph_assert(is_peered() && flushes_in_progress == 0);
if (pgbackend->handle_message(op))
return;
break;
default:
- assert(0 == "bad message type in do_request");
+ ceph_assert(0 == "bad message type in do_request");
}
}
++i) {
pg_shard_t bt = *i;
map<pg_shard_t, pg_info_t>::const_iterator iter = peer_info.find(bt);
- assert(iter != peer_info.end());
+ ceph_assert(iter != peer_info.end());
if (iter->second.last_backfill < e)
e = iter->second.last_backfill;
}
// NOTE: take a non-const pointer here; we must be careful not to
// change anything that will break other reads on m (operator<<).
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
- assert(m->get_type() == CEPH_MSG_OSD_OP);
+ ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
if (m->finish_decode()) {
op->reset_desc(); // for TrackedOp
m->clear_payload();
<< std::hex << head.get_hash() << std::dec << dendl;
osd->clog->warn() << info.pgid.pgid << " does not contain " << head
<< " op " << *m;
- assert(!cct->_conf->osd_debug_misdirected_ops);
+ ceph_assert(!cct->_conf->osd_debug_misdirected_ops);
return;
}
// we have to wait for the object.
if (is_primary()) {
// missing the specific snap we need; requeue and wait.
- assert(!op->may_write()); // only happens on a read/cache
+ ceph_assert(!op->may_write()); // only happens on a read/cache
wait_for_unreadable_object(missing_oid, op);
return;
}
bool write_ordered,
ObjectContextRef obc)
{
- assert(obc);
+ ceph_assert(obc);
if (static_cast<const MOSDOp *>(op->get_req())->get_flags() &
CEPH_OSD_FLAG_IGNORE_REDIRECT) {
dout(20) << __func__ << ": ignoring redirect due to flag" << dendl;
}
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
- assert(m->get_type() == CEPH_MSG_OSD_OP);
+ ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
hobject_t head = m->get_hobj();
if (is_degraded_or_backfilling_object(head)) {
return cache_result_t::NOOP;
}
default:
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
return cache_result_t::NOOP;
}
if (p->second->chunks == p->second->io_results.size()) {
if (last_peering_reset == get_last_peering_reset()) {
- assert(p->second->obc);
+ ceph_assert(p->second->obc);
finish_manifest_flush(oid, tid, r, p->second->obc, last_offset);
}
}
uint64_t max_copy_size = 0, last_offset = 0;
map<uint64_t, chunk_info_t>::iterator iter = manifest.chunk_map.find(start_offset);
- assert(iter != manifest.chunk_map.end());
+ ceph_assert(iter != manifest.chunk_map.end());
for (;iter != manifest.chunk_map.end(); ++iter) {
if (iter->second.flags == chunk_info_t::FLAG_DIRTY) {
last_offset = iter->first;
}
map<uint64_t, chunk_info_t>::iterator iter =
obc->obs.oi.manifest.chunk_map.find(last_offset);
- assert(iter != obc->obs.oi.manifest.chunk_map.end());
+ ceph_assert(iter != obc->obs.oi.manifest.chunk_map.end());
for (;iter != obc->obs.oi.manifest.chunk_map.end(); ++iter) {
if (iter->second.flags == chunk_info_t::FLAG_DIRTY && last_offset < iter->first) {
do_manifest_flush(p->second->op, obc, p->second, iter->first, p->second->blocking);
MOSDOpReply *orig_reply, int r)
{
dout(20) << __func__ << " r=" << r << dendl;
- assert(op->may_write());
+ ceph_assert(op->may_write());
const osd_reqid_t &reqid = static_cast<const MOSDOp*>(op->get_req())->get_reqid();
mempool::osd_pglog::list<pg_log_entry_t> entries;
entries.push_back(pg_log_entry_t(pg_log_entry_t::ERROR, soid,
return cache_result_t::HANDLED_PROXY;
}
- assert(0 == "unreachable");
+ ceph_assert(0 == "unreachable");
return cache_result_t::NOOP;
case pg_pool_t::CACHEMODE_FORWARD:
return cache_result_t::HANDLED_PROXY;
default:
- assert(0 == "unrecognized cache_mode");
+ ceph_assert(0 == "unrecognized cache_mode");
}
return cache_result_t::NOOP;
}
if (last_peering_reset == pg->get_last_peering_reset()) {
if (r >= 0) {
if (!prdop->ops[op_index].outdata.length()) {
- assert(req_total_len);
+ ceph_assert(req_total_len);
bufferlist list;
bufferptr bptr(req_total_len);
list.push_back(std::move(bptr));
prdop->ops[op_index].outdata.append(list);
}
- assert(obj_op);
+ ceph_assert(obj_op);
uint64_t copy_offset;
if (req_offset >= prdop->ops[op_index].op.extent.offset) {
copy_offset = req_offset - prdop->ops[op_index].op.extent.offset;
soid = obc->obs.oi.manifest.redirect_target;
break;
default:
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
} else {
/* proxy */
dout(10) << __func__ << " no in_progress_proxy_ops found" << dendl;
return;
}
- assert(q->second.size());
+ ceph_assert(q->second.size());
list<OpRequestRef>::iterator it = std::find(q->second.begin(),
q->second.end(),
prdop->op);
- assert(it != q->second.end());
+ ceph_assert(it != q->second.end());
OpRequestRef op = *it;
q->second.erase(it);
if (q->second.size() == 0) {
soid = obc->obs.oi.manifest.redirect_target;
break;
default:
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
} else {
/* proxy */
if (chunk_index <= req_offset) {
osd_op.op.extent.offset = manifest->chunk_map[chunk_index].offset + req_offset - chunk_index;
} else {
- assert(0 == "chunk_index > req_offset");
+ ceph_assert(0 == "chunk_index > req_offset");
}
osd_op.op.extent.length = req_length;
return;
}
ProxyWriteOpRef pwop = p->second;
- assert(tid == pwop->objecter_tid);
- assert(oid == pwop->soid);
+ ceph_assert(tid == pwop->objecter_tid);
+ ceph_assert(oid == pwop->soid);
proxywrite_ops.erase(tid);
return;
}
list<OpRequestRef>& in_progress_op = q->second;
- assert(in_progress_op.size());
+ ceph_assert(in_progress_op.size());
list<OpRequestRef>::iterator it = std::find(in_progress_op.begin(),
in_progress_op.end(),
pwop->op);
- assert(it != in_progress_op.end());
+ ceph_assert(it != in_progress_op.end());
in_progress_op.erase(it);
if (in_progress_op.size() == 0) {
in_progress_proxy_ops.erase(oid);
osd->logger->inc(l_osd_tier_proxy_write);
const MOSDOp *m = static_cast<const MOSDOp*>(pwop->op->get_req());
- assert(m != NULL);
+ ceph_assert(m != NULL);
if (!pwop->sent_reply) {
// send commit.
promote_callback->promote_results.get<1>(),
promote_callback->obc);
} else {
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
return 0;
}
ObjectContextRef *promote_obc)
{
hobject_t hoid = obc ? obc->obs.oi.soid : missing_oid;
- assert(hoid != hobject_t());
+ ceph_assert(hoid != hobject_t());
if (write_blocked_by_scrub(hoid)) {
dout(10) << __func__ << " " << hoid
<< " blocked by scrub" << dendl;
return;
}
if (!obc) { // we need to create an ObjectContext
- assert(missing_oid != hobject_t());
+ ceph_assert(missing_oid != hobject_t());
obc = get_object_context(missing_oid, true);
}
if (promote_obc)
src_hoid = obc->obs.oi.manifest.redirect_target;
cb = new PromoteCallback(obc, this);
} else {
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
}
obc->obs.oi.soid.snap == CEPH_NOSNAP,
src_fadvise_flags, 0);
- assert(obc->is_blocked());
+ ceph_assert(obc->is_blocked());
if (op)
wait_for_blocked_object(obc->obs.oi.soid, op);
if (result == -EINPROGRESS || pending_async_reads) {
// come back later.
if (pending_async_reads) {
- assert(pool.info.is_erasure());
+ ceph_assert(pool.info.is_erasure());
in_progress_async_reads.push_back(make_pair(op, ctx));
ctx->start_async_reads(this);
}
ctx->reply->set_reply_versions(ctx->at_version, ctx->user_at_version);
- assert(op->may_write() || op->may_cache());
+ ceph_assert(op->may_write() || op->may_cache());
// trim log?
calc_trim_to();
dout(20) << " op order client." << n << " tid " << t << " last was " << p->second << dendl;
if (p->second > t) {
derr << "bad op order, already applied " << p->second << " > this " << t << dendl;
- assert(0 == "out of order op");
+ ceph_assert(0 == "out of order op");
}
p->second = t;
}
ThreadPool::TPHandle &handle)
{
const MOSDPGScan *m = static_cast<const MOSDPGScan*>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_SCAN);
+ ceph_assert(m->get_type() == MSG_OSD_PG_SCAN);
dout(10) << "do_scan " << *m << dendl;
op->mark_started();
pg_shard_t from = m->from;
// Check that from is in backfill_targets vector
- assert(is_backfill_targets(from));
+ ceph_assert(is_backfill_targets(from));
BackfillInterval& bi = peer_backfill_info[from];
bi.begin = m->begin;
if (waiting_on_backfill.erase(from)) {
if (waiting_on_backfill.empty()) {
- assert(peer_backfill_info.size() == backfill_targets.size());
+ ceph_assert(peer_backfill_info.size() == backfill_targets.size());
finish_recovery_op(hobject_t::get_max());
}
} else {
void PrimaryLogPG::do_backfill(OpRequestRef op)
{
const MOSDPGBackfill *m = static_cast<const MOSDPGBackfill*>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_BACKFILL);
+ ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL);
dout(10) << "do_backfill " << *m << dendl;
op->mark_started();
switch (m->op) {
case MOSDPGBackfill::OP_BACKFILL_FINISH:
{
- assert(cct->_conf->osd_kill_backfill_at != 1);
+ ceph_assert(cct->_conf->osd_kill_backfill_at != 1);
MOSDPGBackfill *reply = new MOSDPGBackfill(
MOSDPGBackfill::OP_BACKFILL_FINISH_ACK,
case MOSDPGBackfill::OP_BACKFILL_PROGRESS:
{
- assert(cct->_conf->osd_kill_backfill_at != 2);
+ ceph_assert(cct->_conf->osd_kill_backfill_at != 2);
info.set_last_backfill(m->last_backfill);
info.stats = m->stats;
dirty_info = true;
write_if_dirty(t);
int tr = osd->store->queue_transaction(ch, std::move(t), NULL);
- assert(tr == 0);
+ ceph_assert(tr == 0);
}
break;
case MOSDPGBackfill::OP_BACKFILL_FINISH_ACK:
{
- assert(is_primary());
- assert(cct->_conf->osd_kill_backfill_at != 3);
+ ceph_assert(is_primary());
+ ceph_assert(cct->_conf->osd_kill_backfill_at != 3);
finish_recovery_op(hobject_t::get_max());
}
break;
{
const MOSDPGBackfillRemove *m = static_cast<const MOSDPGBackfillRemove*>(
op->get_req());
- assert(m->get_type() == MSG_OSD_PG_BACKFILL_REMOVE);
+ ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL_REMOVE);
dout(7) << __func__ << " " << m->ls << dendl;
op->mark_started();
remove_snap_mapped_object(t, p.first);
}
int r = osd->store->queue_transaction(ch, std::move(t), NULL);
- assert(r == 0);
+ ceph_assert(r == 0);
}
int PrimaryLogPG::trim_object(
<< new_snaps << " ... deleting" << dendl;
// ...from snapset
- assert(p != snapset.clones.end());
+ ceph_assert(p != snapset.clones.end());
snapid_t last = coid.snap;
ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(last);
void PrimaryLogPG::kick_snap_trim()
{
- assert(is_active());
- assert(is_primary());
+ ceph_assert(is_active());
+ ceph_assert(is_primary());
if (is_clean() && !snap_trimq.empty()) {
if (get_osdmap()->test_flag(CEPH_OSDMAP_NOSNAPTRIM)) {
dout(10) << __func__ << ": nosnaptrim set, not kicking" << dendl;
void PrimaryLogPG::snap_trimmer_scrub_complete()
{
if (is_primary() && is_active() && is_clean()) {
- assert(!snap_trimq.empty());
+ ceph_assert(!snap_trimq.empty());
snap_trimmer_machine.process_event(ScrubComplete());
}
}
return;
}
- assert(is_primary());
+ ceph_assert(is_primary());
dout(10) << "snap_trimmer posting" << dendl;
snap_trimmer_machine.process_event(DoSnapWork());
auto p = bl.cbegin();
decode(header, p);
decode(m, p);
- assert(p.end());
+ ceph_assert(p.end());
}
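// Descriptive note: asserting p.end() after the decodes guarantees
// the payload contained exactly one header and one map -- trailing
// bytes would indicate a corrupt or mis-framed buffer.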
// do the update(s)
decode(h, tp);
map<string,bufferlist> d;
decode(d, tp);
- assert(tp.end());
+ ceph_assert(tp.end());
dout(0) << " **** debug sanity check, looks ok ****" << dendl;
}
if (!obs.exists) {
ctx->delta_stats.num_objects++;
obs.exists = true;
- assert(!obs.oi.is_whiteout());
+ ceph_assert(!obs.oi.is_whiteout());
obs.oi.new_object();
if (!ignore_transaction)
ctx->op_t->create(obs.oi.soid);
ClassHandler::ClassData *cls;
result = osd->class_handler->open_class(cname, &cls);
- assert(result == 0); // init_op_flags() already verified this works.
+ ceph_assert(result == 0); // init_op_flags() already verified this works.
ClassHandler::ClassMethod *method = cls->get_method(mname.c_str());
if (!method) {
// Check special return value which has set missing_return
if (result == -ENOENT) {
dout(10) << __func__ << " CEPH_OSD_OP_CACHE_FLUSH got ENOENT" << dendl;
- assert(!missing.is_min());
+ ceph_assert(!missing.is_min());
wait_for_unreadable_object(missing, ctx->op);
// Error code which is used elsewhere when wait_for_unreadable_object() is used
result = -EAGAIN;
dout(20) << "key cookie=" << oi_iter->first.first
<< " entity=" << oi_iter->first.second << " "
<< oi_iter->second << dendl;
- assert(oi_iter->first.first == oi_iter->second.cookie);
- assert(oi_iter->first.second.is_client());
+ ceph_assert(oi_iter->first.first == oi_iter->second.cookie);
+ ceph_assert(oi_iter->first.second.is_client());
watch_item_t wi(oi_iter->first.second, oi_iter->second.cookie,
oi_iter->second.timeout_seconds, oi_iter->second.addr);
if (!ssc) {
ssc = ctx->obc->ssc = get_snapset_context(soid, false);
}
- assert(ssc);
+ ceph_assert(ssc);
dout(20) << " snapset " << ssc->snapset << dendl;
int clonecount = ssc->snapset.clones.size();
break;
}
if (!ctx->obc->obs.oi.is_whiteout()) {
- assert(obs.exists);
+ ceph_assert(obs.exists);
clone_info ci;
ci.cloneid = CEPH_NOSNAP;
if (result < 0)
break;
- assert(op.extent.length);
+ ceph_assert(op.extent.length);
if (obs.exists && !oi.is_whiteout()) {
t->zero(soid, op.extent.offset, op.extent.length);
interval_set<uint64_t> ch;
break;
if (op.extent.truncate_seq) {
- assert(op.extent.offset == op.extent.truncate_size);
+ ceph_assert(op.extent.offset == op.extent.truncate_size);
if (op.extent.truncate_seq <= oi.truncate_seq) {
dout(10) << " truncate seq " << op.extent.truncate_seq << " <= current " << oi.truncate_seq
<< ", no-op" << dendl;
// finish
if (op_finisher) {
result = op_finisher->execute();
- assert(result == 0);
+ ceph_assert(result == 0);
}
if (!oi.has_manifest() && !oi.manifest.is_redirect())
} else {
if (op_finisher) {
result = op_finisher->execute();
- assert(result == 0);
+ ceph_assert(result == 0);
}
chunk_info_t chunk_info;
src_hoid = obs.oi.manifest.redirect_target;
cb = new PromoteManifestCallback(ctx->obc, this, ctx);
} else {
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new PromoteFinisher(cb));
result = -EINPROGRESS;
} else {
result = op_finisher->execute();
- assert(result == 0);
+ ceph_assert(result == 0);
ctx->op_finishers.erase(ctx->current_osd_subop_num);
}
}
}
});
} else {
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
oi.clear_flag(object_info_t::FLAG_MANIFEST);
map<string, bufferlist> m;
decode(header, bp);
decode(m, bp);
- assert(bp.end());
+ ceph_assert(bp.end());
bufferlist newbl;
encode(header, newbl);
encode(m, newbl);
newop.indata = newbl;
}
result = do_osd_ops(ctx, nops);
- assert(result == 0);
+ ceph_assert(result == 0);
}
break;
ObjectMap::ObjectMapIterator iter = osd->store->get_omap_iterator(
ch, ghobject_t(soid)
);
- assert(iter);
+ ceph_assert(iter);
iter->upper_bound(start_after);
for (num = 0; iter->valid(); ++num, iter->next(false)) {
if (num >= max_return ||
} else {
// finish
result = op_finisher->execute();
- assert(result == 0);
+ ceph_assert(result == 0);
// COPY_FROM cannot be executed multiple times -- it must restart
ctx->op_finishers.erase(ctx->current_osd_subop_num);
ctx->delta_stats.num_wr++;
if (soid.is_snap()) {
- assert(ctx->obc->ssc->snapset.clone_overlap.count(soid.snap));
+ ceph_assert(ctx->obc->ssc->snapset.clone_overlap.count(soid.snap));
ctx->delta_stats.num_bytes -= ctx->obc->ssc->snapset.get_clone_bytes(soid.snap);
} else {
ctx->delta_stats.num_bytes -= oi.size;
&rollback_to, false, false, &missing_oid);
if (ret == -EAGAIN) {
/* clone must be missing */
- assert(is_degraded_or_backfilling_object(missing_oid) || is_degraded_on_async_recovery_target(missing_oid));
+ ceph_assert(is_degraded_or_backfilling_object(missing_oid) || is_degraded_on_async_recovery_target(missing_oid));
dout(20) << "_rollback_to attempted to roll back to a missing or backfilling clone "
<< missing_oid << " (requested snapid: " << snapid << ")" << dendl;
block_write_on_degraded_snap(missing_oid, ctx->op);
case cache_result_t::NOOP:
break;
case cache_result_t::BLOCKED_PROMOTE:
- assert(promote_obc);
+ ceph_assert(promote_obc);
block_write_on_snap_rollback(soid, promote_obc, ctx->op);
return -EAGAIN;
case cache_result_t::BLOCKED_FULL:
block_write_on_full_cache(soid, ctx->op);
return -EAGAIN;
case cache_result_t::REPLIED_WITH_EAGAIN:
- assert(0 == "this can't happen, no rollback on replica");
+ ceph_assert(0 == "this can't happen, no rollback on replica");
default:
- assert(0 == "must promote was set, other values are not valid");
+ ceph_assert(0 == "must promote was set, other values are not valid");
return -EAGAIN;
}
}
}
} else if (ret) {
// Unexpected: the lookup above can't return any other error at time of writing.
- assert(0 == "unexpected error code in _rollback_to");
+ ceph_assert(0 == "unexpected error code in _rollback_to");
} else { //we got our context, let's use it to do the rollback!
hobject_t& rollback_to_sobject = rollback_to->obs.oi.soid;
if (is_degraded_or_backfilling_object(rollback_to_sobject) ||
map<snapid_t, interval_set<uint64_t> >::iterator iter =
snapset.clone_overlap.lower_bound(snapid);
- assert(iter != snapset.clone_overlap.end());
+ ceph_assert(iter != snapset.clone_overlap.end());
interval_set<uint64_t> overlaps = iter->second;
for ( ;
iter != snapset.clone_overlap.end();
SnapContext& snapc = ctx->snapc;
// clone?
- assert(soid.snap == CEPH_NOSNAP);
+ ceph_assert(soid.snap == CEPH_NOSNAP);
dout(20) << "make_writeable " << soid << " snapset=" << ctx->new_snapset
<< " snapc=" << snapc << dendl;
// we will mark the object dirty
if (ctx->undirty && was_dirty) {
dout(20) << " clearing DIRTY flag" << dendl;
- assert(ctx->new_obs.oi.is_dirty());
+ ceph_assert(ctx->new_obs.oi.is_dirty());
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY);
--ctx->delta_stats.num_objects_dirty;
osd->logger->inc(l_osd_tier_clean);
coid,
ctx->clone_obc,
ctx->op);
- assert(got);
+ ceph_assert(got);
dout(20) << " got greedy write on clone_obc " << *ctx->clone_obc << dendl;
} else {
snap_oi = &static_snap_oi;
// disconnects first
complete_disconnect_watches(ctx->obc, ctx->watch_disconnects);
- assert(conn);
+ ceph_assert(conn);
auto session = conn->get_priv();
if (!session)
int PrimaryLogPG::prepare_transaction(OpContext *ctx)
{
- assert(!ctx->ops->empty());
+ ceph_assert(!ctx->ops->empty());
// valid snap context?
if (!ctx->snapc.is_valid()) {
void PrimaryLogPG::complete_read_ctx(int result, OpContext *ctx)
{
const MOSDOp *m = static_cast<const MOSDOp*>(ctx->op->get_req());
- assert(ctx->async_reads_complete());
+ ceph_assert(ctx->async_reads_complete());
for (vector<OSDOp>::iterator p = ctx->ops->begin();
p != ctx->ops->end() && result >= 0; ++p) {
return;
}
- assert(len > 0);
- assert(len <= reply_obj.data.length());
+ ceph_assert(len > 0);
+ ceph_assert(len <= reply_obj.data.length());
bufferlist bl;
bl.substr_of(reply_obj.data, 0, len);
reply_obj.data.swap(bl);
// size, mtime
reply_obj.size = oi.size;
reply_obj.mtime = oi.mtime;
- assert(obc->ssc);
+ ceph_assert(obc->ssc);
if (soid.snap < CEPH_NOSNAP) {
auto p = obc->ssc->snapset.clone_snaps.find(soid.snap);
- assert(p != obc->ssc->snapset.clone_snaps.end()); // warn?
+ ceph_assert(p != obc->ssc->snapset.clone_snaps.end()); // warn?
reply_obj.snaps = p->second;
} else {
reply_obj.snap_seq = obc->ssc->snapset.seq;
cursor.data_complete = true;
dout(20) << " got data" << dendl;
}
- assert(cursor.data_offset <= oi.size);
+ ceph_assert(cursor.data_offset <= oi.size);
}
// omap
cursor.omap_complete = true;
} else {
if (left > 0 && !cursor.omap_complete) {
- assert(cursor.data_complete);
+ ceph_assert(cursor.data_complete);
if (cursor.omap_offset.empty()) {
osd->store->omap_get_header(ch, ghobject_t(oi.soid),
&reply_obj.omap_header);
bufferlist omap_data;
ObjectMap::ObjectMapIterator iter =
osd->store->get_omap_iterator(ch, ghobject_t(oi.soid));
- assert(iter);
+ ceph_assert(iter);
iter->upper_bound(cursor.omap_offset);
for (; iter->valid(); iter->next(false)) {
++omap_keys;
<< (mirror_snapset ? " mirror_snapset" : "")
<< dendl;
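// mirror_snapset is only valid when copying a head object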
- assert(!mirror_snapset || src.snap == CEPH_NOSNAP);
+ ceph_assert(!mirror_snapset || src.snap == CEPH_NOSNAP);
// cancel a previous in-progress copy?
if (copy_ops.count(dest)) {
auto p = obc->obs.oi.manifest.chunk_map.begin();
_copy_some_manifest(obc, cop, p->first);
} else {
- assert(0 == "unrecognized manifest type");
+ ceph_assert(0 == "unrecognized manifest type");
}
}
}
if (cop->cursor.is_initial() && cop->mirror_snapset) {
// list snaps too.
- assert(cop->src.snap == CEPH_NOSNAP);
+ ceph_assert(cop->src.snap == CEPH_NOSNAP);
ObjectOperation op;
op.list_snaps(&cop->results.snapset, NULL);
ceph_tid_t tid = osd->objecter->read(cop->src.oid, cop->oloc, op,
} else {
// we should learn the version after the first chunk, if we didn't know
// it already!
- assert(cop->cursor.is_initial());
+ ceph_assert(cop->cursor.is_initial());
}
op.copy_get(&cop->cursor, get_copy_chunk_size(),
&cop->results.object_size, &cop->results.mtime,
} else {
// we should learn the version after the first chunk, if we didn't know
// it already!
- assert(cop->cursor.is_initial());
+ ceph_assert(cop->cursor.is_initial());
}
op.set_last_op_flags(cop->src_obj_fadvise_flags);
if (r < 0)
goto out;
- assert(cop->rval >= 0);
+ ceph_assert(cop->rval >= 0);
if (oid.snap < CEPH_NOSNAP && !cop->results.snaps.empty()) {
// verify snap hasn't been deleted
}
}
- assert(cop->rval >= 0);
+ ceph_assert(cop->rval >= 0);
if (!cop->temp_cursor.data_complete) {
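// fold this chunk into the running crc32c of the copied data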
cop->results.data_digest = cop->data.crc32c(cop->results.data_digest);
if (!cop->cursor.is_complete()) {
// write out what we have so far
if (cop->temp_cursor.is_initial()) {
- assert(!cop->results.started_temp_obj);
+ ceph_assert(!cop->results.started_temp_obj);
cop->results.started_temp_obj = true;
cop->results.temp_oid = generate_temp_object(oid);
dout(20) << __func__ << " using temp " << cop->results.temp_oid << dendl;
t->create(cop->results.temp_oid);
}
if (!cop->temp_cursor.data_complete) {
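// the received bytes must land exactly at the temp cursor: no gaps, no overlap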
- assert(cop->data.length() + cop->temp_cursor.data_offset ==
+ ceph_assert(cop->data.length() + cop->temp_cursor.data_offset ==
cop->cursor.data_offset);
if (pool.info.required_alignment() &&
!cop->cursor.data_complete) {
* Trim off the unaligned bit at the end, we'll adjust cursor.data_offset
* to pick it up on the next pass.
*/
- assert(cop->temp_cursor.data_offset %
+ ceph_assert(cop->temp_cursor.data_offset %
pool.info.required_alignment() == 0);
if (cop->data.length() % pool.info.required_alignment() != 0) {
uint64_t to_trim =
bl.substr_of(cop->data, 0, cop->data.length() - to_trim);
cop->data.swap(bl);
cop->cursor.data_offset -= to_trim;
- assert(cop->data.length() + cop->temp_cursor.data_offset ==
+ ceph_assert(cop->data.length() + cop->temp_cursor.data_offset ==
cop->cursor.data_offset);
}
}
}
}
} else {
- assert(cop->omap_header.length() == 0);
- assert(cop->omap_data.length() == 0);
+ ceph_assert(cop->omap_header.length() == 0);
+ ceph_assert(cop->omap_data.length() == 0);
}
cop->temp_cursor = cop->cursor;
}
if (r < 0 && results->started_temp_obj) {
dout(10) << __func__ << " abort; will clean up partial work" << dendl;
ObjectContextRef tempobc = get_object_context(results->temp_oid, false);
- assert(tempobc);
+ ceph_assert(tempobc);
OpContextUPtr ctx = simple_opc_create(tempobc);
ctx->op_t->remove(results->temp_oid);
simple_opc_submit(std::move(ctx));
<< dendl;
hobject_t head(soid.get_head());
ObjectContextRef obc = get_object_context(head, false);
- assert(obc);
+ ceph_assert(obc);
OpContextUPtr tctx = simple_opc_create(obc);
tctx->at_version = get_next_version();
if (!tctx->lock_manager.take_write_lock(
head,
obc)) {
- assert(0 == "problem!");
+ ceph_assert(0 == "problem!");
}
dout(20) << __func__ << " took lock on obc, " << obc->rwstate << dendl;
bool whiteout = false;
if (r == -ENOENT) {
- assert(soid.snap == CEPH_NOSNAP); // snap case is above
+ ceph_assert(soid.snap == CEPH_NOSNAP); // snap case is above
dout(10) << __func__ << " whiteout " << soid << dendl;
whiteout = true;
}
tctx->new_obs.oi.truncate_size = results->truncate_size;
if (soid.snap != CEPH_NOSNAP) {
- assert(obc->ssc->snapset.clone_snaps.count(soid.snap));
- assert(obc->ssc->snapset.clone_size.count(soid.snap));
- assert(obc->ssc->snapset.clone_size[soid.snap] ==
+ ceph_assert(obc->ssc->snapset.clone_snaps.count(soid.snap));
+ ceph_assert(obc->ssc->snapset.clone_size.count(soid.snap));
+ ceph_assert(obc->ssc->snapset.clone_size[soid.snap] ==
results->object_size);
- assert(obc->ssc->snapset.clone_overlap.count(soid.snap));
+ ceph_assert(obc->ssc->snapset.clone_overlap.count(soid.snap));
tctx->delta_stats.num_bytes += obc->ssc->snapset.get_clone_bytes(soid.snap);
} else {
}
if (results->mirror_snapset) {
- assert(tctx->new_obs.oi.soid.snap == CEPH_NOSNAP);
+ ceph_assert(tctx->new_obs.oi.soid.snap == CEPH_NOSNAP);
tctx->new_snapset.from_snap_set(
results->snapset,
get_osdmap()->require_osd_release < CEPH_RELEASE_LUMINOUS);
if (!tctx->lock_manager.take_write_lock(
obc->obs.oi.soid,
obc)) {
- assert(0 == "problem!");
+ ceph_assert(0 == "problem!");
}
dout(20) << __func__ << " took lock on obc, " << obc->rwstate << dendl;
if (p != snapset.clones.rend()) {
hobject_t next = soid;
next.snap = *p;
- assert(next.snap < soid.snap);
+ ceph_assert(next.snap < soid.snap);
if (pg_log.get_missing().is_missing(next)) {
dout(10) << __func__ << " missing clone is " << next << dendl;
if (pmissing)
} else {
snapid_t min_included_snap;
auto p = snapset.clone_snaps.find(soid.snap);
- assert(p != snapset.clone_snaps.end());
+ ceph_assert(p != snapset.clone_snaps.end());
min_included_snap = p->second.back();
snapc = snapset.get_ssc_as_of(min_included_snap - 1);
}
oid,
obc,
op);
- assert(!locked);
+ ceph_assert(!locked);
}
close_op_ctx(ctx.release());
return -EAGAIN; // will retry
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY);
--ctx->delta_stats.num_objects_dirty;
if (fop->obc->obs.oi.has_manifest()) {
- assert(obc->obs.oi.manifest.is_chunked());
+ ceph_assert(obc->obs.oi.manifest.is_chunked());
PGTransaction* t = ctx->op_t.get();
uint64_t chunks_size = 0;
for (auto &p : ctx->new_obs.oi.manifest.chunk_map) {
void PrimaryLogPG::op_applied(const eversion_t &applied_version)
{
dout(10) << "op_applied version " << applied_version << dendl;
- assert(applied_version != eversion_t());
- assert(applied_version <= info.last_update);
+ ceph_assert(applied_version != eversion_t());
+ ceph_assert(applied_version <= info.last_update);
last_update_applied = applied_version;
if (is_primary()) {
if (scrubber.active) {
requeue_scrub(ops_blocked_by_scrub());
}
} else {
- assert(scrubber.start == scrubber.end);
+ ceph_assert(scrubber.start == scrubber.end);
}
}
}
// send dup commits, in order
auto it = waiting_for_ondisk.find(repop->v);
if (it != waiting_for_ondisk.end()) {
- assert(waiting_for_ondisk.begin()->first == repop->v);
+ ceph_assert(waiting_for_ondisk.begin()->first == repop->v);
for (list<pair<OpRequestRef, version_t> >::iterator i =
it->second.begin();
i != it->second.end();
calc_min_last_complete_ondisk();
dout(10) << " removing " << *repop << dendl;
- assert(!repop_queue.empty());
+ ceph_assert(!repop_queue.empty());
dout(20) << " q front is " << *repop_queue.front() << dendl;
if (repop_queue.front() == repop) {
RepGather *to_remove = nullptr;
Context *on_all_commit = new C_OSD_RepopCommit(this, repop);
if (!(ctx->log.empty())) {
- assert(ctx->at_version >= projected_last_update);
+ ceph_assert(ctx->at_version >= projected_last_update);
projected_last_update = ctx->at_version;
}
for (auto &&entry: ctx->log) {
int r)
{
dout(10) << __func__ << " " << entries << dendl;
- assert(is_primary());
+ ceph_assert(is_primary());
eversion_t version;
if (!entries.empty()) {
- assert(entries.rbegin()->version >= projected_last_update);
+ ceph_assert(entries.rbegin()->version >= projected_last_update);
version = projected_last_update = entries.rbegin()->version;
}
++i) {
pg_shard_t peer(*i);
if (peer == pg_whoami) continue;
- assert(peer_missing.count(peer));
- assert(peer_info.count(peer));
+ ceph_assert(peer_missing.count(peer));
+ ceph_assert(peer_info.count(peer));
if (get_osdmap()->require_osd_release >= CEPH_RELEASE_JEWEL) {
- assert(repop);
+ ceph_assert(repop);
MOSDPGUpdateLogMissing *m = new MOSDPGUpdateLogMissing(
entries,
spg_t(info.pgid.pgid, i->shard),
pg->lock();
if (!pg->pg_has_reset_since(epoch)) {
auto it = pg->log_entry_update_waiting_on.find(rep_tid);
- assert(it != pg->log_entry_update_waiting_on.end());
+ ceph_assert(it != pg->log_entry_update_waiting_on.end());
auto it2 = it->second.waiting_on.find(pg->pg_whoami);
- assert(it2 != it->second.waiting_on.end());
+ ceph_assert(it2 != it->second.waiting_on.end());
it->second.waiting_on.erase(it2);
if (it->second.waiting_on.empty()) {
pg->repop_all_committed(it->second.repop.get());
t.register_on_commit(
new OnComplete{this, rep_tid, get_osdmap()->get_epoch()});
int r = osd->store->queue_transaction(ch, std::move(t), NULL);
- assert(r == 0);
+ ceph_assert(r == 0);
op_applied(info.last_update);
});
dout(30) << "watch: Check entity_addr_t " << ea << dendl;
if (get_osdmap()->is_blacklisted(ea)) {
dout(10) << "watch: Found blacklisted watcher for " << ea << dendl;
- assert(j->second->get_pg() == this);
+ ceph_assert(j->second->get_pg() == this);
j->second->unregister_cb();
handle_watch_timeout(j->second);
}
void PrimaryLogPG::populate_obc_watchers(ObjectContextRef obc)
{
- assert(is_active());
+ ceph_assert(is_active());
auto it_objects = pg_log.get_log().objects.find(obc->obs.oi.soid);
- assert((recovering.count(obc->obs.oi.soid) ||
+ ceph_assert((recovering.count(obc->obs.oi.soid) ||
!is_missing_object(obc->obs.oi.soid)) ||
(it_objects != pg_log.get_log().objects.end() && // or this is a revert... see recover_primary()
it_objects->second->op ==
obc->obs.oi.version));
dout(10) << "populate_obc_watchers " << obc->obs.oi.soid << dendl;
- assert(obc->watchers.empty());
+ ceph_assert(obc->watchers.empty());
// populate unconnected_watchers
for (map<pair<uint64_t, entity_name_t>, watch_info_t>::iterator p =
obc->obs.oi.watchers.begin();
SnapSetContext *ssc)
{
ObjectContextRef obc(object_contexts.lookup_or_create(oi.soid));
- assert(obc->destructor_callback == NULL);
+ ceph_assert(obc->destructor_callback == NULL);
obc->destructor_callback = new C_PG_ObjectContext(this, obc.get());
obc->obs.oi = oi;
obc->obs.exists = false;
const map<string, bufferlist> *attrs)
{
auto it_objects = pg_log.get_log().objects.find(soid);
- assert(
+ ceph_assert(
attrs || !pg_log.get_missing().is_missing(soid) ||
// or this is a revert... see recover_primary()
(it_objects != pg_log.get_log().objects.end() &&
bufferlist bv;
if (attrs) {
auto it_oi = attrs->find(OI_ATTR);
- assert(it_oi != attrs->end());
+ ceph_assert(it_oi != attrs->end());
bv = it_oi->second;
} else {
int r = pgbackend->objects_get_attr(soid, OI_ATTR, &bv);
object_info_t oi(soid);
SnapSetContext *ssc = get_snapset_context(
soid, true, 0, false);
- assert(ssc);
+ ceph_assert(ssc);
obc = create_object_context(oi, ssc);
dout(10) << __func__ << ": " << obc << " " << soid
<< " " << obc->rwstate
return ObjectContextRef(); // -ENOENT!
}
- assert(oi.soid.pool == (int64_t)info.pgid.pool());
+ ceph_assert(oi.soid.pool == (int64_t)info.pgid.pool());
obc = object_contexts.lookup_or_create(oi.soid);
obc->destructor_callback = new C_PG_ObjectContext(this, obc.get());
int r = pgbackend->objects_get_attrs(
soid,
&obc->attr_cache);
- assert(r == 0);
+ ceph_assert(r == 0);
}
}
hobject_t *pmissing)
{
FUNCTRACE(cct);
- assert(oid.pool == static_cast<int64_t>(info.pgid.pool()));
+ ceph_assert(oid.pool == static_cast<int64_t>(info.pgid.pool()));
// want the head?
if (oid.snap == CEPH_NOSNAP) {
ObjectContextRef obc = get_object_context(oid, can_create);
if (!obc->ssc)
obc->ssc = ssc;
else {
- assert(ssc == obc->ssc);
+ ceph_assert(ssc == obc->ssc);
put_snapset_context(ssc);
}
*pobc = obc;
if (!obc->ssc) {
obc->ssc = ssc;
} else {
- assert(obc->ssc == ssc);
+ ceph_assert(obc->ssc == ssc);
put_snapset_context(ssc);
}
ssc = 0;
<< dendl;
snapid_t first, last;
auto p = obc->ssc->snapset.clone_snaps.find(soid.snap);
- assert(p != obc->ssc->snapset.clone_snaps.end());
+ ceph_assert(p != obc->ssc->snapset.clone_snaps.end());
if (p->second.empty()) {
dout(1) << __func__ << " " << soid << " empty snapset -- DNE" << dendl;
- assert(!cct->_conf->osd_debug_verify_snaps);
+ ceph_assert(!cct->_conf->osd_debug_verify_snaps);
return -ENOENT;
}
first = p->second.back();
object_info_t& oi = obc->obs.oi;
dout(10) << __func__ << " " << oi.soid << dendl;
- assert(!oi.soid.is_snapdir());
+ ceph_assert(!oi.soid.is_snapdir());
object_stat_sum_t stat;
stat.num_objects++;
if (!obc->ssc)
obc->ssc = get_snapset_context(oi.soid, false);
- assert(obc->ssc);
+ ceph_assert(obc->ssc);
stat.num_bytes += obc->ssc->snapset.get_clone_bytes(oi.soid.snap);
} else {
stat.num_bytes += oi.size;
map<hobject_t, ObjectContextRef>::iterator i =
objects_blocked_on_snap_promotion.find(obc->obs.oi.soid.get_head());
if (i != objects_blocked_on_snap_promotion.end()) {
- assert(i->second == obc);
+ ceph_assert(i->second == obc);
objects_blocked_on_snap_promotion.erase(i);
}
return NULL;
} else {
auto it_ss = attrs->find(SS_ATTR);
- assert(it_ss != attrs->end());
+ ceph_assert(it_ss != attrs->end());
bv = it_ss->second;
}
ssc = new SnapSetContext(oid.get_snapdir());
ssc->exists = false;
}
}
- assert(ssc);
+ ceph_assert(ssc);
ssc->ref++;
return ssc;
}
if (missing_loc.is_deleted(soid)) {
start_recovery_op(soid);
- assert(!recovering.count(soid));
+ ceph_assert(!recovering.count(soid));
recovering.insert(make_pair(soid, ObjectContextRef()));
epoch_t cur_epoch = get_osdmap()->get_epoch();
remove_missing_object(soid, v, new FunctionContext(
head,
false,
0);
- assert(head_obc);
+ ceph_assert(head_obc);
}
start_recovery_op(soid);
- assert(!recovering.count(soid));
+ ceph_assert(!recovering.count(soid));
recovering.insert(make_pair(soid, obc));
int r = pgbackend->recover_object(
soid,
obc,
h);
// This is only a pull, which shouldn't return an error
- assert(r >= 0);
+ ceph_assert(r >= 0);
return PULL_YES;
}
eversion_t v, Context *on_complete)
{
dout(20) << __func__ << " " << soid << " " << v << dendl;
- assert(on_complete != nullptr);
+ ceph_assert(on_complete != nullptr);
// delete locally
ObjectStore::Transaction t;
remove_snap_mapped_object(t, soid);
on_local_recover(soid, recovery_info, ObjectContextRef(), true, &t2);
t2.register_on_complete(on_complete);
int r = osd->store->queue_transaction(ch, std::move(t2), nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
unlock();
} else {
unlock();
}
}));
int r = osd->store->queue_transaction(ch, std::move(t), nullptr);
- assert(r == 0);
+ ceph_assert(r == 0);
}
void PrimaryLogPG::finish_degraded_object(const hobject_t& oid)
if (obc) {
dout(20) << "obc = " << *obc << dendl;
}
- assert(active_pushes >= 1);
+ ceph_assert(active_pushes >= 1);
--active_pushes;
// requeue an active chunky scrub waiting on recovery ops
void PrimaryLogPG::_applied_recovered_object_replica()
{
dout(20) << __func__ << dendl;
- assert(active_pushes >= 1);
+ ceph_assert(active_pushes >= 1);
--active_pushes;
// requeue an active chunky scrub waiting on recovery ops
<< " log.complete_to at end" << dendl;
//below is not true in the repair case.
//assert(missing.num_missing() == 0); // otherwise, complete_to was wrong.
- assert(info.last_complete == info.last_update);
+ ceph_assert(info.last_complete == info.last_update);
}
}
void PrimaryLogPG::failed_push(const list<pg_shard_t> &from, const hobject_t &soid)
{
dout(20) << __func__ << ": " << soid << dendl;
- assert(recovering.count(soid));
+ ceph_assert(recovering.count(soid));
auto obc = recovering[soid];
if (obc) {
list<OpRequestRef> blocked_ops;
eversion_t v;
pg_missing_item pmi;
bool is_missing = pg_log.get_missing().is_missing(oid, &pmi);
- assert(is_missing);
+ ceph_assert(is_missing);
v = pmi.have;
dout(10) << "pick_newest_available " << oid << " " << v << " on osd." << osd->whoami << " (local)" << dendl;
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
{
const MOSDPGUpdateLogMissing *m = static_cast<const MOSDPGUpdateLogMissing*>(
op->get_req());
- assert(m->get_type() == MSG_OSD_PG_UPDATE_LOG_MISSING);
+ ceph_assert(m->get_type() == MSG_OSD_PG_UPDATE_LOG_MISSING);
ObjectStore::Transaction t;
boost::optional<eversion_t> op_trim_to, op_roll_forward_to;
if (m->pg_trim_to != eversion_t())
ch,
std::move(t),
nullptr);
- assert(tr == 0);
+ ceph_assert(tr == 0);
op_applied(info.last_update);
}
switch (what) {
case pg_log_entry_t::LOST_MARK:
- assert(0 == "actually, not implemented yet!");
+ ceph_assert(0 == "actually, not implemented yet!");
break;
case pg_log_entry_t::LOST_REVERT:
void PrimaryLogPG::_split_into(pg_t child_pgid, PG *child, unsigned split_bits)
{
- assert(repop_queue.empty());
+ ceph_assert(repop_queue.empty());
}
/*
remove_repop(repop);
}
- assert(repop_queue.empty());
+ ceph_assert(repop_queue.empty());
if (requeue) {
requeue_ops(rq);
<< i->first << dendl;
}
}
- assert(waiting_for_ondisk.empty());
+ ceph_assert(waiting_for_ondisk.empty());
}
}
void PrimaryLogPG::on_flushed()
{
- assert(flushes_in_progress > 0);
+ ceph_assert(flushes_in_progress > 0);
flushes_in_progress--;
if (flushes_in_progress == 0) {
requeue_ops(waiting_for_flush);
while (object_contexts.get_next(i.first, &i)) {
derr << __func__ << ": object " << i.first << " obc still alive" << dendl;
}
- assert(object_contexts.empty());
+ ceph_assert(object_contexts.empty());
}
}
if (!backfill_targets.empty()) {
last_backfill_started = earliest_backfill();
new_backfill = true;
- assert(!last_backfill_started.is_max());
+ ceph_assert(!last_backfill_started.is_max());
dout(5) << __func__ << ": bft=" << backfill_targets
<< " from " << last_backfill_started << dendl;
for (set<pg_shard_t>::iterator i = backfill_targets.begin();
get_osdmap()->test_flag(CEPH_OSDMAP_RECOVERY_DELETES)) {
pg_log.rebuild_missing_set_with_deletes(osd->store, ch, info);
}
- assert(pg_log.get_missing().may_include_deletes == get_osdmap()->test_flag(CEPH_OSDMAP_RECOVERY_DELETES));
+ ceph_assert(pg_log.get_missing().may_include_deletes == get_osdmap()->test_flag(CEPH_OSDMAP_RECOVERY_DELETES));
}
void PrimaryLogPG::on_change(ObjectStore::Transaction *t)
) {
finish_degraded_object((i++)->first);
}
- assert(callbacks_for_degraded_object.empty());
+ ceph_assert(callbacks_for_degraded_object.empty());
if (is_primary()) {
requeue_ops(waiting_for_cache_not_full);
object_contexts.clear();
// should have been cleared above by finishing all of the degraded objects
- assert(objects_blocked_on_degraded_snap.empty());
+ ceph_assert(objects_blocked_on_degraded_snap.empty());
}
void PrimaryLogPG::on_role_change()
last_backfill_started = hobject_t();
set<hobject_t>::iterator i = backfills_in_flight.begin();
while (i != backfills_in_flight.end()) {
- assert(recovering.count(*i));
+ ceph_assert(recovering.count(*i));
backfills_in_flight.erase(i++);
}
requeue_ops(blocked_ops);
}
}
- assert(backfills_in_flight.empty());
+ ceph_assert(backfills_in_flight.empty());
pending_backfill_updates.clear();
- assert(recovering.empty());
+ ceph_assert(recovering.empty());
pgbackend->clear_recovery_state();
}
void PrimaryLogPG::cancel_pull(const hobject_t &soid)
{
dout(20) << __func__ << ": " << soid << dendl;
- assert(recovering.count(soid));
+ ceph_assert(recovering.count(soid));
ObjectContextRef obc = recovering[soid];
if (obc) {
list<OpRequestRef> blocked_ops;
started = 0;
bool work_in_progress = false;
bool recovery_started = false;
- assert(is_primary());
- assert(is_peered());
- assert(!is_deleting());
+ ceph_assert(is_primary());
+ ceph_assert(is_peered());
+ ceph_assert(!is_deleting());
- assert(recovery_queued);
+ ceph_assert(recovery_queued);
recovery_queued = false;
if (!state_test(PG_STATE_RECOVERING) &&
work_in_progress || recovery_ops_active > 0 || deferred_backfill)
return !work_in_progress && have_unfound();
- assert(recovering.empty());
- assert(recovery_ops_active == 0);
+ ceph_assert(recovering.empty());
+ ceph_assert(recovery_ops_active == 0);
dout(10) << __func__ << " needs_recovery: "
<< missing_loc.get_needs_recovery()
*/
uint64_t PrimaryLogPG::recover_primary(uint64_t max, ThreadPool::TPHandle &handle)
{
- assert(is_primary());
+ ceph_assert(is_primary());
const auto &missing = pg_log.get_missing();
auto it_objects = pg_log.get_log().objects.find(p->second);
if (it_objects != pg_log.get_log().objects.end()) {
latest = it_objects->second;
- assert(latest->is_update() || latest->is_delete());
+ ceph_assert(latest->is_update() || latest->is_delete());
soid = latest->soid;
} else {
latest = 0;
obc->obs.oi.encode(
b2,
get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
- assert(!pool.info.require_rollback());
+ ceph_assert(!pool.info.require_rollback());
t.setattr(coll, ghobject_t(soid), OI_ATTR, b2);
recover_got(soid, latest->version);
pg_log.set_last_requested(0);
missing_loc.remove_location(soid, pg_whoami);
bool uhoh = true;
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
for (set<pg_shard_t>::iterator i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
PGBackend::RecoveryHandle *h,
bool *work_started)
{
- assert(is_primary());
+ ceph_assert(is_primary());
dout(10) << __func__ << ": on " << soid << dendl;
ObjectContextRef obc = get_object_context(soid, false);
}
start_recovery_op(soid);
- assert(!recovering.count(soid));
+ ceph_assert(!recovering.count(soid));
if (!obc)
recovering.insert(make_pair(soid, ObjectContextRef()));
else
PGBackend::RecoveryHandle *h,
bool *work_started)
{
- assert(is_primary());
+ ceph_assert(is_primary());
dout(10) << __func__ << ": on " << soid << dendl;
// NOTE: we know we will get a valid oloc off of disk here.
}
start_recovery_op(soid);
- assert(!recovering.count(soid));
+ ceph_assert(!recovering.count(soid));
recovering.insert(make_pair(soid, obc));
/* We need this in case there is an in progress write on the object. In fact,
PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op();
// this is FAR from an optimal recovery order. pretty lame, really.
- assert(!acting_recovery_backfill.empty());
+ ceph_assert(!acting_recovery_backfill.empty());
// choose replicas to recover, replica has the shortest missing list first
// so we can bring it back to normal ASAP
std::vector<std::pair<unsigned int, pg_shard_t>> replicas_by_num_missing,
continue;
}
auto pm = peer_missing.find(p);
- assert(pm != peer_missing.end());
+ ceph_assert(pm != peer_missing.end());
auto nm = pm->second.num_missing();
if (nm != 0) {
if (async_recovery_targets.count(p)) {
async_by_num_missing.begin(), async_by_num_missing.end());
for (auto &replica: replicas_by_num_missing) {
pg_shard_t &peer = replica.second;
- assert(peer != get_primary());
+ ceph_assert(peer != get_primary());
map<pg_shard_t, pg_missing_t>::const_iterator pm = peer_missing.find(peer);
- assert(pm != peer_missing.end());
+ ceph_assert(pm != peer_missing.end());
map<pg_shard_t, pg_info_t>::const_iterator pi = peer_info.find(peer);
- assert(pi != peer_info.end());
+ ceph_assert(pi != peer_info.end());
size_t m_sz = pm->second.num_missing();
dout(10) << " peer osd." << peer << " missing " << m_sz << " objects." << dendl;
pg_shard_t peer = *i;
map<pg_shard_t, BackfillInterval>::const_iterator iter =
peer_backfill_info.find(peer);
- assert(iter != peer_backfill_info.end());
+ ceph_assert(iter != peer_backfill_info.end());
if (iter->second.begin < e)
e = iter->second.begin;
}
bool PrimaryLogPG::all_peer_done() const
{
// Primary hasn't got any more objects
- assert(backfill_info.empty());
+ ceph_assert(backfill_info.empty());
for (set<pg_shard_t>::const_iterator i = backfill_targets.begin();
i != backfill_targets.end();
pg_shard_t bt = *i;
map<pg_shard_t, BackfillInterval>::const_iterator piter =
peer_backfill_info.find(bt);
- assert(piter != peer_backfill_info.end());
+ ceph_assert(piter != peer_backfill_info.end());
const BackfillInterval& pbi = piter->second;
// See if peer has more to process
if (!pbi.extends_to_end() || !pbi.empty())
<< " last_backfill_started " << last_backfill_started
<< (new_backfill ? " new_backfill":"")
<< dendl;
- assert(!backfill_targets.empty());
+ ceph_assert(!backfill_targets.empty());
// Initialize from prior backfill state
if (new_backfill) {
// on_activate() was called prior to getting here
- assert(last_backfill_started == earliest_backfill());
+ ceph_assert(last_backfill_started == earliest_backfill());
new_backfill = false;
// initialize BackfillIntervals
spg_t(info.pgid.pgid, bt.shard),
pbi.end, hobject_t());
osd->send_message_osd_cluster(bt.osd, m, get_osdmap()->get_epoch());
- assert(waiting_on_backfill.find(bt) == waiting_on_backfill.end());
+ ceph_assert(waiting_on_backfill.find(bt) == waiting_on_backfill.end());
waiting_on_backfill.insert(bt);
sent_scan = true;
}
if (pbi.begin == check)
check_targets.insert(bt);
}
- assert(!check_targets.empty());
+ ceph_assert(!check_targets.empty());
dout(20) << " BACKFILL removing " << check
<< " from peers " << check_targets << dendl;
++i) {
pg_shard_t bt = *i;
BackfillInterval& pbi = peer_backfill_info[bt];
- assert(pbi.begin == check);
+ ceph_assert(pbi.begin == check);
to_remove.push_back(boost::make_tuple(check, pbi.objects.begin()->second, bt));
pbi.pop_front();
}
if (!need_ver_targs.empty() || !missing_targs.empty()) {
ObjectContextRef obc = get_object_context(backfill_info.begin, false);
- assert(obc);
+ ceph_assert(obc);
if (obc->get_recovery_read()) {
if (!need_ver_targs.empty()) {
dout(20) << " BACKFILL replacing " << check
i != add_to_stat.end();
++i) {
ObjectContextRef obc = get_object_context(*i, false);
- assert(obc);
+ ceph_assert(obc);
pg_stat_t stat;
add_object_context_to_pg_stat(obc, &stat);
pending_backfill_updates[*i] = stat;
i->first < next_backfill_to_complete;
pending_backfill_updates.erase(i++)) {
dout(20) << " pending_backfill_update " << i->first << dendl;
- assert(i->first > new_last_backfill);
+ ceph_assert(i->first > new_last_backfill);
for (set<pg_shard_t>::iterator j = backfill_targets.begin();
j != backfill_targets.end();
++j) {
}
dout(10) << "possible new_last_backfill at " << new_last_backfill << dendl;
- assert(!pending_backfill_updates.empty() ||
+ ceph_assert(!pending_backfill_updates.empty() ||
new_last_backfill == last_backfill_started);
if (pending_backfill_updates.empty() &&
backfill_pos.is_max()) {
- assert(backfills_in_flight.empty());
+ ceph_assert(backfills_in_flight.empty());
new_last_backfill = backfill_pos;
last_backfill_started = backfill_pos;
}
PGBackend::RecoveryHandle *h)
{
dout(10) << __func__ << " " << oid << " v " << v << " to peers " << peers << dendl;
- assert(!peers.empty());
+ ceph_assert(!peers.empty());
backfills_in_flight.insert(oid);
for (unsigned int i = 0 ; i < peers.size(); ++i) {
map<pg_shard_t, pg_missing_t>::iterator bpm = peer_missing.find(peers[i]);
- assert(bpm != peer_missing.end());
+ ceph_assert(bpm != peer_missing.end());
bpm->second.add(oid, eversion_t(), eversion_t(), false);
}
- assert(!recovering.count(oid));
+ ceph_assert(!recovering.count(oid));
start_recovery_op(oid);
recovering.insert(make_pair(oid, obc));
if (bi->version >= projected_last_update) {
dout(10) << __func__ << ": bi is current " << dendl;
- assert(bi->version == projected_last_update);
+ ceph_assert(bi->version == projected_last_update);
} else if (bi->version >= info.log_tail) {
if (pg_log.get_log().empty() && projected_log.empty()) {
/* Because we don't move log_tail on split, the log might be
* eversion_t(), because otherwise the entry which changed
* last_update since the last scan would have to be present.
*/
- assert(bi->version == eversion_t());
+ ceph_assert(bi->version == eversion_t());
return;
}
projected_log.scan_log_after(bi->version, func);
bi->version = projected_last_update;
} else {
- assert(0 == "scan_range should have raised bi->version past log_tail");
+ ceph_assert(0 == "scan_range should have raised bi->version past log_tail");
}
}
int min, int max, BackfillInterval *bi,
ThreadPool::TPHandle &handle)
{
- assert(is_locked());
+ ceph_assert(is_locked());
dout(10) << "scan_range from " << bi->begin << dendl;
bi->clear_objects();
vector<hobject_t> ls;
ls.reserve(max);
int r = pgbackend->objects_list_partial(bi->begin, min, max, &ls, &bi->end);
- assert(r >= 0);
+ ceph_assert(r >= 0);
dout(10) << " got " << ls.size() << " items, next " << bi->end << dendl;
dout(20) << ls << dendl;
if (r == -ENOENT)
continue;
- assert(r >= 0);
+ ceph_assert(r >= 0);
object_info_t oi(bl);
bi->objects[*p] = oi.version;
dout(20) << " " << *p << " " << oi.version << dendl;
{
dout(10) << __func__ << dendl;
- assert(info.last_update >= pg_log.get_tail()); // otherwise we need some help!
+ ceph_assert(info.last_update >= pg_log.get_tail()); // otherwise we need some help!
if (!cct->_conf->osd_debug_verify_stray_on_activate)
return;
if (r != -ENOENT) {
derr << __func__ << " " << p->soid << " exists, but should have been "
<< "deleted" << dendl;
- assert(0 == "erroneously present object");
+ ceph_assert(0 == "erroneously present object");
}
} else {
// ignore old(+missing) objects
if (!info.hit_set.history.empty()) {
list<pg_hit_set_info_t>::reverse_iterator p = info.hit_set.history.rbegin();
- assert(p != info.hit_set.history.rend());
+ ceph_assert(p != info.hit_set.history.rend());
hobject_t oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
- assert(!is_degraded_or_backfilling_object(oid));
+ ceph_assert(!is_degraded_or_backfilling_object(oid));
ObjectContextRef obc = get_object_context(oid, false);
- assert(obc);
+ ceph_assert(obc);
OpContextUPtr ctx = simple_opc_create(obc);
ctx->at_version = get_next_version();
for (set<pg_shard_t>::iterator p = backfill_targets.begin();
p != backfill_targets.end();
++p) {
- assert(peer_info.count(*p));
+ ceph_assert(peer_info.count(*p));
const pg_info_t& pi = peer_info[*p];
if (pi.last_backfill == hobject_t() ||
pi.last_backfill.get_hash() == info.pgid.ps()) {
void PrimaryLogPG::hit_set_trim(OpContextUPtr &ctx, unsigned max)
{
- assert(ctx->updated_hset_history);
+ ceph_assert(ctx->updated_hset_history);
pg_hit_set_history_t &updated_hit_set_hist =
*(ctx->updated_hset_history);
for (unsigned num = updated_hit_set_hist.history.size(); num > max; --num) {
list<pg_hit_set_info_t>::iterator p = updated_hit_set_hist.history.begin();
- assert(p != updated_hit_set_hist.history.end());
+ ceph_assert(p != updated_hit_set_hist.history.end());
hobject_t oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
- assert(!is_degraded_or_backfilling_object(oid));
+ ceph_assert(!is_degraded_or_backfilling_object(oid));
dout(20) << __func__ << " removing " << oid << dendl;
++ctx->at_version.version;
updated_hit_set_hist.history.pop_front();
ObjectContextRef obc = get_object_context(oid, false);
- assert(obc);
+ ceph_assert(obc);
--ctx->delta_stats.num_objects;
--ctx->delta_stats.num_objects_hit_set_archive;
ctx->delta_stats.num_bytes -= obc->obs.oi.size;
void PrimaryLogPG::agent_setup()
{
- assert(is_locked());
+ ceph_assert(is_locked());
if (!is_active() ||
!is_primary() ||
pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE ||
return true;
}
- assert(!deleting);
+ ceph_assert(!deleting);
if (agent_state->is_idle()) {
dout(10) << __func__ << " idle, stopping" << dendl;
<< ", evict " << agent_state->get_evict_mode_name()
<< ", pos " << agent_state->position
<< dendl;
- assert(is_primary());
- assert(is_active());
+ ceph_assert(is_primary());
+ ceph_assert(is_active());
agent_load_hit_sets();
const pg_pool_t *base_pool = get_osdmap()->get_pg_pool(pool.info.tier_of);
- assert(base_pool);
+ ceph_assert(base_pool);
int ls_min = 1;
int ls_max = cct->_conf->osd_pool_default_cache_max_evict_check_size;
hobject_t next;
int r = pgbackend->objects_list_partial(agent_state->position, ls_min, ls_max,
&ls, &next);
- assert(r >= 0);
+ ceph_assert(r >= 0);
dout(20) << __func__ << " got " << ls.size() << " objects" << dendl;
int started = 0;
for (vector<hobject_t>::iterator p = ls.begin();
hit_set_in_memory_trim(pool.info.hit_set_count);
if (need_delay) {
- assert(agent_state->delaying == false);
+ ceph_assert(agent_state->delaying == false);
agent_delay();
unlock();
return false;
bufferlist bl;
{
int r = osd->store->read(ch, ghobject_t(oid), 0, 0, bl);
- assert(r >= 0);
+ ceph_assert(r >= 0);
}
HitSetRef hs(new HitSet);
bufferlist::const_iterator pbl = bl.begin();
});
ctx->at_version = get_next_version();
- assert(ctx->new_obs.exists);
+ ceph_assert(ctx->new_obs.exists);
int r = _delete_oid(ctx.get(), true, false);
if (obc->obs.oi.is_omap())
ctx->delta_stats.num_objects_omap--;
ctx->delta_stats.num_evict_kb += shift_round_up(obc->obs.oi.size, 10);
if (obc->obs.oi.is_dirty())
--ctx->delta_stats.num_objects_dirty;
- assert(r == 0);
+ ceph_assert(r == 0);
finish_ctx(ctx.get(), pg_log_entry_t::DELETE);
simple_opc_submit(std::move(ctx));
osd->logger->inc(l_osd_tier_evict);
{
dout(20) << __func__ << dendl;
if (agent_state && !agent_state->is_idle()) {
- assert(agent_state->delaying == false);
+ ceph_assert(agent_state->delaying == false);
agent_state->delaying = true;
osd->agent_disable_pg(this, agent_state->evict_effort);
}
{
uint64_t divisor = pool.info.get_pg_num_divisor(info.pgid.pgid);
- assert(divisor > 0);
+ ceph_assert(divisor > 0);
// adjust (effective) user objects down based on the number
// of HitSet objects, which should not count toward our total since
// also exclude omap objects if ec backing pool
const pg_pool_t *base_pool = get_osdmap()->get_pg_pool(pool.info.tier_of);
- assert(base_pool);
+ ceph_assert(base_pool);
if (!base_pool->supports_omap())
unflushable += info.stats.stats.sum.num_objects_omap;
// quantize effort to avoid too much reordering in the agent_queue.
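// (e.g. osd_agent_quantize_effort = 0.1 yields inc = 100000, so effort is
// rounded down to a multiple of 100000 but never below it)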
uint64_t inc = cct->_conf->osd_agent_quantize_effort * 1000000;
- assert(inc > 0);
+ ceph_assert(inc > 0);
uint64_t was = evict_effort;
evict_effort -= evict_effort % inc;
if (evict_effort < inc)
evict_effort = inc;
- assert(evict_effort >= inc && evict_effort <= 1000000);
+ ceph_assert(evict_effort >= inc && evict_effort <= 1000000);
dout(30) << __func__ << " evict_effort " << was << " quantized by " << inc << " to " << evict_effort << dendl;
}
}
void PrimaryLogPG::agent_estimate_temp(const hobject_t& oid, int *temp)
{
- assert(hit_set);
- assert(temp);
+ ceph_assert(hit_set);
+ ceph_assert(temp);
*temp = 0;
if (hit_set->contains(oid))
*temp = 1000000;
const char *mode,
bool allow_incomplete_clones)
{
- assert(head);
+ ceph_assert(head);
if (allow_incomplete_clones) {
dout(20) << func << " " << mode << " " << pgid << " " << head.get()
<< " skipped " << missing << " clone(s) in cache tier" << dendl;
vector<snapid_t>::reverse_iterator *curclone,
inconsistent_snapset_wrapper &e)
{
- assert(head);
- assert(snapset);
+ ceph_assert(head);
+ ceph_assert(snapset);
unsigned missing = 0;
// NOTE: clones are in descending order, thus the **curclone > target test here
for (map<hobject_t,ScrubMap::object>::reverse_iterator
p = scrubmap.objects.rbegin(); p != scrubmap.objects.rend(); ++p) {
const hobject_t& soid = p->first;
- assert(!soid.is_snapdir());
+ ceph_assert(!soid.is_snapdir());
soid_error = inconsistent_snapset_wrapper{soid};
object_stat_sum_t stat;
boost::optional<object_info_t> oi;
target = all_clones;
} else {
- assert(soid.is_snap());
+ ceph_assert(soid.is_snap());
target = soid.snap;
}
if (doing_clones(snapset, curclone)) {
// A head would have processed all clones above
// or all greater than *curclone.
- assert(soid.is_snap() && *curclone <= soid.snap);
+ ceph_assert(soid.is_snap() && *curclone <= soid.snap);
// After processing above clone snap should match the expected curclone
expected = (*curclone == soid.snap);
}
}
} else {
- assert(soid.is_snap());
- assert(head);
- assert(snapset);
- assert(soid.snap == *curclone);
+ ceph_assert(soid.is_snap());
+ ceph_assert(head);
+ ceph_assert(snapset);
+ ceph_assert(soid.snap == *curclone);
dout(20) << __func__ << " " << mode << " matched clone " << soid << dendl;
scrubber.store->add_snap_error(pool.id, head_error);
for (auto p = missing_digest.begin(); p != missing_digest.end(); ++p) {
- assert(!p->first.is_snapdir());
+ ceph_assert(!p->first.is_snapdir());
dout(10) << __func__ << " recording digests for " << p->first << dendl;
ObjectContextRef obc = get_object_context(p->first, false);
if (!obc) {
{
OpRequestRef op = ctx->op;
// Only supports replicated pools
- assert(!pool.info.is_erasure());
- assert(is_primary());
+ ceph_assert(!pool.info.is_erasure());
+ ceph_assert(is_primary());
dout(10) << __func__ << " " << soid
<< " peers osd.{" << acting_recovery_backfill << "}" << dendl;
return -EAGAIN;
}
- assert(!pg_log.get_missing().is_missing(soid));
+ ceph_assert(!pg_log.get_missing().is_missing(soid));
auto& oi = ctx->new_obs.oi;
eversion_t v = oi.version;
if (!eio_errors_to_process) {
eio_errors_to_process = true;
- assert(is_clean());
+ ceph_assert(is_clean());
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
PrimaryLogPGRef pg = context< SnapTrimmer >().pg;
snapid_t snap_to_trim = context<Trimming>().snap_to_trim;
auto &in_flight = context<Trimming>().in_flight;
- assert(in_flight.empty());
+ ceph_assert(in_flight.empty());
- assert(pg->is_primary() && pg->is_active());
+ ceph_assert(pg->is_primary() && pg->is_active());
if (!context< SnapTrimmer >().can_trim()) {
ldout(pg->cct, 10) << "something changed, reverting to NotTrimming" << dendl;
post_event(KickTrim());
if (r != 0 && r != -ENOENT) {
lderr(pg->cct) << "get_next_objects_to_trim returned "
<< cpp_strerror(r) << dendl;
- assert(0 == "get_next_objects_to_trim returned an invalid code");
+ ceph_assert(0 == "get_next_objects_to_trim returned an invalid code");
} else if (r == -ENOENT) {
// Done!
ldout(pg->cct, 10) << "got ENOENT" << dendl;
pg->dirty_big_info = true;
pg->write_if_dirty(t);
int tr = pg->osd->store->queue_transaction(pg->ch, std::move(t), NULL);
- assert(tr == 0);
+ ceph_assert(tr == 0);
pg->share_pg_info();
post_event(KickTrim());
return transit< NotTrimming >();
}
- assert(!to_trim.empty());
+ ceph_assert(!to_trim.empty());
for (auto &&object: to_trim) {
// Get next
in_flight.insert(object);
ctx->register_on_success(
[pg, object, &in_flight]() {
- assert(in_flight.find(object) != in_flight.end());
+ ceph_assert(in_flight.find(object) != in_flight.end());
in_flight.erase(object);
if (in_flight.empty()) {
if (pg->state_test(PG_STATE_SNAPTRIM_ERROR)) {
map<string, bufferlist> *out)
{
int r = 0;
- assert(out);
+ ceph_assert(out);
if (pool.info.is_erasure()) {
*out = obc->attr_cache;
} else {
FlushOp()
: flushed_version(0), objecter_tid(0), rval(0),
blocking(false), removal(false), chunks(0) {}
- ~FlushOp() { assert(!on_flush); }
+ ~FlushOp() { ceph_assert(!on_flush); }
};
typedef std::shared_ptr<FlushOp> FlushOpRef;
}
}
~OpContext() {
- assert(!op_t);
+ ceph_assert(!op_t);
if (reply)
reply->put();
for (list<pair<boost::tuple<uint64_t, uint64_t, unsigned>,
return this;
}
void put() {
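// intrusive refcount: the last put() deletes the object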
- assert(nref > 0);
+ ceph_assert(nref > 0);
if (--nref == 0) {
delete this;
//generic_dout(0) << "deleting " << this << dendl;
} else if (write_ordered) {
ctx->lock_type = ObjectContext::RWState::RWWRITE;
} else {
- assert(ctx->op->may_read());
+ ceph_assert(ctx->op->may_read());
ctx->lock_type = ObjectContext::RWState::RWREAD;
}
if (ctx->head_obc) {
- assert(!ctx->obc->obs.exists);
+ ceph_assert(!ctx->obc->obs.exists);
if (!ctx->lock_manager.get_lock_type(
ctx->lock_type,
ctx->head_obc->obs.oi.soid,
ctx->op)) {
return true;
} else {
- assert(!ctx->head_obc);
+ ceph_assert(!ctx->head_obc);
ctx->lock_type = ObjectContext::RWState::RWNONE;
return false;
}
_register_snapset_context(ssc);
}
void _register_snapset_context(SnapSetContext *ssc) {
- assert(snapset_contexts_lock.is_locked());
+ ceph_assert(snapset_contexts_lock.is_locked());
if (!ssc->registered) {
- assert(snapset_contexts.count(ssc->oid) == 0);
+ ceph_assert(snapset_contexts.count(ssc->oid) == 0);
ssc->registered = true;
snapset_contexts[ssc->oid] = ssc;
}
: my_base(ctx),
NamedState(context< SnapTrimmer >().pg, "Trimming") {
context< SnapTrimmer >().log_enter(state_name);
- assert(context< SnapTrimmer >().can_trim());
- assert(in_flight.empty());
+ ceph_assert(context< SnapTrimmer >().can_trim());
+ ceph_assert(in_flight.empty());
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
: my_base(ctx),
NamedState(context< SnapTrimmer >().pg, "Trimming/WaitTrimTimer") {
context< SnapTrimmer >().log_enter(state_name);
- assert(context<Trimming>().in_flight.empty());
+ ceph_assert(context<Trimming>().in_flight.empty());
struct OnTimer : Context {
PrimaryLogPGRef pg;
epoch_t epoch;
: my_base(ctx),
NamedState(context< SnapTrimmer >().pg, "Trimming/WaitRWLock") {
context< SnapTrimmer >().log_enter(state_name);
- assert(context<Trimming>().in_flight.empty());
+ ceph_assert(context<Trimming>().in_flight.empty());
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
: my_base(ctx),
NamedState(context< SnapTrimmer >().pg, "Trimming/WaitRepops") {
context< SnapTrimmer >().log_enter(state_name);
- assert(!context<Trimming>().in_flight.empty());
+ ceph_assert(!context<Trimming>().in_flight.empty());
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
pg->unlock();
}
void cancel() {
- assert(pg->is_locked());
- assert(!canceled);
+ ceph_assert(pg->is_locked());
+ ceph_assert(!canceled);
canceled = true;
}
};
: my_base(ctx),
NamedState(context< SnapTrimmer >().pg, "Trimming/WaitReservation") {
context< SnapTrimmer >().log_enter(state_name);
- assert(context<Trimming>().in_flight.empty());
+ ceph_assert(context<Trimming>().in_flight.empty());
auto *pg = context< SnapTrimmer >().pg;
pending = new ReservationCB(pg);
pg->osd->snap_reserver.request_reservation(
logger->inc(l_osd_sop_push_inb, inb);
logger->tinc(l_osd_sop_push_lat, latency);
} else
- assert("no support subop" == 0);
+ ceph_assert("no support subop" == 0);
} else {
logger->tinc(l_osd_sop_pull_lat, latency);
}
dout(10) << __func__ << ": " << hoid << dendl;
RPGHandle *h = static_cast<RPGHandle *>(_h);
if (get_parent()->get_local_missing().is_missing(hoid)) {
- assert(!obc);
+ ceph_assert(!obc);
// pull
prepare_pull(
v,
head,
h);
} else {
- assert(obc);
+ ceph_assert(obc);
int started = start_pushes(
hoid,
obc,
Context *on_complete,
bool fast_read)
{
- assert(0 == "async read is not used by replica pool");
+ ceph_assert(0 == "async read is not used by replica pool");
}
class C_OSD_OnOpCommit : public Context {
set<hobject_t> *added,
set<hobject_t> *removed)
{
- assert(t);
- assert(added);
- assert(removed);
+ ceph_assert(t);
+ ceph_assert(added);
+ ceph_assert(removed);
for (auto &&le: log_entries) {
le.mark_unrollbackable();
goid);
},
[&](const PGTransaction::ObjectOperation::Init::Rename &op) {
- assert(op.source.is_temp());
+ ceph_assert(op.source.is_temp());
t->collection_move_rename(
coll,
ghobject_t(
extent.get_len());
},
[&](const BufferUpdate::CloneRange &op) {
- assert(op.len == extent.get_len());
+ ceph_assert(op.len == extent.get_len());
t->clone_range(
coll,
ghobject_t(op.from, ghobject_t::NO_GEN, shard_id_t::NO_SHARD),
&op_t,
&added,
&removed);
- assert(added.size() <= 1);
- assert(removed.size() <= 1);
+ ceph_assert(added.size() <= 1);
+ ceph_assert(removed.size() <= 1);
auto insert_res = in_progress_ops.insert(
make_pair(
orig_op, at_version)
)
);
- assert(insert_res.second);
+ ceph_assert(insert_res.second);
InProgressOp &op = *insert_res.first->second;
op.waiting_for_commit.insert(
if (op->waiting_for_commit.empty()) {
op->on_commit->complete(0);
op->on_commit = 0;
- assert(!op->on_commit);
+ ceph_assert(!op->on_commit);
in_progress_ops.erase(op->tid);
}
}
{
static_cast<MOSDRepOpReply*>(op->get_nonconst_req())->finish_decode();
const MOSDRepOpReply *r = static_cast<const MOSDRepOpReply *>(op->get_req());
- assert(r->get_header().type == MSG_OSD_REPOPREPLY);
+ ceph_assert(r->get_header().type == MSG_OSD_REPOPREPLY);
op->mark_started();
// oh, good.
if (r->ack_type & CEPH_OSD_FLAG_ONDISK) {
- assert(ip_op.waiting_for_commit.count(from));
+ ceph_assert(ip_op.waiting_for_commit.count(from));
ip_op.waiting_for_commit.erase(from);
if (ip_op.op) {
ostringstream ss;
sleeptime.sleep();
}
- assert(poid == pos.ls[pos.pos]);
+ ceph_assert(poid == pos.ls[pos.pos]);
if (!pos.data_done()) {
if (pos.data_pos == 0) {
pos.data_hash = bufferhash(-1);
ch,
ghobject_t(
poid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard));
- assert(iter);
+ ceph_assert(iter);
if (pos.omap_pos.length()) {
iter->lower_bound(pos.omap_pos);
} else {
void ReplicatedBackend::_do_push(OpRequestRef op)
{
const MOSDPGPush *m = static_cast<const MOSDPGPush *>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_PUSH);
+ ceph_assert(m->get_type() == MSG_OSD_PG_PUSH);
pg_shard_t from = m->from;
op->mark_started();
ReplicatedBackend::RPGHandle *h = bc->_open_recovery_op();
for (auto &&i: to_continue) {
auto j = bc->pulling.find(i.hoid);
- assert(j != bc->pulling.end());
+ ceph_assert(j != bc->pulling.end());
ObjectContextRef obc = j->second.obc;
bc->clear_pull(j, false /* already did it */);
int started = bc->start_pushes(i.hoid, obc, h);
void ReplicatedBackend::_do_pull_response(OpRequestRef op)
{
const MOSDPGPush *m = static_cast<const MOSDPGPush *>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_PUSH);
+ ceph_assert(m->get_type() == MSG_OSD_PG_PUSH);
pg_shard_t from = m->from;
op->mark_started();
void ReplicatedBackend::do_pull(OpRequestRef op)
{
MOSDPGPull *m = static_cast<MOSDPGPull *>(op->get_nonconst_req());
- assert(m->get_type() == MSG_OSD_PG_PULL);
+ ceph_assert(m->get_type() == MSG_OSD_PG_PULL);
pg_shard_t from = m->from;
map<pg_shard_t, vector<PushOp> > replies;
void ReplicatedBackend::do_push_reply(OpRequestRef op)
{
const MOSDPGPushReply *m = static_cast<const MOSDPGPushReply *>(op->get_req());
- assert(m->get_type() == MSG_OSD_PG_PUSH_REPLY);
+ ceph_assert(m->get_type() == MSG_OSD_PG_PUSH_REPLY);
pg_shard_t from = m->from;
vector<PushOp> replies(1);
static_cast<MOSDRepOp*>(op->get_nonconst_req())->finish_decode();
const MOSDRepOp *m = static_cast<const MOSDRepOp *>(op->get_req());
int msg_type = m->get_type();
- assert(MSG_OSD_REPOP == msg_type);
+ ceph_assert(MSG_OSD_REPOP == msg_type);
const hobject_t& soid = m->poid;
<< dendl;
// sanity checks
- assert(m->map_epoch >= get_info().history.same_interval_since);
+ ceph_assert(m->map_epoch >= get_info().history.same_interval_since);
dout(30) << __func__ << " missing before " << get_parent()->get_log().get_missing().get_items() << dendl;
parent->maybe_preempt_replica_scrub(soid);
rm->last_complete = get_info().last_complete;
rm->epoch_started = get_osdmap()->get_epoch();
- assert(m->logbl.length());
+ ceph_assert(m->logbl.length());
// shipped transaction and log entries
vector<pg_log_entry_t> log;
// send commit.
const MOSDRepOp *m = static_cast<const MOSDRepOp*>(rm->op->get_req());
- assert(m->get_type() == MSG_OSD_REPOP);
+ ceph_assert(m->get_type() == MSG_OSD_REPOP);
dout(10) << __func__ << " on op " << *m
<< ", sending commit to osd." << rm->ackerosd
<< dendl;
- assert(get_osdmap()->is_up(rm->ackerosd));
+ ceph_assert(get_osdmap()->is_up(rm->ackerosd));
get_parent()->update_last_complete_ondisk(rm->last_complete);
ObjectContextRef headctx,
RPGHandle *h)
{
- assert(get_parent()->get_local_missing().get_items().count(soid));
+ ceph_assert(get_parent()->get_local_missing().get_items().count(soid));
eversion_t _v = get_parent()->get_local_missing().get_items().find(
soid)->second.need;
- assert(_v == v);
+ ceph_assert(_v == v);
const map<hobject_t, set<pg_shard_t>> &missing_loc(
get_parent()->get_missing_loc_shards());
const map<pg_shard_t, pg_missing_t > &peer_missing(
get_parent()->get_shard_missing());
map<hobject_t, set<pg_shard_t>>::const_iterator q = missing_loc.find(soid);
- assert(q != missing_loc.end());
- assert(!q->second.empty());
+ ceph_assert(q != missing_loc.end());
+ ceph_assert(!q->second.empty());
// pick a pullee
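// chosen uniformly at random among the shards known to hold the object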
auto p = q->second.begin();
std::advance(p,
util::generate_random_number<int>(0,
q->second.size() - 1));
- assert(get_osdmap()->is_up(p->osd));
+ ceph_assert(get_osdmap()->is_up(p->osd));
pg_shard_t fromshard = *p;
dout(7) << "pull " << soid
<< " from osd." << fromshard
<< dendl;
- assert(peer_missing.count(fromshard));
+ ceph_assert(peer_missing.count(fromshard));
const pg_missing_t &pmissing = peer_missing.find(fromshard)->second;
if (pmissing.is_missing(soid, v)) {
- assert(pmissing.get_items().find(soid)->second.have != v);
+ ceph_assert(pmissing.get_items().find(soid)->second.have != v);
dout(10) << "pulling soid " << soid << " from osd " << fromshard
<< " at version " << pmissing.get_items().find(soid)->second.have
<< " rather than at version " << v << dendl;
v = pmissing.get_items().find(soid)->second.have;
- assert(get_parent()->get_log().get_log().objects.count(soid) &&
+ ceph_assert(get_parent()->get_log().get_log().objects.count(soid) &&
(get_parent()->get_log().get_log().objects.find(soid)->second->op ==
pg_log_entry_t::LOST_REVERT) &&
(get_parent()->get_log().get_log().objects.find(
ObcLockManager lock_manager;
if (soid.is_snap()) {
- assert(!get_parent()->get_local_missing().is_missing(soid.get_head()));
- assert(headctx);
+ ceph_assert(!get_parent()->get_local_missing().is_missing(soid.get_head()));
+ ceph_assert(headctx);
// check snapset
SnapSetContext *ssc = headctx->ssc;
- assert(ssc);
+ ceph_assert(ssc);
dout(10) << " snapset " << ssc->snapset << dendl;
recovery_info.ss = ssc->snapset;
calc_clone_subsets(
// FIXME: this may overestimate if we are pulling multiple clones in parallel...
dout(10) << " pulling " << recovery_info << dendl;
- assert(ssc->snapset.clone_size.count(soid.snap));
+ ceph_assert(ssc->snapset.clone_size.count(soid.snap));
recovery_info.size = ssc->snapset.clone_size[soid.snap];
} else {
// pulling head or unversioned object.
op.recovery_progress.data_recovered_to = 0;
op.recovery_progress.first = true;
- assert(!pulling.count(soid));
+ ceph_assert(!pulling.count(soid));
pull_from_peer[fromshard].insert(soid);
PullInfo &pi = pulling[soid];
pi.from = fromshard;
}
SnapSetContext *ssc = obc->ssc;
- assert(ssc);
+ ceph_assert(ssc);
dout(15) << "push_to_replica snapset is " << ssc->snapset << dendl;
pop->recovery_info.ss = ssc->snapset;
map<pg_shard_t, pg_missing_t>::const_iterator pm =
get_parent()->get_shard_missing().find(peer);
- assert(pm != get_parent()->get_shard_missing().end());
+ ceph_assert(pm != get_parent()->get_shard_missing().end());
map<pg_shard_t, pg_info_t>::const_iterator pi =
get_parent()->get_shard_info().find(peer);
- assert(pi != get_parent()->get_shard_info().end());
+ ceph_assert(pi != get_parent()->get_shard_info().end());
calc_clone_subsets(
ssc->snapset, soid,
pm->second,
// pushing head or unversioned object.
// base this partially on replica's clones?
SnapSetContext *ssc = obc->ssc;
- assert(ssc);
+ ceph_assert(ssc);
dout(15) << "push_to_replica snapset is " << ssc->snapset << dendl;
calc_head_subsets(
obc,
ObjectRecoveryInfo new_info = recovery_info;
new_info.copy_subset.clear();
new_info.clone_subset.clear();
- assert(ssc);
+ ceph_assert(ssc);
get_parent()->release_locks(manager); // might already have locks
calc_clone_subsets(
ssc->snapset, new_info.soid, get_parent()->get_local_missing(),
}
const hobject_t &hoid = pop.soid;
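// the data buffer and its interval set must agree: both empty or both non-empty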
- assert((data_included.empty() && data.length() == 0) ||
+ ceph_assert((data_included.empty() && data.length() == 0) ||
(!data_included.empty() && data.length() > 0));
auto piter = pulling.find(hoid);
}
// Once we provide the version, subsequent requests will have it, so
// at this point it must be known.
- assert(v != eversion_t());
+ ceph_assert(v != eversion_t());
uint64_t available = cct->_conf->osd_recovery_max_chunk;
if (!progress.omap_complete) {
ObjectMap::ObjectMapIterator iter =
store->get_omap_iterator(ch,
ghobject_t(recovery_info.soid));
- assert(iter);
+ ceph_assert(iter);
for (iter->lower_bound(progress.omap_recovered_to);
iter->valid();
iter->next(false)) {
recovery_info.copy_subset.clear();
if (st.st_size)
recovery_info.copy_subset.insert(0, st.st_size);
- assert(recovery_info.clone_subset.empty());
+ ceph_assert(recovery_info.clone_subset.empty());
}
r = build_push_op(recovery_info, progress, 0, reply);
dout(20) << __func__ << " soid " << soid << dendl;
// who needs it?
- assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0);
+ ceph_assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0);
for (set<pg_shard_t>::iterator i =
get_parent()->get_acting_recovery_backfill_shards().begin();
i != get_parent()->get_acting_recovery_backfill_shards().end();
pg_shard_t peer = *i;
map<pg_shard_t, pg_missing_t>::const_iterator j =
get_parent()->get_shard_missing().find(peer);
- assert(j != get_parent()->get_shard_missing().end());
+ ceph_assert(j != get_parent()->get_shard_missing().end());
if (j->second.is_missing(soid)) {
shards.push_back(j);
}
j != i->second.end();
++j) {
f->open_object_section("pull_info");
- assert(pulling.count(*j));
+ ceph_assert(pulling.count(*j));
pulling.find(*j)->second.dump(f);
f->close_section();
}
const spg_t& pgid,
const coll_t& coll)
{
- assert(store);
- assert(t);
+ ceph_assert(store);
+ ceph_assert(t);
ghobject_t oid = make_scrub_object(pgid);
t->touch(coll, oid);
return new Store{coll, oid, store};
Store::~Store()
{
- assert(results.empty());
+ ceph_assert(results.empty());
}
void Store::add_object_error(int64_t pool, const inconsistent_obj_wrapper& e)
for (auto& b : p.second) {
Mutex::Locker l(b->lock);
if (b->pg) {
- assert(b->session == this);
- assert(b->is_new() || b->is_acked());
+ ceph_assert(b->session == this);
+ ceph_assert(b->is_new() || b->is_acked());
b->pg->rm_backoff(b);
b->pg.reset();
b->session.reset();
} else if (b->session) {
- assert(b->session == this);
- assert(b->is_deleting());
+ ceph_assert(b->session == this);
+ ceph_assert(b->is_deleting());
b->session.reset();
}
}
backoffs.erase(p);
}
}
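// invariant: backoff_count is nonzero exactly when the backoffs map has entries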
- assert(!backoff_count == backoffs.empty());
+ ceph_assert(!backoff_count == backoffs.empty());
}
bool Session::check_backoff(
if (b) {
dout(10) << __func__ << " session " << this << " has backoff " << *b
<< " for " << *m << dendl;
- assert(!b->is_acked() || !g_conf()->osd_debug_crash_on_ignored_backoff);
+ ceph_assert(!b->is_acked() || !g_conf()->osd_debug_crash_on_ignored_backoff);
return true;
}
// we may race with ms_handle_reset. it clears session->con before removing
return nullptr;
}
Mutex::Locker l(backoff_lock);
- assert(!backoff_count == backoffs.empty());
+ ceph_assert(!backoff_count == backoffs.empty());
auto i = backoffs.find(pgid);
if (i == backoffs.end()) {
return nullptr;
void add_backoff(BackoffRef b) {
Mutex::Locker l(backoff_lock);
- assert(!backoff_count == backoffs.empty());
+ ceph_assert(!backoff_count == backoffs.empty());
backoffs[b->pgid][b->begin].insert(b);
++backoff_count;
}
// called by PG::release_*_backoffs and PG::clear_backoffs()
void rm_backoff(BackoffRef b) {
Mutex::Locker l(backoff_lock);
- assert(b->lock.is_locked_by_me());
- assert(b->session == this);
+ ceph_assert(b->lock.is_locked_by_me());
+ ceph_assert(b->session == this);
auto i = backoffs.find(b->pgid);
if (i != backoffs.end()) {
// may race with clear_backoffs()
}
}
}
- assert(!backoff_count == backoffs.empty());
+ ceph_assert(!backoff_count == backoffs.empty());
}
void clear_backoffs();
};
const hobject_t &oid,
object_snaps *out)
{
- assert(check(oid));
+ ceph_assert(check(oid));
set<string> keys;
map<string, bufferlist> got;
keys.insert(to_object_key(oid));
dout(20) << __func__ << " " << oid << " " << out->snaps << dendl;
if (out->snaps.empty()) {
dout(1) << __func__ << " " << oid << " empty snapset" << dendl;
- assert(!cct->_conf->osd_debug_verify_snaps);
+ ceph_assert(!cct->_conf->osd_debug_verify_snaps);
}
} else {
dout(20) << __func__ << " " << oid << " (out == NULL)" << dendl;
MapCacher::Transaction<std::string, bufferlist> *t)
{
dout(20) << __func__ << " " << oid << dendl;
- assert(check(oid));
+ ceph_assert(check(oid));
set<string> to_remove;
to_remove.insert(to_object_key(oid));
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
const object_snaps &in,
MapCacher::Transaction<std::string, bufferlist> *t)
{
- assert(check(oid));
+ ceph_assert(check(oid));
map<string, bufferlist> to_set;
bufferlist bl;
encode(in, bl);
dout(20) << __func__ << " " << oid << " " << new_snaps
<< " was " << (old_snaps_check ? *old_snaps_check : set<snapid_t>())
<< dendl;
- assert(check(oid));
+ ceph_assert(check(oid));
if (new_snaps.empty())
return remove_oid(oid, t);
if (r < 0)
return r;
if (old_snaps_check)
- assert(out.snaps == *old_snaps_check);
+ ceph_assert(out.snaps == *old_snaps_check);
object_snaps in(oid, new_snaps);
set_snaps(oid, in, t);
MapCacher::Transaction<std::string, bufferlist> *t)
{
dout(20) << __func__ << " " << oid << " " << snaps << dendl;
- assert(!snaps.empty());
- assert(check(oid));
+ ceph_assert(!snaps.empty());
+ ceph_assert(check(oid));
{
object_snaps out;
int r = get_snaps(oid, &out);
if (r != -ENOENT) {
derr << __func__ << " found existing snaps mapped on " << oid
<< ", removing" << dendl;
- assert(!cct->_conf->osd_debug_verify_snaps);
+ ceph_assert(!cct->_conf->osd_debug_verify_snaps);
remove_oid(oid, t);
}
}
unsigned max,
vector<hobject_t> *out)
{
- assert(out);
- assert(out->empty());
+ ceph_assert(out);
+ ceph_assert(out->empty());
int r = 0;
for (set<string>::iterator i = prefixes.begin();
i != prefixes.end() && out->size() < max && r == 0;
break; // Done with this prefix
}
- assert(is_mapping(next.first));
+ ceph_assert(is_mapping(next.first));
dout(20) << __func__ << " " << next.first << dendl;
pair<snapid_t, hobject_t> next_decoded(from_raw(next));
- assert(next_decoded.first == snap);
- assert(check(next_decoded.second));
+ ceph_assert(next_decoded.first == snap);
+ ceph_assert(check(next_decoded.second));
out->push_back(next_decoded.second);
pos = next.first;
MapCacher::Transaction<std::string, bufferlist> *t)
{
dout(20) << __func__ << " " << oid << dendl;
- assert(check(oid));
+ ceph_assert(check(oid));
return _remove_oid(oid, t);
}
const hobject_t &oid,
std::set<snapid_t> *snaps)
{
- assert(check(oid));
+ ceph_assert(check(oid));
object_snaps out;
int r = get_snaps(oid, &out);
if (r < 0)
return string();
char buf[20];
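// format the shard id as a hex key suffix, e.g. ".1_" for shard 1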
int r = snprintf(buf, sizeof(buf), ".%x", (int)shard);
- assert(r < (int)sizeof(buf));
+ ceph_assert(r < (int)sizeof(buf));
return string(buf, r) + '_';
}
uint32_t mask_bits;
void update_bits(
uint32_t new_bits ///< [in] new split bits
) {
- assert(new_bits >= mask_bits);
+ ceph_assert(new_bits >= mask_bits);
mask_bits = new_bits;
set<string> _prefixes = hobject_t::get_prefixes(
mask_bits,
case FLUSH_MODE_IDLE: return "idle";
case FLUSH_MODE_LOW: return "low";
case FLUSH_MODE_HIGH: return "high";
- default: assert(0 == "bad flush mode");
+ default: ceph_assert(0 == "bad flush mode");
}
}
const char *get_flush_mode_name() const {
case EVICT_MODE_IDLE: return "idle";
case EVICT_MODE_SOME: return "some";
case EVICT_MODE_FULL: return "full";
- default: assert(0 == "bad evict mode");
+ default: ceph_assert(0 == "bad evict mode");
}
}
const char *get_evict_mode_name() const {
notif->osd->watch_lock.Lock();
}
void cancel() override {
- assert(notif->lock.is_locked_by_me());
+ ceph_assert(notif->lock.is_locked_by_me());
canceled = true;
}
};
void Notify::do_timeout()
{
- assert(lock.is_locked_by_me());
+ ceph_assert(lock.is_locked_by_me());
dout(10) << "timeout" << dendl;
cb = nullptr;
if (is_discarded()) {
timed_out = true; // we will send the client an error code
maybe_complete_notify();
- assert(complete);
+ ceph_assert(complete);
set<WatchRef> _watchers;
_watchers.swap(watchers);
lock.Unlock();
void Notify::register_cb()
{
- assert(lock.is_locked_by_me());
+ ceph_assert(lock.is_locked_by_me());
{
osd->watch_lock.Lock();
cb = new NotifyTimeoutCB(self.lock());
void Notify::unregister_cb()
{
- assert(lock.is_locked_by_me());
+ ceph_assert(lock.is_locked_by_me());
if (!cb)
return;
cb->cancel();
dout(10) << "complete_watcher" << dendl;
if (is_discarded())
return;
- assert(watchers.count(watch));
+ ceph_assert(watchers.count(watch));
watchers.erase(watch);
notify_replies.insert(make_pair(make_pair(watch->get_watcher_gid(),
watch->get_cookie()),
dout(10) << __func__ << dendl;
if (is_discarded())
return;
- assert(watchers.count(watch));
+ ceph_assert(watchers.count(watch));
watchers.erase(watch);
maybe_complete_notify();
}
void finish(int) override {
OSDService *osd(watch->osd);
dout(10) << "HandleWatchTimeoutDelayed" << dendl;
- assert(watch->pg->is_locked());
+ ceph_assert(watch->pg->is_locked());
watch->cb = nullptr;
if (!watch->is_discarded() && !canceled)
watch->pg->handle_watch_timeout(watch);
Watch::~Watch() {
dout(10) << "~Watch" << dendl;
// users must have called remove() or discard() prior to this point
- assert(!obc);
- assert(!conn);
+ ceph_assert(!obc);
+ ceph_assert(!conn);
}
bool Watch::connected() { return !!conn; }
Context *Watch::get_delayed_cb()
{
- assert(!cb);
+ ceph_assert(!cb);
cb = new HandleDelayedWatchTimeout(self.lock());
return cb;
}
void Watch::discard_state()
{
- assert(pg->is_locked());
- assert(!discarded);
- assert(obc);
+ ceph_assert(pg->is_locked());
+ ceph_assert(!discarded);
+ ceph_assert(obc);
in_progress_notifies.clear();
unregister_cb();
discarded = true;
void Watch::start_notify(NotifyRef notif)
{
- assert(in_progress_notifies.find(notif->notify_id) ==
+ ceph_assert(in_progress_notifies.find(notif->notify_id) ==
in_progress_notifies.end());
if (will_ping) {
utime_t cutoff = ceph_clock_now();
}
void OpClassClientInfoMgr::add_rep_op_msg(int message_code) {
- assert(message_code >= 0 && message_code < int(rep_op_msg_bitset_size));
+ ceph_assert(message_code >= 0 && message_code < int(rep_op_msg_bitset_size));
rep_op_msg_bitset.set(message_code);
}
// the message header stores the type as unsigned little-endian, so be
// sure to convert it to CPU byte ordering
boost::optional<OpRequestRef> op_ref_maybe = op.maybe_get_op();
- assert(op_ref_maybe);
+ ceph_assert(op_ref_maybe);
__le16 mtype_le = (*op_ref_maybe)->get_req()->get_header().type;
__u16 mtype = le16_to_cpu(mtype_le);
if (rep_op_msg_bitset.test(mtype)) {
}
switch (state) {
case RWNONE:
- assert(count == 0);
+ ceph_assert(count == 0);
state = RWREAD;
// fall through
case RWREAD:
case RWEXCL:
return false;
default:
- assert(0 == "unhandled case");
+ ceph_assert(0 == "unhandled case");
return false;
}
}
}
switch (state) {
case RWNONE:
- assert(count == 0);
+ ceph_assert(count == 0);
state = RWWRITE;
// fall through
case RWWRITE:
case RWEXCL:
return false;
default:
- assert(0 == "unhandled case");
+ ceph_assert(0 == "unhandled case");
return false;
}
}
bool get_excl_lock() {
switch (state) {
case RWNONE:
- assert(count == 0);
+ ceph_assert(count == 0);
state = RWEXCL;
count = 1;
return true;
case RWEXCL:
return false;
default:
- assert(0 == "unhandled case");
+ ceph_assert(0 == "unhandled case");
return false;
}
}
return get_write_lock();
}
void dec(list<OpRequestRef> *requeue) {
- assert(count > 0);
- assert(requeue);
+ ceph_assert(count > 0);
+ ceph_assert(requeue);
count--;
if (count == 0) {
state = RWNONE;
}
}
void put_read(list<OpRequestRef> *requeue) {
- assert(state == RWREAD);
+ ceph_assert(state == RWREAD);
dec(requeue);
}
void put_write(list<OpRequestRef> *requeue) {
- assert(state == RWWRITE);
+ ceph_assert(state == RWWRITE);
dec(requeue);
}
void put_excl(list<OpRequestRef> *requeue) {
- assert(state == RWEXCL);
+ ceph_assert(state == RWEXCL);
dec(requeue);
}
bool empty() const { return state == RWNONE; }
case RWState::RWEXCL:
return get_excl(op);
default:
- assert(0 == "invalid lock type");
+ ceph_assert(0 == "invalid lock type");
return true;
}
}
return rwstate.get_read_lock();
}
void drop_recovery_read(list<OpRequestRef> *ls) {
- assert(rwstate.recovery_read_marker);
+ ceph_assert(rwstate.recovery_read_marker);
rwstate.put_read(ls);
rwstate.recovery_read_marker = false;
}
rwstate.put_excl(to_wake);
break;
default:
- assert(0 == "invalid lock type");
+ ceph_assert(0 == "invalid lock type");
}
if (rwstate.empty() && rwstate.recovery_read_marker) {
rwstate.recovery_read_marker = false;
blocked(false), requeue_scrub_on_unblock(false) {}
~ObjectContext() {
- assert(rwstate.empty());
+ ceph_assert(rwstate.empty());
if (destructor_callback)
destructor_callback->complete(0);
}
void start_block() {
- assert(!blocked);
+ ceph_assert(!blocked);
blocked = true;
}
void stop_block() {
- assert(blocked);
+ ceph_assert(blocked);
blocked = false;
}
bool is_blocked() const {
const hobject_t &hoid,
ObjectContextRef& obc,
OpRequestRef& op) {
- assert(locks.find(hoid) == locks.end());
+ ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_lock_type(op, type)) {
locks.insert(make_pair(hoid, ObjectLockState(obc, type)));
return true;
bool take_write_lock(
const hobject_t &hoid,
ObjectContextRef obc) {
- assert(locks.find(hoid) == locks.end());
+ ceph_assert(locks.find(hoid) == locks.end());
if (obc->rwstate.take_write_lock()) {
locks.insert(
make_pair(
const hobject_t &hoid,
ObjectContextRef obc,
bool mark_if_unsuccessful) {
- assert(locks.find(hoid) == locks.end());
+ ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_snaptrimmer_write(mark_if_unsuccessful)) {
locks.insert(
make_pair(
const hobject_t &hoid,
ObjectContextRef obc,
OpRequestRef op) {
- assert(locks.find(hoid) == locks.end());
+ ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_write_greedy(op)) {
locks.insert(
make_pair(
bool try_get_read_lock(
const hobject_t &hoid,
ObjectContextRef obc) {
- assert(locks.find(hoid) == locks.end());
+ ceph_assert(locks.find(hoid) == locks.end());
if (obc->try_get_read_lock()) {
locks.insert(
make_pair(
locks.clear();
}
~ObcLockManager() {
- assert(locks.empty());
+ ceph_assert(locks.empty());
}
};
void object_locator_t::encode(bufferlist& bl) const
{
// verify that nobody's corrupted the locator
- assert(hash == -1 || key.empty());
+ ceph_assert(hash == -1 || key.empty());
__u8 encode_compat = 3;
ENCODE_START(6, encode_compat, bl);
encode(pool, bl);
hash = -1;
DECODE_FINISH(p);
// verify that nobody's corrupted the locator
- assert(hash == -1 || key.empty());
+ ceph_assert(hash == -1 || key.empty());
}
void object_locator_t::dump(Formatter *f) const
bool pg_t::is_split(unsigned old_pg_num, unsigned new_pg_num, set<pg_t> *children) const
{
- assert(m_seed < old_pg_num);
+ ceph_assert(m_seed < old_pg_num);
if (new_pg_num <= old_pg_num)
return false;
unsigned pg_t::get_split_bits(unsigned pg_num) const {
if (pg_num == 1)
return 0;
- assert(pg_num > 1);
+ ceph_assert(pg_num > 1);
// Find unique p such that pg_num \in [2^(p-1), 2^p)
unsigned p = cbits(pg_num);
- assert(p); // silence coverity #751330
+ ceph_assert(p); // silence coverity #751330
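// seeds whose low p-1 bits fall below pg_num's remainder have split one
// level further, so they use p bits; the rest still use p-1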
if ((m_seed % (1<<(p-1))) < (pg_num % (1<<(p-1))))
return p;
pg_t pg_t::get_parent() const
{
unsigned bits = cbits(m_seed);
- assert(bits);
+ ceph_assert(bits);
pg_t retval = *this;
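// clearing the seed's highest set bit recovers the pg this one split from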
retval.m_seed &= ~((~0)<<(bits - 1));
return retval;
uint64_t rev_start = hobject_t::_reverse_bits(m_seed);
uint64_t rev_end = (rev_start | (0xffffffff >> bits)) + 1;
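// rev_end can pass 2^32 only when this pg's range reaches the top of the
// hash space, in which case there is no hobject upper bound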
if (rev_end >= 0x100000000) {
- assert(rev_end == 0x100000000);
+ ceph_assert(rev_end == 0x100000000);
return hobject_t::get_max();
} else {
return hobject_t(object_t(), string(), CEPH_NOSNAP,
_str = pgid.calc_name(_str_buff + spg_t::calc_name_buf_size - 1, "PMET_");
break;
default:
- assert(0 == "unknown collection type");
+ ceph_assert(0 == "unknown collection type");
}
}
pgid = spg_t();
removal_seq = 0;
calc_str();
- assert(s == _str);
+ ceph_assert(s == _str);
return true;
}
if (s.find("_head") == s.length() - 5 &&
type = TYPE_PG;
removal_seq = 0;
calc_str();
- assert(s == _str);
+ ceph_assert(s == _str);
return true;
}
if (s.find("_TEMP") == s.length() - 5 &&
type = TYPE_PG_TEMP;
removal_seq = 0;
calc_str();
- assert(s == _str);
+ ceph_assert(s == _str);
return true;
}
return false;
pool_opts_t::opt_desc_t pool_opts_t::get_opt_desc(const std::string& name) {
opt_mapping_t::iterator i = opt_mapping.find(name);
- assert(i != opt_mapping.end());
+ ceph_assert(i != opt_mapping.end());
return i->second;
}
const pool_opts_t::value_t& pool_opts_t::get(pool_opts_t::key_t key) const {
opts_t::const_iterator i = opts.find(key);
- assert(i != opts.end());
+ ceph_assert(i != opts.end());
return i->second;
}
decode(d, bl);
opts[static_cast<key_t>(k)] = d;
} else {
- assert(!"invalid type");
+ ceph_assert(!"invalid type");
}
}
DECODE_FINISH(bl);
void pg_pool_t::add_snap(const char *n, utime_t stamp)
{
- assert(!is_unmanaged_snaps_mode());
+ ceph_assert(!is_unmanaged_snaps_mode());
flags |= FLAG_POOL_SNAPS;
snapid_t s = get_snap_seq() + 1;
snap_seq = s;
void pg_pool_t::add_unmanaged_snap(uint64_t& snapid)
{
- assert(!is_pool_snaps_mode());
+ ceph_assert(!is_pool_snaps_mode());
if (snap_seq == 0) {
// kludge for pre-mimic tracking of pool vs selfmanaged snaps. after
// mimic this field is not decoded but our flag is set; pre-mimic, we
void pg_pool_t::remove_snap(snapid_t s)
{
- assert(snaps.count(s));
+ ceph_assert(snaps.count(s));
snaps.erase(s);
snap_seq = snap_seq + 1;
}
void pg_pool_t::remove_unmanaged_snap(snapid_t s)
{
- assert(is_unmanaged_snaps_mode());
+ ceph_assert(is_unmanaged_snaps_mode());
removed_snaps.insert(s);
snap_seq = snap_seq + 1;
// try to add in the new seq, to keep the interval_set contiguous
bool ec_pool, const PastIntervals::pg_interval_t &interval) override {
if (first == 0)
first = interval.first;
- assert(interval.last > last);
+ ceph_assert(interval.last > last);
last = interval.last;
set<pg_shard_t> acting;
for (unsigned i = 0; i < interval.acting.size(); ++i) {
case 0:
break;
case 1:
- assert(0 == "pi_simple_rep support removed post-luminous");
+ ceph_assert(0 == "pi_simple_rep support removed post-luminous");
break;
case 2:
past_intervals.reset(new pi_compact_rep);
// NOTE: a change in the up set primary triggers an interval
// change, even though the interval members in the pg_interval_t
// do not change.
- assert(past_intervals);
- assert(past_intervals->past_intervals);
+ ceph_assert(past_intervals);
+ ceph_assert(past_intervals->past_intervals);
if (is_new_interval(
old_acting_primary,
new_acting_primary,
pg_interval_t i;
i.first = same_interval_since;
i.last = osdmap->get_epoch() - 1;
- assert(i.first <= i.last);
+ ceph_assert(i.first <= i.last);
i.acting = old_acting;
i.up = old_up;
i.primary = old_acting_primary;
if (*p != CRUSH_ITEM_NONE)
++num_acting;
- assert(lastmap->get_pools().count(pgid.pool()));
+ ceph_assert(lastmap->get_pools().count(pgid.pool()));
const pg_pool_t& old_pg_pool = lastmap->get_pools().find(pgid.pool())->second;
set<pg_shard_t> old_acting_shards;
old_pg_pool.convert_to_pg_shards(old_acting, &old_acting_shards);
break;
}
default:
- assert(0 == "Invalid rollback code");
+ ceph_assert(0 == "Invalid rollback code");
}
DECODE_FINISH(bp);
}
} catch (...) {
- assert(0 == "Invalid encoding");
+ ceph_assert(0 == "Invalid encoding");
}
}
for (list<pg_log_entry_t>::const_reverse_iterator i = other.log.rbegin();
i != other.log.rend();
++i) {
- assert(i->version > other.tail);
+ ceph_assert(i->version > other.tail);
if (i->version <= v) {
// make tail accurate.
tail = i->version;
{
can_rollback_to = other.can_rollback_to;
list<pg_log_entry_t>::const_reverse_iterator i = other.log.rbegin();
- assert(i != other.log.rend());
+ ceph_assert(i != other.log.rend());
while (i->version > to) {
++i;
- assert(i != other.log.rend());
+ ceph_assert(i != other.log.rend());
}
- assert(i->version == to);
+ ceph_assert(i->version == to);
head = to;
for ( ; i != other.log.rend(); ++i) {
if (i->version <= from) {
uint64_t SnapSet::get_clone_bytes(snapid_t clone) const
{
- assert(clone_size.count(clone));
+ ceph_assert(clone_size.count(clone));
uint64_t size = clone_size.find(clone)->second;
- assert(clone_overlap.count(clone));
+ ceph_assert(clone_overlap.count(clone));
const interval_set<uint64_t> &overlap = clone_overlap.find(clone)->second;
- assert(size >= (uint64_t)overlap.size());
+ ceph_assert(size >= (uint64_t)overlap.size());
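// bytes unique to this clone: its size minus whatever it shares with the
// adjacent clone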
return size - overlap.size();
}
void ScrubMap::merge_incr(const ScrubMap &l)
{
- assert(valid_through == l.incr_since);
+ ceph_assert(valid_through == l.incr_since);
valid_through = l.valid_through;
for (map<hobject_t,object>::const_iterator p = l.objects.begin();
old_pg_t get_old_pg() const {
old_pg_t o;
- assert(m_pool < 0xffffffffull);
+ ceph_assert(m_pool < 0xffffffffull);
o.v.pool = m_pool;
o.v.ps = m_seed;
o.v.preferred = (__s16)-1;
// get a TEMP collection that corresponds to the current collection,
// which we presume is a pg collection.
coll_t get_temp() const {
- assert(type == TYPE_PG);
+ ceph_assert(type == TYPE_PG);
return coll_t(TYPE_PG_TEMP, pgid, 0);
}
case CACHEMODE_READPROXY:
return true;
default:
- assert(0 == "implement me");
+ ceph_assert(0 == "implement me");
}
}
case TYPE_ERASURE:
return false;
default:
- assert(0 == "unhandled pool type");
+ ceph_assert(0 == "unhandled pool type");
}
}
: query_epoch(query_epoch),
epoch_sent(epoch_sent),
info(info), to(to), from(from) {
- assert(from == info.pgid.shard);
+ ceph_assert(from == info.pgid.shard);
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &p);
virtual bool has_full_intervals() const { return false; }
virtual void iterate_all_intervals(
std::function<void(const pg_interval_t &)> &&f) const {
- assert(!has_full_intervals());
- assert(0 == "not valid for this implementation");
+ ceph_assert(!has_full_intervals());
+ ceph_assert(0 == "not valid for this implementation");
}
virtual ~interval_rep() {}
public:
void add_interval(bool ec_pool, const pg_interval_t &interval) {
- assert(past_intervals);
+ ceph_assert(past_intervals);
return past_intervals->add_interval(ec_pool, interval);
}
void decode(bufferlist::const_iterator &bl);
void dump(Formatter *f) const {
- assert(past_intervals);
+ ceph_assert(past_intervals);
past_intervals->dump(f);
}
static void generate_test_instances(list<PastIntervals *> & o);
void iterate_mayberw_back_to(
epoch_t les,
F &&f) const {
- assert(past_intervals);
+ ceph_assert(past_intervals);
past_intervals->iterate_mayberw_back_to(les, std::forward<F>(f));
}
void clear() {
- assert(past_intervals);
+ ceph_assert(past_intervals);
past_intervals->clear();
}
* of state contained
*/
size_t size() const {
- assert(past_intervals);
+ ceph_assert(past_intervals);
return past_intervals->size();
}
bool empty() const {
- assert(past_intervals);
+ ceph_assert(past_intervals);
return past_intervals->empty();
}
set<pg_shard_t> get_might_have_unfound(
pg_shard_t pg_whoami,
bool ec_pool) const {
- assert(past_intervals);
+ ceph_assert(past_intervals);
auto ret = past_intervals->get_all_participants(ec_pool);
ret.erase(pg_whoami);
return ret;
*/
set<pg_shard_t> get_all_probe(
bool ec_pool) const {
- assert(past_intervals);
+ ceph_assert(past_intervals);
return past_intervals->get_all_participants(ec_pool);
}
* past_interval set.
*/
pair<epoch_t, epoch_t> get_bounds() const {
- assert(past_intervals);
+ ceph_assert(past_intervals);
return past_intervals->get_bounds();
}
// fixme: how do we identify a "clean" shutdown anyway?
ldpp_dout(dpp, 10) << "build_prior possibly went active+rw,"
<< " insufficient up; including down osds" << dendl;
- assert(!candidate_blocked_by.empty());
+ ceph_assert(!candidate_blocked_by.empty());
pg_down = true;
blocked_by.insert(
candidate_blocked_by.begin(),
history(h),
epoch_sent(epoch_sent),
to(to), from(from) {
- assert(t != LOG);
+ ceph_assert(t != LOG);
}
pg_query_t(
int t,
epoch_t epoch_sent)
: type(t), since(s), history(h),
epoch_sent(epoch_sent), to(to), from(from) {
- assert(t == LOG);
+ ceph_assert(t == LOG);
}
void encode(bufferlist &bl, uint64_t features) const;
}
void rollback_extents(
version_t gen, const vector<pair<uint64_t, uint64_t> > &extents) {
- assert(can_local_rollback);
- assert(!rollback_info_completed);
+ ceph_assert(can_local_rollback);
+ ceph_assert(!rollback_info_completed);
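// recorded extents require at least encoding version 2 to decode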
if (max_required_version < 2)
max_required_version = 2;
ENCODE_START(2, 2, bl);
}
mempool::osd_pglog::list<pg_log_entry_t> rewind_from_head(eversion_t newhead) {
- assert(newhead >= tail);
+ ceph_assert(newhead >= tail);
mempool::osd_pglog::list<pg_log_entry_t>::iterator p = log.end();
mempool::osd_pglog::list<pg_log_entry_t> divergent;
divergent.splice(divergent.begin(), log, p, log.end());
break;
}
- assert(p->version > newhead);
+ ceph_assert(p->version > newhead);
}
head = newhead;
return eversion_t();
}
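// rmissing is keyed by version, so its first entry names the oldest need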
auto it = missing.find(rmissing.begin()->second);
- assert(it != missing.end());
+ ceph_assert(it != missing.end());
return it->second.need;
}
missing_it->second.set_delete(e.is_delete());
} else {
// not missing, we must have prior_version (if any)
- assert(!is_missing_divergent_item);
+ ceph_assert(!is_missing_divergent_item);
missing[e.soid] = item(e.version, e.prior_version, e.is_delete());
}
rmissing[e.version.version] = e.soid;
void got(const hobject_t& oid, eversion_t v) {
std::map<hobject_t, item>::iterator p = missing.find(oid);
- assert(p != missing.end());
- assert(p->second.need <= v || p->second.is_delete());
+ ceph_assert(p != missing.end());
+ ceph_assert(p->second.need <= v || p->second.is_delete());
got(p);
}
using ceph::decode;
__u8 v;
decode(v, bl);
- assert(v == 1);
+ ceph_assert(v == 1);
decode(handle, bl);
decode(entries, bl);
}