};
if (state_test(CDir::STATE_FRAGMENTING) && is_new()) {
- assert(committed_version == 0);
+ ceph_assert(committed_version == 0);
for (auto p = items.begin(); p != items.end(); ) {
CDentry *dn = p->second;
++p;
LogSegment *_ls, version_t iv)
: MDCacheLogContext(m), inos(_inos), ls(_ls), inotablev(iv) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
if (inotablev) {
get_mds()->inotable->apply_release_ids(inos);
- assert(get_mds()->inotable->get_version() == inotablev);
+ ceph_assert(get_mds()->inotable->get_version() == inotablev);
}
ls->purge_inodes_finish(inos);
}
// FIXME: handle non-default data pool and namespace
auto cb = new LambdaContext([this, inos, ls](int r){
- assert(r == 0 || r == -2);
+ ceph_assert(r == 0 || r == -2);
mds->inotable->project_release_ids(inos);
version_t piv = mds->inotable->get_projected_version();
- assert(piv != 0);
+ ceph_assert(piv != 0);
mds->mdlog->start_submit_entry(new EPurged(inos, ls->seq, piv),
new C_MDS_purge_completed_finish(this, inos, ls, piv));
mds->mdlog->flush();
unsigned max_expiring_segments = 0;
if (pre_segments_size > 0){
max_expiring_segments = max_segments/2;
- assert(segments.size() >= pre_segments_size);
+ ceph_assert(segments.size() >= pre_segments_size);
max_expiring_segments = std::max<unsigned>(max_expiring_segments,segments.size() - pre_segments_size);
}
"name=value,type=CephString,req=false ",
asok_hook,
"Config a CephFS client session");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("client config "
"name=client_id,type=CephInt,req=true "
"name=option,type=CephString,req=true "
"name=value,type=CephString,req=false ",
asok_hook,
"Config a CephFS client session");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("damage ls",
asok_hook,
"List detected metadata damage");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("damage rm "
"name=damage_id,type=CephInt",
asok_hook,
"Remove a damage table entry");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("osdmap barrier name=target_epoch,type=CephInt",
asok_hook,
"Wait until the MDS has this OSD map epoch");
}
void send() {
- assert(ceph_mutex_is_locked(mds->mds_lock));
+ ceph_assert(ceph_mutex_is_locked(mds->mds_lock));
dout(20) << __func__ << dendl;
void send() {
// not really a hard requirement here, but lets ensure this in
// case we change the logic here.
- assert(ceph_mutex_is_locked(mds->mds_lock));
+ ceph_assert(ceph_mutex_is_locked(mds->mds_lock));
dout(20) << __func__ << dendl;
f->open_object_section("result");
if (oldstate == MDSMap::STATE_STANDBY_REPLAY) {
dout(10) << "Monitor activated us! Deactivating replay loop" << dendl;
- assert (state == MDSMap::STATE_REPLAY);
+ ceph_assert(state == MDSMap::STATE_REPLAY);
} else {
// did i just recover?
if ((is_active() || is_clientreplay()) &&
// send.
it->second.state = EXPORT_PREPPING;
mds->send_message_mds(prep, it->second.peer);
- assert (g_conf()->mds_kill_export_at != 4);
+ ceph_assert(g_conf()->mds_kill_export_at != 4);
// make sure any new instantiations of caps are flushed out
ceph_assert(it->second.warning_ack_waiting.empty());
return;
}
- assert (g_conf()->mds_kill_export_at != 5);
+ ceph_assert(g_conf()->mds_kill_export_at != 5);
// send warnings
set<CDir*> bounds;
mdcache->get_subtree_bounds(dir, bounds);
decode(it->second.peer_imported, bp);
it->second.state = EXPORT_LOGGINGFINISH;
- assert (g_conf()->mds_kill_export_at != 9);
+ ceph_assert(g_conf()->mds_kill_export_at != 9);
set<CDir*> bounds;
mdcache->get_subtree_bounds(dir, bounds);
// log export completion, then finish (unfreeze, trigger finish context, etc.)
mds->mdlog->submit_entry(le, new C_MDS_ExportFinishLogged(this, dir));
mds->mdlog->flush();
- assert (g_conf()->mds_kill_export_at != 10);
+ ceph_assert(g_conf()->mds_kill_export_at != 10);
}
void Migrator::export_notify_abort(CDir *dir, export_state_t& stat, set<CDir*>& bounds)
// wait for notifyacks
stat.state = EXPORT_NOTIFYING;
- assert (g_conf()->mds_kill_export_at != 11);
+ ceph_assert(g_conf()->mds_kill_export_at != 11);
// no notifies to wait for?
if (stat.notify_ack_waiting.empty()) {
{
dout(3) << *dir << dendl;
- assert (g_conf()->mds_kill_export_at != 12);
+ ceph_assert(g_conf()->mds_kill_export_at != 12);
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
if (it == export_state.end()) {
dout(7) << "target must have failed, not sending final commit message. export succeeded anyway." << dendl;
return;
}
- assert (g_conf()->mds_kill_import_at != 1);
+ ceph_assert(g_conf()->mds_kill_import_at != 1);
// do we have it?
CInode *in = mdcache->get_inode(m->get_dirfrag().ino);
// reply
dout(7) << " sending export_discover_ack on " << *in << dendl;
mds->send_message_mds(make_message<MExportDirDiscoverAck>(df, m->get_tid()), p_state->peer);
- assert (g_conf()->mds_kill_import_at != 2);
+ ceph_assert(g_conf()->mds_kill_import_at != 2);
}
void Migrator::import_reverse_discovering(dirfrag_t df)
void Migrator::handle_export_dir(const cref_t<MExportDir> &m)
{
- assert (g_conf()->mds_kill_import_at != 5);
+ ceph_assert(g_conf()->mds_kill_import_at != 5);
CDir *dir = mdcache->get_dirfrag(m->dirfrag);
ceph_assert(dir);
// note state
it->second.state = IMPORT_LOGGINGSTART;
- assert (g_conf()->mds_kill_import_at != 6);
+ ceph_assert(g_conf()->mds_kill_import_at != 6);
// log it
mds->mdlog->submit_entry(le, onlogged);
dout(7) << "no bystanders, finishing reverse now" << dendl;
import_reverse_unfreeze(dir);
} else {
- assert (g_conf()->mds_kill_import_at != 10);
+ ceph_assert(g_conf()->mds_kill_import_at != 10);
}
}
// note state
it->second.state = IMPORT_ACKING;
- assert (g_conf()->mds_kill_import_at != 7);
+ ceph_assert(g_conf()->mds_kill_import_at != 7);
// force open client sessions and finish cap import
mds->server->finish_force_open_sessions(imported_session_map, false);
encode(imported_caps, ack->imported_caps);
mds->send_message_mds(ack, from);
- assert (g_conf()->mds_kill_import_at != 8);
+ ceph_assert(g_conf()->mds_kill_import_at != 8);
mdcache->show_subtrees();
}
if (!session) {
session = it.second;
} else if (!session->reclaiming_from) {
- assert(it.second->reclaiming_from == session);
+ ceph_assert(it.second->reclaiming_from == session);
session = it.second;
} else {
- assert(session->reclaiming_from == it.second);
+ ceph_assert(session->reclaiming_from == it.second);
}
}
return session;
mds->send_message_client(reply, session);
}
- assert(!target->reclaiming_from);
- assert(!session->reclaiming_from);
+ ceph_assert(!target->reclaiming_from);
+ ceph_assert(!session->reclaiming_from);
session->reclaiming_from = target;
reply->set_addrs(entity_addrvec_t(target->info.inst.addr));
}
if (reply) {
int64_t session_id = session->get_client().v;
send_reply = new LambdaContext([this, session_id, reply](int r) {
- assert(ceph_mutex_is_locked_by_me(mds->mds_lock));
+ ceph_assert(ceph_mutex_is_locked_by_me(mds->mds_lock));
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(session_id));
if (!session) {
return;
{
Session *session = mds->get_session(m);
dout(3) << __func__ << " " << *m << " from " << m->get_source() << dendl;
- assert(m->get_source().is_client()); // should _not_ come from an mds!
+ ceph_assert(m->get_source().is_client()); // should _not_ come from an mds!
if (!session) {
dout(0) << " ignoring sessionless msg " << *m << dendl;
const auto sessions_p2 = mds->sessionmap.by_state.find(Session::STATE_STALE);
if (sessions_p2 != mds->sessionmap.by_state.end() && !sessions_p2->second->empty()) {
for (auto session : *(sessions_p2->second)) {
- assert(session->is_stale());
+ ceph_assert(session->is_stale());
auto last_cap_renew_span = std::chrono::duration<double>(now - session->last_cap_renew).count();
if (last_cap_renew_span < cutoff) {
dout(20) << "oldest stale session is " << session->info.inst
dout(10) << "EPurged.replay inotable " << mds->inotable->get_version()
<< " < " << inotablev << " " << dendl;
mds->inotable->replay_release_ids(inos);
- assert(mds->inotable->get_version() == inotablev);
+ ceph_assert(mds->inotable->get_version() == inotablev);
}
}
update_segment();