void Beacon::ms_fast_dispatch2(const Message::ref& m)
{
bool handled = ms_dispatch2(m);
- assert(handled);
+ ceph_assert(handled);
}
bool Beacon::ms_dispatch2(const Message::ref& m)
seq_stamp[last_seq] = now;
- assert(want_state != MDSMap::STATE_NULL);
+ ceph_assert(want_state != MDSMap::STATE_NULL);
auto beacon = MMDSBeacon::create(
monc->get_fsid(), mds_gid_t(monc->get_global_id()),
void Beacon::_notify_mdsmap(const MDSMap &mdsmap)
{
- assert(mdsmap.get_epoch() >= epoch);
+ ceph_assert(mdsmap.get_epoch() >= epoch);
if (mdsmap.get_epoch() != epoch) {
epoch = mdsmap.get_epoch();
}
// I'm going to touch this MDS, so it must be locked
- assert(mds->mds_lock.is_locked_by_me());
+ ceph_assert(mds->mds_lock.is_locked_by_me());
health.metrics.clear();
get(PIN_DIRTY);
dir->inc_num_dirty();
dir->dirty_dentries.push_back(&item_dir_dirty);
- assert(ls);
+ ceph_assert(ls);
}
if (ls)
ls->dirty_dentries.push_back(&item_dirty);
dout(10) << __func__ << " " << *this << dendl;
// i now live in this new dir version
- assert(pv <= projected_version);
+ ceph_assert(pv <= projected_version);
version = pv;
_mark_dirty(ls);
void CDentry::mark_clean()
{
dout(10) << __func__ << " " << *this << dendl;
- assert(is_dirty());
+ ceph_assert(is_dirty());
// not always true for recalc_auth_bits during resolve finish
//assert(dir->get_version() == 0 || version <= dir->get_version()); // hmm?
void CDentry::make_path(filepath& fp, bool projected) const
{
- assert(dir);
+ ceph_assert(dir);
dir->inode->make_path(fp, projected);
fp.push_dentry(get_name());
}
*/
void CDentry::link_remote(CDentry::linkage_t *dnl, CInode *in)
{
- assert(dnl->is_remote());
- assert(in->ino() == dnl->get_remote_ino());
+ ceph_assert(dnl->is_remote());
+ ceph_assert(in->ino() == dnl->get_remote_ino());
dnl->inode = in;
if (dnl == &linkage)
void CDentry::unlink_remote(CDentry::linkage_t *dnl)
{
- assert(dnl->is_remote());
- assert(dnl->inode);
+ ceph_assert(dnl->is_remote());
+ ceph_assert(dnl->inode);
if (dnl == &linkage)
dnl->inode->remove_remote_parent(this);
CDentry::linkage_t *CDentry::pop_projected_linkage()
{
- assert(projected.size());
+ ceph_assert(projected.size());
linkage_t& n = projected.front();
n.inode->pop_projected_parent();
}
- assert(n.inode == linkage.inode);
- assert(n.remote_ino == linkage.remote_ino);
- assert(n.remote_d_type == linkage.remote_d_type);
+ ceph_assert(n.inode == linkage.inode);
+ ceph_assert(n.remote_ino == linkage.remote_ino);
+ ceph_assert(n.remote_d_type == linkage.remote_d_type);
projected.pop_front();
int CDentry::get_num_dir_auth_pins() const
{
- assert(!is_projected());
+ ceph_assert(!is_projected());
if (get_linkage()->is_primary())
return auth_pins + get_linkage()->get_inode()->get_num_auth_pins();
return auth_pins;
bool CDentry::can_auth_pin(int *err_ret) const
{
- assert(dir);
+ ceph_assert(dir);
return dir->can_auth_pin(err_ret);
}
auth_pins--;
#ifdef MDS_AUTHPIN_SET
- assert(auth_pin_set.count(by));
+ ceph_assert(auth_pin_set.count(by));
auth_pin_set.erase(auth_pin_set.find(by));
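// erase via find() removes a single instance, so the same pinner can pin more than once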
#endif
dout(10) << "auth_unpin by " << by << " on " << *this
<< " now " << auth_pins << "+" << nested_auth_pins
<< dendl;
- assert(auth_pins >= 0);
+ ceph_assert(auth_pins >= 0);
dir->adjust_nested_auth_pins(-1, -1, by);
}
<< ", change " << adjustment << " yields "
<< auth_pins << "+" << nested_auth_pins
<< dendl;
- assert(nested_auth_pins >= 0);
+ ceph_assert(nested_auth_pins >= 0);
dir->adjust_nested_auth_pins(adjustment, diradj, by);
}
if (!is_auth() && newfirst != first) {
dout(10) << __func__ << " first " << first << " -> " << newfirst << dendl;
- assert(newfirst > first);
+ ceph_assert(newfirst > first);
first = newfirst;
}
if (p.end()) {
// null
- assert(linkage.is_null());
+ ceph_assert(linkage.is_null());
return;
}
void CDentry::remove_client_lease(ClientLease *l, Locker *locker)
{
- assert(l->parent == this);
+ ceph_assert(l->parent == this);
bool gather = false;
void CDentry::dump(Formatter *f) const
{
- assert(f != NULL);
+ ceph_assert(f != NULL);
filepath path;
make_path(path);
// -- locking --
SimpleLock* get_lock(int type) override {
- assert(type == CEPH_LOCK_DN);
+ ceph_assert(type == CEPH_LOCK_DN);
return &lock;
}
void set_object_info(MDSCacheObjectInfo &info) override;
public:
explicit CDirContext(CDir *d) : dir(d) {
- assert(dir != NULL);
+ ceph_assert(dir != NULL);
}
};
public:
explicit CDirIOContext(CDir *d) : dir(d) {
- assert(dir != NULL);
+ ceph_assert(dir != NULL);
}
};
dir_auth(CDIR_AUTH_DEFAULT)
{
// auth
- assert(in->is_dir());
+ ceph_assert(in->is_dir());
if (auth) state_set(STATE_AUTH);
}
dout(25) << "check_rstats on " << this << dendl;
if (!is_complete() || !is_auth() || is_frozen()) {
- assert(!scrub);
+ ceph_assert(!scrub);
dout(10) << "check_rstats bailing out -- incomplete or non-auth or frozen dir!" << dendl;
return true;
}
}
}
- assert(frag_info.nfiles == fnode.fragstat.nfiles);
- assert(frag_info.nsubdirs == fnode.fragstat.nsubdirs);
- assert(nest_info.rbytes == fnode.rstat.rbytes);
- assert(nest_info.rfiles == fnode.rstat.rfiles);
- assert(nest_info.rsubdirs == fnode.rstat.rsubdirs);
+ ceph_assert(frag_info.nfiles == fnode.fragstat.nfiles);
+ ceph_assert(frag_info.nsubdirs == fnode.fragstat.nsubdirs);
+ ceph_assert(nest_info.rbytes == fnode.rstat.rbytes);
+ ceph_assert(nest_info.rfiles == fnode.rstat.rfiles);
+ ceph_assert(nest_info.rsubdirs == fnode.rstat.rsubdirs);
}
}
dout(10) << "check_rstats complete on " << this << dendl;
cache->open_file_table.remove_dirfrag(this);
num_inodes_with_caps += d;
- assert(num_inodes_with_caps >= 0);
+ ceph_assert(num_inodes_with_caps >= 0);
}
CDentry *CDir::lookup(std::string_view name, snapid_t snap)
snapid_t first, snapid_t last)
{
// foreign
- assert(lookup_exact_snap(dname, last) == 0);
+ ceph_assert(lookup_exact_snap(dname, last) == 0);
// create dentry
CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), first, last);
dn->version = get_projected_version();
// add to dir
- assert(items.count(dn->key()) == 0);
+ ceph_assert(items.count(dn->key()) == 0);
//assert(null_items.count(dn->get_name()) == 0);
items[dn->key()] = dn;
if (get_num_any() == 1)
get(PIN_CHILD);
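// invariant: get_num_any() (head + snap items and nulls) must always match the items map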
- assert(get_num_any() == items.size());
+ ceph_assert(get_num_any() == items.size());
return dn;
}
snapid_t first, snapid_t last)
{
// primary
- assert(lookup_exact_snap(dname, last) == 0);
+ ceph_assert(lookup_exact_snap(dname, last) == 0);
// create dentry
CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), first, last);
dn->version = get_projected_version();
// add to dir
- assert(items.count(dn->key()) == 0);
+ ceph_assert(items.count(dn->key()) == 0);
//assert(null_items.count(dn->get_name()) == 0);
items[dn->key()] = dn;
// pin?
if (get_num_any() == 1)
get(PIN_CHILD);
- assert(get_num_any() == items.size());
+ ceph_assert(get_num_any() == items.size());
return dn;
}
snapid_t first, snapid_t last)
{
// foreign
- assert(lookup_exact_snap(dname, last) == 0);
+ ceph_assert(lookup_exact_snap(dname, last) == 0);
// create dentry
CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), ino, d_type, first, last);
dn->version = get_projected_version();
// add to dir
- assert(items.count(dn->key()) == 0);
+ ceph_assert(items.count(dn->key()) == 0);
//assert(null_items.count(dn->get_name()) == 0);
items[dn->key()] = dn;
if (get_num_any() == 1)
get(PIN_CHILD);
- assert(get_num_any() == items.size());
+ ceph_assert(get_num_any() == items.size());
return dn;
}
dout(12) << __func__ << " " << *dn << dendl;
// there should be no client leases at this point!
- assert(dn->client_lease_map.empty());
+ ceph_assert(dn->client_lease_map.empty());
if (state_test(CDir::STATE_DNPINNEDFRAG)) {
dn->put(CDentry::PIN_FRAGMENTING);
unlink_inode_work(dn);
// remove from list
- assert(items.count(dn->key()) == 1);
+ ceph_assert(items.count(dn->key()) == 1);
items.erase(dn->key());
// clean?
// unpin?
if (get_num_any() == 0)
put(PIN_CHILD);
- assert(get_num_any() == items.size());
+ ceph_assert(get_num_any() == items.size());
}
void CDir::link_remote_inode(CDentry *dn, CInode *in)
void CDir::link_remote_inode(CDentry *dn, inodeno_t ino, unsigned char d_type)
{
dout(12) << __func__ << " " << *dn << " remote " << ino << dendl;
- assert(dn->get_linkage()->is_null());
+ ceph_assert(dn->get_linkage()->is_null());
dn->get_linkage()->set_remote(ino, d_type);
num_snap_items++;
num_snap_null--;
}
- assert(get_num_any() == items.size());
+ ceph_assert(get_num_any() == items.size());
}
void CDir::link_primary_inode(CDentry *dn, CInode *in)
{
dout(12) << __func__ << " " << *dn << " " << *in << dendl;
- assert(dn->get_linkage()->is_null());
+ ceph_assert(dn->get_linkage()->is_null());
dn->get_linkage()->inode = in;
num_snap_null--;
}
- assert(get_num_any() == items.size());
+ ceph_assert(get_num_any() == items.size());
}
void CDir::link_inode_work( CDentry *dn, CInode *in)
{
- assert(dn->get_linkage()->get_inode() == in);
+ ceph_assert(dn->get_linkage()->get_inode() == in);
in->set_primary_parent(dn);
// set inode version
num_snap_items--;
num_snap_null++;
}
- assert(get_num_any() == items.size());
+ ceph_assert(get_num_any() == items.size());
}
void CDir::try_remove_unlinked_dn(CDentry *dn)
{
- assert(dn->dir == this);
- assert(dn->get_linkage()->is_null());
+ ceph_assert(dn->dir == this);
+ ceph_assert(dn->get_linkage()->is_null());
// no pins (besides dirty)?
if (dn->get_num_ref() != dn->is_dirty())
in->item_pop_lru.remove_myself();
dn->get_linkage()->inode = 0;
} else {
- assert(!dn->get_linkage()->is_null());
+ ceph_assert(!dn->get_linkage()->is_null());
}
}
void CDir::add_to_bloom(CDentry *dn)
{
- assert(dn->last == CEPH_NOSNAP);
+ ceph_assert(dn->last == CEPH_NOSNAP);
if (!bloom) {
/* don't create a bloom filter for an incomplete dir that was added by log replay */
if (!is_complete())
remove_dentry(dn);
}
- assert(num_snap_null == 0);
- assert(num_head_null == 0);
- assert(get_num_any() == items.size());
+ ceph_assert(num_snap_null == 0);
+ ceph_assert(num_head_null == 0);
+ ceph_assert(get_num_any() == items.size());
}
/** remove dirty null dentries for deleted directory. the dirfrag will be
void CDir::try_remove_dentries_for_stray()
{
dout(10) << __func__ << dendl;
- assert(get_parent_dir()->inode->is_stray());
+ ceph_assert(get_parent_dir()->inode->is_stray());
// clear dirty only when the directory was not snapshotted
bool clear_dirty = !inode->snaprealm;
CDentry *dn = p->second;
++p;
if (dn->last == CEPH_NOSNAP) {
- assert(!dn->is_projected());
- assert(dn->get_linkage()->is_null());
+ ceph_assert(!dn->is_projected());
+ ceph_assert(dn->get_linkage()->is_null());
if (clear_dirty && dn->is_dirty())
dn->mark_clean();
// It's OK to remove lease prematurely because we will never link
if (dn->get_num_ref() == 0)
remove_dentry(dn);
} else {
- assert(!dn->is_projected());
+ ceph_assert(!dn->is_projected());
CDentry::linkage_t *dnl= dn->get_linkage();
CInode *in = NULL;
if (dnl->is_primary()) {
bool CDir::try_trim_snap_dentry(CDentry *dn, const set<snapid_t>& snaps)
{
- assert(dn->last != CEPH_NOSNAP);
+ ceph_assert(dn->last != CEPH_NOSNAP);
set<snapid_t>::const_iterator p = snaps.lower_bound(dn->first);
CDentry::linkage_t *dnl= dn->get_linkage();
CInode *in = 0;
// use the helpers here to maintain the auth_pin invariants on the dir inode
int ap = dn->get_num_auth_pins() + dn->get_num_nested_auth_pins();
int dap = dn->get_num_dir_auth_pins();
- assert(dap <= ap);
+ ceph_assert(dap <= ap);
adjust_nested_auth_pins(ap, dap, NULL);
dn->dir->adjust_nested_auth_pins(-ap, -dap, NULL);
}
take_waiting(WAIT_ANY_MASK, waiters);
if (is_auth()) {
auth_unpin(this); // pinned in prepare_old_fragment
- assert(is_frozen_dir());
+ ceph_assert(is_frozen_dir());
unfreeze_dir();
}
}
- assert(nested_auth_pins == 0);
- assert(dir_auth_pins == 0);
- assert(auth_pins == 0);
+ ceph_assert(nested_auth_pins == 0);
+ ceph_assert(dir_auth_pins == 0);
+ ceph_assert(auth_pins == 0);
num_head_items = num_head_null = 0;
num_snap_items = num_snap_null = 0;
if (auth_pins > 0)
put(PIN_AUTHPIN);
- assert(get_num_ref() == (state_test(STATE_STICKY) ? 1:0));
+ ceph_assert(get_num_ref() == (state_test(STATE_STICKY) ? 1:0));
}
void CDir::init_fragment_pins()
{
dout(10) << "split by " << bits << " bits on " << *this << dendl;
- assert(replay || is_complete() || !is_auth());
+ ceph_assert(replay || is_complete() || !is_auth());
list<frag_t> frags;
frag.split(bits, frags);
for (auto dir : subs) {
if (dir->get_dir_auth() != CDIR_AUTH_DEFAULT &&
dir->get_dir_auth() != new_auth) {
- assert(new_auth == CDIR_AUTH_DEFAULT);
+ ceph_assert(new_auth == CDIR_AUTH_DEFAULT);
new_auth = dir->get_dir_auth();
}
}
for (auto dir : subs) {
dout(10) << " subfrag " << dir->get_frag() << " " << *dir << dendl;
- assert(!dir->is_auth() || dir->is_complete() || replay);
+ ceph_assert(!dir->is_auth() || dir->is_complete() || replay);
if (dir->fnode.accounted_rstat.version == rstat_version)
rstatdiff.add_delta(dir->fnode.accounted_rstat, dir->fnode.rstat);
for (elist<CInode*>::iterator p = dirty_rstat_inodes.begin_use_current();
!p.end(); ++p) {
CInode *in = *p;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
if (in->is_frozen())
continue;
}
}
- assert(!(tag & WAIT_CREATED) || state_test(STATE_CREATING));
+ ceph_assert(!(tag & WAIT_CREATED) || state_test(STATE_CREATING));
MDSCacheObject::add_waiter(tag, c);
}
fnode_t *CDir::project_fnode()
{
- assert(get_version() != 0);
+ ceph_assert(get_version() != 0);
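// the new projection starts as a copy of the most recently projected fnode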
auto &p = projected_fnode.emplace_back(*get_projected_fnode());
if (scrub_infop && scrub_infop->last_scrub_dirty) {
void CDir::pop_and_dirty_projected_fnode(LogSegment *ls)
{
- assert(!projected_fnode.empty());
+ ceph_assert(!projected_fnode.empty());
auto &front = projected_fnode.front();
dout(15) << __func__ << " " << &front << " v" << front.version << dendl;
fnode = front;
void CDir::mark_dirty(version_t pv, LogSegment *ls)
{
- assert(get_version() < pv);
- assert(pv <= projected_version);
+ ceph_assert(get_version() < pv);
+ ceph_assert(pv <= projected_version);
fnode.version = pv;
_mark_dirty(ls);
}
if (!state_test(STATE_DIRTY)) {
dout(10) << __func__ << " (was clean) " << *this << " version " << get_version() << dendl;
_set_dirty_flag();
- assert(ls);
+ ceph_assert(ls);
} else {
dout(10) << __func__ << " (already dirty) " << *this << " version " << get_version() << dendl;
}
{
dout(10) << "fetch on " << *this << dendl;
- assert(is_auth());
- assert(!is_complete());
+ ceph_assert(is_auth());
+ ceph_assert(!is_complete());
if (!can_auth_pin() && !ignore_authpinnability) {
if (c) {
!inode->snaprealm) {
dout(7) << "fetch dirfrag for unlinked directory, mark complete" << dendl;
if (get_version() == 0) {
- assert(inode->is_auth());
+ ceph_assert(inode->is_auth());
set_version(1);
if (state_test(STATE_REJOINUNDEF)) {
- assert(cache->mds->is_rejoin());
+ ceph_assert(cache->mds->is_rejoin());
state_clear(STATE_REJOINUNDEF);
cache->opened_undef_dirfrag(this);
}
{
dout(10) << "fetch " << keys.size() << " keys on " << *this << dendl;
- assert(is_auth());
- assert(!is_complete());
+ ceph_assert(is_auth());
+ ceph_assert(!is_complete());
if (!can_auth_pin()) {
dout(7) << "fetch keys waiting for authpinnable" << dendl;
ObjectOperation rd;
rd.omap_get_header(&fin->hdrbl, &fin->ret1);
if (keys.empty()) {
- assert(!c);
+ ceph_assert(!c);
rd.omap_get_vals("", "", g_conf()->mds_dir_keys_per_op,
&fin->omap, &fin->more, &fin->ret2);
} else {
- assert(c);
+ ceph_assert(c);
std::set<std::string> str_keys;
for (auto p : keys) {
string str;
dout(10) << "_fetched header " << hdrbl.length() << " bytes "
<< omap.size() << " keys for " << *this << dendl;
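// a missing dirfrag object (-ENOENT) or missing omap header (-ENODATA) is tolerated; the zero-length header case is handled below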
- assert(r == 0 || r == -ENOENT || r == -ENODATA);
- assert(is_auth());
- assert(!is_frozen());
+ ceph_assert(r == 0 || r == -ENOENT || r == -ENODATA);
+ ceph_assert(is_auth());
+ ceph_assert(!is_frozen());
if (hdrbl.length() == 0) {
dout(0) << "_fetched missing object for " << *this << dendl;
// take the loaded fnode?
// only if we are a fresh CDir* with no prior state.
if (get_version() == 0) {
- assert(!is_projected());
- assert(!state_test(STATE_COMMITTING));
+ ceph_assert(!is_projected());
+ ceph_assert(!state_test(STATE_COMMITTING));
fnode = got_fnode;
projected_version = committing_version = committed_version = got_fnode.version;
if (state_test(STATE_REJOINUNDEF)) {
- assert(cache->mds->is_rejoin());
+ ceph_assert(cache->mds->is_rejoin());
state_clear(STATE_REJOINUNDEF);
cache->opened_undef_dirfrag(this);
}
if (want == 0) want = get_version();
// preconditions
- assert(want <= get_version() || get_version() == 0); // can't commit the future
- assert(want > committed_version); // the caller is stupid
- assert(is_auth());
- assert(ignore_authpinnability || can_auth_pin());
+ ceph_assert(want <= get_version() || get_version() == 0); // can't commit the future
+ ceph_assert(want > committed_version); // the caller is stupid
+ ceph_assert(is_auth());
+ ceph_assert(ignore_authpinnability || can_auth_pin());
// note: queue up a noop if necessary, so that we always
// get an auth_pin.
} else if (dn->linkage.is_primary()) {
// primary link
CInode *in = dn->linkage.get_inode();
- assert(in);
+ ceph_assert(in);
dout(14) << " pos " << bl.length() << " dn '" << dn->get_name() << "' inode " << *in << dendl;
in->encode_snap_blob(snap_blob);
in->encode_bare(bl, cache->mds->mdsmap->get_up_features(), &snap_blob);
} else {
- assert(!dn->linkage.is_null());
+ ceph_assert(!dn->linkage.is_null());
}
}
// we can't commit things in the future.
// (even the projected future.)
- assert(want <= get_version() || get_version() == 0);
+ ceph_assert(want <= get_version() || get_version() == 0);
// check pre+postconditions.
- assert(is_auth());
+ ceph_assert(is_auth());
// already committed?
if (committed_version >= want) {
// already committing >= want?
if (committing_version >= want) {
dout(10) << "already committing " << committing_version << " >= " << want << dendl;
- assert(state_test(STATE_COMMITTING));
+ ceph_assert(state_test(STATE_COMMITTING));
return;
}
}
dout(10) << "_committed v " << v << " on " << *this << dendl;
- assert(is_auth());
+ ceph_assert(is_auth());
bool stray = inode->is_stray();
// take note.
- assert(v > committed_version);
- assert(v <= committing_version);
+ ceph_assert(v > committed_version);
+ ceph_assert(v <= committing_version);
committed_version = v;
// _all_ commits done?
// inode?
if (dn->linkage.is_primary()) {
CInode *in = dn->linkage.get_inode();
- assert(in);
- assert(in->is_auth());
+ ceph_assert(in);
+ ceph_assert(in->is_auth());
if (committed_version >= in->get_version()) {
if (in->is_dirty()) {
}
} else {
dout(15) << " dir " << committed_version << " < inode " << in->get_version() << " still dirty " << *in << dendl;
- assert(in->is_dirty() || in->last < CEPH_NOSNAP); // special case for cow snap items (not predirtied)
+ ceph_assert(in->is_dirty() || in->last < CEPH_NOSNAP); // special case for cow snap items (not predirtied)
}
}
remove_dentry(dn);
} else {
dout(15) << " dir " << committed_version << " < dn " << dn->get_version() << " still dirty " << *dn << dendl;
- assert(dn->is_dirty());
+ ceph_assert(dn->is_dirty());
}
}
void CDir::encode_export(bufferlist& bl)
{
- assert(!is_projected());
+ ceph_assert(!is_projected());
encode(first, bl);
encode(fnode, bl);
encode(dirty_old_rstat, bl);
void CDir::abort_import()
{
- assert(is_auth());
+ ceph_assert(is_auth());
state_clear(CDir::STATE_AUTH);
remove_bloom();
clear_replica_map();
// unpin parent of frozen dir/tree?
if (inode->is_auth()) {
- assert(!is_frozen_tree_root());
+ ceph_assert(!is_frozen_tree_root());
if (is_frozen_dir())
inode->auth_unpin(this);
}
// pin parent of frozen dir/tree?
if (inode->is_auth()) {
- assert(!is_frozen_tree_root());
+ ceph_assert(!is_frozen_tree_root());
if (is_frozen_dir())
inode->auth_pin(this);
}
auth_pins--;
#ifdef MDS_AUTHPIN_SET
- assert(auth_pin_set.count(by));
+ ceph_assert(auth_pin_set.count(by));
auth_pin_set.erase(auth_pin_set.find(by));
#endif
if (auth_pins == 0)
dout(10) << "auth_unpin by " << by
<< " on " << *this
<< " count now " << auth_pins << " + " << nested_auth_pins << dendl;
- assert(auth_pins >= 0);
+ ceph_assert(auth_pins >= 0);
int newcum = get_cum_auth_pins();
void CDir::adjust_nested_auth_pins(int inc, int dirinc, void *by)
{
- assert(inc);
+ ceph_assert(inc);
nested_auth_pins += inc;
dir_auth_pins += dirinc;
dout(15) << __func__ << " " << inc << "/" << dirinc << " on " << *this
<< " by " << by << " count now "
<< auth_pins << " + " << nested_auth_pins << dendl;
- assert(nested_auth_pins >= 0);
- assert(dir_auth_pins >= 0);
+ ceph_assert(nested_auth_pins >= 0);
+ ceph_assert(dir_auth_pins >= 0);
int newcum = get_cum_auth_pins();
#ifdef MDS_VERIFY_FRAGSTAT
void CDir::verify_fragstat()
{
- assert(is_complete());
+ ceph_assert(is_complete());
if (inode->is_stray())
return;
bool CDir::freeze_tree()
{
- assert(!is_frozen());
- assert(!is_freezing());
+ ceph_assert(!is_frozen());
+ ceph_assert(!is_freezing());
auth_pin(this);
if (is_freezeable(true)) {
void CDir::_freeze_tree()
{
dout(10) << __func__ << " " << *this << dendl;
- assert(is_freezeable(true));
+ ceph_assert(is_freezeable(true));
// twiddle state
if (state_test(STATE_FREEZINGTREE)) {
auth = authority();
}
- assert(auth.first >= 0);
- assert(auth.second == CDIR_AUTH_UNKNOWN);
+ ceph_assert(auth.first >= 0);
+ ceph_assert(auth.second == CDIR_AUTH_UNKNOWN);
auth.second = auth.first;
inode->mdcache->adjust_subtree_auth(this, auth);
if (!was_subtree)
if (is_auth()) {
// must be subtree
- assert(is_subtree_root());
+ ceph_assert(is_subtree_root());
// for debugging purposes, the caller should ensure 'dir_auth.second == dir_auth.first'
mds_authority_t auth = get_dir_auth();
- assert(auth.first >= 0);
- assert(auth.second == auth.first);
+ ceph_assert(auth.first >= 0);
+ ceph_assert(auth.second == auth.first);
auth.second = CDIR_AUTH_UNKNOWN;
inode->mdcache->adjust_subtree_auth(this, auth);
}
finish_waiting(WAIT_FROZEN, -1);
// freezing. stop it.
- assert(state_test(STATE_FREEZINGTREE));
+ ceph_assert(state_test(STATE_FREEZINGTREE));
state_clear(STATE_FREEZINGTREE);
--num_freezing_trees;
auth_unpin(this);
CDir *CDir::get_frozen_tree_root()
{
- assert(is_frozen());
+ ceph_assert(is_frozen());
CDir *dir = this;
while (1) {
if (dir->is_frozen_tree_root())
bool CDir::freeze_dir()
{
- assert(!is_frozen());
- assert(!is_freezing());
+ ceph_assert(!is_frozen());
+ ceph_assert(!is_freezing());
auth_pin(this);
if (is_freezeable_dir(true)) {
finish_waiting(WAIT_FROZEN, -1);
// still freezing. stop.
- assert(state_test(STATE_FREEZINGDIR));
+ ceph_assert(state_test(STATE_FREEZINGDIR));
state_clear(STATE_FREEZINGDIR);
auth_unpin(this);
*/
void CDir::dump(Formatter *f, int flags) const
{
- assert(f != NULL);
+ ceph_assert(f != NULL);
if (flags & DUMP_PATH) {
f->dump_stream("path") << get_path();
}
void CDir::scrub_info_create() const
{
- assert(!scrub_infop);
+ ceph_assert(!scrub_infop);
// break out of const-land to set up implicit initial state
CDir *me = const_cast<CDir*>(this);
void CDir::scrub_initialize(const ScrubHeaderRefConst& header)
{
dout(20) << __func__ << dendl;
- assert(is_complete());
- assert(header != nullptr);
+ ceph_assert(is_complete());
+ ceph_assert(header != nullptr);
// FIXME: weird implicit construction, is someone else meant
// to be calling scrub_info_create first?
scrub_info();
- assert(scrub_infop && !scrub_infop->directory_scrubbing);
+ ceph_assert(scrub_infop && !scrub_infop->directory_scrubbing);
scrub_infop->recursive_start.version = get_projected_version();
scrub_infop->recursive_start.time = ceph_clock_now();
void CDir::scrub_finished()
{
dout(20) << __func__ << dendl;
- assert(scrub_infop && scrub_infop->directory_scrubbing);
+ ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);
- assert(scrub_infop->directories_to_scrub.empty());
- assert(scrub_infop->directories_scrubbing.empty());
+ ceph_assert(scrub_infop->directories_to_scrub.empty());
+ ceph_assert(scrub_infop->directories_scrubbing.empty());
scrub_infop->directories_scrubbed.clear();
- assert(scrub_infop->others_to_scrub.empty());
- assert(scrub_infop->others_scrubbing.empty());
+ ceph_assert(scrub_infop->others_to_scrub.empty());
+ ceph_assert(scrub_infop->others_scrubbing.empty());
scrub_infop->others_scrubbed.clear();
scrub_infop->directory_scrubbing = false;
int CDir::scrub_dentry_next(MDSInternalContext *cb, CDentry **dnout)
{
dout(20) << __func__ << dendl;
- assert(scrub_infop && scrub_infop->directory_scrubbing);
+ ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);
dout(20) << "trying to scrub directories underneath us" << dendl;
int rval = _next_dentry_on_set(scrub_infop->directories_to_scrub, true,
} else if (rval == EAGAIN) {
// we don't need to do anything else
} else { // we emptied out the directory scrub set
- assert(rval == ENOENT);
+ ceph_assert(rval == ENOENT);
dout(20) << "no directories left, moving on to other kinds of dentries"
<< dendl;
void CDir::scrub_dentries_scrubbing(list<CDentry*> *out_dentries)
{
dout(20) << __func__ << dendl;
- assert(scrub_infop && scrub_infop->directory_scrubbing);
+ ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);
for (set<dentry_key_t>::iterator i =
scrub_infop->directories_scrubbing.begin();
i != scrub_infop->directories_scrubbing.end();
++i) {
CDentry *d = lookup(i->name, i->snapid);
- assert(d);
+ ceph_assert(d);
out_dentries->push_back(d);
}
for (set<dentry_key_t>::iterator i = scrub_infop->others_scrubbing.begin();
i != scrub_infop->others_scrubbing.end();
++i) {
CDentry *d = lookup(i->name, i->snapid);
- assert(d);
+ ceph_assert(d);
out_dentries->push_back(d);
}
}
void CDir::scrub_dentry_finished(CDentry *dn)
{
dout(20) << __func__ << " on dn " << *dn << dendl;
- assert(scrub_infop && scrub_infop->directory_scrubbing);
+ ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);
dentry_key_t dn_key = dn->key();
if (scrub_infop->directories_scrubbing.erase(dn_key)) {
scrub_infop->directories_scrubbed.insert(dn_key);
} else {
- assert(scrub_infop->others_scrubbing.count(dn_key));
+ ceph_assert(scrub_infop->others_scrubbing.count(dn_key));
scrub_infop->others_scrubbing.erase(dn_key);
scrub_infop->others_scrubbed.insert(dn_key);
}
bool CDir::scrub_local()
{
- assert(is_complete());
+ ceph_assert(is_complete());
bool rval = check_rstats(true);
scrub_info();
public:
version_t get_version() const { return fnode.version; }
void set_version(version_t v) {
- assert(projected_fnode.empty());
+ ceph_assert(projected_fnode.empty());
projected_version = fnode.version = v;
}
version_t get_projected_version() const { return projected_version; }
void inc_num_dirty() { num_dirty++; }
void dec_num_dirty() {
- assert(num_dirty > 0);
+ ceph_assert(num_dirty > 0);
num_dirty--;
}
int get_num_dirty() const {
MDSRank *get_mds() override {return in->mdcache->mds;}
public:
explicit CInodeIOContext(CInode *in_) : in(in_) {
- assert(in != NULL);
+ ceph_assert(in != NULL);
}
};
bool need_flush = false;
for (auto it = client_need_snapflush.lower_bound(cowin->first);
it != client_need_snapflush.end() && it->first < in->first; ) {
- assert(!it->second.empty());
+ ceph_assert(!it->second.empty());
if (cowin->last >= it->first) {
cowin->auth_pin(this);
need_flush = true;
} else {
// under cross-MDS rename.
// DIRTYRSTAT flag will get cleared when rename finishes
- assert(state_test(STATE_AMBIGUOUSAUTH));
+ ceph_assert(state_test(STATE_AMBIGUOUSAUTH));
}
}
}
void CInode::pop_and_dirty_projected_inode(LogSegment *ls)
{
- assert(!projected_nodes.empty());
+ ceph_assert(!projected_nodes.empty());
auto &front = projected_nodes.front();
dout(15) << __func__ << " " << front.inode.ino
<< " v" << front.inode.version << dendl;
new_srnode = new sr_t(*cur_srnode);
if (!new_srnode->past_parents.empty()) {
// convert past_parents to past_parent_snaps
- assert(snaprealm);
+ ceph_assert(snaprealm);
auto& snaps = snaprealm->get_snaps();
for (auto p : snaps) {
if (p >= new_srnode->current_parent_since)
void CInode::project_snaprealm(sr_t *new_srnode)
{
dout(10) << __func__ << " " << new_srnode << dendl;
- assert(projected_nodes.back().snapnode == projected_inode::UNDEF_SRNODE);
+ ceph_assert(projected_nodes.back().snapnode == projected_inode::UNDEF_SRNODE);
projected_nodes.back().snapnode = new_srnode;
++num_projected_srnodes;
}
void CInode::mark_snaprealm_global(sr_t *new_srnode)
{
- assert(!is_dir());
+ ceph_assert(!is_dir());
// 'last_destroyed' is no longer used, use it to store origin 'current_parent_since'
new_srnode->last_destroyed = new_srnode->current_parent_since;
new_srnode->current_parent_since = mdcache->get_global_snaprealm()->get_newest_seq() + 1;
if parent DNE, we need to find what the parent actually is and fill that in */
void CInode::record_snaprealm_past_parent(sr_t *new_snap, SnapRealm *newparent)
{
- assert(!new_snap->is_parent_global());
+ ceph_assert(!new_snap->is_parent_global());
SnapRealm *oldparent;
if (!snaprealm) {
oldparent = find_snaprealm();
void CInode::record_snaprealm_parent_dentry(sr_t *new_snap, SnapRealm *newparent,
CDentry *dn, bool primary_dn)
{
- assert(new_snap->is_parent_global());
+ ceph_assert(new_snap->is_parent_global());
SnapRealm *oldparent = dn->get_dir()->inode->find_snaprealm();
auto& snaps = oldparent->get_snaps();
void CInode::early_pop_projected_snaprealm()
{
- assert(!projected_nodes.empty());
+ ceph_assert(!projected_nodes.empty());
if (projected_nodes.front().snapnode != projected_inode::UNDEF_SRNODE) {
pop_projected_snaprealm(projected_nodes.front().snapnode, true);
projected_nodes.front().snapnode = projected_inode::UNDEF_SRNODE;
// we should be able to open these up (or have them already be open).
bool ok = snaprealm->_open_parents(NULL);
- assert(ok);
+ ceph_assert(ok);
if (invalidate_cached_snaps)
snaprealm->invalidate_cached_snaps();
dout(10) << " realm " << *snaprealm << " parent " << *snaprealm->parent << dendl;
} else {
dout(10) << __func__ << (early ? " (early) null" : " null") << dendl;
- assert(snaprealm);
+ ceph_assert(snaprealm);
snaprealm->merge_to(NULL);
}
}
int which = inode.dir_layout.dl_dir_hash;
if (!which)
which = CEPH_STR_HASH_LINUX;
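// an unset (zero) dl_dir_hash falls back to the linux dcache hash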
- assert(ceph_str_hash_valid(which));
+ ceph_assert(ceph_str_hash_valid(which));
return ceph_str_hash(which, dn.data(), dn.length());
}
bad = true;
}
}
- assert(!bad);
+ ceph_assert(!bad);
}
void CInode::force_dirfrags()
CDir *CInode::get_or_open_dirfrag(MDCache *mdcache, frag_t fg)
{
- assert(is_dir());
+ ceph_assert(is_dir());
// have it?
CDir *dir = get_dirfrag(fg);
if (!dir) {
// create it.
- assert(is_auth() || mdcache->mds->is_any_replay());
+ ceph_assert(is_auth() || mdcache->mds->is_any_replay());
dir = new CDir(this, fg, mdcache, is_auth());
add_dirfrag(dir);
}
CDir *CInode::add_dirfrag(CDir *dir)
{
- assert(dirfrags.count(dir->dirfrag().frag) == 0);
+ ceph_assert(dirfrags.count(dir->dirfrag().frag) == 0);
dirfrags[dir->dirfrag().frag] = dir;
if (stickydir_ref > 0) {
void CInode::close_dirfrag(frag_t fg)
{
dout(14) << __func__ << " " << fg << dendl;
- assert(dirfrags.count(fg));
+ ceph_assert(dirfrags.count(fg));
CDir *dir = dirfrags[fg];
dir->remove_null_dentries();
for (const auto &p : dir->items)
dout(14) << __func__ << " LEFTOVER dn " << *p.second << dendl;
- assert(dir->get_num_ref() == 0);
+ ceph_assert(dir->get_num_ref() == 0);
delete dir;
dirfrags.erase(fg);
}
void CInode::put_stickydirs()
{
- assert(stickydir_ref > 0);
+ ceph_assert(stickydir_ref > 0);
stickydir_ref--;
if (stickydir_ref == 0) {
put(PIN_STICKYDIRS);
return true;
const CDentry *pdn = other->get_oldest_parent_dn();
if (!pdn) {
- assert(other->is_base());
+ ceph_assert(other->is_base());
break;
}
other = pdn->get_dir()->get_inode();
return true;
const CDentry *pdn = other->get_projected_parent_dn();
if (!pdn) {
- assert(other->is_base());
+ ceph_assert(other->is_base());
break;
}
other = pdn->get_dir()->get_inode();
{
const CDentry *use_parent = projected ? get_projected_parent_dn() : parent;
if (use_parent) {
- assert(!is_base());
+ ceph_assert(!is_base());
use_parent->make_path(fp, projected);
} else {
fp = filepath(ino());
pv = _cdentry->pre_dirty(get_projected_version());
dout(10) << "pre_dirty " << pv << " (current v " << inode.version << ")" << dendl;
} else {
- assert(is_base());
+ ceph_assert(is_base());
pv = get_projected_version() + 1;
}
// force update backtrace for old format inode (see mempool_inode::decode)
if (!state_test(STATE_DIRTY)) {
state_set(STATE_DIRTY);
get(PIN_DIRTY);
- assert(ls);
+ ceph_assert(ls);
}
// move myself to this segment's dirty list
// only auth can get dirty. "dirty" async data in replicas is relative to
// filelock state, not the dirty flag.
- assert(is_auth());
+ ceph_assert(is_auth());
// touch my private version
- assert(inode.version < pv);
+ ceph_assert(inode.version < pv);
inode.version = pv;
_mark_dirty(ls);
void CInode::store(MDSInternalContextBase *fin)
{
dout(10) << __func__ << " " << get_version() << dendl;
- assert(is_base());
+ ceph_assert(is_base());
if (snaprealm)
purge_stale_snap_data(snaprealm->get_snaps());
void CInode::flush(MDSInternalContextBase *fin)
{
dout(10) << __func__ << " " << *this << dendl;
- assert(is_auth() && can_auth_pin());
+ ceph_assert(is_auth() && can_auth_pin());
MDSGatherBuilder gather(g_ceph_context);
void CInode::store_backtrace(MDSInternalContextBase *fin, int op_prio)
{
dout(10) << __func__ << " on " << *this << dendl;
- assert(is_dirty_parent());
+ ceph_assert(is_dirty_parent());
if (op_prio < 0)
op_prio = CEPH_MSG_PRIO_DEFAULT;
dout(10) << __func__ << dendl;
state_set(STATE_DIRTYPARENT);
get(PIN_DIRTYPARENT);
- assert(ls);
+ ceph_assert(ls);
}
if (dirty_pool)
state_set(STATE_DIRTYPOOL);
if (err) {
MDSRank *mds = mdcache->mds;
mds->clog->error() << "bad backtrace on directory inode " << ino();
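// !"bad backtrace" evaluates to false, so this fires only when mds_verify_backtrace > 1; the string literal keeps the message in the assert output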
- assert(!"bad backtrace" == (g_conf()->mds_verify_backtrace > 1));
+ ceph_assert(!"bad backtrace" == (g_conf()->mds_verify_backtrace > 1));
mark_dirty_parent(mds->mdlog->get_current_segment(), false);
mds->mdlog->flush();
CDir *dir = get_dirfrag(fg);
if (is_auth()) {
- assert(dir); // i am auth; i had better have this dir open
+ ceph_assert(dir); // i am auth; i had better have this dir open
dout(10) << fg << " first " << dir->first << " -> " << fgfirst
<< " on " << *dir << dendl;
dir->first = fgfirst;
CDir *dir = get_dirfrag(fg);
if (is_auth()) {
- assert(dir); // i am auth; i had better have this dir open
+ ceph_assert(dir); // i am auth; i had better have this dir open
dout(10) << fg << " first " << dir->first << " -> " << fgfirst
<< " on " << *dir << dendl;
dir->first = fgfirst;
void CInode::clear_dirty_scattered(int type)
{
dout(10) << __func__ << " " << type << " on " << *this << dendl;
- assert(is_dir());
+ ceph_assert(is_dir());
switch (type) {
case CEPH_LOCK_IFILE:
item_dirty_dirfrag_dir.remove_myself();
void CInode::start_scatter(ScatterLock *lock)
{
dout(10) << __func__ << " " << *lock << " on " << *this << dendl;
- assert(is_auth());
+ ceph_assert(is_auth());
mempool_inode *pi = get_projected_inode();
for (const auto &p : dirfrags) {
version_t inode_version, version_t dir_accounted_version)
{
frag_t fg = dir->get_frag();
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
if (dir->is_frozen()) {
dout(10) << __func__ << " " << fg << " frozen, marking " << *lock << " stale " << *dir << dendl;
le->metablob.add_dir_context(dir);
le->metablob.add_dir(dir, true);
- assert(!dir->is_frozen());
+ ceph_assert(!dir->is_frozen());
mut->auth_pin(dir);
if (lock->get_type() == CEPH_LOCK_INEST &&
LogChannelRef clog = mdcache->mds->clog;
dout(10) << __func__ << " " << type << " on " << *this << dendl;
- assert(is_auth());
+ ceph_assert(is_auth());
switch (type) {
case CEPH_LOCK_IFILE:
bool dirstat_valid = true;
// adjust summation
- assert(is_auth());
+ ceph_assert(is_auth());
mempool_inode *pi = get_projected_inode();
bool touched_mtime = false, touched_chattr = false;
pf->fragstat.nsubdirs < 0) {
clog->error() << "bad/negative dir size on "
<< dir->dirfrag() << " " << pf->fragstat;
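// !"bad/negative fragstat" is false, so this fires only when mds_verify_scatter is set; otherwise the bad values are clamped below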
- assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
+ ceph_assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
if (pf->fragstat.nfiles < 0)
pf->fragstat.nfiles = 0;
} else {
clog->error() << "unmatched fragstat on " << ino() << ", inode has "
<< pi->dirstat << ", dirfrags have " << dirstat;
- assert(!"unmatched fragstat" == g_conf()->mds_verify_scatter);
+ ceph_assert(!"unmatched fragstat" == g_conf()->mds_verify_scatter);
}
// trust the dirfrags for now
version_t v = pi->dirstat.version;
make_path_string(path);
clog->error() << "Inconsistent statistics detected: fragstat on inode "
<< ino() << " (" << path << "), inode has " << pi->dirstat;
- assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
+ ceph_assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
if (pi->dirstat.nfiles < 0)
pi->dirstat.nfiles = 0;
case CEPH_LOCK_INEST:
{
// adjust summation
- assert(is_auth());
+ ceph_assert(is_auth());
fragtree_t tmpdft = dirfragtree;
nest_info_t rstat;
clog->error() << "inconsistent rstat on inode " << ino()
<< ", inode has " << pi->rstat
<< ", directory fragments have " << rstat;
- assert(!"unmatched rstat" == g_conf()->mds_verify_scatter);
+ ceph_assert(!"unmatched rstat" == g_conf()->mds_verify_scatter);
}
// trust the dirfrag for now
version_t v = pi->rstat.version;
void CInode::finish_scatter_gather_update_accounted(int type, MutationRef& mut, EMetaBlob *metablob)
{
dout(10) << __func__ << " " << type << " on " << *this << dendl;
- assert(is_auth());
+ ceph_assert(is_auth());
for (const auto &p : dirfrags) {
CDir *dir = p.second;
continue; // nothing to do.
dout(10) << " journaling updated frag accounted_ on " << *dir << dendl;
- assert(dir->is_projected());
+ ceph_assert(dir->is_projected());
fnode_t *pf = dir->get_projected_fnode();
pf->version = dir->pre_dirty();
mut->add_projected_fnode(dir);
bool CInode::freeze_inode(int auth_pin_allowance)
{
- assert(auth_pin_allowance > 0); // otherwise we need to adjust parent's nested_auth_pins
- assert(auth_pins >= auth_pin_allowance);
+ ceph_assert(auth_pin_allowance > 0); // otherwise we need to adjust parent's nested_auth_pins
+ ceph_assert(auth_pins >= auth_pin_allowance);
if (auth_pins > auth_pin_allowance) {
dout(10) << "freeze_inode - waiting for auth_pins to drop to " << auth_pin_allowance << dendl;
auth_pin_freeze_allowance = auth_pin_allowance;
}
dout(10) << "freeze_inode - frozen" << dendl;
- assert(auth_pins == auth_pin_allowance);
+ ceph_assert(auth_pins == auth_pin_allowance);
if (!state_test(STATE_FROZEN)) {
get(PIN_FROZEN);
state_set(STATE_FROZEN);
void CInode::freeze_auth_pin()
{
- assert(state_test(CInode::STATE_FROZEN));
+ ceph_assert(state_test(CInode::STATE_FROZEN));
state_set(CInode::STATE_FROZENAUTHPIN);
}
void CInode::unfreeze_auth_pin()
{
- assert(state_test(CInode::STATE_FROZENAUTHPIN));
+ ceph_assert(state_test(CInode::STATE_FROZENAUTHPIN));
state_clear(CInode::STATE_FROZENAUTHPIN);
if (!state_test(STATE_FREEZING|STATE_FROZEN)) {
MDSInternalContextBase::vec finished;
void CInode::clear_ambiguous_auth(MDSInternalContextBase::vec& finished)
{
- assert(state_test(CInode::STATE_AMBIGUOUSAUTH));
+ ceph_assert(state_test(CInode::STATE_AMBIGUOUSAUTH));
state_clear(CInode::STATE_AMBIGUOUSAUTH);
take_waiting(CInode::WAIT_SINGLEAUTH, finished);
}
auth_pins--;
#ifdef MDS_AUTHPIN_SET
- assert(auth_pin_set.count(by));
+ ceph_assert(auth_pin_set.count(by));
auth_pin_set.erase(auth_pin_set.find(by));
#endif
<< " now " << auth_pins << "+" << nested_auth_pins
<< dendl;
- assert(auth_pins >= 0);
+ ceph_assert(auth_pins >= 0);
if (parent)
parent->adjust_nested_auth_pins(-1, -1, by);
void CInode::adjust_nested_auth_pins(int a, void *by)
{
- assert(a);
+ ceph_assert(a);
nested_auth_pins += a;
dout(35) << __func__ << " by " << by
<< " change " << a << " yields "
<< auth_pins << "+" << nested_auth_pins << dendl;
- assert(nested_auth_pins >= 0);
+ ceph_assert(nested_auth_pins >= 0);
if (g_conf()->mds_debug_auth_pins) {
// audit
if (!dir->is_subtree_root() && dir->get_cum_auth_pins())
s++;
}
- assert(s == nested_auth_pins);
+ ceph_assert(s == nested_auth_pins);
}
if (parent)
CInode::mempool_old_inode& CInode::cow_old_inode(snapid_t follows, bool cow_head)
{
- assert(follows >= first);
+ ceph_assert(follows >= first);
mempool_inode *pi = cow_head ? get_projected_inode() : get_previous_projected_inode();
mempool_xattr_map *px = cow_head ? get_projected_xattrs() : get_previous_projected_xattrs();
void CInode::split_old_inode(snapid_t snap)
{
auto it = old_inodes.lower_bound(snap);
- assert(it != old_inodes.end() && it->second.first < snap);
+ ceph_assert(it != old_inodes.end() && it->second.first < snap);
mempool_old_inode &old = old_inodes[snap - 1];
old = it->second;
decode(snaprealm->srnode, p);
if (is_base()) {
bool ok = snaprealm->_open_parents(NULL);
- assert(ok);
+ ceph_assert(ok);
} else {
if ((snaprealm->srnode.flags ^ old_flags) & sr_t::PARENT_GLOBAL) {
snaprealm->close_parents();
}
dout(20) << __func__ << " " << *snaprealm << dendl;
} else if (snaprealm) {
- assert(mdcache->mds->is_any_replay());
+ ceph_assert(mdcache->mds->is_any_replay());
snaprealm->merge_to(NULL);
}
}
set_loner_cap(want_loner_cap);
changed = true;
} else
- assert(loner_cap == want_loner_cap);
+ ceph_assert(loner_cap == want_loner_cap);
}
return changed;
}
bool CInode::try_set_loner()
{
- assert(want_loner_cap >= 0);
+ ceph_assert(want_loner_cap >= 0);
if (loner_cap >= 0 && loner_cap != want_loner_cap)
return false;
set_loner_cap(want_loner_cap);
} else {
// our states have already been chosen during rejoin.
if (lock->is_xlocked())
- assert(lock->get_state() == LOCK_LOCK);
+ ceph_assert(lock->get_state() == LOCK_LOCK);
}
}
mdcache->open_file_table.remove_inode(this);
num_caps_wanted +=d;
- assert(num_caps_wanted >= 0);
+ ceph_assert(num_caps_wanted >= 0);
}
Capability *CInode::add_client_cap(client_t client, Session *session, SnapRealm *conrealm)
{
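// caps attach only to the head inode (last == CEPH_NOSNAP), never to snapped inodes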
- assert(last == CEPH_NOSNAP);
+ ceph_assert(last == CEPH_NOSNAP);
if (client_caps.empty()) {
get(PIN_CAPS);
if (conrealm)
uint64_t cap_id = ++mdcache->last_cap_id;
auto ret = client_caps.emplace(std::piecewise_construct, std::forward_as_tuple(client),
std::forward_as_tuple(this, cap_id, client));
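// the emplace must insert: a client holds at most one Capability per inode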
- assert(ret.second == true);
+ ceph_assert(ret.second == true);
Capability *cap = &ret.first->second;
session->add_cap(cap);
void CInode::remove_client_cap(client_t client)
{
auto it = client_caps.find(client);
- assert(it != client_caps.end());
+ ceph_assert(it != client_caps.end());
Capability *cap = &it->second;
cap->item_session_caps.remove_myself();
void CInode::replicate_relax_locks()
{
//dout(10) << " relaxing locks on " << *this << dendl;
- assert(is_auth());
- assert(!is_replicated());
+ ceph_assert(is_auth());
+ ceph_assert(!is_replicated());
authlock.replicate_relax();
linklock.replicate_relax();
int getattr_caps)
{
client_t client = session->get_client();
- assert(snapid);
+ ceph_assert(snapid);
bool valid = true;
<< " seq " << cap->get_last_seq() << dendl;
} else if (cap && cap->is_new() && !dir_realm) {
// always issue new caps to the client, otherwise the caps get lost
- assert(cap->is_stale());
+ ceph_assert(cap->is_stale());
issue = cap->pending() | CEPH_CAP_PIN;
cap->issue_norevoke(issue);
dout(10) << "encode_inodestat issuing " << ccap_string(issue)
ENCODE_FINISH(bl);
}
else {
- assert(session->get_connection());
+ ceph_assert(session->get_connection());
encode(oi->ino, bl);
encode(snapid, bl);
void CInode::encode_cap_message(const MClientCaps::ref &m, Capability *cap)
{
- assert(cap);
+ ceph_assert(cap);
client_t client = cap->get_client();
frag_t fg;
decode(fg, q);
CDir *dir = get_dirfrag(fg);
- assert(dir); // we should have all bounds open
+ ceph_assert(dir); // we should have all bounds open
// Only take the remote's fragstat/rstat if we are non-auth for
// this dirfrag AND the lock is NOT in a scattered (MIX) state.
}
bool validate_directory_data() {
- assert(in->is_dir());
+ ceph_assert(in->is_dir());
if (in->is_base()) {
if (!shadow_in) {
// check each dirfrag...
for (const auto &p : in->dirfrags) {
CDir *dir = p.second;
- assert(dir->get_version() > 0);
+ ceph_assert(dir->get_version() > 0);
nest_info.add(dir->fnode.accounted_rstat);
dir_info.add(dir->fnode.accounted_fragstat);
if (dir->scrub_infop->pending_scrub_error) {
void CInode::scrub_info_create() const
{
dout(25) << __func__ << dendl;
- assert(!scrub_infop);
+ ceph_assert(!scrub_infop);
// break out of const-land to set up implicit initial state
CInode *me = const_cast<CInode*>(this);
if (scrub_is_in_progress()) {
dout(20) << __func__ << " inode moved during scrub, reinitializing "
<< dendl;
- assert(scrub_infop->scrub_parent);
+ ceph_assert(scrub_infop->scrub_parent);
CDentry *dn = scrub_infop->scrub_parent;
CDir *dir = dn->dir;
dn->put(CDentry::PIN_SCRUBPARENT);
- assert(dir->scrub_infop && dir->scrub_infop->directory_scrubbing);
+ ceph_assert(dir->scrub_infop && dir->scrub_infop->directory_scrubbing);
dir->scrub_infop->directories_scrubbing.erase(dn->key());
dir->scrub_infop->others_scrubbing.erase(dn->key());
}
int CInode::scrub_dirfrag_next(frag_t* out_dirfrag)
{
dout(20) << __func__ << dendl;
- assert(scrub_is_in_progress());
+ ceph_assert(scrub_is_in_progress());
if (!is_dir()) {
return -ENOTDIR;
void CInode::scrub_dirfrags_scrubbing(list<frag_t>* out_dirfrags)
{
- assert(out_dirfrags != NULL);
- assert(scrub_infop != NULL);
+ ceph_assert(out_dirfrags != NULL);
+ ceph_assert(scrub_infop != NULL);
out_dirfrags->clear();
std::map<frag_t, scrub_stamp_info_t>::iterator i =
void CInode::scrub_dirfrag_finished(frag_t dirfrag)
{
dout(20) << __func__ << " on frag " << dirfrag << dendl;
- assert(scrub_is_in_progress());
+ ceph_assert(scrub_is_in_progress());
std::map<frag_t, scrub_stamp_info_t>::iterator i =
scrub_infop->dirfrag_stamps.find(dirfrag);
- assert(i != scrub_infop->dirfrag_stamps.end());
+ ceph_assert(i != scrub_infop->dirfrag_stamps.end());
scrub_stamp_info_t &si = i->second;
si.last_scrub_stamp = si.scrub_start_stamp;
void CInode::scrub_finished(MDSInternalContextBase **c) {
dout(20) << __func__ << dendl;
- assert(scrub_is_in_progress());
+ ceph_assert(scrub_is_in_progress());
for (std::map<frag_t, scrub_stamp_info_t>::iterator i =
scrub_infop->dirfrag_stamps.begin();
i != scrub_infop->dirfrag_stamps.end();
derr << i->second.last_scrub_version << " != "
<< i->second.scrub_start_version << dendl;
}
- assert(i->second.last_scrub_version == i->second.scrub_start_version);
+ ceph_assert(i->second.last_scrub_version == i->second.scrub_start_version);
}
scrub_infop->last_scrub_version = scrub_infop->scrub_start_version;
} else {
// Files are required to have an explicit layout that specifies
// a pool
- assert(inode.layout.pool_id != -1);
+ ceph_assert(inode.layout.pool_id != -1);
return inode.layout.pool_id;
}
}
void CInode::set_export_pin(mds_rank_t rank)
{
- assert(is_dir());
- assert(is_projected());
+ ceph_assert(is_dir());
+ ceph_assert(is_projected());
get_projected_inode()->export_pin = rank;
maybe_export_pin(true);
}
scrub_infop->children_scrubbed = true;
}
void scrub_set_finisher(MDSInternalContextBase *c) {
- assert(!scrub_infop->on_finish);
+ ceph_assert(!scrub_infop->on_finish);
scrub_infop->on_finish = c;
}
return &projected_nodes.back().inode;
}
mempool_inode *get_previous_projected_inode() {
- assert(!projected_nodes.empty());
+ ceph_assert(!projected_nodes.empty());
auto it = projected_nodes.rbegin();
++it;
if (it != projected_nodes.rend())
close_dirfrags();
close_snaprealm();
clear_file_locks();
- assert(num_projected_xattrs == 0);
- assert(num_projected_srnodes == 0);
- assert(num_caps_wanted == 0);
- assert(num_subtree_roots == 0);
- assert(num_exporting_dirs == 0);
+ ceph_assert(num_projected_xattrs == 0);
+ ceph_assert(num_projected_srnodes == 0);
+ ceph_assert(num_caps_wanted == 0);
+ ceph_assert(num_subtree_roots == 0);
+ ceph_assert(num_exporting_dirs == 0);
}
void decode_store(bufferlist::const_iterator& bl);
void encode_replica(mds_rank_t rep, bufferlist& bl, uint64_t features, bool need_recover) {
- assert(is_auth());
+ ceph_assert(is_auth());
// relax locks?
if (!is_replicated())
void finish_export();
void abort_export() {
put(PIN_TEMPEXPORTING);
- assert(state_test(STATE_EXPORTINGCAPS));
+ ceph_assert(state_test(STATE_EXPORTINGCAPS));
state_clear(STATE_EXPORTINGCAPS);
put(PIN_EXPORTINGCAPS);
}
#endif
<< dendl;
#ifdef MDS_REF_SET
- assert(ref_map[by] > 0);
+ ceph_assert(ref_map[by] > 0);
#endif
- assert(ref > 0);
+ ceph_assert(ref > 0);
}
void bad_get(int by) override {
generic_dout(0) << " bad get " << *this << " by " << by << " " << pin_name(by) << " was " << ref
#endif
<< dendl;
#ifdef MDS_REF_SET
- assert(ref_map[by] >= 0);
+ ceph_assert(ref_map[by] >= 0);
#endif
}
void first_get() override;
// -- hierarchy stuff --
public:
void set_primary_parent(CDentry *p) {
- assert(parent == 0 ||
+ ceph_assert(parent == 0 ||
g_conf().get_val<bool>("mds_hack_allow_loading_invalid_metadata"));
parent = p;
}
void remove_primary_parent(CDentry *dn) {
- assert(dn == parent);
+ ceph_assert(dn == parent);
parent = 0;
}
void add_remote_parent(CDentry *p);
projected_parent.push_back(dn);
}
void pop_projected_parent() {
- assert(projected_parent.size());
+ ceph_assert(projected_parent.size());
parent = projected_parent.front();
projected_parent.pop_front();
}
_revokes.pop_back();
} else {
// no change.
- assert(_pending == c);
+ ceph_assert(_pending == c);
}
//last_issue =
++last_sent;
}
DamageEntryRef entry = by_id_entry->second;
- assert(entry->id == damage_id); // Sanity
+ ceph_assert(entry->id == damage_id); // Sanity
const auto type = entry->get_type();
if (type == DAMAGE_ENTRY_DIRFRAG) {
explicit DamageTable(const mds_rank_t rank_)
: rank(rank_)
{
- assert(rank_ != MDS_RANK_NONE);
+ ceph_assert(rank_ != MDS_RANK_NONE);
}
void dump(Formatter *f) const;
fs->fscid = next_filesystem_id++;
// ANONYMOUS is only for upgrades from legacy mdsmaps; we should
// have initialized next_filesystem_id such that it's never used here.
- assert(fs->fscid != FS_CLUSTER_ID_ANONYMOUS);
+ ceph_assert(fs->fscid != FS_CLUSTER_ID_ANONYMOUS);
} else {
// Use anon fscid because this will get thrown away when encoding
// as legacy MDSMap for legacy mons.
- assert(filesystems.empty());
+ ceph_assert(filesystems.empty());
fs->fscid = FS_CLUSTER_ID_ANONYMOUS;
}
filesystems[fs->fscid] = fs;
} else {
// MDSMonitor should never have created multiple filesystems
// until the quorum features indicated Jewel
- assert(filesystems.size() == 1);
+ ceph_assert(filesystems.size() == 1);
auto fs = filesystems.begin()->second;
// Take the MDSMap for the enabled filesystem, and populated its
decode(legacy_mds_map.inline_data_enabled, p);
if (ev >= 8) {
- assert(struct_v >= 5);
+ ceph_assert(struct_v >= 5);
decode(legacy_mds_map.enabled, p);
decode(legacy_mds_map.fs_name, p);
} else {
for (const auto &i : standby_daemons) {
const auto &gid = i.first;
const auto &info = i.second;
- assert(info.state == MDSMap::STATE_STANDBY);
- assert(info.rank == MDS_RANK_NONE);
+ ceph_assert(info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(info.rank == MDS_RANK_NONE);
if (info.laggy()) {
continue;
for (const auto &i : standby_daemons) {
const auto &gid = i.first;
const auto &info = i.second;
- assert(info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(info.state == MDSMap::STATE_STANDBY);
if (info.laggy() || info.rank >= 0)
continue;
void FSMap::sanity() const
{
if (legacy_client_fscid != FS_CLUSTER_ID_NONE) {
- assert(filesystems.count(legacy_client_fscid) == 1);
+ ceph_assert(filesystems.count(legacy_client_fscid) == 1);
}
for (const auto &i : filesystems) {
auto fs = i.second;
- assert(fs->mds_map.compat.compare(compat) == 0);
- assert(fs->fscid == i.first);
+ ceph_assert(fs->mds_map.compat.compare(compat) == 0);
+ ceph_assert(fs->fscid == i.first);
for (const auto &j : fs->mds_map.mds_info) {
- assert(j.second.rank != MDS_RANK_NONE);
- assert(mds_roles.count(j.first) == 1);
- assert(standby_daemons.count(j.first) == 0);
- assert(standby_epochs.count(j.first) == 0);
- assert(mds_roles.at(j.first) == i.first);
+ ceph_assert(j.second.rank != MDS_RANK_NONE);
+ ceph_assert(mds_roles.count(j.first) == 1);
+ ceph_assert(standby_daemons.count(j.first) == 0);
+ ceph_assert(standby_epochs.count(j.first) == 0);
+ ceph_assert(mds_roles.at(j.first) == i.first);
if (j.second.state != MDSMap::STATE_STANDBY_REPLAY) {
- assert(fs->mds_map.up.at(j.second.rank) == j.first);
- assert(fs->mds_map.failed.count(j.second.rank) == 0);
- assert(fs->mds_map.damaged.count(j.second.rank) == 0);
+ ceph_assert(fs->mds_map.up.at(j.second.rank) == j.first);
+ ceph_assert(fs->mds_map.failed.count(j.second.rank) == 0);
+ ceph_assert(fs->mds_map.damaged.count(j.second.rank) == 0);
}
}
for (const auto &j : fs->mds_map.up) {
mds_rank_t rank = j.first;
- assert(fs->mds_map.in.count(rank) == 1);
+ ceph_assert(fs->mds_map.in.count(rank) == 1);
mds_gid_t gid = j.second;
- assert(fs->mds_map.mds_info.count(gid) == 1);
+ ceph_assert(fs->mds_map.mds_info.count(gid) == 1);
}
}
for (const auto &i : standby_daemons) {
- assert(i.second.state == MDSMap::STATE_STANDBY);
- assert(i.second.rank == MDS_RANK_NONE);
- assert(i.second.global_id == i.first);
- assert(standby_epochs.count(i.first) == 1);
- assert(mds_roles.count(i.first) == 1);
- assert(mds_roles.at(i.first) == FS_CLUSTER_ID_NONE);
+ ceph_assert(i.second.state == MDSMap::STATE_STANDBY);
+ ceph_assert(i.second.rank == MDS_RANK_NONE);
+ ceph_assert(i.second.global_id == i.first);
+ ceph_assert(standby_epochs.count(i.first) == 1);
+ ceph_assert(mds_roles.count(i.first) == 1);
+ ceph_assert(mds_roles.at(i.first) == FS_CLUSTER_ID_NONE);
}
for (const auto &i : standby_epochs) {
- assert(standby_daemons.count(i.first) == 1);
+ ceph_assert(standby_daemons.count(i.first) == 1);
}
for (const auto &i : mds_roles) {
if (i.second == FS_CLUSTER_ID_NONE) {
- assert(standby_daemons.count(i.first) == 1);
+ ceph_assert(standby_daemons.count(i.first) == 1);
} else {
- assert(filesystems.count(i.second) == 1);
- assert(filesystems.at(i.second)->mds_map.mds_info.count(i.first) == 1);
+ ceph_assert(filesystems.count(i.second) == 1);
+ ceph_assert(filesystems.at(i.second)->mds_map.mds_info.count(i.first) == 1);
}
}
}
const std::shared_ptr<Filesystem> &filesystem,
mds_rank_t assigned_rank)
{
- assert(gid_exists(standby_gid));
+ ceph_assert(gid_exists(standby_gid));
bool is_standby_replay = mds_roles.at(standby_gid) != FS_CLUSTER_ID_NONE;
if (!is_standby_replay) {
- assert(standby_daemons.count(standby_gid));
- assert(standby_daemons.at(standby_gid).state == MDSMap::STATE_STANDBY);
+ ceph_assert(standby_daemons.count(standby_gid));
+ ceph_assert(standby_daemons.at(standby_gid).state == MDSMap::STATE_STANDBY);
}
MDSMap &mds_map = filesystem->mds_map;
if (!is_standby_replay) {
mds_map.mds_info[standby_gid] = standby_daemons.at(standby_gid);
} else {
- assert(mds_map.mds_info.count(standby_gid));
- assert(mds_map.mds_info.at(standby_gid).state == MDSMap::STATE_STANDBY_REPLAY);
- assert(mds_map.mds_info.at(standby_gid).rank == assigned_rank);
+ ceph_assert(mds_map.mds_info.count(standby_gid));
+ ceph_assert(mds_map.mds_info.at(standby_gid).state == MDSMap::STATE_STANDBY_REPLAY);
+ ceph_assert(mds_map.mds_info.at(standby_gid).rank == assigned_rank);
}
MDSMap::mds_info_t &info = mds_map.mds_info[standby_gid];
const fs_cluster_id_t leader_ns,
const mds_rank_t leader_rank)
{
- assert(mds_roles.at(standby_gid) == FS_CLUSTER_ID_NONE);
- assert(gid_exists(standby_gid));
- assert(!gid_has_rank(standby_gid));
- assert(standby_daemons.count(standby_gid));
+ ceph_assert(mds_roles.at(standby_gid) == FS_CLUSTER_ID_NONE);
+ ceph_assert(gid_exists(standby_gid));
+ ceph_assert(!gid_has_rank(standby_gid));
+ ceph_assert(standby_daemons.count(standby_gid));
// Insert to the filesystem
auto fs = filesystems.at(leader_ns);
// STANDBY will pick it up.
fs->mds_map.failed.insert(info.rank);
}
- assert(fs->mds_map.up.at(info.rank) == info.global_id);
+ ceph_assert(fs->mds_map.up.at(info.rank) == info.global_id);
fs->mds_map.up.erase(info.rank);
}
fs->mds_map.mds_info.erase(who);
void FSMap::damaged(mds_gid_t who, epoch_t blacklist_epoch)
{
- assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
+ ceph_assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
auto fs = filesystems.at(mds_roles.at(who));
mds_rank_t rank = fs->mds_map.mds_info[who].rank;
fs->mds_map.failed.erase(rank);
fs->mds_map.damaged.insert(rank);
- assert(fs->mds_map.epoch == epoch);
+ ceph_assert(fs->mds_map.epoch == epoch);
}
/**
void FSMap::insert(const MDSMap::mds_info_t &new_info)
{
- assert(new_info.state == MDSMap::STATE_STANDBY);
- assert(new_info.rank == MDS_RANK_NONE);
+ ceph_assert(new_info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(new_info.rank == MDS_RANK_NONE);
mds_roles[new_info.global_id] = FS_CLUSTER_ID_NONE;
standby_daemons[new_info.global_id] = new_info;
standby_epochs[new_info.global_id] = epoch;
std::list<mds_gid_t> FSMap::stop(mds_gid_t who)
{
- assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
+ ceph_assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
auto fs = filesystems.at(mds_roles.at(who));
const auto &info = fs->mds_map.mds_info.at(who);
fs->mds_map.up.erase(info.rank);
void set_legacy_client_fscid(fs_cluster_id_t fscid)
{
- assert(fscid == FS_CLUSTER_ID_NONE || filesystems.count(fscid));
+ ceph_assert(fscid == FS_CLUSTER_ID_NONE || filesystems.count(fscid));
legacy_client_fscid = fscid;
}
if (mds_roles.at(who) == FS_CLUSTER_ID_NONE) {
auto &info = standby_daemons.at(who);
fn(&info);
- assert(info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(info.state == MDSMap::STATE_STANDBY);
standby_epochs[who] = epoch;
} else {
const auto &fs = filesystems[mds_roles.at(who)];
inodeno_t InoTable::project_alloc_id(inodeno_t id)
{
dout(10) << "project_alloc_id " << id << " to " << projected_free << "/" << free << dendl;
- assert(is_active());
+ ceph_assert(is_active());
if (!id)
id = projected_free.range_start();
projected_free.erase(id);
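project_alloc_id above allocates from projected_free immediately while leaving free untouched; the committed set only catches up once the corresponding journal entry is safe, the usual project/apply split in the MDS tables. A toy illustration of that split, using std::set in place of interval_set and invented function names:
#include <cassert>
#include <set>
using ino_set = std::set<unsigned long>;  // stand-in for interval_set<inodeno_t>
ino_set free_inos      = {100, 101, 102};
ino_set projected_free = free_inos;       // optimistic view, ahead of the journal
unsigned long project_alloc() {
  assert(!projected_free.empty());
  unsigned long id = *projected_free.begin();  // cf. range_start()
  projected_free.erase(id);  // later projections see the allocation at once
  return id;
}
void apply_alloc(unsigned long id) {
  free_inos.erase(id);       // committed state, updated after journaling
}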
void InoTable::project_alloc_ids(interval_set<inodeno_t>& ids, int want)
{
- assert(is_active());
+ ceph_assert(is_active());
while (want > 0) {
inodeno_t start = projected_free.range_start();
inodeno_t end = projected_free.end_after(start);
void InoTable::replay_alloc_id(inodeno_t id)
{
- assert(mds); // Only usable in online mode
+ ceph_assert(mds); // Only usable in online mode
dout(10) << "replay_alloc_id " << id << dendl;
if (free.contains(id)) {
}
void InoTable::replay_alloc_ids(interval_set<inodeno_t>& ids)
{
- assert(mds); // Only usable in online mode
+ ceph_assert(mds); // Only usable in online mode
dout(10) << "replay_alloc_ids " << ids << dendl;
interval_set<inodeno_t> is;
return false;
}
- assert(is_marked_free(id));
+ ceph_assert(is_marked_free(id));
dout(10) << "repair: before status. ino = " << id << " pver =" << projected_version << " ver= " << version << dendl;
free.erase(id);
projected_free.erase(id);
*/
int JournalPointer::load(Objecter *objecter)
{
- assert(objecter != NULL);
+ ceph_assert(objecter != NULL);
// Blocking read of data
std::string const object_id = get_object_id();
*/
int JournalPointer::save(Objecter *objecter) const
{
- assert(objecter != NULL);
+ ceph_assert(objecter != NULL);
// It is not valid to persist a null pointer
- assert(!is_null());
+ ceph_assert(!is_null());
// Serialize JournalPointer object
bufferlist data;
*/
void JournalPointer::save(Objecter *objecter, Context *completion) const
{
- assert(objecter != NULL);
+ ceph_assert(objecter != NULL);
bufferlist data;
encode(data);
return !is_xlocked();
}
void get_wrlock(client_t client) {
- assert(can_wrlock());
+ ceph_assert(can_wrlock());
SimpleLock::get_wrlock();
last_wrlock_client = client;
}
public:
explicit LockerContext(Locker *locker_) : locker(locker_) {
- assert(locker != NULL);
+ ceph_assert(locker != NULL);
}
};
public:
explicit LockerLogContext(Locker *locker_) : locker(locker_) {
- assert(locker != NULL);
+ ceph_assert(locker != NULL);
}
};
break;
default:
derr << "locker unknown message " << m->get_type() << dendl;
- assert(0 == "locker unknown message");
+ ceph_assert(0 == "locker unknown message");
}
}
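The ceph_assert(0 == "locker unknown message") form above is a stock Ceph idiom for an always-failing assert that carries a message: 0 is a null pointer constant and a string literal decays to a non-null pointer, so the comparison is always false, and the message survives in the stringified expression. A contrived standalone version, using plain assert for brevity:
#include <cassert>
void dispatch(int type) {
  switch (type) {
  case 1:
    // handle a known message type
    break;
  default:
    // Always false: the literal's address is never null, so this aborts,
    // and the message text appears in the assert diagnostic.
    assert(0 == "unknown message type");
  }
}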
p != mdr->remote_auth_pins.end();
++p) {
if (mustpin.count(p->first)) {
- assert(p->second == p->first->authority().first);
+ ceph_assert(p->second == p->first->authority().first);
map<mds_rank_t, set<MDSCacheObject*> >::iterator q = mustpin_remote.find(p->second);
if (q != mustpin_remote.end())
q->second.insert(p->first);
mds->send_message_mds(req, p->first);
// put in waiting list
- assert(mdr->more()->waiting_on_slave.count(p->first) == 0);
+ ceph_assert(mdr->more()->waiting_on_slave.count(p->first) == 0);
mdr->more()->waiting_on_slave.insert(p->first);
}
return false;
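The two hunks above show the recurring remote-pin shape in the lock-acquisition path: send a request to the peer MDS, assert that the peer is not already in the request's waiting_on_slave set, record it, and return false so the operation is re-driven once acks drain the set. A minimal sketch of that ack-gathering pattern, with invented names (Request, waiting_on_peer):
#include <cassert>
#include <set>
using mds_rank_t = int;
struct Request {
  std::set<mds_rank_t> waiting_on_peer;  // mirrors more()->waiting_on_slave
};
// Returns false while acks are outstanding; the caller retries later.
bool send_and_wait(Request &req, mds_rank_t target) {
  assert(req.waiting_on_peer.count(target) == 0);  // one in-flight per peer
  // ... send the message to `target` here ...
  req.waiting_on_peer.insert(target);
  return false;
}
void handle_ack(Request &req, mds_rank_t from) {
  req.waiting_on_peer.erase(from);  // when empty, the request can proceed
}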
// hose any stray locks
if (existing != mdr->locks.end() && *existing == *p) {
- assert(need_wrlock || need_remote_wrlock);
+ ceph_assert(need_wrlock || need_remote_wrlock);
SimpleLock *lock = *existing;
if (mdr->wrlocks.count(lock)) {
if (!need_wrlock)
dout(10) << " got wrlock on " << **p << " " << *(*p)->get_parent() << dendl;
}
} else {
- assert(mdr->is_master());
+ ceph_assert(mdr->is_master());
if ((*p)->needs_recover()) {
if (mds->is_cluster_degraded()) {
if (!mdr->is_queued_for_replay()) {
dir = dn->get_dir();
} else {
dir = dynamic_cast<CDir*>(o);
- assert(dir);
+ ceph_assert(dir);
}
if (dir) {
if (dir->is_freezing_dir())
p != mut->xlocks.end();
++p) {
MDSCacheObject *object = (*p)->get_parent();
- assert(object->is_auth());
+ ceph_assert(object->is_auth());
if (skip_dentry &&
((*p)->get_type() == CEPH_LOCK_DN || (*p)->get_type() == CEPH_LOCK_DVERSION))
continue;
SimpleLock *lock = *mut->xlocks.begin();
MDSCacheObject *p = lock->get_parent();
if (!p->is_auth()) {
- assert(lock->get_sm()->can_remote_xlock);
+ ceph_assert(lock->get_sm()->can_remote_xlock);
slaves.insert(p->authority().first);
lock->put_xlock();
mut->locks.erase(lock);
void Locker::cancel_locking(MutationImpl *mut, set<CInode*> *pneed_issue)
{
SimpleLock *lock = mut->locking;
- assert(lock);
+ ceph_assert(lock);
dout(10) << "cancel_locking " << *lock << " on " << *mut << dendl;
if (lock->get_parent()->is_auth()) {
void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInternalContextBase::vec *pfinishers)
{
dout(10) << "eval_gather " << *lock << " on " << *lock->get_parent() << dendl;
- assert(!lock->is_stable());
+ ceph_assert(!lock->is_stable());
int next = lock->get_next_state();
bool need_issue = false;
int loner_issued = 0, other_issued = 0, xlocker_issued = 0;
- assert(!caps || in != NULL);
+ ceph_assert(!caps || in != NULL);
if (caps && in->is_head()) {
in->get_caps_issued(&loner_issued, &other_issued, &xlocker_issued,
lock->get_cap_shift(), lock->get_cap_mask());
<< " on " << *lock->get_parent() << dendl;
if (lock->get_sm() == &sm_filelock) {
- assert(in);
+ ceph_assert(in);
if (in->state_test(CInode::STATE_RECOVERING)) {
dout(7) << "eval_gather finished gather, but still recovering" << dendl;
return;
if (in->get_wanted_loner() >= 0) {
dout(10) << "eval end set loner to client." << in->get_loner() << dendl;
bool ok = in->try_set_loner();
- assert(ok);
+ ceph_assert(ok);
mask = -1;
goto retry;
}
C_Locker_Eval(Locker *l, MDSCacheObject *pp, int m) : LockerContext(l), p(pp), mask(m) {
// We are used as an MDSCacheObject waiter, so should
// only be invoked by someone already holding the big lock.
- assert(locker->mds->mds_lock.is_locked_by_me());
+ ceph_assert(locker->mds->mds_lock.is_locked_by_me());
p->get(MDSCacheObject::PIN_PTRWAITER);
}
void finish(int r) override {
}
if (mask & CEPH_LOCK_DN) {
- assert(mask == CEPH_LOCK_DN);
+ ceph_assert(mask == CEPH_LOCK_DN);
bool need_issue = false; // ignore this, no caps on dentries
CDentry *dn = static_cast<CDentry *>(p);
eval_any(&dn->lock, &need_issue);
lock->get_state() == LOCK_SNAP_SYNC) {
// okay, we actually need to kick the head's lock to get ourselves synced up.
CInode *head = mdcache->get_inode(in->ino());
- assert(head);
+ ceph_assert(head);
SimpleLock *hlock = head->get_lock(CEPH_LOCK_IFILE);
if (hlock->get_state() == LOCK_SYNC)
hlock = head->get_lock(lock->get_type());
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, target);
- assert(mut->more()->waiting_on_slave.count(target) == 0);
+ ceph_assert(mut->more()->waiting_on_slave.count(target) == 0);
mut->more()->waiting_on_slave.insert(target);
}
return false;
} else {
// replica
- assert(lock->get_sm()->can_remote_xlock);
- assert(!mut->slave_request);
+ ceph_assert(lock->get_sm()->can_remote_xlock);
+ ceph_assert(!mut->slave_request);
// wait for single auth
if (lock->get_parent()->is_ambiguous_auth()) {
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, auth);
- assert(mut->more()->waiting_on_slave.count(auth) == 0);
+ ceph_assert(mut->more()->waiting_on_slave.count(auth) == 0);
mut->more()->waiting_on_slave.insert(auth);
return false;
void Locker::_finish_xlock(SimpleLock *lock, client_t xlocker, bool *pneed_issue)
{
- assert(!lock->is_stable());
+ ceph_assert(!lock->is_stable());
if (lock->get_type() != CEPH_LOCK_DN &&
lock->get_type() != CEPH_LOCK_ISNAP &&
lock->get_num_rdlocks() == 0 &&
// drop ref
lock->put_xlock();
- assert(mut);
+ ceph_assert(mut);
mut->xlocks.erase(lock);
mut->locks.erase(lock);
// remote xlock?
if (!lock->get_parent()->is_auth()) {
- assert(lock->get_sm()->can_remote_xlock);
+ ceph_assert(lock->get_sm()->can_remote_xlock);
// tell auth
dout(7) << "xlock_finish releasing remote xlock on " << *lock->get_parent() << dendl;
mut->locks.erase(lock);
MDSCacheObject *p = lock->get_parent();
- assert(p->state_test(CInode::STATE_AMBIGUOUSAUTH)); // we are exporting this (inode)
+ ceph_assert(p->state_test(CInode::STATE_AMBIGUOUSAUTH)); // we are exporting this (inode)
if (!lock->is_stable())
lock->get_parent()->auth_unpin(lock);
auto q = p->second.find(client);
if (q != p->second.end()) {
SimpleLock *lock = in->get_lock(p->first);
- assert(lock);
+ ceph_assert(lock);
dout(10) << " completing client_snap_caps for " << ccap_string(p->first)
<< " lock " << *lock << " on " << *in << dendl;
lock->put_wrlock();
}
// my needs
- assert(session->info.inst.name.is_client());
+ ceph_assert(session->info.inst.name.is_client());
client_t my_client = session->get_client();
int my_want = ceph_caps_for_mode(mode);
<< " on " << *in << dendl;
}
- assert(in->is_head());
+ ceph_assert(in->is_head());
// count conflicts with
int nissued = 0;
for (xlist<Capability*>::iterator p = session->caps.begin(); !p.end(); ++p) {
Capability *cap = *p;
CInode *in = cap->get_inode();
- assert(in->is_head());
+ ceph_assert(in->is_head());
if (cap->is_stale()) {
dout(10) << " clearing stale flag on " << *in << dendl;
cap->clear_stale();
void Locker::request_inode_file_caps(CInode *in)
{
- assert(!in->is_auth());
+ ceph_assert(!in->is_auth());
int wanted = in->get_caps_wanted() & ~CEPH_CAP_PIN;
if (wanted != in->replica_caps_wanted) {
void Locker::handle_inode_file_caps(const MInodeFileCaps::const_ref &m)
{
// nobody should be talking to us during recovery.
- assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
+ ceph_assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
// ok
CInode *in = mdcache->get_inode(m->get_ino());
mds_rank_t from = mds_rank_t(m->get_source().num());
- assert(in);
- assert(in->is_auth());
+ ceph_assert(in);
+ ceph_assert(in->is_auth());
dout(7) << "handle_inode_file_caps replica mds." << from << " wants caps " << ccap_string(m->get_caps()) << " on " << *in << dendl;
uint64_t new_max_size, uint64_t new_size,
utime_t new_mtime)
{
- assert(in->is_auth());
- assert(in->is_file());
+ ceph_assert(in->is_auth());
+ ceph_assert(in->is_file());
CInode::mempool_inode *latest = in->get_projected_inode();
CInode::mempool_inode::client_range_map new_ranges;
}
if (mdcache->open_file_table.should_log_open(cur)) {
- assert(cur->last == CEPH_NOSNAP);
+ ceph_assert(cur->last == CEPH_NOSNAP);
EOpen *le = new EOpen(mds->mdlog);
mds->mdlog->start_entry(le);
le->add_clean_inode(cur);
void Locker::snapflush_nudge(CInode *in)
{
- assert(in->last != CEPH_NOSNAP);
+ ceph_assert(in->last != CEPH_NOSNAP);
if (in->client_snap_caps.empty())
return;
CInode *head = mdcache->get_inode(in->ino());
- assert(head);
- assert(head->is_auth());
+ ceph_assert(head);
+ ceph_assert(head->is_auth());
if (head->client_need_snapflush.empty())
return;
void Locker::mark_need_snapflush_inode(CInode *in)
{
- assert(in->last != CEPH_NOSNAP);
+ ceph_assert(in->last != CEPH_NOSNAP);
if (!in->item_caps.is_on_list()) {
need_snapflush_inodes.push_back(&in->item_caps);
utime_t now = ceph_clock_now();
if (clients.count(client)) {
dout(10) << " doing async NULL snapflush on " << snapid << " from client." << client << dendl;
CInode *sin = mdcache->pick_inode_snap(head_in, snapid - 1);
- assert(sin);
- assert(sin->first <= snapid);
+ ceph_assert(sin);
+ ceph_assert(sin->first <= snapid);
_do_snap_update(sin, snapid, 0, sin->first - 1, client, MClientCaps::ref(), MClientCaps::ref());
head_in->remove_need_snapflush(sin, snapid, client);
}
dout(7) << "handle_client_caps no cap for client." << client << " on " << *head_in << dendl;
return;
}
- assert(cap);
+ ceph_assert(cap);
// freezing|frozen?
if (should_defer_client_cap_frozen(head_in)) {
in = mdcache->pick_inode_snap(head_in, follows);
// intermediate snap inodes
while (in != head_in) {
- assert(in->last != CEPH_NOSNAP);
+ ceph_assert(in->last != CEPH_NOSNAP);
if (in->is_auth() && dirty) {
dout(10) << " updating intermediate snapped inode " << *in << dendl;
_do_cap_update(in, NULL, dirty, follows, m, MClientCaps::ref());
return;
/* m must be valid if there are dirty caps */
- assert(m);
+ ceph_assert(m);
uint64_t features = m->get_connection()->get_features();
if (m->get_ctime() > pi->ctime) {
<< " issued " << ccap_string(cap ? cap->issued() : 0)
<< " wanted " << ccap_string(cap ? cap->wanted() : 0)
<< " on " << *in << dendl;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
client_t client = m->get_source().num();
CInode::mempool_inode *latest = in->get_projected_inode();
{
dout(10) << "handle_client_lease " << *m << dendl;
- assert(m->get_source().is_client());
+ ceph_assert(m->get_source().is_client());
client_t client = m->get_source().num();
CInode *in = mdcache->get_inode(m->get_ino(), m->get_last());
ClientLease *l = p->second;
n++;
- assert(lock->get_type() == CEPH_LOCK_DN);
+ ceph_assert(lock->get_type() == CEPH_LOCK_DN);
CDentry *dn = static_cast<CDentry*>(lock->get_parent());
int mask = 1 | CEPH_LOCK_DN; // old and new bits
void Locker::handle_lock(const MLock::const_ref &m)
{
// nobody should be talking to us during recovery.
- assert(mds->is_rejoin() || mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
+ ceph_assert(mds->is_rejoin() || mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
SimpleLock *lock = get_lock(m->get_lock_type(), m->get_object_info());
if (!lock) {
!parent->is_frozen()) {
dout(7) << "handle_reqrdlock got rdlock request on " << *lock
<< " on " << *parent << dendl;
- assert(parent->is_auth()); // replica auth pinned if they're doing this!
+ ceph_assert(parent->is_auth()); // replica auth pinned if they're doing this!
if (lock->is_stable()) {
simple_sync(lock);
} else {
switch (m->get_action()) {
// -- replica --
case LOCK_AC_SYNC:
- assert(lock->get_state() == LOCK_LOCK);
+ ceph_assert(lock->get_state() == LOCK_LOCK);
lock->decode_locked_state(m->get_data());
lock->set_state(LOCK_SYNC);
lock->finish_waiters(SimpleLock::WAIT_RD|SimpleLock::WAIT_STABLE);
break;
case LOCK_AC_LOCK:
- assert(lock->get_state() == LOCK_SYNC);
+ ceph_assert(lock->get_state() == LOCK_SYNC);
lock->set_state(LOCK_SYNC_LOCK);
if (lock->is_leased())
revoke_client_leases(lock);
// -- auth --
case LOCK_AC_LOCKACK:
- assert(lock->get_state() == LOCK_SYNC_LOCK ||
+ ceph_assert(lock->get_state() == LOCK_SYNC_LOCK ||
lock->get_state() == LOCK_SYNC_EXCL);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
if (lock->is_gathering()) {
{
dout(10) << "simple_eval " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
if (lock->get_parent()->is_freezing_or_frozen()) {
// dentry/snap lock in unreadable state can block path traverse
bool Locker::simple_sync(SimpleLock *lock, bool *need_issue)
{
dout(7) << "simple_sync on " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
CInode *in = 0;
if (lock->get_cap_shift())
bool need_recover = false;
if (lock->get_type() == CEPH_LOCK_IFILE) {
- assert(in);
+ ceph_assert(in);
if (in->state_test(CInode::STATE_NEEDSRECOVER)) {
mds->mdcache->queue_file_recover(in);
need_recover = true;
void Locker::simple_excl(SimpleLock *lock, bool *need_issue)
{
dout(7) << "simple_excl on " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
CInode *in = 0;
if (lock->get_cap_shift())
void Locker::simple_lock(SimpleLock *lock, bool *need_issue)
{
dout(7) << "simple_lock on " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
- assert(lock->get_state() != LOCK_LOCK);
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
+ ceph_assert(lock->get_state() != LOCK_LOCK);
CInode *in = 0;
if (lock->get_cap_shift())
bool need_recover = false;
if (lock->get_type() == CEPH_LOCK_IFILE) {
- assert(in);
+ ceph_assert(in);
if (in->state_test(CInode::STATE_NEEDSRECOVER)) {
mds->mdcache->queue_file_recover(in);
need_recover = true;
void Locker::simple_xlock(SimpleLock *lock)
{
dout(7) << "simple_xlock on " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->get_parent()->is_auth());
//assert(lock->is_stable());
- assert(lock->get_state() != LOCK_XLOCK);
+ ceph_assert(lock->get_state() != LOCK_XLOCK);
CInode *in = 0;
if (lock->get_cap_shift())
{
dout(10) << "scatter_eval " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
if (lock->get_parent()->is_freezing_or_frozen()) {
dout(20) << " freezing|frozen" << dendl;
// handle_file_lock due to AC_NUDGE, because the rest of the
// time we are replicated or have dirty data and won't get
// called. bailing here avoids an infinite loop.
- assert(!c);
+ ceph_assert(!c);
break;
}
} else {
{
dout(10) << "scatter_tempsync " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
- assert(0 == "not fully implemented, at least not for filelock");
+ ceph_assert(0 == "not fully implemented, at least not for filelock");
CInode *in = static_cast<CInode *>(lock->get_parent());
dout(7) << "local_wrlock_grab on " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->can_wrlock());
- assert(!mut->wrlocks.count(lock));
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->can_wrlock());
+ ceph_assert(!mut->wrlocks.count(lock));
lock->get_wrlock(mut->get_client());
mut->wrlocks.insert(lock);
mut->locks.insert(lock);
dout(7) << "local_wrlock_start on " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->get_parent()->is_auth());
if (lock->can_wrlock()) {
- assert(!mut->wrlocks.count(lock));
+ ceph_assert(!mut->wrlocks.count(lock));
lock->get_wrlock(mut->get_client());
mut->wrlocks.insert(lock);
mut->locks.insert(lock);
dout(7) << "local_xlock_start on " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->get_parent()->is_auth());
if (!lock->can_xlock_local()) {
lock->add_waiter(SimpleLock::WAIT_WR|SimpleLock::WAIT_STABLE, new C_MDS_RetryRequest(mdcache, mut));
return false;
<< " filelock=" << *lock << " on " << *lock->get_parent()
<< dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
if (lock->get_parent()->is_freezing_or_frozen())
return;
dout(7) << "scatter_mix " << *lock << " on " << *lock->get_parent() << dendl;
CInode *in = static_cast<CInode*>(lock->get_parent());
- assert(in->is_auth());
- assert(lock->is_stable());
+ ceph_assert(in->is_auth());
+ ceph_assert(lock->is_stable());
if (lock->get_state() == LOCK_LOCK) {
in->start_scatter(lock);
CInode *in = static_cast<CInode*>(lock->get_parent());
dout(7) << "file_excl " << *lock << " on " << *lock->get_parent() << dendl;
- assert(in->is_auth());
- assert(lock->is_stable());
+ ceph_assert(in->is_auth());
+ ceph_assert(lock->is_stable());
- assert((in->get_loner() >= 0 && in->get_mds_caps_wanted().empty()) ||
+ ceph_assert((in->get_loner() >= 0 && in->get_mds_caps_wanted().empty()) ||
(lock->get_state() == LOCK_XSYN)); // must do xsyn -> excl -> <anything else>
switch (lock->get_state()) {
{
dout(7) << "file_xsyn on " << *lock << " on " << *lock->get_parent() << dendl;
CInode *in = static_cast<CInode *>(lock->get_parent());
- assert(in->is_auth());
- assert(in->get_loner() >= 0 && in->get_mds_caps_wanted().empty());
+ ceph_assert(in->is_auth());
+ ceph_assert(in->get_loner() >= 0 && in->get_mds_caps_wanted().empty());
switch (lock->get_state()) {
case LOCK_EXCL: lock->set_state(LOCK_EXCL_XSYN); break;
CInode *in = static_cast<CInode *>(lock->get_parent());
dout(7) << "file_recover " << *lock << " on " << *in << dendl;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
//assert(lock->is_stable());
- assert(lock->get_state() == LOCK_PRE_SCAN); // only called from MDCache::start_files_to_recover()
+ ceph_assert(lock->get_state() == LOCK_PRE_SCAN); // only called from MDCache::start_files_to_recover()
int gather = 0;
switch (m->get_action()) {
// -- replica --
case LOCK_AC_SYNC:
- assert(lock->get_state() == LOCK_LOCK ||
+ ceph_assert(lock->get_state() == LOCK_LOCK ||
lock->get_state() == LOCK_MIX ||
lock->get_state() == LOCK_MIX_SYNC2);
break;
case LOCK_AC_MIX:
- assert(lock->get_state() == LOCK_SYNC ||
+ ceph_assert(lock->get_state() == LOCK_SYNC ||
lock->get_state() == LOCK_LOCK ||
lock->get_state() == LOCK_SYNC_MIX2);
// -- auth --
case LOCK_AC_LOCKACK:
- assert(lock->get_state() == LOCK_SYNC_LOCK ||
+ ceph_assert(lock->get_state() == LOCK_SYNC_LOCK ||
lock->get_state() == LOCK_MIX_LOCK ||
lock->get_state() == LOCK_MIX_LOCK2 ||
lock->get_state() == LOCK_MIX_EXCL ||
lock->get_state() == LOCK_SYNC_EXCL ||
lock->get_state() == LOCK_SYNC_MIX ||
lock->get_state() == LOCK_MIX_TSYN);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
if (lock->get_state() == LOCK_MIX_LOCK ||
break;
case LOCK_AC_SYNCACK:
- assert(lock->get_state() == LOCK_MIX_SYNC);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->get_state() == LOCK_MIX_SYNC);
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
lock->decode_locked_state(m->get_data());
break;
case LOCK_AC_MIXACK:
- assert(lock->get_state() == LOCK_SYNC_MIX);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->get_state() == LOCK_SYNC_MIX);
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
if (lock->is_gathering()) {
return NULL;
}
- assert(p.end());
+ ceph_assert(p.end());
return le;
}
void wait_for_expiry(MDSInternalContextBase *c)
{
- assert(c != NULL);
+ ceph_assert(c != NULL);
expiry_waiters.push_back(c);
}
default:
derr << " balancer unknown message " << m->get_type() << dendl_impl;
- assert(0 == "balancer unknown message");
+ ceph_assert(0 == "balancer unknown message");
}
return 0;
while (it != q.end()) {
auto cur = it++;
CInode *in = *cur;
- assert(in->is_dir());
+ ceph_assert(in->is_dir());
mds_rank_t export_pin = in->get_export_pin(false);
bool remove = true;
{
const auto frag = dir->dirfrag();
auto callback = [this, frag](int r) {
- assert(frag.frag != frag_t());
+ ceph_assert(frag.frag != frag_t());
// frag must be in this set because only one context is in flight
// for a given frag at a time (because merge_pending is checked before
dout(10) << "drop merge on " << frag << " because not in cache" << dendl;
return;
}
- assert(dir->dirfrag() == frag);
+ ceph_assert(dir->dirfrag() == frag);
if (!dir->is_auth()) {
dout(10) << "drop merge on " << *dir << " because lost auth" << dendl;
if (dir->inode->is_base())
continue;
- assert(dir->inode->authority().first == target); // cuz that's how i put it in the map, dummy
+ ceph_assert(dir->inode->authority().first == target); // cuz that's how i put it in the map, dummy
if (pop <= amount-have) {
dout(5) << "reexporting " << *dir << " pop " << pop
return;
}
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
double need = amount - have;
if (need < amount * g_conf()->mds_bal_min_start)
CInode *in = *it;
++it;
- assert(in->is_dir());
- assert(in->get_parent_dir() == dir);
+ ceph_assert(in->is_dir());
+ ceph_assert(in->get_parent_dir() == dir);
list<CDir*> dfls;
in->get_nested_dirfrags(dfls);
MDCache *mdcache;
MDSRank *get_mds() override
{
- assert(mdcache != NULL);
+ ceph_assert(mdcache != NULL);
return mdcache->mds;
}
public:
MDCache *mdcache;
MDSRank *get_mds() override
{
- assert(mdcache != NULL);
+ ceph_assert(mdcache != NULL);
return mdcache->mds;
}
public:
MDCache *mdcache;
MDSRank *get_mds() override
{
- assert(mdcache != NULL);
+ ceph_assert(mdcache != NULL);
return mdcache->mds;
}
public:
// add to lru, inode map
if (in->last == CEPH_NOSNAP) {
auto &p = inode_map[in->ino()];
- assert(!p); // should be no dup inos!
+ ceph_assert(!p); // should be no dup inos!
p = in;
} else {
auto &p = snap_inode_map[in->vino()];
- assert(!p); // should be no dup inos!
+ ceph_assert(!p); // should be no dup inos!
p = in;
}
if (o->get_parent_dn()) {
// FIXME: multiple parents?
CDentry *dn = o->get_parent_dn();
- assert(!dn->is_dirty());
+ ceph_assert(!dn->is_dirty());
dn->dir->unlink_inode(dn); // leave dentry ... FIXME?
}
}
// delete it
- assert(o->get_num_ref() == 0);
+ ceph_assert(o->get_num_ref() == 0);
delete o;
}
else
in->inode_auth = mds_authority_t(mds_rank_t(in->ino() - MDS_INO_MDSDIR_OFFSET), CDIR_AUTH_UNKNOWN);
in->open_snaprealm(); // empty snaprealm
- assert(!in->snaprealm->parent); // created its own
+ ceph_assert(!in->snaprealm->parent); // created its own
in->snaprealm->srnode.seq = 1;
}
}
if (in->inode.is_dir()) {
CDir *dir = in->get_dirfrag(frag_t());
- assert(dir);
+ ceph_assert(dir);
dir->mark_dirty(1, mut->ls);
dir->mark_new(mut->ls);
}
return;
}
CDir *mydir = myin->get_or_open_dirfrag(this, frag_t());
- assert(mydir);
+ ceph_assert(mydir);
adjust_subtree_auth(mydir, mds->get_nodeid());
mydir->fetch(c);
})
return;
}
if (mds->get_nodeid() == mds->mdsmap->get_root()) {
- assert(root->is_auth());
+ ceph_assert(root->is_auth());
CDir *rootdir = root->get_or_open_dirfrag(this, frag_t());
- assert(rootdir);
+ ceph_assert(rootdir);
if (!rootdir->is_subtree_root())
adjust_subtree_auth(rootdir, mds->get_nodeid());
if (!rootdir->is_complete()) {
return;
}
} else {
- assert(!root->is_auth());
+ ceph_assert(!root->is_auth());
CDir *rootdir = root->get_dirfrag(frag_t());
if (!rootdir) {
open_remote_dirfrag(root, frag_t(), new C_MDS_RetryOpenRoot(this));
return;
}
CDir *mydir = myin->get_or_open_dirfrag(this, frag_t());
- assert(mydir);
+ ceph_assert(mydir);
adjust_subtree_auth(mydir, mds->get_nodeid());
populate_mydir();
void MDCache::populate_mydir()
{
- assert(myin);
+ ceph_assert(myin);
CDir *mydir = myin->get_or_open_dirfrag(this, frag_t());
- assert(mydir);
+ ceph_assert(mydir);
dout(10) << "populate_mydir " << *mydir << dendl;
new C_MDS_RetryOpenRoot(this));
return;
}
- assert(straydn);
- assert(strays[i]);
+ ceph_assert(straydn);
+ ceph_assert(strays[i]);
// we make multiple passes through this method; make sure we only pin each stray once.
if (!strays[i]->state_test(CInode::STATE_STRAYPINNED)) {
strays[i]->get(CInode::PIN_STRAY);
// DamageTable applies special handling to strays: it will
// have damaged() us out if one is damaged.
- assert(!dir->state_test(CDir::STATE_BADFRAG));
+ ceph_assert(!dir->state_test(CDir::STATE_BADFRAG));
if (dir->get_version() == 0) {
dir->fetch(new C_MDS_RetryOpenRoot(this));
// okay!
dout(10) << "populate_mydir done" << dendl;
- assert(!open);
+ ceph_assert(!open);
open = true;
mds->queue_waiters(waiting_for_open);
in->name_stray_dentry(straydname);
CInode *strayi = get_stray();
- assert(strayi);
+ ceph_assert(strayi);
frag_t fg = strayi->pick_dirfrag(straydname);
CDir *straydir = strayi->get_dirfrag(fg);
- assert(straydir);
+ ceph_assert(straydir);
return straydir;
}
straydn = straydir->add_null_dentry(straydname);
straydn->mark_new();
} else {
- assert(straydn->get_projected_linkage()->is_null());
+ ceph_assert(straydn->get_projected_linkage()->is_null());
}
straydn->state_set(CDentry::STATE_STRAY);
} else {
root = get_subtree_root(dir); // subtree root
}
- assert(root);
- assert(subtrees.count(root));
+ ceph_assert(root);
+ ceph_assert(subtrees.count(root));
dout(7) << " current root is " << *root << dendl;
if (root == dir) {
} else {
// i am a new subtree.
dout(10) << " new subtree at " << *dir << dendl;
- assert(subtrees.count(dir) == 0);
+ ceph_assert(subtrees.count(dir) == 0);
subtrees[dir]; // create empty subtree bounds list for me.
dir->get(CDir::PIN_SUBTREE);
return;
auto it = subtrees.find(dir);
- assert(it != subtrees.end());
+ ceph_assert(it != subtrees.end());
// merge with parent?
CDir *parent = dir;
{
// evaluate subtree inode filelock?
// (we should scatter the filelock on subtree bounds)
- assert(diri->is_auth());
+ ceph_assert(diri->is_auth());
mds->locker->try_eval(diri, CEPH_LOCK_IFILE | CEPH_LOCK_INEST);
}
} else {
root = get_subtree_root(dir); // subtree root
}
- assert(root);
- assert(subtrees.count(root));
+ ceph_assert(root);
+ ceph_assert(subtrees.count(root));
dout(7) << " current root is " << *root << dendl;
mds_authority_t oldauth = dir->authority();
} else {
// i am a new subtree.
dout(10) << " new subtree at " << *dir << dendl;
- assert(subtrees.count(dir) == 0);
+ ceph_assert(subtrees.count(dir) == 0);
subtrees[dir]; // create empty subtree bounds list for me.
dir->get(CDir::PIN_SUBTREE);
dout(10) << " want bound " << *bound << dendl;
CDir *t = get_subtree_root(bound->get_parent_dir());
if (subtrees[t].count(bound) == 0) {
- assert(t != dir);
+ ceph_assert(t != dir);
dout(10) << " new bound " << *bound << dendl;
adjust_subtree_auth(bound, t->authority());
}
void MDCache::remove_subtree(CDir *dir)
{
dout(10) << "remove_subtree " << *dir << dendl;
- assert(subtrees.count(dir));
- assert(subtrees[dir].empty());
+ ceph_assert(subtrees.count(dir));
+ ceph_assert(subtrees[dir].empty());
subtrees.erase(dir);
dir->put(CDir::PIN_SUBTREE);
if (dir->get_parent_dir()) {
CDir *p = get_subtree_root(dir->get_parent_dir());
- assert(subtrees[p].count(dir));
+ ceph_assert(subtrees[p].count(dir));
subtrees[p].erase(dir);
}
}
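remove_subtree above encodes two invariants of MDCache's subtrees map: a root may only be removed once its bound set is empty, and it must also be erased from the bound set of the enclosing subtree root. A stripped-down sketch of that bookkeeping with placeholder types (Dir standing in for CDir):
#include <cassert>
#include <map>
#include <set>
struct Dir;  // opaque stand-in for CDir
// subtree root -> set of bounding dirfrags, as in MDCache::subtrees
std::map<Dir*, std::set<Dir*>> subtrees;
void remove_root(Dir *dir, Dir *parent_root) {
  auto it = subtrees.find(dir);
  assert(it != subtrees.end());
  assert(it->second.empty());          // only an unbounded root may go
  subtrees.erase(it);
  if (parent_root) {
    assert(subtrees[parent_root].count(dir));
    subtrees[parent_root].erase(dir);  // detach from the enclosing subtree
  }
}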
void MDCache::get_subtree_bounds(CDir *dir, set<CDir*>& bounds)
{
- assert(subtrees.count(dir));
+ ceph_assert(subtrees.count(dir));
bounds = subtrees[dir];
}
CDir *t = *p;
while (t != root) {
t = t->get_parent_dir();
- assert(t);
+ ceph_assert(t);
if (t == dir) {
bounds.insert(*p);
continue;
void MDCache::verify_subtree_bounds(CDir *dir, const set<CDir*>& bounds)
{
// for debugging only.
- assert(subtrees.count(dir));
+ ceph_assert(subtrees.count(dir));
if (bounds != subtrees[dir]) {
dout(0) << "verify_subtree_bounds failed" << dendl;
set<CDir*> b = bounds;
for (const auto &cd : b)
dout(0) << " extra bound " << *cd << dendl;
}
- assert(bounds == subtrees[dir]);
+ ceph_assert(bounds == subtrees[dir]);
}
void MDCache::verify_subtree_bounds(CDir *dir, const list<dirfrag_t>& bounds)
{
// for debugging only.
- assert(subtrees.count(dir));
+ ceph_assert(subtrees.count(dir));
// make sure that any bounds i do have are properly noted as such.
int failed = 0;
failed++;
}
}
- assert(failed == 0);
+ ceph_assert(failed == 0);
}
void MDCache::project_subtree_rename(CInode *diri, CDir *olddir, CDir *newdir)
if (pop) {
map<CInode*,list<pair<CDir*,CDir*> > >::iterator p = projected_subtree_renames.find(diri);
- assert(p != projected_subtree_renames.end());
- assert(!p->second.empty());
- assert(p->second.front().first == olddir);
- assert(p->second.front().second == newdir);
+ ceph_assert(p != projected_subtree_renames.end());
+ ceph_assert(!p->second.empty());
+ ceph_assert(p->second.front().first == olddir);
+ ceph_assert(p->second.front().second == newdir);
p->second.pop_front();
if (p->second.empty())
projected_subtree_renames.erase(p);
} else if (dir->is_subtree_root()) {
// children are fine. change parent.
dout(10) << "moving " << *dir << " from " << *oldparent << " to " << *newparent << dendl;
- assert(subtrees[oldparent].count(dir));
+ ceph_assert(subtrees[oldparent].count(dir));
subtrees[oldparent].erase(dir);
- assert(subtrees.count(newparent));
+ ceph_assert(subtrees.count(newparent));
subtrees[newparent].insert(dir);
// caller is responsible for 'eval diri'
try_subtree_merge_at(dir, NULL, false);
CDir *bound = *p;
CDir *broot = get_subtree_root(bound->get_parent_dir());
if (broot != oldparent) {
- assert(broot == newparent);
+ ceph_assert(broot == newparent);
tomove.push_back(bound);
}
}
CInode *MDCache::pick_inode_snap(CInode *in, snapid_t follows)
{
dout(10) << "pick_inode_snap follows " << follows << " on " << *in << dendl;
- assert(in->last == CEPH_NOSNAP);
+ ceph_assert(in->last == CEPH_NOSNAP);
auto p = snap_inode_map.upper_bound(vinodeno_t(in->ino(), follows));
if (p != snap_inode_map.end() && p->second->ino() == in->ino()) {
*/
CInode *MDCache::cow_inode(CInode *in, snapid_t last)
{
- assert(last >= in->first);
+ ceph_assert(last >= in->first);
CInode *oldin = new CInode(this, true, in->first, last);
oldin->inode = *in->get_previous_projected_inode();
if (in->last != CEPH_NOSNAP) {
CInode *head_in = get_inode(in->ino());
- assert(head_in);
+ ceph_assert(head_in);
if (head_in->split_need_snapflush(oldin, in)) {
oldin->client_snap_caps = in->client_snap_caps;
for (const auto &p : in->client_snap_caps) {
SimpleLock *lock = oldin->get_lock(p.first);
- assert(lock);
+ ceph_assert(lock);
for (const auto &q : p.second) {
oldin->auth_pin(lock);
lock->set_state(LOCK_SNAP_SYNC); // gathering
if (issued & cinode_lock_info[i].wr_caps) {
int lockid = cinode_lock_info[i].lock;
SimpleLock *lock = oldin->get_lock(lockid);
- assert(lock);
+ ceph_assert(lock);
oldin->client_snap_caps[lockid].insert(client);
oldin->auth_pin(lock);
lock->set_state(LOCK_SNAP_SYNC); // gathering
return;
}
dout(10) << "journal_cow_dentry follows " << follows << " on " << *dn << dendl;
- assert(dn->is_auth());
+ ceph_assert(dn->is_auth());
// nothing to cow on a null dentry, fix caller
if (!dnl)
dnl = dn->get_projected_linkage();
- assert(!dnl->is_null());
+ ceph_assert(!dnl->is_null());
CInode *in = dnl->is_primary() ? dnl->get_inode() : NULL;
bool cow_head = false;
if (in && in->state_test(CInode::STATE_AMBIGUOUSAUTH)) {
- assert(in->is_frozen_inode());
+ ceph_assert(in->is_frozen_inode());
cow_head = true;
}
if (in && (in->is_multiversion() || cow_head)) {
SnapRealm *realm = NULL;
if (in->get_projected_parent_dn() != dn) {
- assert(follows == CEPH_NOSNAP);
+ ceph_assert(follows == CEPH_NOSNAP);
realm = dn->dir->inode->find_snaprealm();
snapid_t dir_follows = get_global_snaprealm()->get_newest_seq();
- assert(dir_follows >= realm->get_newest_seq());
+ ceph_assert(dir_follows >= realm->get_newest_seq());
if (dir_follows+1 > dn->first) {
snapid_t oldfirst = dn->first;
follows = dir_follows;
if (in->snaprealm) {
realm = in->snaprealm;
- assert(follows >= realm->get_newest_seq());
+ ceph_assert(follows >= realm->get_newest_seq());
}
} else {
realm = in->find_snaprealm();
if (follows == CEPH_NOSNAP) {
follows = get_global_snaprealm()->get_newest_seq();
- assert(follows >= realm->get_newest_seq());
+ ceph_assert(follows >= realm->get_newest_seq());
}
}
SnapRealm *realm = dn->dir->inode->find_snaprealm();
if (follows == CEPH_NOSNAP) {
follows = get_global_snaprealm()->get_newest_seq();
- assert(follows >= realm->get_newest_seq());
+ ceph_assert(follows >= realm->get_newest_seq());
}
// already cloned?
metablob->add_primary_dentry(olddn, 0, true, false, false, need_snapflush);
mut->add_cow_dentry(olddn);
} else {
- assert(dnl->is_remote());
+ ceph_assert(dnl->is_remote());
CDentry *olddn = dn->dir->add_remote_dentry(dn->get_name(), dnl->get_remote_ino(), dnl->get_remote_d_type(),
oldfirst, follows);
olddn->pre_dirty();
const set<snapid_t> snaps = prealm->get_snaps();
if (cur->last != CEPH_NOSNAP) {
- assert(cur->dirty_old_rstats.empty());
+ ceph_assert(cur->dirty_old_rstats.empty());
set<snapid_t>::const_iterator q = snaps.lower_bound(std::max(first, floor));
if (q == snaps.end() || *q > cur->last)
return;
if (cur->state_test(CInode::STATE_AMBIGUOUSAUTH) && cur->is_auth()) {
// rename src inode is not projected in the slave rename prep case. so we should
// avoid updating the inode.
- assert(linkunlink < 0);
- assert(cur->is_frozen_inode());
+ ceph_assert(linkunlink < 0);
+ ceph_assert(cur->is_frozen_inode());
update = false;
}
_project_rstat_inode_to_frag(*curi, std::max(first, floor), cur->last, parent,
// apply
dout(20) << " project to [" << first << "," << last << "] " << *prstat << dendl;
- assert(last >= first);
+ ceph_assert(last >= first);
prstat->add(delta);
if (update_inode)
inode.accounted_rstat = inode.rstat;
bool do_parent_mtime = flags & PREDIRTY_DIR;
bool shallow = flags & PREDIRTY_SHALLOW;
- assert(mds->mdlog->entry_is_open());
+ ceph_assert(mds->mdlog->entry_is_open());
// make sure stamp is set
if (mut->get_mds_stamp() == utime_t())
<< " " << *in << dendl;
if (!parent) {
- assert(primary_dn);
+ ceph_assert(primary_dn);
parent = in->get_projected_parent_dn()->get_dir();
}
bool first = true;
while (parent) {
//assert(cur->is_auth() || !primary_dn); // this breaks the rename auth twiddle hack
- assert(parent->is_auth());
+ ceph_assert(parent->is_auth());
// opportunistically adjust parent dirfrag
CInode *pin = parent->get_inode();
pf->version = parent->pre_dirty();
if (do_parent_mtime || linkunlink) {
- assert(mut->wrlocks.count(&pin->filelock));
- assert(mut->wrlocks.count(&pin->nestlock));
- assert(cfollows == CEPH_NOSNAP);
+ ceph_assert(mut->wrlocks.count(&pin->filelock));
+ ceph_assert(mut->wrlocks.count(&pin->nestlock));
+ ceph_assert(cfollows == CEPH_NOSNAP);
// update stale fragstat/rstat?
parent->resync_accounted_fragstat();
// because we are about to write into the dirfrag fnode and that needs
// to commit before the lock can cycle.
if (linkunlink) {
- assert(pin->nestlock.get_num_wrlocks() || mut->is_slave());
+ ceph_assert(pin->nestlock.get_num_wrlocks() || mut->is_slave());
}
if (mut->wrlocks.count(&pin->nestlock) == 0) {
if (!mut->wrlocks.count(&pin->versionlock))
mds->locker->local_wrlock_grab(&pin->versionlock, mut);
- assert(mut->wrlocks.count(&pin->nestlock) ||
+ ceph_assert(mut->wrlocks.count(&pin->nestlock) ||
mut->is_slave());
pin->last_dirstat_prop = mut->get_mds_stamp();
if (parent->get_frag() == frag_t()) { // i.e., we are the only frag
if (pi.inode.dirstat.size() < 0)
- assert(!"negative dirstat size" == g_conf()->mds_verify_scatter);
+ ceph_assert(!"negative dirstat size" == g_conf()->mds_verify_scatter);
if (pi.inode.dirstat.size() != pf->fragstat.size()) {
mds->clog->error() << "unmatched fragstat size on single dirfrag "
<< parent->dirfrag() << ", inode has " << pi.inode.dirstat
// trust the dirfrag for now
pi.inode.dirstat = pf->fragstat;
- assert(!"unmatched fragstat size" == g_conf()->mds_verify_scatter);
+ ceph_assert(!"unmatched fragstat size" == g_conf()->mds_verify_scatter);
}
}
}
if (pin->is_base())
break;
parentdn = pin->get_projected_parent_dn();
- assert(parentdn);
+ ceph_assert(parentdn);
// rstat
dout(10) << "predirty_journal_parents frag->inode on " << *parent << dendl;
// trust the dirfrag for now
pi.inode.rstat = pf->rstat;
- assert(!"unmatched rstat rbytes" == g_conf()->mds_verify_scatter);
+ ceph_assert(!"unmatched rstat rbytes" == g_conf()->mds_verify_scatter);
}
}
}
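The ceph_assert(!"unmatched rstat rbytes" == g_conf()->mds_verify_scatter) lines look like a bug but are deliberate: !"string" is the constant false, so the assert fails exactly when mds_verify_scatter is enabled, turning a logged inconsistency into a hard stop only in verification mode while keeping the message in the stringified expression. A self-contained illustration of the same trick, with a plain bool standing in for the config option:
#include <cassert>
bool verify_scatter = false;  // stand-in for g_conf()->mds_verify_scatter
void check_rstat(long accounted, long actual) {
  if (accounted != actual) {
    // false == verify_scatter: aborts only when verification is enabled
    assert(!"unmatched rstat" == verify_scatter);
  }
}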
// now, stick it in the blob
- assert(parent);
- assert(parent->is_auth());
+ ceph_assert(parent);
+ ceph_assert(parent->is_auth());
blob->add_dir_context(parent);
blob->add_dir(parent, true);
for (list<CInode*>::iterator p = lsi.begin();
void MDCache::_logged_master_commit(metareqid_t reqid)
{
dout(10) << "_logged_master_commit " << reqid << dendl;
- assert(uncommitted_masters.count(reqid));
+ ceph_assert(uncommitted_masters.count(reqid));
uncommitted_masters[reqid].ls->uncommitted_masters.erase(reqid);
mds->queue_waiters(uncommitted_masters[reqid].waiters);
uncommitted_masters.erase(reqid);
void MDCache::committed_master_slave(metareqid_t r, mds_rank_t from)
{
dout(10) << "committed_master_slave mds." << from << " on " << r << dendl;
- assert(uncommitted_masters.count(r));
+ ceph_assert(uncommitted_masters.count(r));
uncommitted_masters[r].slaves.erase(from);
if (!uncommitted_masters[r].recovering && uncommitted_masters[r].slaves.empty())
log_master_commit(r);
void MDCache::logged_master_update(metareqid_t reqid)
{
dout(10) << "logged_master_update " << reqid << dendl;
- assert(uncommitted_masters.count(reqid));
+ ceph_assert(uncommitted_masters.count(reqid));
uncommitted_masters[reqid].safe = true;
auto p = pending_masters.find(reqid);
if (p != pending_masters.end()) {
void MDCache::resolve_start(MDSInternalContext *resolve_done_)
{
dout(10) << "resolve_start" << dendl;
- assert(!resolve_done);
+ ceph_assert(!resolve_done);
resolve_done.reset(resolve_done_);
if (mds->mdsmap->get_root() != mds->get_nodeid()) {
remove_ambiguous_slave_update(p->first, mdr->slave_to_mds);
if (!mdr->more()->waiting_on_slave.empty()) {
- assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid());
+ ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid());
// will rollback, no need to wait
mdr->reset_slave_request();
mdr->more()->waiting_on_slave.clear();
if (mdr->is_slave() && mdr->slave_did_prepare()) {
if (mdr->more()->waiting_on_slave.count(who)) {
- assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid());
+ ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid());
dout(10) << " slave request " << *mdr << " no longer need rename notity ack from mds."
<< who << dendl;
mdr->more()->waiting_on_slave.erase(who);
if (mdr->more()->srcdn_auth_mds == who) {
dout(10) << " master request " << *mdr << " waiting for rename srcdn's auth mds."
<< who << " to recover" << dendl;
- assert(mdr->more()->witnessed.count(who) == 0);
+ ceph_assert(mdr->more()->witnessed.count(who) == 0);
if (mdr->more()->is_ambiguous_auth)
mdr->clear_ambiguous_auth();
// rename srcdn's auth mds failed, all witnesses will rollback
<< mdr->more()->srcdn_auth_mds << " to reply" << dendl;
// waiting for the slave (rename srcdn's auth mds), delay sending resolve ack
// until either the request is committing or the slave also fails.
- assert(mdr->more()->waiting_on_slave.size() == 1);
+ ceph_assert(mdr->more()->waiting_on_slave.size() == 1);
pending_masters.insert(p->first);
} else {
dout(10) << " master request " << *mdr << " no longer witnessed by slave mds."
<< who << " to recover" << dendl;
if (srcdn_auth >= 0)
- assert(mdr->more()->witnessed.count(srcdn_auth) == 0);
+ ceph_assert(mdr->more()->witnessed.count(srcdn_auth) == 0);
// discard this peer's prepare (if any)
mdr->more()->witnessed.erase(who);
if (dir->authority().first != who ||
dir->authority().second == mds->get_nodeid())
continue;
- assert(!dir->is_auth());
+ ceph_assert(!dir->is_auth());
// wake any waiters
list<CDir*> q;
if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) {
for (auto p = m->slave_requests.begin(); p != m->slave_requests.end(); ++p) {
if (uncommitted_masters.count(p->first) && !uncommitted_masters[p->first].safe) {
- assert(!p->second.committing);
+ ceph_assert(!p->second.committing);
pending_masters.insert(p->first);
}
}
if (p.second.inode_caps.length() > 0) {
// slave wants to export caps (rename)
- assert(mds->is_resolve());
+ ceph_assert(mds->is_resolve());
inodeno_t ino;
map<client_t,Capability::Export> cap_exports;
decode(ino, q);
decode(cap_exports, q);
- assert(get_inode(ino));
+ ceph_assert(get_inode(ino));
for (map<client_t,Capability::Export>::iterator q = cap_exports.begin();
q != cap_exports.end();
} else {
// ABORT
dout(10) << " ambiguous slave request " << p << " will ABORT" << dendl;
- assert(!p.second.committing);
+ ceph_assert(!p.second.committing);
ack->add_abort(p.first);
}
}
map<dirfrag_t, vector<dirfrag_t> >::iterator next = p;
++next;
CDir *dir = get_dirfrag(p->first);
- assert(dir);
+ ceph_assert(dir);
dout(10) << "checking ambiguous import " << *dir << dendl;
if (migrator->is_importing(dir->dirfrag()) &&
migrator->get_import_peer(dir->dirfrag()) == from) {
- assert(migrator->get_import_state(dir->dirfrag()) == Migrator::IMPORT_ACKING);
+ ceph_assert(migrator->get_import_state(dir->dirfrag()) == Migrator::IMPORT_ACKING);
// check if sender claims the subtree
bool claimed_by_sender = false;
void MDCache::maybe_resolve_finish()
{
- assert(resolve_ack_gather.empty());
- assert(resolve_need_rollback.empty());
+ ceph_assert(resolve_ack_gather.empty());
+ ceph_assert(resolve_need_rollback.empty());
if (!resolve_gather.empty()) {
dout(10) << "maybe_resolve_finish still waiting for resolves ("
finish_committed_masters();
if (resolve_done) {
- assert(mds->is_resolve());
+ ceph_assert(mds->is_resolve());
trim_unlinked_inodes();
recalc_auth_bits(false);
resolve_done.release()->complete(0);
}
if (ambiguous_slave_updates.count(from)) {
- assert(mds->mdsmap->is_clientreplay_or_active_or_stopping(from));
- assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
+ ceph_assert(mds->mdsmap->is_clientreplay_or_active_or_stopping(from));
+ ceph_assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
}
for (const auto &p : ack->commit) {
if (mds->is_resolve()) {
// replay
MDSlaveUpdate *su = get_uncommitted_slave_update(p.first, from);
- assert(su);
+ ceph_assert(su);
// log commit
mds->mdlog->start_submit_entry(new ESlaveUpdate(mds->mdlog, "unknown", p.first, from,
if (p.second.length() > 0)
mdr->more()->inode_import.share(p.second);
- assert(mdr->slave_request == 0); // shouldn't be doing anything!
+ ceph_assert(mdr->slave_request == 0); // shouldn't be doing anything!
request_finish(mdr);
}
}
if (mds->is_resolve()) {
MDSlaveUpdate *su = get_uncommitted_slave_update(metareq, from);
- assert(su);
+ ceph_assert(su);
// perform rollback (and journal a rollback entry)
// note: this will hold up the resolve a bit, until the rollback entries journal.
void MDCache::add_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master, MDSlaveUpdate *su)
{
- assert(uncommitted_slave_updates[master].count(reqid) == 0);
+ ceph_assert(uncommitted_slave_updates[master].count(reqid) == 0);
uncommitted_slave_updates[master][reqid] = su;
for(set<CInode*>::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p)
uncommitted_slave_rename_olddir[*p]++;
void MDCache::finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master)
{
- assert(uncommitted_slave_updates[master].count(reqid));
+ ceph_assert(uncommitted_slave_updates[master].count(reqid));
MDSlaveUpdate* su = uncommitted_slave_updates[master][reqid];
uncommitted_slave_updates[master].erase(reqid);
for(set<CInode*>::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p) {
CInode *diri = *p;
map<CInode*, int>::iterator it = uncommitted_slave_rename_olddir.find(diri);
- assert(it != uncommitted_slave_rename_olddir.end());
+ ceph_assert(it != uncommitted_slave_rename_olddir.end());
it->second--;
if (it->second == 0) {
uncommitted_slave_rename_olddir.erase(it);
}
}
} else
- assert(it->second > 0);
+ ceph_assert(it->second > 0);
}
// removed the inodes that were unlinked by slave update
for(set<CInode*>::iterator p = su->unlinked.begin(); p != su->unlinked.end(); ++p) {
CInode *in = *p;
map<CInode*, int>::iterator it = uncommitted_slave_unlink.find(in);
- assert(it != uncommitted_slave_unlink.end());
+ ceph_assert(it != uncommitted_slave_unlink.end());
it->second--;
if (it->second == 0) {
uncommitted_slave_unlink.erase(it);
if (!in->get_projected_parent_dn())
mds->mdcache->remove_inode_recursive(in);
} else
- assert(it->second > 0);
+ ceph_assert(it->second > 0);
}
delete su;
}
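add_uncommitted_slave_update and finish_uncommitted_slave_update keep per-object reference counts in uncommitted_slave_rename_olddir and uncommitted_slave_unlink, dropping an entry only when its count returns to zero. The shape is a generic refcount map; a minimal sketch with a placeholder Inode type:
#include <cassert>
#include <map>
struct Inode;  // stand-in for CInode
std::map<Inode*, int> refs;
void take(Inode *in) { refs[in]++; }  // operator[] default-initializes to 0
void release(Inode *in) {
  auto it = refs.find(in);
  assert(it != refs.end());
  if (--it->second == 0)
    refs.erase(it);          // remove the entry once unreferenced
  else
    assert(it->second > 0);  // the count must never go negative
}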
if (uncommitted_slave_updates.count(master) &&
uncommitted_slave_updates[master].count(reqid)) {
su = uncommitted_slave_updates[master][reqid];
- assert(su);
+ ceph_assert(su);
}
return su;
}
void MDCache::finish_rollback(metareqid_t reqid) {
auto p = resolve_need_rollback.find(reqid);
- assert(p != resolve_need_rollback.end());
+ ceph_assert(p != resolve_need_rollback.end());
if (mds->is_resolve())
finish_uncommitted_slave_update(reqid, p->second);
resolve_need_rollback.erase(p);
dout(10) << "disambiguate_my_imports" << dendl;
if (!mds->is_resolve()) {
- assert(my_ambiguous_imports.empty());
+ ceph_assert(my_ambiguous_imports.empty());
return;
}
map<dirfrag_t, vector<dirfrag_t> >::iterator q = my_ambiguous_imports.begin();
CDir *dir = get_dirfrag(q->first);
- assert(dir);
+ ceph_assert(dir);
if (dir->authority() != me_ambig) {
dout(10) << "ambiguous import auth known, must not be me " << *dir << dendl;
CDir *root = get_subtree_root(dir);
if (root != dir)
dout(10) << " subtree root is " << *root << dendl;
- assert(root->dir_auth.first != mds->get_nodeid()); // no us!
+ ceph_assert(root->dir_auth.first != mds->get_nodeid()); // no us!
try_trim_non_auth_subtree(root);
} else {
dout(10) << "ambiguous import auth unclaimed, must be me " << *dir << dendl;
mds->mdlog->start_submit_entry(new EImportFinish(dir, true));
}
}
- assert(my_ambiguous_imports.empty());
+ ceph_assert(my_ambiguous_imports.empty());
mds->mdlog->flush();
// verify all my subtrees are unambiguous!
if (dir->is_ambiguous_dir_auth()) {
dout(0) << "disambiguate_imports uh oh, dir_auth is still ambiguous for " << *dir << dendl;
}
- assert(!dir->is_ambiguous_dir_auth());
+ ceph_assert(!dir->is_ambiguous_dir_auth());
}
show_subtrees();
void MDCache::add_ambiguous_import(dirfrag_t base, const vector<dirfrag_t>& bounds)
{
- assert(my_ambiguous_imports.count(base) == 0);
+ ceph_assert(my_ambiguous_imports.count(base) == 0);
my_ambiguous_imports[base] = bounds;
}
void MDCache::cancel_ambiguous_import(CDir *dir)
{
dirfrag_t df = dir->dirfrag();
- assert(my_ambiguous_imports.count(df));
+ ceph_assert(my_ambiguous_imports.count(df));
dout(10) << "cancel_ambiguous_import " << df
<< " bounds " << my_ambiguous_imports[df]
<< " " << *dir
void MDCache::finish_ambiguous_import(dirfrag_t df)
{
- assert(my_ambiguous_imports.count(df));
+ ceph_assert(my_ambiguous_imports.count(df));
vector<dirfrag_t> bounds;
bounds.swap(my_ambiguous_imports[df]);
my_ambiguous_imports.erase(df);
<< " bounds " << bounds
<< dendl;
CDir *dir = get_dirfrag(df);
- assert(dir);
+ ceph_assert(dir);
// adjust dir_auth, import maps
adjust_bounded_subtree_auth(dir, bounds, mds->get_nodeid());
bool MDCache::expire_recursive(CInode *in, expiremap &expiremap)
{
- assert(!in->is_auth());
+ ceph_assert(!in->is_auth());
dout(10) << __func__ << ":" << *in << dendl;
void MDCache::rejoin_start(MDSInternalContext *rejoin_done_)
{
dout(10) << "rejoin_start" << dendl;
- assert(!rejoin_done);
+ ceph_assert(!rejoin_done);
rejoin_done.reset(rejoin_done_);
rejoin_gather = recovery_set;
return;
}
- assert(!migrator->is_importing());
- assert(!migrator->is_exporting());
+ ceph_assert(!migrator->is_importing());
+ ceph_assert(!migrator->is_exporting());
if (!mds->is_rejoin()) {
disambiguate_other_imports();
p != subtrees.end();
++p) {
CDir *dir = p->first;
- assert(dir->is_subtree_root());
+ ceph_assert(dir->is_subtree_root());
if (dir->is_ambiguous_dir_auth()) {
// exporter is recovering, importer is survivor.
- assert(rejoins.count(dir->authority().first));
- assert(!rejoins.count(dir->authority().second));
+ ceph_assert(rejoins.count(dir->authority().first));
+ ceph_assert(!rejoins.count(dir->authority().second));
continue;
}
continue; // skip my own regions!
mds_rank_t auth = dir->get_dir_auth().first;
- assert(auth >= 0);
+ ceph_assert(auth >= 0);
if (rejoins.count(auth) == 0)
continue; // don't care about this node's subtrees
q != mdr->remote_auth_pins.end();
++q) {
if (!q->first->is_auth()) {
- assert(q->second == q->first->authority().first);
+ ceph_assert(q->second == q->first->authority().first);
if (rejoins.count(q->second) == 0) continue;
const MMDSCacheRejoin::ref &rejoin = rejoins[q->second];
<< " " << q->first->get_parent() << dendl;
MDSCacheObjectInfo i;
q->first->get_parent()->set_object_info(i);
- assert(i.ino);
+ ceph_assert(i.ino);
rejoin->add_inode_wrlock(vinodeno_t(i.ino, i.snapid), q->first->get_type(),
mdr->reqid, mdr->attempt);
}
// send the messages
for (auto &p : rejoins) {
- assert(rejoin_sent.count(p.first) == 0);
- assert(rejoin_ack_gather.count(p.first) == 0);
+ ceph_assert(rejoin_sent.count(p.first) == 0);
+ ceph_assert(rejoin_ack_gather.count(p.first) == 0);
rejoin_sent.insert(p.first);
rejoin_ack_gather.insert(p.first);
mds->send_message_mds(p.second, p.first);
rejoin->add_weak_dirfrag(dir->dirfrag());
for (auto &p : dir->items) {
CDentry *dn = p.second;
- assert(dn->last == CEPH_NOSNAP);
+ ceph_assert(dn->last == CEPH_NOSNAP);
CDentry::linkage_t *dnl = dn->get_linkage();
dout(15) << " add_weak_primary_dentry " << *dn << dendl;
- assert(dnl->is_primary());
+ ceph_assert(dnl->is_primary());
CInode *in = dnl->get_inode();
- assert(dnl->get_inode()->is_dir());
+ ceph_assert(dnl->get_inode()->is_dir());
rejoin->add_weak_primary_dentry(dir->ino(), dn->get_name(), dn->first, dn->last, in->ino());
in->get_nested_dirfrags(nested);
if (in->is_dirty_scattered()) {
for (auto it2 = in->remote_parents.begin(); it2 != in->remote_parents.end(); ) {
CDentry *remote_dn = *it2;
++it2;
- assert(remote_dn->last != CEPH_NOSNAP);
+ ceph_assert(remote_dn->last != CEPH_NOSNAP);
remote_dn->unlink_remote(remote_dn->get_linkage());
}
}
continue;
} else {
// Inventing a null/remote dentry shouldn't cause problems
- assert(!dnl->is_primary());
+ ceph_assert(!dnl->is_primary());
}
}
// check cap exports
for (auto p = weak->cap_exports.begin(); p != weak->cap_exports.end(); ++p) {
CInode *in = get_inode(p->first);
- assert(!in || in->is_auth());
+ ceph_assert(!in || in->is_auth());
for (auto q = p->second.begin(); q != p->second.end(); ++q) {
dout(10) << " claiming cap import " << p->first << " client." << q->first << " on " << *in << dendl;
Capability *cap = rejoin_import_cap(in, q->first, q->second, from);
encode(imported_caps, ack->imported_caps);
} else {
- assert(mds->is_rejoin());
+ ceph_assert(mds->is_rejoin());
// we may have already received a strong rejoin from the sender.
rejoin_scour_survivor_replicas(from, NULL, acked_inodes, gather_locks);
- assert(gather_locks.empty());
+ ceph_assert(gather_locks.empty());
// check cap exports.
rejoin_client_map.insert(weak->client_map.begin(), weak->client_map.end());
for (auto p = weak->cap_exports.begin(); p != weak->cap_exports.end(); ++p) {
CInode *in = get_inode(p->first);
- assert(!in || in->is_auth());
+ ceph_assert(!in || in->is_auth());
// note
for (auto q = p->second.begin(); q != p->second.end(); ++q) {
dout(10) << " claiming cap import " << p->first << " client." << q->first << dendl;
// assimilate any potentially dirty scatterlock state
for (const auto &p : weak->inode_scatterlocks) {
CInode *in = get_inode(p.first);
- assert(in);
+ ceph_assert(in);
in->decode_lock_state(CEPH_LOCK_IFILE, p.second.file);
in->decode_lock_state(CEPH_LOCK_INEST, p.second.nest);
in->decode_lock_state(CEPH_LOCK_IDFT, p.second.dft);
CInode *diri = get_inode(p.ino);
if (!diri)
dout(0) << " missing dir ino " << p.ino << dendl;
- assert(diri);
+ ceph_assert(diri);
list<frag_t> ls;
if (diri->dirfragtree.is_leaf(p.frag)) {
dout(0) << " missing dir for " << p.frag << " (which maps to " << fg << ") on " << *diri << dendl;
continue;
}
- assert(dir);
+ ceph_assert(dir);
if (dirs_to_share.count(dir)) {
dout(10) << " already have " << p.frag << " -> " << fg << " " << *dir << dendl;
} else {
CInode *diri = get_inode(p.first);
if (!diri)
dout(0) << " missing dir ino " << p.first << dendl;
- assert(diri);
+ ceph_assert(diri);
// weak dentries
CDir *dir = 0;
dir = diri->get_dirfrag(fg);
if (!dir)
dout(0) << " missing dir frag " << fg << " on " << *diri << dendl;
- assert(dir);
- assert(dirs_to_share.count(dir));
+ ceph_assert(dir);
+ ceph_assert(dirs_to_share.count(dir));
}
// and dentry
CDentry *dn = dir->lookup(q.first.name, q.first.snapid);
- assert(dn);
+ ceph_assert(dn);
CDentry::linkage_t *dnl = dn->get_linkage();
- assert(dnl->is_primary());
+ ceph_assert(dnl->is_primary());
if (survivor && dn->is_replica(from))
dentry_remove_replica(dn, from, gather_locks);
// inode
CInode *in = dnl->get_inode();
- assert(in);
+ ceph_assert(in);
if (survivor && in->is_replica(from))
inode_remove_replica(in, from, true, gather_locks);
p != weak->weak_inodes.end();
++p) {
CInode *in = get_inode(*p);
- assert(in); // hmm fixme wrt stray?
+ ceph_assert(in); // hmm fixme wrt stray?
if (survivor && in->is_replica(from))
inode_remove_replica(in, from, true, gather_locks);
unsigned inonce = in->add_replica(from);
}
}
- assert(rejoin_gather.count(from));
+ ceph_assert(rejoin_gather.count(from));
rejoin_gather.erase(from);
if (survivor) {
// survivor. do everything now.
for (const auto &p : weak->inode_scatterlocks) {
CInode *in = get_inode(p.first);
- assert(in);
+ ceph_assert(in);
dout(10) << " including base inode (due to potential scatterlock update) " << *in << dendl;
acked_inodes.insert(in->vino());
ack->add_inode_base(in, mds->mdsmap->get_up_features());
if (!in)
in = rejoin_invent_inode(df.ino, CEPH_NOSNAP);
if (!in->is_dir()) {
- assert(in->state_test(CInode::STATE_REJOINUNDEF));
+ ceph_assert(in->state_test(CInode::STATE_REJOINUNDEF));
in->inode.mode = S_IFDIR;
in->inode.dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash;
}
mds_rank_t from = mds_rank_t(strong->get_source().num());
// only a recovering node will get a strong rejoin.
- assert(mds->is_rejoin());
+ ceph_assert(mds->is_rejoin());
// assimilate any potentially dirty scatterlock state
for (const auto &p : strong->inode_scatterlocks) {
CInode *in = get_inode(p.first);
- assert(in);
+ ceph_assert(in);
in->decode_lock_state(CEPH_LOCK_IFILE, p.second.file);
in->decode_lock_state(CEPH_LOCK_INEST, p.second.nest);
in->decode_lock_state(CEPH_LOCK_IDFT, p.second.dft);
else {
frag_t fg = diri->pick_dirfrag(ss.name);
dir = diri->get_dirfrag(fg);
- assert(dir);
+ ceph_assert(dir);
dn = dir->lookup(ss.name, ss.snapid);
}
if (!dn) {
const MMDSCacheRejoin::slave_reqid& r = ss_req_it->second;
dout(10) << " dn xlock by " << r << " on " << *dn << dendl;
MDRequestRef mdr = request_get(r.reqid); // should have this from auth_pin above.
- assert(mdr->is_auth_pinned(dn));
+ ceph_assert(mdr->is_auth_pinned(dn));
if (!mdr->xlocks.count(&dn->versionlock)) {
- assert(dn->versionlock.can_xlock_local());
+ ceph_assert(dn->versionlock.can_xlock_local());
dn->versionlock.get_xlock(mdr, mdr->get_client());
mdr->xlocks.insert(&dn->versionlock);
mdr->locks.insert(&dn->versionlock);
if (d.is_primary()) {
if (vinodeno_t(d.ino, ss.snapid) != dnl->get_inode()->vino()) {
// the survivor missed MDentryUnlink+MDentryLink messages ?
- assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0);
+ ceph_assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0);
CInode *in = get_inode(d.ino, ss.snapid);
- assert(in);
- assert(in->get_parent_dn());
+ ceph_assert(in);
+ ceph_assert(in->get_parent_dn());
rejoin_unlinked_inodes[from].insert(in);
dout(7) << " sender has primary dentry but wrong inode" << dendl;
}
} else {
// the survivor missed MDentryLink message ?
- assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0);
+ ceph_assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0);
dout(7) << " sender doesn't have primay dentry" << dendl;
}
} else {
if (d.is_primary()) {
// the survivor missed MDentryUnlink message ?
CInode *in = get_inode(d.ino, ss.snapid);
- assert(in);
- assert(in->get_parent_dn());
+ ceph_assert(in);
+ ceph_assert(in->get_parent_dn());
rejoin_unlinked_inodes[from].insert(in);
dout(7) << " sender has primary dentry but we don't" << dendl;
}
for (const auto &p : strong->strong_inodes) {
CInode *in = get_inode(p.first);
- assert(in);
+ ceph_assert(in);
in->add_replica(from, p.second.nonce);
dout(10) << " have " << *in << dendl;
else
mdr = request_start_slave(r.reqid, r.attempt, strong);
if (strong->frozen_authpin_inodes.count(in->vino())) {
- assert(!in->get_num_auth_pins());
+ ceph_assert(!in->get_num_auth_pins());
mdr->freeze_auth_pin(in);
} else {
- assert(!in->is_frozen_auth_pin());
+ ceph_assert(!in->is_frozen_auth_pin());
}
mdr->auth_pin(in);
}
SimpleLock *lock = in->get_lock(q.first);
dout(10) << " inode xlock by " << q.second << " on " << *lock << " on " << *in << dendl;
MDRequestRef mdr = request_get(q.second.reqid); // should have this from auth_pin above.
- assert(mdr->is_auth_pinned(in));
+ ceph_assert(mdr->is_auth_pinned(in));
if (!mdr->xlocks.count(&in->versionlock)) {
- assert(in->versionlock.can_xlock_local());
+ ceph_assert(in->versionlock.can_xlock_local());
in->versionlock.get_xlock(mdr, mdr->get_client());
mdr->xlocks.insert(&in->versionlock);
mdr->locks.insert(&in->versionlock);
dout(10) << " inode wrlock by " << r << " on " << *lock << " on " << *in << dendl;
MDRequestRef mdr = request_get(r.reqid); // should have this from auth_pin above.
if (in->is_auth())
- assert(mdr->is_auth_pinned(in));
+ ceph_assert(mdr->is_auth_pinned(in));
lock->set_state(LOCK_MIX);
if (lock == &in->filelock)
in->loner_cap = -1;
}
// done?
- assert(rejoin_gather.count(from));
+ ceph_assert(rejoin_gather.count(from));
rejoin_gather.erase(from);
if (rejoin_gather.empty() && rejoin_ack_gather.count(mds->get_nodeid())) {
rejoin_gather_finish();
dout(7) << "handle_cache_rejoin_ack from " << ack->get_source() << dendl;
mds_rank_t from = mds_rank_t(ack->get_source().num());
- assert(mds->get_state() >= MDSMap::STATE_REJOIN);
+ ceph_assert(mds->get_state() >= MDSMap::STATE_REJOIN);
bool survivor = !mds->is_rejoin();
// for sending cache expire message
CDentry::linkage_t *dnl = dn->get_linkage();
- assert(dn->last == q.first.snapid);
+ ceph_assert(dn->last == q.first.snapid);
if (dn->first != q.second.first) {
dout(10) << " adjust dn.first " << dn->first << " -> " << q.second.first << " on " << *dn << dendl;
dn->first = q.second.first;
for (list<CDir*>::iterator q = ls.begin(); q != ls.end(); ++q) {
if ((*q)->is_auth() || ack->strong_dirfrags.count((*q)->dirfrag()))
continue;
- assert((*q)->get_num_any() == 0);
+ ceph_assert((*q)->get_num_any() == 0);
(*p)->close_dirfrag((*q)->get_frag());
}
}
// full dirfrags
for (const auto &p : ack->dirfrag_bases) {
CDir *dir = get_dirfrag(p.first);
- assert(dir);
+ ceph_assert(dir);
auto q = p.second.cbegin();
dir->_decode_base(q);
dout(10) << " got dir replica " << *dir << dendl;
decode(last, p);
decode(basebl, p);
CInode *in = get_inode(ino, last);
- assert(in);
+ ceph_assert(in);
auto q = basebl.cbegin();
snapid_t sseq = 0;
if (in->snaprealm)
decode(lockbl, p);
CInode *in = get_inode(ino, last);
- assert(in);
+ ceph_assert(in);
in->set_replica_nonce(nonce);
auto q = lockbl.cbegin();
in->_decode_locks_rejoin(q, rejoin_waiters, rejoin_eval_locks, survivor);
// FIXME: This can happen if entire subtree, together with the inode subtree root
// belongs to, were trimmed between sending cache rejoin and receiving rejoin ack.
- assert(isolated_inodes.empty());
+ ceph_assert(isolated_inodes.empty());
map<inodeno_t,map<client_t,Capability::Import> > peer_imported;
auto bp = ack->imported_caps.cbegin();
p != peer_imported.end();
++p) {
auto& ex = cap_exports.at(p->first);
- assert(ex.first == from);
+ ceph_assert(ex.first == from);
for (map<client_t,Capability::Import>::iterator q = p->second.begin();
q != p->second.end();
++q) {
auto r = ex.second.find(q->first);
- assert(r != ex.second.end());
+ ceph_assert(r != ex.second.end());
dout(10) << " exporting caps for client." << q->first << " ino " << p->first << dendl;
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(q->first.v));
ex.second.erase(r);
}
- assert(ex.second.empty());
+ ceph_assert(ex.second.empty());
}
for (auto p : updated_realms) {
}
// done?
- assert(rejoin_ack_gather.count(from));
+ ceph_assert(rejoin_ack_gather.count(from));
rejoin_ack_gather.erase(from);
if (!survivor) {
if (rejoin_gather.empty()) {
}
}
- assert(rejoin_undef_inodes.empty());
+ ceph_assert(rejoin_undef_inodes.empty());
}
void MDCache::rejoin_gather_finish()
{
dout(10) << "rejoin_gather_finish" << dendl;
- assert(mds->is_rejoin());
- assert(rejoin_ack_gather.count(mds->get_nodeid()));
+ ceph_assert(mds->is_rejoin());
+ ceph_assert(rejoin_ack_gather.count(mds->get_nodeid()));
if (open_undef_inodes_dirfrags())
return;
if (ret < 0) {
cap_imports_missing.insert(ino);
} else if (ret == mds->get_nodeid()) {
- assert(get_inode(ino));
+ ceph_assert(get_inode(ino));
} else {
auto p = cap_imports.find(ino);
- assert(p != cap_imports.end());
+ ceph_assert(p != cap_imports.end());
for (auto q = p->second.begin(); q != p->second.end(); ++q) {
- assert(q->second.count(MDS_RANK_NONE));
- assert(q->second.size() == 1);
+ ceph_assert(q->second.count(MDS_RANK_NONE));
+ ceph_assert(q->second.size() == 1);
rejoin_export_caps(p->first, q->first, q->second[MDS_RANK_NONE], ret);
}
cap_imports.erase(p);
}
- assert(cap_imports_num_opening > 0);
+ ceph_assert(cap_imports_num_opening > 0);
cap_imports_num_opening--;
if (cap_imports_num_opening == 0) {
map<client_t,pair<Session*,uint64_t> > session_map;
C_MDC_RejoinSessionsOpened(MDCache *c) : MDCacheLogContext(c) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
mdcache->rejoin_open_sessions_finish(session_map);
}
};
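// Editor's note: finish(int r) overrides like the one above follow Ceph's
// Context callback pattern; asserting r == 0 encodes "this step cannot fail
// softly". A minimal sketch of the pattern (a simplification, not the real
// class hierarchy):
//
//   struct Context {
//     virtual ~Context() {}
//     virtual void finish(int r) = 0;
//     void complete(int r) { finish(r); delete this; }
//   };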
cap_imports_missing.insert(ino);
} else if (ret != mds->get_nodeid()) {
for (auto q = p->second.begin(); q != p->second.end(); ++q) {
- assert(q->second.count(MDS_RANK_NONE));
- assert(q->second.size() == 1);
+ ceph_assert(q->second.count(MDS_RANK_NONE));
+ ceph_assert(q->second.size() == 1);
rejoin_export_caps(p->first, q->first, q->second[MDS_RANK_NONE], ret);
}
cap_imports.erase(p);
open_file_table.wait_for_prefetch(
new MDSInternalContextWrapper(mds,
new FunctionContext([this](int r) {
- assert(rejoin_gather.count(mds->get_nodeid()));
+ ceph_assert(rejoin_gather.count(mds->get_nodeid()));
process_imported_caps();
})
)
for (auto p = cap_imports.begin(); p != cap_imports.end(); ++p) {
CInode *in = get_inode(p->first);
if (in) {
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
cap_imports_missing.erase(p->first);
continue;
}
p != rejoin_slave_exports.end();
++p) {
CInode *in = get_inode(p->first);
- assert(in);
+ ceph_assert(in);
for (map<client_t,Capability::Export>::iterator q = p->second.second.begin();
q != p->second.second.end();
++q) {
cap->merge(q->second, true);
Capability::Import& im = rejoin_imported_caps[p->second.first][p->first][q->first];
- assert(cap->get_last_seq() == im.issue_seq);
- assert(cap->get_mseq() == im.mseq);
+ ceph_assert(cap->get_last_seq() == im.issue_seq);
+ ceph_assert(cap->get_mseq() == im.mseq);
cap->set_cap_id(im.cap_id);
// send cap import because we assigned a new cap ID
do_cap_import(session, in, cap, q->second.cap_id, q->second.seq, q->second.mseq - 1,
++p;
continue;
}
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
for (auto q = p->second.begin(); q != p->second.end(); ++q) {
Session *session;
{
} else {
trim_non_auth();
- assert(rejoin_gather.count(mds->get_nodeid()));
+ ceph_assert(rejoin_gather.count(mds->get_nodeid()));
rejoin_gather.erase(mds->get_nodeid());
- assert(!rejoin_ack_gather.count(mds->get_nodeid()));
+ ceph_assert(!rejoin_ack_gather.count(mds->get_nodeid()));
maybe_send_pending_rejoins();
}
return false;
for (int i = 0; i < num_cinode_locks; i++) {
int lockid = cinode_lock_info[i].lock;
SimpleLock *lock = in->get_lock(lockid);
- assert(lock);
+ ceph_assert(lock);
in->client_snap_caps[lockid].insert(client);
in->auth_pin(lock);
lock->set_state(LOCK_SNAP_SYNC);
void MDCache::prepare_realm_merge(SnapRealm *realm, SnapRealm *parent_realm,
map<client_t,MClientSnap::ref>& splits)
{
- assert(parent_realm);
+ ceph_assert(parent_realm);
vector<inodeno_t> split_inos;
vector<inodeno_t> split_realms;
split_realms.push_back((*p)->inode->ino());
for (const auto& p : realm->client_caps) {
- assert(!p.second->empty());
+ ceph_assert(!p.second->empty());
auto em = splits.emplace(std::piecewise_construct, std::forward_as_tuple(p.first), std::forward_as_tuple());
if (em.second) {
auto update = MClientSnap::create(CEPH_SNAP_OP_SPLIT);
reap->set_cap_peer(p_cap_id, p_seq, p_mseq, peer, p_flags);
mds->send_message_client_counted(reap, session);
} else {
- assert(0);
+ ceph_assert(0);
}
}
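// Editor's note: ceph_assert(0) above keeps the original assert(0) verbatim.
// Where the failure branch is unconditional, an explicit abort reads more
// directly; assuming ceph_abort() is available in this tree (it is in later
// Ceph code), the branch could equivalently be written as:
//
//   } else {
//     ceph_abort();  // unreachable by construction
//   }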
{
dout(10) << "do_delayed_cap_imports" << dendl;
- assert(delayed_imported_caps.empty());
+ ceph_assert(delayed_imported_caps.empty());
}
struct C_MDC_OpenSnapRealms : public MDCacheContext {
while (it != rejoin_pending_snaprealms.end()) {
CInode *in = *it;
SnapRealm *realm = in->snaprealm;
- assert(realm);
+ ceph_assert(realm);
if (realm->have_past_parents_open() ||
realm->open_parents(gather.new_sub())) {
dout(10) << " past parents now open on " << *in << dendl;
!p.end(); ++p) {
CInode *child = *p;
auto q = reconnected_caps.find(child->ino());
- assert(q != reconnected_caps.end());
+ ceph_assert(q != reconnected_caps.end());
for (auto r = q->second.begin(); r != q->second.end(); ++r) {
if (r->second.snap_follows > 0) {
if (r->second.snap_follows < child->first - 1) {
gather.activate();
} else {
// for multimds, must succeed the first time
- assert(recovery_set.empty());
+ ceph_assert(recovery_set.empty());
dout(10) << "open_snaprealms - waiting for "
<< gather.num_subs_remaining() << dendl;
dout(5) << warn_str.str() << dendl;
}
}
- assert(rejoin_waiters.empty());
- assert(rejoin_pending_snaprealms.empty());
+ ceph_assert(rejoin_waiters.empty());
+ ceph_assert(rejoin_pending_snaprealms.empty());
dout(10) << "open_snaprealms - all open" << dendl;
do_delayed_cap_imports();
- assert(rejoin_done);
+ ceph_assert(rejoin_done);
rejoin_done.release()->complete(0);
reconnected_caps.clear();
}
p != rejoin_undef_inodes.end();
++p) {
CInode *in = *p;
- assert(!in->is_base());
+ ceph_assert(!in->is_base());
fetch_queue.insert(in->get_parent_dir());
}
if (diri->state_test(CInode::STATE_REJOINUNDEF))
continue;
if (dir->state_test(CDir::STATE_REJOINUNDEF))
- assert(diri->dirfragtree.is_leaf(dir->get_frag()));
+ ceph_assert(diri->dirfragtree.is_leaf(dir->get_frag()));
dir->fetch(gather.new_sub());
}
- assert(gather.has_subs());
+ ceph_assert(gather.has_subs());
gather.activate();
return true;
}
rejoin_undef_inodes.erase(in);
if (in->is_dir()) {
// FIXME: re-hash dentries if necessary
- assert(in->inode.dir_layout.dl_dir_hash == g_conf()->mds_default_dir_hash);
+ ceph_assert(in->inode.dir_layout.dl_dir_hash == g_conf()->mds_default_dir_hash);
if (in->has_dirfrags() && !in->dirfragtree.is_leaf(frag_t())) {
CDir *dir = in->get_dirfrag(frag_t());
- assert(dir);
+ ceph_assert(dir);
rejoin_undef_dirfrags.erase(dir);
in->force_dirfrags();
list<CDir*> ls;
void MDCache::queue_file_recover(CInode *in)
{
dout(10) << "queue_file_recover " << *in << dendl;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
// cow?
/*
snapid_t snapid = *s.begin();
CInode *cow_inode = 0;
journal_cow_inode(mut, &le->metablob, in, snapid-1, &cow_inode);
- assert(cow_inode);
+ ceph_assert(cow_inode);
recovery_queue.enqueue(cow_inode);
s.erase(*s.begin());
}
if (in->filelock.is_stable()) {
in->auth_pin(&in->filelock);
} else {
- assert(in->filelock.get_state() == LOCK_XLOCKSNAP);
+ ceph_assert(in->filelock.get_state() == LOCK_XLOCKSNAP);
}
in->filelock.set_state(LOCK_PRE_SCAN);
rejoin_recover_q.push_back(in);
if (!in->client_need_snapflush.empty() &&
(in->get_caps_issued() & CEPH_CAP_FILE_BUFFER)) {
- assert(in->filelock.is_xlocked());
+ ceph_assert(in->filelock.is_xlocked());
in->filelock.set_xlock_snap_sync(new C_MDC_RetryTruncate(this, in, ls));
mds->locker->issue_caps(in);
return;
MDCacheIOContext(c, false), in(i), ls(l) {
}
void finish(int r) override {
- assert(r == 0 || r == -ENOENT);
+ ceph_assert(r == 0 || r == -ENOENT);
mdcache->truncate_inode_finish(in, ls);
}
void print(ostream& out) const override {
<< pi->truncate_from << " -> " << pi->truncate_size
<< " on " << *in << dendl;
- assert(pi->is_truncating());
- assert(pi->truncate_size < (1ULL << 63));
- assert(pi->truncate_from < (1ULL << 63));
- assert(pi->truncate_size < pi->truncate_from);
+ ceph_assert(pi->is_truncating());
+ ceph_assert(pi->truncate_size < (1ULL << 63));
+ ceph_assert(pi->truncate_from < (1ULL << 63));
+ ceph_assert(pi->truncate_size < pi->truncate_from);
SnapRealm *realm = in->find_snaprealm();
} else {
dout(10) << " NO realm, using null context" << dendl;
snapc = &nullsnap;
- assert(in->last == CEPH_NOSNAP);
+ ceph_assert(in->last == CEPH_NOSNAP);
}
dout(10) << "_truncate_inode snapc " << snapc << " on " << *in << dendl;
filer.truncate(in->inode.ino, &in->inode.layout, *snapc,
dout(10) << "truncate_inode_finish " << *in << dendl;
set<CInode*>::iterator p = ls->truncating_inodes.find(in);
- assert(p != ls->truncating_inodes.end());
+ ceph_assert(p != ls->truncating_inodes.end());
ls->truncating_inodes.erase(p);
// update
<< ls->seq << "/" << ls->offset << dendl;
// if we have the logseg the truncate started in, it must be in our list.
set<CInode*>::iterator p = ls->truncating_inodes.find(in);
- assert(p != ls->truncating_inodes.end());
+ ceph_assert(p != ls->truncating_inodes.end());
ls->truncating_inodes.erase(p);
in->put(CInode::PIN_TRUNCATING);
}
if (!in->client_need_snapflush.empty() &&
(in->get_caps_issued() & CEPH_CAP_FILE_BUFFER)) {
- assert(in->filelock.is_stable());
+ ceph_assert(in->filelock.is_stable());
in->filelock.set_state(LOCK_XLOCKDONE);
in->auth_pin(&in->filelock);
in->filelock.set_xlock_snap_sync(new C_MDC_RetryTruncate(this, in, ls));
CDentry::linkage_t *dnl = dn->get_linkage();
CDir *dir = dn->get_dir();
- assert(dir);
+ ceph_assert(dir);
CDir *con = get_subtree_root(dir);
if (con)
dout(12) << " in container " << *con << dendl;
else {
dout(12) << " no container; under a not-yet-linked dir" << dendl;
- assert(dn->is_auth());
+ ceph_assert(dn->is_auth());
}
// If replica dentry is not readable, it's likely we will receive
} else if (dnl->is_primary()) {
// expire the inode, too.
CInode *in = dnl->get_inode();
- assert(in);
+ ceph_assert(in);
if (trim_inode(dn, in, con, expiremap))
return true; // purging stray instead of trimming
} else {
- assert(dnl->is_null());
+ ceph_assert(dnl->is_null());
}
if (!dn->is_auth()) {
if (a == mds->get_nodeid()) continue; // on export, ignore myself.
dout(12) << " sending expire to mds." << a << " on " << *dn << dendl;
- assert(a != mds->get_nodeid());
+ ceph_assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
em.first->second = MCacheExpire::create(mds->get_nodeid());
dout(15) << "trim_dirfrag " << *dir << dendl;
if (dir->is_subtree_root()) {
- assert(!dir->is_auth() ||
+ ceph_assert(!dir->is_auth() ||
(!dir->is_replicated() && dir->inode->is_base()));
remove_subtree(dir); // remove from subtree map
}
- assert(dir->get_num_ref() == 0);
+ ceph_assert(dir->get_num_ref() == 0);
CInode *in = dir->get_inode();
if (a == mds->get_nodeid()) continue; // on export, ignore myself.
dout(12) << " sending expire to mds." << a << " on " << *dir << dendl;
- assert(a != mds->get_nodeid());
+ ceph_assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */
bool MDCache::trim_inode(CDentry *dn, CInode *in, CDir *con, expiremap& expiremap)
{
dout(15) << "trim_inode " << *in << dendl;
- assert(in->get_num_ref() == 0);
+ ceph_assert(in->get_num_ref() == 0);
if (in->is_dir()) {
// If replica inode's dirfragtreelock is not readable, it's likely
in->get_dirfrags(dfls);
for (list<CDir*>::iterator p = dfls.begin(); p != dfls.end(); ++p) {
CDir *dir = *p;
- assert(!dir->is_subtree_root());
+ ceph_assert(!dir->is_subtree_root());
trim_dirfrag(dir, con ? con:dir, expiremap); // if no container (e.g. root dirfrag), use *p
}
}
if (a == mds->get_nodeid()) continue; // on export, ignore myself.
dout(12) << " sending expire to mds." << a << " on " << *in << dendl;
- assert(a != mds->get_nodeid());
+ ceph_assert(a != mds->get_nodeid());
auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple());
if (em.second)
em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */
} else {
// non-auth. expire.
CDir *dir = dn->get_dir();
- assert(dir);
+ ceph_assert(dir);
// unlink the dentry
dout(10) << " removing " << *dn << dendl;
in->get_dirfrags(ls);
for (list<CDir*>::iterator p = ls.begin(); p != ls.end(); ++p) {
CDir *subdir = *p;
- assert(!subdir->is_subtree_root());
+ ceph_assert(!subdir->is_subtree_root());
in->close_dirfrag(subdir->dirfrag().frag);
}
dir->unlink_inode(dn, false);
remove_inode(in);
}
else {
- assert(dnl->is_null());
+ ceph_assert(dnl->is_null());
}
- assert(!dir->has_bloom());
+ ceph_assert(!dir->has_bloom());
dir->remove_dentry(dn);
// adjust the dir state
dir->state_clear(CDir::STATE_COMPLETE); // dir incomplete!
p != ls.end();
++p) {
dout(10) << " removing " << **p << dendl;
- assert((*p)->get_num_ref() == 1); // SUBTREE
+ ceph_assert((*p)->get_num_ref() == 1); // SUBTREE
remove_subtree((*p));
in->close_dirfrag((*p)->dirfrag().frag);
}
dout(10) << " removing " << *in << dendl;
- assert(!in->get_parent_dn());
- assert(in->get_num_ref() == 0);
+ ceph_assert(!in->get_parent_dn());
+ ceph_assert(in->get_num_ref() == 0);
remove_inode(in);
}
}
dout(20) << "trim_non_auth_subtree(" << dir << ") removing inode " << in << " with dentry" << dn << dendl;
dir->unlink_inode(dn, false);
remove_inode(in);
- assert(!dir->has_bloom());
+ ceph_assert(!dir->has_bloom());
dir->remove_dentry(dn);
} else {
dout(20) << "trim_non_auth_subtree(" << dir << ") keeping inode " << in << " with dentry " << dn <<dendl;
diri->close_dirfrag(dir->get_frag());
dout(10) << " removing " << *diri << dendl;
- assert(!diri->get_parent_dn());
- assert(diri->get_num_ref() == 0);
+ ceph_assert(!diri->get_parent_dn());
+ ceph_assert(diri->get_num_ref() == 0);
remove_inode(diri);
}
break;
// check container?
if (p.first.ino > 0) {
CInode *expired_inode = get_inode(p.first.ino);
- assert(expired_inode); // we had better have this.
+ ceph_assert(expired_inode); // we had better have this.
CDir *parent_dir = expired_inode->get_approx_dirfrag(p.first.frag);
- assert(parent_dir);
+ ceph_assert(parent_dir);
int export_state = -1;
if (parent_dir->is_auth() && parent_dir->is_exporting()) {
export_state = migrator->get_export_state(parent_dir);
- assert(export_state >= 0);
+ ceph_assert(export_state >= 0);
}
if (!parent_dir->is_auth() ||
// not auth.
dout(7) << "delaying nonauth|warned expires for " << *parent_dir << dendl;
- assert(parent_dir->is_frozen_tree_root());
+ ceph_assert(parent_dir->is_frozen_tree_root());
// make a message container
em.first->second->add_realm(p.first, p.second);
continue;
}
- assert(export_state <= Migrator::EXPORT_PREPPING ||
+ ceph_assert(export_state <= Migrator::EXPORT_PREPPING ||
(export_state == Migrator::EXPORT_WARNING &&
!migrator->export_has_warned(parent_dir, from)));
if (!in) {
dout(0) << " inode expire on " << q.first << " from " << from
<< ", don't have it" << dendl;
- assert(in);
+ ceph_assert(in);
}
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
dout(20) << __func__ << ": expiring inode " << *in << dendl;
// check nonce
}
dout(0) << " dir expire on " << q.first << " from " << from
<< ", don't have it" << dendl;
- assert(dir);
+ ceph_assert(dir);
}
dout(20) << __func__ << ": expiring dirfrag " << *dir << dendl;
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
// check nonce
if (nonce == dir->get_replica_nonce(from)) {
for (const auto &pd : p.second.dentries) {
dout(10) << " dn expires in dir " << pd.first << dendl;
CInode *diri = get_inode(pd.first.ino);
- assert(diri);
+ ceph_assert(diri);
CDir *dir = diri->get_dirfrag(pd.first.frag);
if (!dir) {
dout(0) << " dn expires on " << pd.first << " from " << from
<< ", must have refragmented" << dendl;
} else {
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
}
for (const auto &p : pd.second) {
} else {
// which dirfrag for this dentry?
CDir *dir = diri->get_dirfrag(diri->pick_dirfrag(p.first.first));
- assert(dir);
- assert(dir->is_auth());
+ ceph_assert(dir);
+ ceph_assert(dir->is_auth());
dn = dir->lookup(p.first.first, p.first.second);
}
else
dout(0) << " missing dentry for " << p.first.first << " snap " << p.first.second << dendl;
}
- assert(dn);
+ ceph_assert(dn);
if (nonce == dn->get_replica_nonce(from)) {
dout(7) << " dentry_expire on " << *dn << " from mds." << from << dendl;
static MemoryModel::snap baseline = last;
// check client caps
- assert(CInode::count() == inode_map.size() + snap_inode_map.size() + num_shadow_inodes);
+ ceph_assert(CInode::count() == inode_map.size() + snap_inode_map.size() + num_shadow_inodes);
double caps_per_inode = 0.0;
if (CInode::count())
caps_per_inode = (double)Capability::count() / (double)CInode::count();
}
if (num_auth_subtree > 0) {
- assert(mds->get_nodeid() > 0);
+ ceph_assert(mds->get_nodeid() > 0);
dout(7) << "still have " << num_auth_subtree << " auth subtrees" << dendl;
show_subtrees();
return false;
show_cache();
return false;
}
- assert(!migrator->is_exporting());
- assert(!migrator->is_importing());
+ ceph_assert(!migrator->is_exporting());
+ ceph_assert(!migrator->is_importing());
if ((myin && myin->is_auth_pinned()) ||
(mydir && mydir->is_auth_pinned())) {
if (!did_shutdown_log_cap) {
// flush journal header
dout(7) << "writing header for (now-empty) journal" << dendl;
- assert(mds->mdlog->empty());
+ ceph_assert(mds->mdlog->empty());
mds->mdlog->write_head(0);
// NOTE: filer active checker below will block us until this completes.
did_shutdown_log_cap = true;
remove_subtree(mydir);
myin->close_dirfrag(mydir->get_frag());
}
- assert(subtrees.empty());
+ ceph_assert(subtrees.empty());
if (myin) {
remove_inode(myin);
- assert(!myin);
+ ceph_assert(!myin);
}
if (global_snaprealm) {
default:
derr << "cache unknown message " << m->get_type() << dendl;
- assert(0 == "cache unknown message");
+ ceph_assert(0 == "cache unknown message");
}
}
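// Editor's note: the 0 == "cache unknown message" idiom smuggles a message
// into the assertion text; the string literal decays to a non-null pointer,
// so the comparison is always false and the assert always fires. Assuming a
// ceph_abort_msg() helper exists (later Ceph code provides one), the intent
// can be stated without the trick:
//
//   default:
//     derr << "cache unknown message " << m->get_type() << dendl;
//     ceph_abort_msg("cache unknown message");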
bool null_okay = (onfail == MDS_TRAVERSE_DISCOVERXLOCK);
bool forward = (onfail == MDS_TRAVERSE_FORWARD);
- assert(!forward || mdr); // forward requires a request
+ ceph_assert(!forward || mdr); // forward requires a request
snapid_t snapid = CEPH_NOSNAP;
if (mdr)
return 1;
}
}
- assert(curdir);
+ ceph_assert(curdir);
#ifdef MDS_VERIFY_FRAGSTAT
if (curdir->is_complete())
// do we have inode?
if (!in) {
- assert(dnl->is_remote());
+ ceph_assert(dnl->is_remote());
// do i have it?
in = get_inode(dnl->get_remote_ino());
if (in) {
dn->link_remote(dnl, in);
} else {
dout(7) << "remote link to " << dnl->get_remote_ino() << ", which i don't have" << dendl;
- assert(mdr); // we shouldn't hit non-primary dentries doing a non-mdr traversal!
+ ceph_assert(mdr); // we shouldn't hit non-primary dentries doing a non-mdr traversal!
if (mds->damage_table.is_remote_damaged(dnl->get_remote_ino())) {
dout(4) << "traverse: remote dentry points to damaged ino "
<< *dn << dendl;
if (mds->logger) mds->logger->inc(l_mds_traverse_hit);
dout(10) << "path_traverse finish on snapid " << snapid << dendl;
if (mdr)
- assert(mdr->snapid == snapid);
+ ceph_assert(mdr->snapid == snapid);
return 0;
}
void MDCache::open_remote_dirfrag(CInode *diri, frag_t approxfg, MDSInternalContextBase *fin)
{
dout(10) << "open_remote_dir on " << *diri << dendl;
- assert(diri->is_dir());
- assert(!diri->is_auth());
- assert(diri->get_dirfrag(approxfg) == 0);
+ ceph_assert(diri->is_dir());
+ ceph_assert(!diri->is_auth());
+ ceph_assert(diri->get_dirfrag(approxfg) == 0);
discover_dir_frag(diri, approxfg, fin);
}
else
dnl = dn->get_linkage();
- assert(!dnl->is_null());
+ ceph_assert(!dnl->is_null());
if (dnl->is_primary())
return dnl->inode;
- assert(dnl->is_remote());
+ ceph_assert(dnl->is_remote());
CInode *in = get_inode(dnl->get_remote_ino());
if (in) {
dout(7) << "get_dentry_inode linking in remote in " << *in << dendl;
return;
CInode *parent = in->get_parent_inode();
- assert(parent);
+ ceph_assert(parent);
make_trace(trace, parent);
CDentry *dn = in->get_parent_dn();
void MDCache::_open_ino_fetch_dir(inodeno_t ino, const MMDSOpenIno::const_ref &m, CDir *dir, bool parent)
{
if (dir->state_test(CDir::STATE_REJOINUNDEF))
- assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()));
+ ceph_assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()));
dir->fetch(new C_MDC_OpenInoTraverseDir(this, ino, m, parent));
if (mds->logger)
mds->logger->inc(l_mds_openino_dir_fetch);
fetch_backtrace(ino, info.pool, fin->bl,
new C_OnFinisher(fin, mds->finisher));
} else {
- assert(!info.ancestors.empty());
+ ceph_assert(!info.ancestors.empty());
info.checking = mds->get_nodeid();
open_ino(info.ancestors[0].dirino, mds->mdsmap->get_metadata_pool(),
new C_MDC_OpenInoParentOpened(this, ino), info.want_replica);
c->complete(-ESTALE);
return;
}
- assert(!in);
+ ceph_assert(!in);
ceph_tid_t tid = ++find_ino_peer_last_tid;
find_ino_peer_info_t& fip = find_ino_peer[tid];
// did we win a forward race against a slave?
if (active_requests.count(req->get_reqid())) {
MDRequestRef& mdr = active_requests[req->get_reqid()];
- assert(mdr);
+ ceph_assert(mdr);
if (mdr->is_slave()) {
dout(10) << "request_start already had " << *mdr << ", waiting for finish" << dendl;
mdr->more()->waiting_for_finish.push_back(new C_MDS_RetryMessage(mds, req));
params.dispatched = m->get_dispatch_stamp();
MDRequestRef mdr =
mds->op_tracker.create_request<MDRequestImpl,MDRequestImpl::Params*>(¶ms);
- assert(active_requests.count(mdr->reqid) == 0);
+ ceph_assert(active_requests.count(mdr->reqid) == 0);
active_requests[mdr->reqid] = mdr;
dout(7) << "request_start_slave " << *mdr << " by mds." << by << dendl;
return mdr;
MDRequestRef mdr =
mds->op_tracker.create_request<MDRequestImpl,MDRequestImpl::Params*>(¶ms);
- assert(active_requests.count(mdr->reqid) == 0);
+ ceph_assert(active_requests.count(mdr->reqid) == 0);
active_requests[mdr->reqid] = mdr;
dout(7) << "request_start_internal " << *mdr << " op " << op << dendl;
return mdr;
MDRequestRef MDCache::request_get(metareqid_t rid)
{
ceph::unordered_map<metareqid_t, MDRequestRef>::iterator p = active_requests.find(rid);
- assert(p != active_requests.end());
+ ceph_assert(p != active_requests.end());
dout(7) << "request_get " << rid << " " << *p->second << dendl;
return p->second;
}
if (mdr->has_more() &&
(!mdr->more()->witnessed.empty() || !mdr->more()->waiting_on_slave.empty())) {
if (!mdr->done_locking) {
- assert(mdr->more()->witnessed.empty());
+ ceph_assert(mdr->more()->witnessed.empty());
mdr->aborted = true;
dout(10) << "request_kill " << *mdr << " -- waiting for slave reply, delaying" << dendl;
} else {
dout(10) << "request_kill " << *mdr << " -- already started slave prep, no-op" << dendl;
}
- assert(mdr->used_prealloc_ino == 0);
- assert(mdr->prealloc_inos.empty());
+ ceph_assert(mdr->used_prealloc_ino == 0);
+ ceph_assert(mdr->prealloc_inos.empty());
mdr->session = NULL;
mdr->item_session_request.remove_myself();
vector<inodeno_t> split_realms;
if (notify_clients) {
- assert(in->snaprealm->have_past_parents_open());
+ ceph_assert(in->snaprealm->have_past_parents_open());
if (snapop == CEPH_SNAP_OP_SPLIT) {
// notify clients of update|split
for (elist<CInode*>::iterator p = in->snaprealm->inodes_with_caps.begin(member_offset(CInode, item_caps));
for (const auto& p : realm->client_caps) {
const auto& client = p.first;
const auto& caps = p.second;
- assert(!caps->empty());
+ ceph_assert(!caps->empty());
auto em = updates.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple());
if (em.second) {
void MDCache::send_snap_update(CInode *in, version_t stid, int snap_op)
{
dout(10) << __func__ << " " << *in << " stid " << stid << dendl;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
set<mds_rank_t> mds_set;
if (stid > 0) {
CInode *in = get_inode(m->get_ino());
if (in) {
- assert(!in->is_auth());
+ ceph_assert(!in->is_auth());
if (mds->get_state() > MDSMap::STATE_REJOIN ||
(mds->is_rejoin() && !in->is_rejoining())) {
auto p = m->snap_blob.cbegin();
mds_rank_t whoami = mds->get_nodeid();
mds_rank_t from = mds_rank_t(dis->get_source().num());
- assert(from != whoami);
+ ceph_assert(from != whoami);
if (mds->get_state() <= MDSMap::STATE_REJOIN) {
if (mds->get_state() < MDSMap::STATE_REJOIN &&
<< dendl;
cur = get_inode(dis->get_base_ino());
- assert(cur);
+ ceph_assert(cur);
// add root
reply->starts_with = MDiscoverReply::INODE;
}
}
- assert(reply);
+ ceph_assert(reply);
// add content
// do some fidgeting to include a dir if they asked for the base dir, or just root.
fg = cur->pick_dirfrag(dis->get_dentry(i));
} else {
// requester explicitly specified the frag
- assert(dis->wants_base_dir() || MDS_INO_IS_BASE(dis->get_base_ino()));
+ ceph_assert(dis->wants_base_dir() || MDS_INO_IS_BASE(dis->get_base_ino()));
fg = dis->get_base_dir_frag();
if (!cur->dirfragtree.is_leaf(fg))
fg = cur->dirfragtree[fg.value()];
// original request was sent.
reply->set_base_dir_frag(curdir->get_frag());
} else {
- assert(!curdir->is_ambiguous_auth()); // would be frozen.
+ ceph_assert(!curdir->is_ambiguous_auth()); // would be frozen.
if (!reply->trace.length())
reply->starts_with = MDiscoverReply::DIR;
replicate_dir(curdir, from, reply->trace);
CDentry *dn = 0;
if (curdir->get_version() == 0) {
// fetch newly opened dir
- assert(!curdir->has_bloom());
+ ceph_assert(!curdir->has_bloom());
} else if (dis->get_want().depth() > 0) {
// lookup dentry
dn = curdir->lookup(dis->get_dentry(i), snapid);
else
dn = curdir->add_null_dentry(dis->get_dentry(i), snapid, snapid);
}
- assert(dn);
+ ceph_assert(dn);
// don't add replica to purging dentry/inode
if (dn->state_test(CDentry::STATE_PURGING)) {
// add inode
CInode *next = dnl->get_inode();
- assert(next->is_auth());
+ ceph_assert(next->is_auth());
replicate_inode(next, from, reply->trace, mds->mdsmap->get_up_features());
dout(7) << "handle_discover added inode " << *next << dendl;
}
// how did we do?
- assert(!reply->is_empty());
+ ceph_assert(!reply->is_empty());
dout(7) << "handle_discover sending result back to asker mds." << from << dendl;
mds->send_message(reply, dis->get_connection());
}
if (!p.end() && next == MDiscoverReply::INODE) {
cur = add_replica_inode(p, NULL, finished);
dout(7) << "discover_reply got base inode " << *cur << dendl;
- assert(cur->is_base());
+ ceph_assert(cur->is_base());
next = MDiscoverReply::DIR;
waiting_for_base_ino[from].erase(cur->ino());
}
}
- assert(cur);
+ ceph_assert(cur);
// loop over discover results.
// indexes follow each ([[dir] dentry] inode)
if (next == MDiscoverReply::DIR) {
curdir = add_replica_dir(p, cur, mds_rank_t(m->get_source().num()), finished);
if (cur->ino() == m->get_base_ino() && curdir->get_frag() != m->get_base_dir_frag()) {
- assert(m->get_wanted_base_dir());
+ ceph_assert(m->get_wanted_base_dir());
cur->take_dir_waiting(m->get_base_dir_frag(), finished);
}
} else {
dirfrag_t df;
decode(df, p);
- assert(diri->ino() == df.ino);
+ ceph_assert(diri->ino() == df.ino);
// add it (_replica_)
CDir *dir = diri->get_dirfrag(df.frag);
in->inode_auth.first = in->ino() - MDS_INO_MDSDIR_OFFSET;
dout(10) << "add_replica_inode added " << *in << dendl;
if (dn) {
- assert(dn->get_linkage()->is_null());
+ ceph_assert(dn->get_linkage()->is_null());
dn->dir->link_primary_inode(dn, in);
}
} else {
dout(7) << "handle_dentry_link on " << *dn << dendl;
CDentry::linkage_t *dnl = dn->get_linkage();
- assert(!dn->is_auth());
- assert(dnl->is_null());
+ ceph_assert(!dn->is_auth());
+ ceph_assert(dnl->is_null());
}
}
if (dnl->is_primary()) {
CInode *in = dnl->get_inode();
dn->dir->unlink_inode(dn);
- assert(straydn);
+ ceph_assert(straydn);
straydn->dir->link_primary_inode(straydn, in);
// in->first is lazily updated on replica; drag it forward so
// that we always keep it in sync with the dnq
- assert(straydn->first >= in->first);
+ ceph_assert(straydn->first >= in->first);
in->first = straydn->first;
// update subtree map?
if (m->snapbl.length()) {
bool hadrealm = (in->snaprealm ? true : false);
in->decode_snap_blob(m->snapbl);
- assert(in->snaprealm);
- assert(in->snaprealm->have_past_parents_open());
+ ceph_assert(in->snaprealm);
+ ceph_assert(in->snaprealm->have_past_parents_open());
if (!hadrealm)
do_realm_invalidate_and_update_notify(in, CEPH_SNAP_OP_SPLIT, false);
}
straydn = NULL;
} else {
- assert(!straydn);
- assert(dnl->is_remote());
+ ceph_assert(!straydn);
+ ceph_assert(dnl->is_remote());
dn->dir->unlink_inode(dn);
}
- assert(dnl->is_null());
+ ceph_assert(dnl->is_null());
}
}
// race with trim_dentry()
if (straydn) {
- assert(straydn->get_num_ref() == 0);
- assert(straydn->get_linkage()->is_null());
+ ceph_assert(straydn->get_num_ref() == 0);
+ ceph_assert(straydn->get_linkage()->is_null());
expiremap ex;
trim_dentry(straydn, ex);
send_expire_messages(ex);
if (bits > 0) {
// SPLIT
- assert(srcfrags.size() == 1);
+ ceph_assert(srcfrags.size() == 1);
CDir *dir = srcfrags.front();
dir->split(bits, resultfrags, waiters, replay);
// was i a bound?
if (parent_subtree) {
- assert(subtrees[parent_subtree].count(dir));
+ ceph_assert(subtrees[parent_subtree].count(dir));
subtrees[parent_subtree].erase(dir);
for (list<CDir*>::iterator p = resultfrags.begin();
p != resultfrags.end();
++p) {
- assert((*p)->is_subtree_root());
+ ceph_assert((*p)->is_subtree_root());
subtrees[parent_subtree].insert(*p);
}
}
}
for (CDir *dir : srcfrags) {
- assert(dir->is_subtree_root());
+ ceph_assert(dir->is_subtree_root());
dout(10) << " taking srcfrag subtree bounds from " << *dir << dendl;
map<CDir*, set<CDir*> >::iterator q = subtrees.find(dir);
set<CDir*>::iterator r = q->second.begin();
f->merge(srcfrags, waiters, replay);
if (any_subtree) {
- assert(f->is_subtree_root());
+ ceph_assert(f->is_subtree_root());
subtrees[f].swap(new_bounds);
if (parent_subtree)
subtrees[parent_subtree].insert(f);
void MDCache::split_dir(CDir *dir, int bits)
{
dout(7) << __func__ << " " << *dir << " bits " << bits << dendl;
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
CInode *diri = dir->inode;
list<CDir*> dirs;
MDRequestRef mdr = request_start_internal(CEPH_MDS_OP_FRAGMENTDIR);
mdr->more()->fragment_base = dir->dirfrag();
- assert(fragments.count(dir->dirfrag()) == 0);
+ ceph_assert(fragments.count(dir->dirfrag()) == 0);
fragment_info_t& info = fragments[dir->dirfrag()];
info.mdr = mdr;
info.dirs.push_back(dir);
MDRequestRef mdr = request_start_internal(CEPH_MDS_OP_FRAGMENTDIR);
mdr->more()->fragment_base = basedirfrag;
- assert(fragments.count(basedirfrag) == 0);
+ ceph_assert(fragments.count(basedirfrag) == 0);
fragment_info_t& info = fragments[basedirfrag];
info.mdr = mdr;
info.dirs = dirs;
dir->auth_pin(dir); // until we mark and complete them
dir->state_set(CDir::STATE_FRAGMENTING);
dir->freeze_dir();
- assert(dir->is_freezing_dir());
+ ceph_assert(dir->is_freezing_dir());
}
}
ready = false;
} else if (dir->is_new()) {
dout(15) << " committing new " << *dir << dendl;
- assert(dir->is_dirty());
+ ceph_assert(dir->is_dirty());
dir->commit(0, gather.new_sub(), true);
ready = false;
}
for (auto &p : dir->items) {
CDentry *dn = p.second;
dn->get(CDentry::PIN_FRAGMENTING);
- assert(!dn->state_test(CDentry::STATE_FRAGMENTING));
+ ceph_assert(!dn->state_test(CDentry::STATE_FRAGMENTING));
dn->state_set(CDentry::STATE_FRAGMENTING);
}
dir->state_set(CDir::STATE_DNPINNEDFRAG);
++p) {
CDir *dir = *p;
if (!dir->is_frozen_dir()) {
- assert(dir->is_freezing_dir());
+ ceph_assert(dir->is_freezing_dir());
dir->add_waiter(CDir::WAIT_FROZEN, gather.new_sub());
}
}
CDir *dir = *p;
dout(10) << " frag " << *dir << dendl;
- assert(dir->state_test(CDir::STATE_FRAGMENTING));
+ ceph_assert(dir->state_test(CDir::STATE_FRAGMENTING));
dir->state_clear(CDir::STATE_FRAGMENTING);
if (dir->state_test(CDir::STATE_DNPINNEDFRAG)) {
for (auto &p : dir->items) {
CDentry *dn = p.second;
- assert(dn->state_test(CDentry::STATE_FRAGMENTING));
+ ceph_assert(dn->state_test(CDentry::STATE_FRAGMENTING));
dn->state_clear(CDentry::STATE_FRAGMENTING);
dn->put(CDentry::PIN_FRAGMENTING);
}
bool MDCache::fragment_are_all_frozen(CDir *dir)
{
- assert(dir->is_frozen_dir());
+ ceph_assert(dir->is_frozen_dir());
map<dirfrag_t,fragment_info_t>::iterator p;
for (p = fragments.lower_bound(dirfrag_t(dir->ino(), 0));
p != fragments.end() && p->first.ino == dir->ino();
resultfrags.swap(l);
}
void finish(int r) override {
- assert(r == 0 || r == -ENOENT);
+ ceph_assert(r == 0 || r == -ENOENT);
mdcache->_fragment_finish(basedirfrag, resultfrags);
}
void print(ostream& out) const override {
return;
}
- assert(r == 0);
+ ceph_assert(r == 0);
fragment_info_t& info = it->second;
dout(10) << "fragment_frozen " << basedirfrag.frag << " by " << info.bits
<< " on " << info.dirs.front()->get_inode() << dendl;
mds->queue_waiters(waiters);
for (list<frag_t>::iterator p = le->orig_frags.begin(); p != le->orig_frags.end(); ++p)
- assert(!diri->dirfragtree.is_leaf(*p));
+ ceph_assert(!diri->dirfragtree.is_leaf(*p));
le->metablob.add_dir_context(*info.resultfrags.begin());
for (list<CDir*>::iterator p = info.resultfrags.begin();
{
dirfrag_t basedirfrag = mdr->more()->fragment_base;
map<dirfrag_t,fragment_info_t>::iterator it = fragments.find(basedirfrag);
- assert(it != fragments.end());
+ ceph_assert(it != fragments.end());
fragment_info_t &info = it->second;
CInode *diri = info.resultfrags.front()->get_inode();
{
dirfrag_t basedirfrag = mdr->more()->fragment_base;
map<dirfrag_t,fragment_info_t>::iterator it = fragments.find(basedirfrag);
- assert(it != fragments.end());
+ ceph_assert(it != fragments.end());
fragment_info_t &info = it->second;
CInode *diri = info.resultfrags.front()->get_inode();
for (auto &p : dir->items) {
CDentry *dn = p.second;
- assert(dn->state_test(CDentry::STATE_FRAGMENTING));
+ ceph_assert(dn->state_test(CDentry::STATE_FRAGMENTING));
dn->state_clear(CDentry::STATE_FRAGMENTING);
dn->put(CDentry::PIN_FRAGMENTING);
}
{
dout(10) << "fragment_committed " << basedirfrag << dendl;
map<dirfrag_t, ufragment>::iterator it = uncommitted_fragments.find(basedirfrag);
- assert(it != uncommitted_fragments.end());
+ ceph_assert(it != uncommitted_fragments.end());
ufragment &uf = it->second;
// remove old frags
0, gather.new_sub());
}
- assert(gather.has_subs());
+ ceph_assert(gather.has_subs());
gather.activate();
}
dout(10) << "fragment_finish " << basedirfrag << "resultfrags.size="
<< resultfrags.size() << dendl;
map<dirfrag_t, ufragment>::iterator it = uncommitted_fragments.find(basedirfrag);
- assert(it != uncommitted_fragments.end());
+ ceph_assert(it != uncommitted_fragments.end());
ufragment &uf = it->second;
// unmark & auth_unpin
LogSegment *ls, bufferlist *rollback)
{
dout(10) << "add_uncommitted_fragment: base dirfrag " << basedirfrag << " bits " << bits << dendl;
- assert(!uncommitted_fragments.count(basedirfrag));
+ ceph_assert(!uncommitted_fragments.count(basedirfrag));
ufragment& uf = uncommitted_fragments[basedirfrag];
uf.old_frags = old_frags;
uf.bits = bits;
++p) {
ufragment &uf = p->second;
CInode *diri = get_inode(p->first.ino);
- assert(diri);
+ ceph_assert(diri);
if (uf.committed) {
list<CDir*> frags;
diri->verify_dirfrags();
for (list<frag_t>::iterator q = old_frags.begin(); q != old_frags.end(); ++q)
- assert(!diri->dirfragtree.is_leaf(*q));
+ ceph_assert(!diri->dirfragtree.is_leaf(*q));
for (list<CDir*>::iterator q = resultfrags.begin(); q != resultfrags.end(); ++q) {
CDir *dir = *q;
// sanity check
//dout(25) << "saw depth " << d << " " << *dir << dendl;
if (seen.count(dir)) dout(0) << "aah, already seen " << *dir << dendl;
- assert(seen.count(dir) == 0);
+ ceph_assert(seen.count(dir) == 0);
seen.insert(dir);
// nested items?
<< " " << auth << *dir << dendl;
if (dir->ino() == MDS_INO_ROOT)
- assert(dir->inode == root);
+ ceph_assert(dir->inode == root);
if (dir->ino() == MDS_INO_MDSDIR(mds->get_nodeid()))
- assert(dir->inode == myin);
+ ceph_assert(dir->inode == myin);
if (dir->inode->is_stray() && (MDS_INO_STRAY_OWNER(dir->ino()) == mds->get_nodeid()))
- assert(strays[MDS_INO_STRAY_INDEX(dir->ino())] == dir->inode);
+ ceph_assert(strays[MDS_INO_STRAY_INDEX(dir->ino())] == dir->inode);
// nested items?
if (!subtrees[dir].empty()) {
dout(10) << "*** stray/lost entry in subtree map: " << *p->first << dendl;
lost++;
}
- assert(lost == 0);
+ ceph_assert(lost == 0);
}
void MDCache::show_cache()
void MDCache::dump_tree(CInode *in, const int cur_depth, const int max_depth, Formatter *f)
{
- assert(in);
+ ceph_assert(in);
if ((max_depth >= 0) && (cur_depth > max_depth)) {
return;
}
return;
// TODO: Remove this restriction
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
bool locked = mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks);
if (!locked)
auto& expiring_segments = mds->mdlog->get_expiring_segments();
for (auto logseg : expiring_segments)
logseg->wait_for_expiry(gather.new_sub());
- assert(gather.has_subs());
+ ceph_assert(gather.has_subs());
gather.set_finisher(new MDSInternalContextWrapper(mds, fin));
gather.activate();
}
for (list<frag_t>::iterator p = frags.begin(); p != frags.end(); ++p) {
CDir *dir = diri->get_dirfrag(*p);
if (!dir) {
- assert(mdr->is_auth_pinned(diri));
+ ceph_assert(mdr->is_auth_pinned(diri));
dir = diri->get_or_open_dirfrag(this, *p);
}
if (dir->get_version() == 0) {
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
dir->fetch(new C_MDS_RetryRequest(this, mdr));
return;
}
diri->dirfragtree.get_leaves(frags);
for (list<frag_t>::iterator p = frags.begin(); p != frags.end(); ++p) {
CDir *dir = diri->get_dirfrag(*p);
- assert(dir);
- assert(dir->get_version() > 0);
+ ceph_assert(dir);
+ ceph_assert(dir->get_version() > 0);
dir_info.add(dir->fnode.accounted_fragstat);
nest_info.add(dir->fnode.accounted_rstat);
}
return;
// TODO: Is this necessary? Fix it if so
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
bool locked = mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks);
if (!locked)
return;
void MDCache::clear_dirty_bits_for_stray(CInode* diri) {
dout(10) << __func__ << " " << *diri << dendl;
- assert(diri->get_projected_parent_dir()->inode->is_stray());
+ ceph_assert(diri->get_projected_parent_dir()->inode->is_stray());
list<CDir*> ls;
diri->get_dirfrags(ls);
for (auto &p : ls) {
* on to StrayManager (i.e. this is a stray you've just created)
*/
void notify_stray(CDentry *dn) {
- assert(dn->get_dir()->get_inode()->is_stray());
+ ceph_assert(dn->get_dir()->get_inode()->is_stray());
stray_manager.eval_stray(dn);
}
CDir *get_subtree_root(CDir *dir);
CDir *get_projected_subtree_root(CDir *dir);
bool is_leaf_subtree(CDir *dir) {
- assert(subtrees.count(dir));
+ ceph_assert(subtrees.count(dir));
return subtrees[dir].empty();
}
void remove_subtree(CDir *dir);
void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) {
auto p = ambiguous_slave_updates.find(master);
auto q = p->second.find(reqid);
- assert(q != p->second.end());
+ ceph_assert(q != p->second.end());
p->second.erase(q);
if (p->second.empty())
ambiguous_slave_updates.erase(p);
return my_ambiguous_imports.count(base);
}
void get_ambiguous_import_bounds(dirfrag_t base, vector<dirfrag_t>& bounds) {
- assert(my_ambiguous_imports.count(base));
+ ceph_assert(my_ambiguous_imports.count(base));
bounds = my_ambiguous_imports[base];
}
void cancel_ambiguous_import(CDir *);
return NULL;
}
void remove_replay_cap_reconnect(inodeno_t ino, client_t client) {
- assert(cap_imports[ino].size() == 1);
- assert(cap_imports[ino][client].size() == 1);
+ ceph_assert(cap_imports[ino].size() == 1);
+ ceph_assert(cap_imports[ino][client].size() == 1);
cap_imports.erase(ino);
}
void wait_replay_cap_reconnect(inodeno_t ino, MDSInternalContextBase *c) {
void rollback_uncommitted_fragment(dirfrag_t basedirfrag, list<frag_t>& old_frags);
public:
void wait_for_uncommitted_fragment(dirfrag_t dirfrag, MDSInternalContextBase *c) {
- assert(uncommitted_fragments.count(dirfrag));
+ ceph_assert(uncommitted_fragments.count(dirfrag));
uncommitted_fragments[dirfrag].waiters.push_back(c);
}
void split_dir(CDir *dir, int byn);
void show_subtrees(int dbl=10);
CInode *hack_pick_random_inode() {
- assert(!inode_map.empty());
+ ceph_assert(!inode_map.empty());
int n = rand() % inode_map.size();
auto p = inode_map.begin();
while (n--) ++p;
ino = MDS_INO_LOG_OFFSET + mds->get_nodeid();
// Instantiate Journaler and start async write to RADOS
- assert(journaler == NULL);
+ ceph_assert(journaler == NULL);
journaler = new Journaler("mdlog", ino, mds->mdsmap->get_metadata_pool(),
CEPH_FS_ONDISK_MAGIC, mds->objecter, logger,
l_mdl_jlat, mds->finisher);
- assert(journaler->is_readonly());
+ ceph_assert(journaler->is_readonly());
journaler->set_write_error_handler(new C_MDL_WriteError(this));
journaler->set_writeable();
journaler->create(&mds->mdcache->default_log_layout, g_conf()->mds_journal_format);
{
dout(5) << "open discovering log bounds" << dendl;
- assert(!recovery_thread.is_started());
+ ceph_assert(!recovery_thread.is_started());
recovery_thread.set_completion(c);
recovery_thread.create("md_recov_open");
// Because we will call append() at the completion of this, check that we have already
// read the whole journal.
- assert(journaler != NULL);
- assert(journaler->get_read_pos() == journaler->get_write_pos());
+ ceph_assert(journaler != NULL);
+ ceph_assert(journaler->get_read_pos() == journaler->get_write_pos());
delete journaler;
journaler = NULL;
void MDLog::_start_entry(LogEvent *e)
{
- assert(submit_mutex.is_locked_by_me());
+ ceph_assert(submit_mutex.is_locked_by_me());
- assert(cur_event == NULL);
+ ceph_assert(cur_event == NULL);
cur_event = e;
event_seq++;
void MDLog::cancel_entry(LogEvent *le)
{
- assert(le == cur_event);
+ ceph_assert(le == cur_event);
cur_event = NULL;
delete le;
}
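// Editor's note: the is_locked_by_me() assertions in MDLog document a locking
// precondition (the caller must hold submit_mutex) rather than guard data. A
// minimal sketch of a mutex that can answer that question, assuming it simply
// records its owning thread (not Ceph's actual Mutex implementation):
//
//   #include <atomic>
//   #include <mutex>
//   #include <thread>
//
//   class OwnedMutex {
//     std::mutex m;
//     std::atomic<std::thread::id> owner;
//   public:
//     void lock()   { m.lock(); owner = std::this_thread::get_id(); }
//     void unlock() { owner = std::thread::id(); m.unlock(); }
//     bool is_locked_by_me() const {
//       return owner == std::this_thread::get_id();
//     }
//   };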
void MDLog::_submit_entry(LogEvent *le, MDSLogContextBase *c)
{
- assert(submit_mutex.is_locked_by_me());
- assert(!mds->is_any_replay());
- assert(!capped);
+ ceph_assert(submit_mutex.is_locked_by_me());
+ ceph_assert(!mds->is_any_replay());
+ ceph_assert(!capped);
- assert(le == cur_event);
+ ceph_assert(le == cur_event);
cur_event = NULL;
// let the event register itself in the segment
- assert(!segments.empty());
+ ceph_assert(!segments.empty());
LogSegment *ls = segments.rbegin()->second;
ls->num_events++;
MDSLogContextBase *fin;
if (data.fin) {
fin = dynamic_cast<MDSLogContextBase*>(data.fin);
- assert(fin);
+ ceph_assert(fin);
fin->set_write_pos(new_write_pos);
} else {
fin = new C_MDL_Flushed(this, new_write_pos);
if (data.fin) {
MDSInternalContextBase* fin =
dynamic_cast<MDSInternalContextBase*>(data.fin);
- assert(fin);
+ ceph_assert(fin);
C_MDL_Flushed *fin2 = new C_MDL_Flushed(this, fin);
fin2->set_write_pos(journaler->get_write_pos());
journaler->wait_for_flush(fin2);
void MDLog::shutdown()
{
- assert(mds->mds_lock.is_locked_by_me());
+ ceph_assert(mds->mds_lock.is_locked_by_me());
dout(5) << "shutdown" << dendl;
if (submit_thread.is_started()) {
- assert(mds->is_daemon_stopping());
+ ceph_assert(mds->is_daemon_stopping());
if (submit_thread.am_self()) {
// Called suicide from the thread: trust it to do no work after
void MDLog::_prepare_new_segment()
{
- assert(submit_mutex.is_locked_by_me());
+ ceph_assert(submit_mutex.is_locked_by_me());
uint64_t seq = event_seq + 1;
dout(7) << __func__ << " seq " << seq << dendl;
void MDLog::_journal_segment_subtree_map(MDSInternalContextBase *onsync)
{
- assert(submit_mutex.is_locked_by_me());
+ ceph_assert(submit_mutex.is_locked_by_me());
dout(7) << __func__ << dendl;
ESubtreeMap *sle = mds->mdcache->create_subtree_map();
// look at first segment
LogSegment *ls = p->second;
- assert(ls);
+ ceph_assert(ls);
++p;
if (pending_events.count(ls->seq) ||
dout(5) << "trim already expired segment " << ls->seq << "/" << ls->offset
<< ", " << ls->num_events << " events" << dendl;
} else {
- assert(expiring_segments.count(ls) == 0);
+ ceph_assert(expiring_segments.count(ls) == 0);
new_expiring_segments++;
expiring_segments.insert(ls);
expiring_events += ls->num_events;
dout(5) << "trim already expired segment " << ls->seq << "/" << ls->offset
<< ", " << ls->num_events << " events" << dendl;
} else {
- assert(expiring_segments.count(ls) == 0);
+ ceph_assert(expiring_segments.count(ls) == 0);
expiring_segments.insert(ls);
expiring_events += ls->num_events;
submit_mutex.Unlock();
} else {
dout(10) << "try_expire expired segment " << ls->seq << "/" << ls->offset << dendl;
submit_mutex.Lock();
- assert(expiring_segments.count(ls));
+ ceph_assert(expiring_segments.count(ls));
expiring_segments.erase(ls);
expiring_events -= ls->num_events;
_expired(ls);
void MDLog::_trim_expired_segments()
{
- assert(submit_mutex.is_locked_by_me());
+ ceph_assert(submit_mutex.is_locked_by_me());
uint64_t oft_committed_seq = mds->mdcache->open_file_table.get_committed_log_seq();
void MDLog::_expired(LogSegment *ls)
{
- assert(submit_mutex.is_locked_by_me());
+ ceph_assert(submit_mutex.is_locked_by_me());
dout(5) << "_expired segment " << ls->seq << "/" << ls->offset
<< ", " << ls->num_events << " events" << dendl;
void MDLog::replay(MDSInternalContextBase *c)
{
- assert(journaler->is_active());
- assert(journaler->is_readonly());
+ ceph_assert(journaler->is_active());
+ ceph_assert(journaler->is_readonly());
// empty?
if (journaler->get_read_pos() == journaler->get_write_pos()) {
dout(10) << "replay start, from " << journaler->get_read_pos()
<< " to " << journaler->get_write_pos() << dendl;
- assert(num_events == 0 || already_replayed);
+ ceph_assert(num_events == 0 || already_replayed);
if (already_replayed) {
// Ensure previous instance of ReplayThread is joined before
// we create another one
*/
void MDLog::_recovery_thread(MDSInternalContextBase *completion)
{
- assert(journaler == NULL);
+ ceph_assert(journaler == NULL);
if (g_conf()->mds_journal_format > JOURNAL_FORMAT_MAX) {
dout(0) << "Configuration value for mds_journal_format is out of bounds, max is "
<< JOURNAL_FORMAT_MAX << dendl;
jp.front = default_log_ino;
int write_result = jp.save(mds->objecter);
// Nothing graceful we can do for this
- assert(write_result >= 0);
+ ceph_assert(write_result >= 0);
} else if (read_result == -EBLACKLISTED) {
derr << "Blacklisted during JournalPointer read! Respawning..." << dendl;
mds->respawn();
mds->clog->error() << "Error recovering journal " << jp.front << ": "
<< cpp_strerror(recovery_result);
mds->damaged_unlocked();
- assert(recovery_result == 0); // Unreachable because damaged() calls respawn()
+ ceph_assert(recovery_result == 0); // Unreachable because damaged() calls respawn()
}
// We could read journal, so we can erase it.
jp.back = 0;
int write_result = jp.save(mds->objecter);
// Nothing graceful we can do for this
- assert(write_result >= 0);
+ ceph_assert(write_result >= 0);
}
}
mds->clog->error() << "Error recovering journal " << jp.front << ": "
<< cpp_strerror(recovery_result);
mds->damaged_unlocked();
- assert(recovery_result == 0); // Unreachable because damaged() calls respawn()
+ ceph_assert(recovery_result == 0); // Unreachable because damaged() calls respawn()
}
/* Check whether the front journal format is acceptable or needs re-write */
*/
void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journal, MDSInternalContextBase *completion)
{
- assert(!jp_in.is_null());
- assert(completion != NULL);
- assert(old_journal != NULL);
+ ceph_assert(!jp_in.is_null());
+ ceph_assert(completion != NULL);
+ ceph_assert(old_journal != NULL);
JournalPointer jp = jp_in;
inodeno_t secondary_ino = MDS_INO_LOG_BACKUP_OFFSET + mds->get_nodeid();
jp.back = (jp.front == primary_ino ? secondary_ino : primary_ino);
int write_result = jp.save(mds->objecter);
- assert(write_result == 0);
+ ceph_assert(write_result == 0);
/* Create the new Journaler file */
Journaler *new_journal = new Journaler("mdlog", jp.back,
break;
// Read one serialized LogEvent
- assert(old_journal->is_readable());
+ ceph_assert(old_journal->is_readable());
bufferlist bl;
uint64_t le_pos = old_journal->get_read_pos();
bool r = old_journal->try_read_entry(bl);
if (!r && old_journal->get_error())
continue;
- assert(r);
+ ceph_assert(r);
// Update segment_pos_rewrite
LogEvent *le = LogEvent::decode(bl);
if (le->get_type() == EVENT_SUBTREEMAP
|| le->get_type() == EVENT_SUBTREEMAP_TEST) {
ESubtreeMap *sle = dynamic_cast<ESubtreeMap*>(le);
- assert(sle != NULL);
+ ceph_assert(sle != NULL);
dout(20) << __func__ << " zeroing expire_pos in subtreemap event at "
<< le_pos << " seq=" << sle->event_seq << dendl;
sle->expire_pos = 0;
// If we failed to rewrite the journal, leave the partially written journal
// as garbage to be cleaned up next startup.
- assert(r == 0);
+ ceph_assert(r == 0);
/* Now that the new journal is safe, we can flip the pointers */
inodeno_t const tmp = jp.front;
jp.front = jp.back;
jp.back = tmp;
write_result = jp.save(mds->objecter);
- assert(write_result == 0);
+ ceph_assert(write_result == 0);
/* Delete the old journal to free space */
dout(1) << "New journal flushed, erasing old journal" << dendl;
C_SaferCond erase_waiter;
old_journal->erase(&erase_waiter);
int erase_result = erase_waiter.wait();
- assert(erase_result == 0);
+ ceph_assert(erase_result == 0);
{
Mutex::Locker l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
delete new_journal;
return;
}
- assert(journaler == old_journal);
+ ceph_assert(journaler == old_journal);
journaler = NULL;
delete old_journal;
}
/* Update the pointer to reflect we're back in clean single journal state. */
jp.back = 0;
write_result = jp.save(mds->objecter);
- assert(write_result == 0);
+ ceph_assert(write_result == 0);
/* Reset the Journaler object to its default state */
dout(1) << "Journal rewrite complete, continuing with normal startup" << dendl;
journaler->get_read_pos() == journaler->get_write_pos())
break;
- assert(journaler->is_readable() || mds->is_daemon_stopping());
+ ceph_assert(journaler->is_readable() || mds->is_daemon_stopping());
// read it
uint64_t pos = journaler->get_read_pos();
bool r = journaler->try_read_entry(bl);
if (!r && journaler->get_error())
continue;
- assert(r);
+ ceph_assert(r);
// unpack event
LogEvent *le = LogEvent::decode(bl);
// done!
if (r == 0) {
- assert(journaler->get_read_pos() == journaler->get_write_pos());
+ ceph_assert(journaler->get_read_pos() == journaler->get_write_pos());
dout(10) << "_replay - complete, " << num_events
<< " events" << dendl;
void set_safe_pos(uint64_t pos)
{
Mutex::Locker l(submit_mutex);
- assert(pos >= safe_pos);
+ ceph_assert(pos >= safe_pos);
safe_pos = pos;
}
friend class MDSLogContextBase;
friend class MDCache;
uint64_t get_last_segment_seq() const {
- assert(!segments.empty());
+ ceph_assert(!segments.empty());
return segments.rbegin()->first;
}
LogSegment *get_oldest_segment() {
}
LogSegment *get_current_segment() {
- assert(!segments.empty());
+ ceph_assert(!segments.empty());
return segments.rbegin()->second;
}
virtual void last_put() {}
virtual void bad_put(int by) {
#ifdef MDS_REF_SET
- assert(ref_map[by] > 0);
+ ceph_assert(ref_map[by] > 0);
#endif
- assert(ref > 0);
+ ceph_assert(ref > 0);
}
virtual void _put() {}
void put(int by) {
virtual void first_get() {}
virtual void bad_get(int by) {
#ifdef MDS_REF_SET
- assert(by < 0 || ref_map[by] == 0);
+ ceph_assert(by < 0 || ref_map[by] == 0);
#endif
ceph_abort();
}
get_replicas()[mds] = nonce;
}
unsigned get_replica_nonce(mds_rank_t mds) {
- assert(get_replicas().count(mds));
+ ceph_assert(get_replicas().count(mds));
return get_replicas()[mds];
}
void remove_replica(mds_rank_t mds) {
- assert(get_replicas().count(mds));
+ ceph_assert(get_replicas().count(mds));
get_replicas().erase(mds);
if (get_replicas().empty()) {
put(PIN_REPLICATED);
MDSRank *mds = get_mds();
dout(10) << "MDSInternalContextBase::complete: " << typeid(*this).name() << dendl;
- assert(mds != NULL);
- assert(mds->mds_lock.is_locked_by_me());
+ ceph_assert(mds != NULL);
+ ceph_assert(mds->mds_lock.is_locked_by_me());
MDSContext::complete(r);
}
MDSRank *mds = get_mds();
dout(10) << "MDSIOContextBase::complete: " << typeid(*this).name() << dendl;
- assert(mds != NULL);
+ ceph_assert(mds != NULL);
Mutex::Locker l(mds->mds_lock);
if (mds->is_daemon_stopping()) {
public:
explicit MDSInternalContext(MDSRank *mds_) : mds(mds_) {
- assert(mds != NULL);
+ ceph_assert(mds != NULL);
}
};
public:
explicit MDSIOContext(MDSRank *mds_) : mds(mds_) {
- assert(mds != NULL);
+ ceph_assert(mds != NULL);
}
};
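// The split between these context base classes is visible in the complete()
// implementations above: MDSInternalContextBase asserts mds_lock is already
// held, because it runs from paths inside the lock, while MDSIOContextBase
// takes mds_lock itself, because it is completed from IO (e.g. Objecter)
// threads that arrive without it.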
public:
C_IO_Wrapper(MDSRank *mds_, MDSInternalContextBase *wrapped_) :
MDSIOContext(mds_), async(true), wrapped(wrapped_) {
- assert(wrapped != NULL);
+ ceph_assert(wrapped != NULL);
}
~C_IO_Wrapper() override {
{
int r;
AdminSocket *admin_socket = g_ceph_context->get_admin_socket();
- assert(asok_hook == nullptr);
+ ceph_assert(asok_hook == nullptr);
asok_hook = new MDSSocketHook(this);
r = admin_socket->register_command("status", "status", asok_hook,
"high-level status of MDS");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_ops_in_flight",
"dump_ops_in_flight", asok_hook,
"show the ops currently in flight");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("ops",
"ops", asok_hook,
"show the ops currently in flight");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_blocked_ops", "dump_blocked_ops",
asok_hook,
"show the blocked ops currently in flight");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_historic_ops", "dump_historic_ops",
asok_hook,
"show recent ops");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump_historic_ops_by_duration", "dump_historic_ops_by_duration",
asok_hook,
"show recent ops, sorted by op duration");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("scrub_path",
"scrub_path name=path,type=CephString "
"name=scrubops,type=CephChoices,"
"strings=force|recursive|repair,n=N,req=false",
asok_hook,
"scrub an inode and output results");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("tag path",
"tag path name=path,type=CephString"
" name=tag,type=CephString",
asok_hook,
"Apply scrub tag recursively");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("flush_path",
"flush_path name=path,type=CephString",
asok_hook,
"flush an inode (and its dirfrags)");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("export dir",
"export dir "
"name=path,type=CephString "
"name=rank,type=CephInt",
asok_hook,
"migrate a subtree to named MDS");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump cache",
"dump cache name=path,type=CephString,req=false",
asok_hook,
"dump metadata cache (optionally to a file)");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("cache status",
"cache status",
asok_hook,
"show cache status");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump tree",
"dump tree "
"name=root,type=CephString,req=true "
"name=depth,type=CephInt,req=false ",
asok_hook,
"dump metadata cache for subtree");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump loads",
"dump loads",
asok_hook,
"dump metadata loads");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump snaps",
"dump snaps name=server,type=CephChoices,strings=--server,req=false",
asok_hook,
"dump snapshots");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("session evict",
"session evict name=client_id,type=CephString",
asok_hook,
"Evict a CephFS client");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("osdmap barrier",
"osdmap barrier name=target_epoch,type=CephInt",
asok_hook,
"Wait until the MDS has this OSD map epoch");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("session ls",
"session ls",
asok_hook,
"Enumerate connected CephFS clients");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("flush journal",
"flush journal",
asok_hook,
"Flush the journal to the backing store");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("force_readonly",
"force_readonly",
asok_hook,
"Force MDS to read-only mode");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("get subtrees",
"get subtrees",
asok_hook,
"Return the subtree map");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dirfrag split",
"dirfrag split "
"name=path,type=CephString,req=true "
"name=bits,type=CephInt,req=true ",
asok_hook,
"Fragment directory by path");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dirfrag merge",
"dirfrag merge "
"name=path,type=CephString,req=true "
"name=frag,type=CephString,req=true",
asok_hook,
"De-fragment directory by path");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dirfrag ls",
"dirfrag ls "
"name=path,type=CephString,req=true",
asok_hook,
"List fragments in directory");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("openfiles ls",
"openfiles ls",
asok_hook,
"List the opening files and their caps");
- assert(r == 0);
+ ceph_assert(r == 0);
r = admin_socket->register_command("dump inode",
"dump inode "
"name=number,type=CephInt,req=true",
asok_hook,
"dump inode by inode number");
- assert(r == 0);
+ ceph_assert(r == 0);
}
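// Every command registered above is reachable through the daemon's admin
// socket. For example (assuming an MDS daemon with id "a"; argument
// spellings follow the descriptors registered above):
//
//   ceph daemon mds.a status
//   ceph daemon mds.a session ls
//   ceph daemon mds.a flush journal
//   ceph daemon mds.a scrub_path / recursive repair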
void MDSDaemon::clean_up_admin_socket()
tick_event = timer.add_event_after(
g_conf()->mds_tick_interval,
new FunctionContext([this](int) {
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
tick();
}));
}
{
auto priv = m->get_connection()->get_priv();
auto session = static_cast<Session *>(priv.get());
- assert(session != NULL);
+ ceph_assert(session != NULL);
// If someone is using a closed session for sending commands (e.g.
// the ceph CLI) then we should feel free to clean up this connection
// as soon as we've sent them a response.
if (!live_session) {
// This session only existed to issue commands, so terminate it
// as soon as we can.
- assert(session->is_closed());
+ ceph_assert(session->is_closed());
session->get_connection()->mark_disposable();
}
priv.reset();
{
auto priv = m->get_connection()->get_priv();
auto session = static_cast<Session *>(priv.get());
- assert(session != NULL);
+ ceph_assert(session != NULL);
int r = 0;
cmdmap_t cmdmap;
Context **run_later,
bool *need_reply)
{
- assert(outbl != NULL);
- assert(outs != NULL);
+ ceph_assert(outbl != NULL);
+ ceph_assert(outs != NULL);
class SuicideLater : public Context
{
// FIXME harmonize `session kill` with admin socket session evict
int64_t session_id = 0;
bool got = cmd_getval(cct, cmdmap, "session_id", session_id);
- assert(got);
+ ceph_assert(got);
bool killed = mds_rank->evict_client(session_id, false,
g_conf()->mds_session_blacklist_on_evict,
ss);
void MDSDaemon::handle_signal(int signum)
{
- assert(signum == SIGINT || signum == SIGTERM);
+ ceph_assert(signum == SIGINT || signum == SIGTERM);
derr << "*** got signal " << sig_str(signum) << " ***" << dendl;
{
Mutex::Locker l(mds_lock);
void MDSDaemon::suicide()
{
- assert(mds_lock.is_locked());
+ ceph_assert(mds_lock.is_locked());
// make sure we don't suicide twice
- assert(stopping == false);
+ ceph_assert(stopping == false);
stopping = true;
dout(1) << "suicide! Wanted state "
/* Print CWD for the user's interest */
char buf[PATH_MAX];
char *cwd = getcwd(buf, sizeof(buf));
- assert(cwd);
+ ceph_assert(cwd);
dout(1) << " cwd " << cwd << dendl;
/* Fall back to a best-effort: just running in our CWD */
if (m == m_end) {
std::cerr << "Up rank " << u.first << " GID " << u.second << " not found!" << std::endl;
}
- assert(m != m_end);
+ ceph_assert(m != m_end);
const mds_info_t &mds_info(m->second);
if (mds_info.laggy()) {
laggy.insert(mds_info.name);
decode(inline_data_enabled, p);
if (ev >= 8) {
- assert(struct_v >= 5);
+ ceph_assert(struct_v >= 5);
decode(enabled, p);
decode(fs_name, p);
} else {
mds_rank_t get_old_max_mds() const { return old_max_mds; }
mds_rank_t get_standby_count_wanted(mds_rank_t standby_daemon_count) const {
- assert(standby_daemon_count >= 0);
+ ceph_assert(standby_daemon_count >= 0);
std::set<mds_rank_t> s;
get_standby_replay_mds_set(s);
mds_rank_t standbys_avail = (mds_rank_t)s.size()+standby_daemon_count;
return mds_info.at(gid);
}
const mds_info_t& get_mds_info(mds_rank_t m) const {
- assert(up.count(m) && mds_info.count(up.at(m)));
+ ceph_assert(up.count(m) && mds_info.count(up.at(m)));
return mds_info.at(up.at(m));
}
mds_gid_t find_mds_gid_by_name(std::string_view s) const {
++p) {
std::map<mds_gid_t, mds_info_t>::const_iterator q =
mds_info.find(p->second);
- assert(q != mds_info.end());
+ ceph_assert(q != mds_info.end());
if (first) {
cached_up_features = q->second.mds_features;
first = false;
*/
void get_down_mds_set(std::set<mds_rank_t> *s) const
{
- assert(s != NULL);
+ ceph_assert(s != NULL);
s->insert(failed.begin(), failed.end());
s->insert(damaged.begin(), damaged.end());
}
// It should never be possible for shutdown to get called twice, because
// anyone picking up mds_lock checks if stopping is true and drops
// out if it is.
- assert(stopping == false);
+ ceph_assert(stopping == false);
stopping = true;
dout(1) << __func__ << ": shutting down rank " << whoami << dendl;
C_MDS_VoidFn(MDSRank *mds_, fn_ptr fn_)
: MDSInternalContext(mds_), fn(fn_)
{
- assert(mds_);
- assert(fn_);
+ ceph_assert(mds_);
+ ceph_assert(fn_);
}
void finish(int r) override
void MDSRank::damaged()
{
- assert(whoami != MDS_RANK_NONE);
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(whoami != MDS_RANK_NONE);
+ ceph_assert(mds_lock.is_locked_by_me());
beacon.set_want_state(*mdsmap, MDSMap::STATE_DAMAGED);
monc->flush_log(); // Flush any clog error from before we were called
void MDSRank::ProgressThread::shutdown()
{
- assert(mds->mds_lock.is_locked_by_me());
- assert(mds->stopping);
+ ceph_assert(mds->mds_lock.is_locked_by_me());
+ ceph_assert(mds->stopping);
if (am_self()) {
// Stopping is set, we will fall out of our main loop naturally
*/
void MDSRank::_advance_queues()
{
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
if (!finished_queue.empty()) {
dout(7) << "mds has " << finished_queue.size() << " queued contexts" << dendl;
// after a call to suicide() completes, in which case MDSRank::hb
// has been freed and we are a no-op.
if (!hb) {
- assert(stopping);
+ ceph_assert(stopping);
return;
}
<< dendl;
imported_session->info.auth_name = session->info.auth_name;
//assert(session->info.auth_name == imported_session->info.auth_name);
- assert(session->info.inst == imported_session->info.inst);
+ ceph_assert(session->info.inst == imported_session->info.inst);
imported_session->set_connection(session->get_connection().get());
// send out any queued messages
while (!session->preopen_out_queue.empty()) {
session->preopen_out_queue.pop_front();
}
imported_session->auth_caps = session->auth_caps;
- assert(session->get_nref() == 1);
+ ceph_assert(session->get_nref() == 1);
imported_session->get_connection()->set_priv(imported_session->get());
session = imported_session;
}
void MDSRank::send_message(const Message::ref& m, const ConnectionRef& c)
{
- assert(c);
+ ceph_assert(c);
c->send_message2(m);
}
void MDSRank::forward_message_mds(const MClientRequest::const_ref& m, mds_rank_t mds)
{
- assert(mds != whoami);
+ ceph_assert(mds != whoami);
/*
* don't actually forward if non-idempotent!
clog->error() << "Error loading MDS rank " << whoami << ": "
<< cpp_strerror(r);
damaged();
- assert(r == 0); // Unreachable, damaged() calls respawn()
+ ceph_assert(r == 0); // Unreachable, damaged() calls respawn()
} else {
// Completely unexpected error, give up and die
dout(0) << "boot_start encountered an error, failing" << dendl;
}
}
- assert(is_starting() || is_any_replay());
+ ceph_assert(is_starting() || is_any_replay());
switch(step) {
case MDS_BOOT_INITIAL:
}
break;
case MDS_BOOT_REPLAY_DONE:
- assert(is_any_replay());
+ ceph_assert(is_any_replay());
// Sessiontable and inotable should be in sync after replay, validate
// that they are consistent.
void MDSRank::validate_sessions()
{
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
bool valid = true;
// Identify any sessions which have state inconsistent with other,
if (!valid) {
damaged();
- assert(valid);
+ ceph_assert(valid);
}
}
void MDSRank::starting_done()
{
dout(3) << "starting_done" << dendl;
- assert(is_starting());
+ ceph_assert(is_starting());
request_state(MDSMap::STATE_ACTIVE);
mdlog->start_new_segment();
public:
explicit C_MDS_StandbyReplayRestart(MDSRank *m) : MDSInternalContext(m) {}
void finish(int r) override {
- assert(!r);
+ ceph_assert(!r);
mds->standby_replay_restart();
}
};
if (is_standby_replay()) {
// The replay was done in standby state, and we are still in that state
- assert(standby_replaying);
+ ceph_assert(standby_replaying);
dout(10) << "setting replay timer" << dendl;
timer.add_event_after(g_conf()->mds_replay_interval,
new C_MDS_StandbyReplayRestart(this));
return;
} else {
// Replay is complete, journal read should be up to date
- assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos());
- assert(!is_standby_replay());
+ ceph_assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos());
+ ceph_assert(!is_standby_replay());
// Reformat and come back here
if (mdlog->get_journaler()->get_stream_format() < g_conf()->mds_journal_format) {
// The root should always have a subtree!
clog->error() << "No subtrees found for root MDS rank!";
damaged();
- assert(mdcache->is_subtrees());
+ ceph_assert(mdcache->is_subtrees());
} else {
dout(1) << " empty cache, no subtrees, leaving cluster" << dendl;
request_state(MDSMap::STATE_STOPPED);
void MDSRank::recovery_done(int oldstate)
{
dout(1) << "recovery_done -- successful recovery!" << dendl;
- assert(is_clientreplay() || is_active());
+ ceph_assert(is_clientreplay() || is_active());
if (oldstate == MDSMap::STATE_CREATING)
return;
snapserver->save(fin.new_sub());
}
- assert(g_conf()->mds_kill_create_at != 1);
+ ceph_assert(g_conf()->mds_kill_create_at != 1);
// ok now journal it
mdlog->journal_segment_subtree_map(fin.new_sub());
}
dout(20) << __func__ << " matched " << victims.size() << " sessions" << dendl;
- assert(!victims.empty());
+ ceph_assert(!victims.empty());
C_GatherBuilder gather(g_ceph_context, new C_MDSInternalNoop);
for (const auto &s : victims) {
const MDSMap &oldmap)
{
// I am only to be passed MDSMaps in which I hold a rank
- assert(whoami != MDS_RANK_NONE);
+ ceph_assert(whoami != MDS_RANK_NONE);
MDSMap::DaemonState oldstate = state;
mds_gid_t mds_gid = mds_gid_t(monc->get_global_id());
restart.insert(r);
handle_mds_failure(r);
} else {
- assert(info.state == MDSMap::STATE_STARTING ||
+ ceph_assert(info.state == MDSMap::STATE_STARTING ||
info.state == MDSMap::STATE_ACTIVE);
// -> stopped (missing) -> starting -> active
restart.insert(r);
restart.insert(r);
handle_mds_failure(r);
} else {
- assert(info.state == MDSMap::STATE_CREATING ||
+ ceph_assert(info.state == MDSMap::STATE_CREATING ||
info.state == MDSMap::STATE_STARTING ||
info.state == MDSMap::STATE_ACTIVE);
}
} else if (is_starting()) {
boot_start();
} else if (is_stopping()) {
- assert(oldstate == MDSMap::STATE_ACTIVE);
+ ceph_assert(oldstate == MDSMap::STATE_ACTIVE);
stopping_start();
}
}
*/
void MDSRank::command_flush_journal(Formatter *f)
{
- assert(f != NULL);
+ ceph_assert(f != NULL);
std::stringstream ss;
const int r = _command_flush_journal(ss);
int r = cond.wait();
mds_lock.Lock();
- assert(r == 0); // MDLog is not allowed to raise errors via wait_for_expiry
+ ceph_assert(r == 0); // MDLog is not allowed to raise errors via wait_for_expiry
}
dout(5) << __func__ << ": expiry complete, expire_pos/trim_pos is now " << std::hex <<
void MDSRank::command_get_subtrees(Formatter *f)
{
- assert(f != NULL);
+ ceph_assert(f != NULL);
Mutex::Locker l(mds_lock);
std::list<CDir*> subtrees;
bool wait, bool blacklist, std::ostream& err_ss,
Context *on_killed)
{
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
// Mutually exclusive args
- assert(!(wait && on_killed != nullptr));
+ ceph_assert(!(wait && on_killed != nullptr));
if (is_any_replay()) {
err_ss << "MDS is replaying log";
std::vector<std::string> cmd = {tmp};
auto kill_client_session = [this, session_id, wait, on_killed](){
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
Session *session = sessionmap.get_session(
entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id));
if (session) {
};
auto apply_blacklist = [this, cmd](std::function<void ()> fn){
- assert(mds_lock.is_locked_by_me());
+ ceph_assert(mds_lock.is_locked_by_me());
Context *on_blacklist_done = new FunctionContext([this, fn](int r) {
objecter->wait_for_latest_osdmap(
std::stringstream *ss,
bool *need_reply)
{
- assert(r != nullptr);
- assert(ds != nullptr);
- assert(ss != nullptr);
+ ceph_assert(r != nullptr);
+ ceph_assert(ds != nullptr);
+ ceph_assert(ss != nullptr);
*need_reply = true;
waiting_for_active_peer[who].push_back(c);
}
void wait_for_cluster_recovered(MDSInternalContextBase *c) {
- assert(cluster_degraded);
+ ceph_assert(cluster_degraded);
waiting_for_active_peer[MDS_RANK_NONE].push_back(c);
}
MDSRank *get_mds() override {return ida->mds;}
public:
explicit MDSTableIOContext(MDSTable *ida_) : ida(ida_) {
- assert(ida != NULL);
+ ceph_assert(ida != NULL);
}
};
}
dout(10) << "save v " << version << dendl;
- assert(is_active());
+ ceph_assert(is_active());
bufferlist bl;
encode(version, bl);
{
dout(10) << "load" << dendl;
- assert(is_undef());
+ ceph_assert(is_undef());
state = STATE_OPENING;
C_IO_MT_Load *c = new C_IO_MT_Load(this, onfinish);
void MDSTable::load_2(int r, bufferlist& bl, Context *onfinish)
{
- assert(is_opening());
+ ceph_assert(is_opening());
state = STATE_ACTIVE;
if (r == -EBLACKLISTED) {
mds->respawn();
mds->clog->error() << "error reading table object '" << get_object_name()
<< "' " << r << " (" << cpp_strerror(r) << ")";
mds->damaged();
- assert(r >= 0); // Should be unreachable because damaged() calls respawn()
+ ceph_assert(r >= 0); // Should be unreachable because damaged() calls respawn()
}
dout(10) << "load_2 got " << bl.length() << " bytes" << dendl;
mds->clog->error() << "error decoding table object '" << get_object_name()
<< "': " << e.what();
mds->damaged();
- assert(r >= 0); // Should be unreachable because damaged() calls respawn()
+ ceph_assert(r >= 0); // Should be unreachable because damaged() calls respawn()
}
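// The pattern above recurs across the MDS: -EBLACKLISTED means the OSDs have
// fenced this daemon, so respawn() restarts it to rejoin cleanly, while any
// other read/decode failure marks the rank damaged via damaged(), which also
// ends in a respawn. The trailing ceph_assert() is therefore expected to be
// unreachable; it documents (and enforces) that control never returns here.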
if (onfinish) {
void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m)
{
dout(10) << "handle_request " << *m << dendl;
- assert(m->table == table);
+ ceph_assert(m->table == table);
if (mds->get_state() < MDSMap::STATE_RESOLVE) {
if (mds->get_want_state() == CEPH_MDS_STATE_RESOLVE) {
break;
case TABLESERVER_OP_NOTIFY_PREP:
- assert(g_conf()->mds_kill_mdstable_at != 9);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 9);
handle_notify_prep(m);
break;
if (pending_prepare.count(reqid)) {
dout(10) << "got agree on " << reqid << " atid " << tid << dendl;
- assert(g_conf()->mds_kill_mdstable_at != 3);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 3);
MDSInternalContextBase *onfinish = pending_prepare[reqid].onfinish;
*pending_prepare[reqid].ptid = tid;
}
else if (prepared_update.count(tid)) {
dout(10) << "got duplicated agree on " << reqid << " atid " << tid << dendl;
- assert(prepared_update[tid] == reqid);
- assert(!server_ready);
+ ceph_assert(prepared_update[tid] == reqid);
+ ceph_assert(!server_ready);
}
else if (pending_commit.count(tid)) {
dout(10) << "stray agree on " << reqid << " tid " << tid
<< ", already committing, will resend COMMIT" << dendl;
- assert(!server_ready);
+ ceph_assert(!server_ready);
// will re-send commit when receiving the server ready message
}
else {
dout(10) << "stray agree on " << reqid << " tid " << tid
<< ", sending ROLLBACK" << dendl;
- assert(!server_ready);
+ ceph_assert(!server_ready);
auto req = MMDSTableRequest::create(table, TABLESERVER_OP_ROLLBACK, 0, tid);
mds->send_message_mds(req, mds->get_mds_map()->get_tableserver());
}
pending_commit[tid]->pending_commit_tids[table].count(tid)) {
dout(10) << "got ack on tid " << tid << ", logging" << dendl;
- assert(g_conf()->mds_kill_mdstable_at != 7);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 7);
// remove from committing list
pending_commit[tid]->pending_commit_tids[table].erase(tid);
break;
case TABLESERVER_OP_SERVER_READY:
- assert(!server_ready);
+ ceph_assert(!server_ready);
server_ready = true;
if (last_reqid == ~0ULL)
break;
default:
- assert(0 == "unrecognized mds_table_client request op");
+ ceph_assert(0 == "unrecognized mds_table_client request op");
}
}
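// The opcodes handled above form a small two-phase commit between table
// client and server (flow reconstructed from the cases; arrows are messages):
//
//   client                         server
//   PREPARE(reqid)   ----------->  journal the prepare
//                    <-----------  AGREE(reqid, tid)
//   COMMIT(tid)      ----------->  journal + apply
//                    <-----------  ACK(tid)
//
// A stray AGREE whose reqid the client no longer tracks is answered with
// ROLLBACK, and SERVER_READY gates the resending of COMMITs after a server
// restart (hence the ceph_assert(!server_ready) checks above).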
{
dout(10) << "commit " << tid << dendl;
- assert(prepared_update.count(tid));
+ ceph_assert(prepared_update.count(tid));
prepared_update.erase(tid);
- assert(pending_commit.count(tid) == 0);
+ ceph_assert(pending_commit.count(tid) == 0);
pending_commit[tid] = ls;
ls->pending_commit_tids[table].insert(tid);
notify_commit(tid);
- assert(g_conf()->mds_kill_mdstable_at != 4);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 4);
if (server_ready) {
// send message
void MDSTableServer::handle_request(const MMDSTableRequest::const_ref &req)
{
- assert(req->op >= 0);
+ ceph_assert(req->op >= 0);
switch (req->op) {
case TABLESERVER_OP_QUERY: return handle_query(req);
case TABLESERVER_OP_PREPARE: return handle_prepare(req);
case TABLESERVER_OP_COMMIT: return handle_commit(req);
case TABLESERVER_OP_ROLLBACK: return handle_rollback(req);
case TABLESERVER_OP_NOTIFY_ACK: return handle_notify_ack(req);
- default: assert(0 == "unrecognized mds_table_server request op");
+ default: ceph_assert(0 == "unrecognized mds_table_server request op");
}
}
dout(7) << "handle_prepare " << *req << dendl;
mds_rank_t from = mds_rank_t(req->get_source().num());
- assert(g_conf()->mds_kill_mdstable_at != 1);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 1);
projected_version++;
dout(7) << "_create_logged " << *req << " tid " << tid << dendl;
mds_rank_t from = mds_rank_t(req->get_source().num());
- assert(g_conf()->mds_kill_mdstable_at != 2);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 2);
_note_prepare(from, req->reqid);
bufferlist out;
_prepare(req->bl, req->reqid, from, out);
- assert(version == tid);
+ ceph_assert(version == tid);
auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, req->reqid, tid);
reply->bl = std::move(out);
return;
}
- assert(g_conf()->mds_kill_mdstable_at != 5);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 5);
projected_version++;
committing_tids.insert(tid);
else {
// wtf.
dout(0) << "got commit for tid " << tid << " > " << version << dendl;
- assert(tid <= version);
+ ceph_assert(tid <= version);
}
}
{
dout(7) << "_commit_logged, sending ACK" << dendl;
- assert(g_conf()->mds_kill_mdstable_at != 6);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 6);
version_t tid = req->get_tid();
pending_for_mds.erase(tid);
{
dout(7) << "handle_rollback " << *req << dendl;
- assert(g_conf()->mds_kill_mdstable_at != 8);
+ ceph_assert(g_conf()->mds_kill_mdstable_at != 8);
version_t tid = req->get_tid();
- assert(pending_for_mds.count(tid));
- assert(!committing_tids.count(tid));
+ ceph_assert(pending_for_mds.count(tid));
+ ceph_assert(!committing_tids.count(tid));
projected_version++;
committing_tids.insert(tid);
for (auto p = pending_for_mds.begin(); p != pending_for_mds.end(); ++p) {
if (p->second.mds != who)
continue;
- assert(!pending_notifies.count(p->second.tid));
+ ceph_assert(!pending_notifies.count(p->second.tid));
if (p->second.reqid >= next_reqid)
next_reqid = p->second.reqid + 1;
/* set the name of the global mds table */
lua_setglobal(L, "mds");
- assert(lua_gettop(L) == 1);
+ ceph_assert(lua_gettop(L) == 1);
if (lua_pcall(L, 0, 1, 0) != LUA_OK) {
mantle_dout(0) << "WARNING: mantle could not execute script: "
<< lua_tostring(L, -1) << mantle_dendl;
}
public:
explicit MigratorContext(Migrator *mig_) : mig(mig_) {
- assert(mig != NULL);
+ ceph_assert(mig != NULL);
}
};
}
public:
explicit MigratorLogContext(Migrator *mig_) : mig(mig_) {
- assert(mig != NULL);
+ ceph_assert(mig != NULL);
}
};
default:
derr << "migrator unknown message " << m->get_type() << dendl;
- assert(0 == "migrator unknown message");
+ ceph_assert(0 == "migrator unknown message");
}
}
void Migrator::export_empty_import(CDir *dir)
{
dout(7) << "export_empty_import " << *dir << dendl;
- assert(dir->is_subtree_root());
+ ceph_assert(dir->is_subtree_root());
if (dir->inode->is_auth()) {
dout(7) << " inode is auth" << dendl;
dout(10) << "export_try_cancel " << *dir << dendl;
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
- assert(it != export_state.end());
+ ceph_assert(it != export_state.end());
int state = it->second.state;
switch (state) {
// drop locks
if (state == EXPORT_LOCKING || state == EXPORT_DISCOVERING) {
MDRequestRef mdr = static_cast<MDRequestImpl*>(mut.get());
- assert(mdr);
+ ceph_assert(mdr);
mds->mdcache->request_kill(mdr);
} else if (mut) {
mds->locker->drop_locks(mut.get());
total_exporting_size -= it->second.approx_size;
export_state.erase(it);
- assert(dir->state_test(CDir::STATE_EXPORTING));
+ ceph_assert(dir->state_test(CDir::STATE_EXPORTING));
dir->clear_exporting();
if (unpin) {
break;
case IMPORT_DISCOVERED:
- assert(diri);
+ ceph_assert(diri);
dout(10) << "import state=discovered : unpinning inode " << *diri << dendl;
import_reverse_discovered(df, diri);
break;
case IMPORT_PREPPING:
- assert(dir);
+ ceph_assert(dir);
dout(10) << "import state=prepping : unpinning base+bounds " << *dir << dendl;
import_reverse_prepping(dir, q->second);
break;
case IMPORT_PREPPED:
- assert(dir);
+ ceph_assert(dir);
dout(10) << "import state=prepped : unpinning base+bounds, unfreezing " << *dir << dendl;
{
set<CDir*> bounds;
// notify bystanders ; wait in aborting state
q->second.state = IMPORT_ABORTING;
import_notify_abort(dir, bounds);
- assert(g_conf()->mds_kill_import_at != 10);
+ ceph_assert(g_conf()->mds_kill_import_at != 10);
}
break;
case IMPORT_LOGGINGSTART:
- assert(dir);
+ ceph_assert(dir);
dout(10) << "import state=loggingstart : reversing import on " << *dir << dendl;
import_reverse(dir);
break;
case IMPORT_ACKING:
- assert(dir);
+ ceph_assert(dir);
// hrm. make this an ambiguous import, and wait for exporter recovery to disambiguate
dout(10) << "import state=acking : noting ambiguous import " << *dir << dendl;
{
break;
case IMPORT_FINISHING:
- assert(dir);
+ ceph_assert(dir);
dout(10) << "import state=finishing : finishing import on " << *dir << dendl;
import_finish(dir, true);
break;
case IMPORT_ABORTING:
- assert(dir);
+ ceph_assert(dir);
dout(10) << "import state=aborting : ignoring repeat failure " << *dir << dendl;
break;
}
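// Import states visited by the failure handling above, in the order a
// healthy import passes through them (reconstructed from the cases):
//   DISCOVERING -> DISCOVERED -> PREPPING -> PREPPED -> LOGGINGSTART
//     -> ACKING -> FINISHING,
// with ABORTING as the failure exit that waits for bystander notify-acks
// before unfreezing.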
if (bystanders_entry != q->second.bystanders.end()) {
q->second.bystanders.erase(bystanders_entry);
if (q->second.state == IMPORT_ABORTING) {
- assert(dir);
+ ceph_assert(dir);
dout(10) << "faking export_notify_ack from mds." << who
<< " on aborting import " << *dir << " from mds." << q->second.peer
<< dendl;
continue;
if (p->second.state == IMPORT_DISCOVERED) {
CInode *in = cache->get_inode(p->first.ino);
- assert(in);
+ ceph_assert(in);
continue;
}
CDir *dir = cache->get_dirfrag(p->first);
- assert(dir);
+ ceph_assert(dir);
if (p->second.state == IMPORT_PREPPING)
continue;
if (p->second.state == IMPORT_ABORTING) {
- assert(!dir->is_ambiguous_dir_auth());
- assert(dir->get_dir_auth().first != mds->get_nodeid());
+ ceph_assert(!dir->is_ambiguous_dir_auth());
+ ceph_assert(dir->get_dir_auth().first != mds->get_nodeid());
continue;
}
- assert(dir->is_ambiguous_dir_auth());
- assert(dir->authority().first == mds->get_nodeid() ||
+ ceph_assert(dir->is_ambiguous_dir_auth());
+ ceph_assert(dir->authority().first == mds->get_nodeid() ||
dir->authority().second == mds->get_nodeid());
}
p->second.state == EXPORT_FREEZING ||
p->second.state == EXPORT_CANCELLING)
continue;
- assert(dir->is_ambiguous_dir_auth());
- assert(dir->authority().first == mds->get_nodeid() ||
+ ceph_assert(dir->is_ambiguous_dir_auth());
+ ceph_assert(dir->authority().first == mds->get_nodeid() ||
dir->authority().second == mds->get_nodeid());
}
public:
C_MDC_ExportFreeze(Migrator *m, CDir *e, uint64_t t) :
MigratorContext(m), ex(e), tid(t) {
- assert(ex != NULL);
+ ceph_assert(ex != NULL);
}
void finish(int r) override {
if (r >= 0)
void Migrator::export_dir(CDir *dir, mds_rank_t dest)
{
dout(7) << "export_dir " << *dir << " to " << dest << dendl;
- assert(dir->is_auth());
- assert(dest != mds->get_nodeid());
+ ceph_assert(dir->is_auth());
+ ceph_assert(dest != mds->get_nodeid());
if (!(mds->is_active() || mds->is_stopping())) {
dout(7) << "i'm not active, no exports for now" << dendl;
while (n--) ++p;
CDir *bd = *p;
if (!(bd->is_frozen() || bd->is_freezing())) {
- assert(bd->is_auth());
+ ceph_assert(bd->is_auth());
dir->state_set(CDir::STATE_AUXSUBTREE);
mds->mdcache->adjust_subtree_auth(dir, mds->get_nodeid());
dout(0) << "export_dir: create aux subtree " << *bd << " under " << *dir << dendl;
MDRequestRef mdr = mds->mdcache->request_start_internal(CEPH_MDS_OP_EXPORTDIR);
mdr->more()->export_dir = dir;
- assert(export_state.count(dir) == 0);
+ ceph_assert(export_state.count(dir) == 0);
export_state_t& stat = export_state[dir];
num_locking_exports++;
stat.state = EXPORT_LOCKING;
if (it == export_state.end() || it->second.tid != mdr->reqid.tid) {
// export must have aborted.
dout(7) << "export must have aborted " << *mdr << dendl;
- assert(mdr->killed || mdr->aborted);
+ ceph_assert(mdr->killed || mdr->aborted);
if (mdr->aborted) {
mdr->aborted = false;
mds->mdcache->request_kill(mdr);
}
return;
}
- assert(it->second.state == EXPORT_LOCKING);
+ ceph_assert(it->second.state == EXPORT_LOCKING);
mds_rank_t dest = it->second.peer;
return;
}
- assert(g_conf()->mds_kill_export_at != 1);
+ ceph_assert(g_conf()->mds_kill_export_at != 1);
auto parent = it->second.parent;
auto discover = MExportDirDiscover::create(dir->dirfrag(), path,
mds->get_nodeid(), it->second.tid);
mds->send_message_mds(discover, dest);
- assert(g_conf()->mds_kill_export_at != 2);
+ ceph_assert(g_conf()->mds_kill_export_at != 2);
it->second.last_cum_auth_pins_change = ceph_clock_now();
it->second.approx_size = results.front().second;
// start the freeze, but hold it up with an auth_pin.
dir->freeze_tree();
- assert(dir->is_freezing_tree());
+ ceph_assert(dir->is_freezing_tree());
dir->add_waiter(CDir::WAIT_FROZEN, new C_MDC_ExportFreeze(this, dir, it->second.tid));
return;
}
for (auto& p : results) {
CDir *sub = p.first;
- assert(sub != dir);
+ ceph_assert(sub != dir);
dout(7) << " sub " << *sub << dendl;
sub->auth_pin(this);
MDRequestRef _mdr = mds->mdcache->request_start_internal(CEPH_MDS_OP_EXPORTDIR);
_mdr->more()->export_dir = sub;
- assert(export_state.count(sub) == 0);
+ ceph_assert(export_state.count(sub) == 0);
auto& stat = export_state[sub];
num_locking_exports++;
stat.state = EXPORT_LOCKING;
dout(7) << "adjust_export_after_rename " << *diri << dendl;
auto &stat = export_state.at(freezing_dir);
- assert(stat.state == EXPORT_DISCOVERING ||
- stat.state == EXPORT_FREEZING);
+ ceph_assert(stat.state == EXPORT_DISCOVERING ||
+ stat.state == EXPORT_FREEZING);
if (g_conf()->mds_thrash_exports) {
if (rand() % 3 == 0) {
{
CDir *dir = cache->get_dirfrag(m->get_dirfrag());
mds_rank_t dest(m->get_source().num());
- assert(dir);
+ ceph_assert(dir);
dout(7) << "export_discover_ack from " << m->get_source()
<< " on " << *dir << dendl;
it->second.peer != dest) {
dout(7) << "must have aborted" << dendl;
} else {
- assert(it->second.state == EXPORT_DISCOVERING);
+ ceph_assert(it->second.state == EXPORT_DISCOVERING);
if (m->is_success()) {
// release locks to avoid deadlock
MDRequestRef mdr = static_cast<MDRequestImpl*>(it->second.mut.get());
- assert(mdr);
+ ceph_assert(mdr);
mds->mdcache->request_finish(mdr);
it->second.mut.reset();
// freeze the subtree
it->second.state = EXPORT_FREEZING;
dir->auth_unpin(this);
- assert(g_conf()->mds_kill_export_at != 3);
+ ceph_assert(g_conf()->mds_kill_export_at != 3);
} else {
dout(7) << "peer failed to discover (not active?), canceling" << dendl;
public:
C_M_ExportSessionsFlushed(Migrator *m, CDir *d, uint64_t t)
: MigratorContext(m), dir(d), tid(t) {
- assert(dir != NULL);
+ ceph_assert(dir != NULL);
}
void finish(int r) override {
mig->export_sessions_flushed(dir, tid);
return;
}
- assert(it->second.state == EXPORT_PREPPING || it->second.state == EXPORT_WARNING);
- assert(it->second.warning_ack_waiting.count(MDS_RANK_NONE) > 0);
+ ceph_assert(it->second.state == EXPORT_PREPPING || it->second.state == EXPORT_WARNING);
+ ceph_assert(it->second.warning_ack_waiting.count(MDS_RANK_NONE) > 0);
it->second.warning_ack_waiting.erase(MDS_RANK_NONE);
if (it->second.state == EXPORT_WARNING && it->second.warning_ack_waiting.empty())
export_go(dir); // start export.
return;
}
- assert(it->second.state == EXPORT_FREEZING);
- assert(dir->is_frozen_tree_root());
- assert(dir->get_cum_auth_pins() == 0);
+ ceph_assert(it->second.state == EXPORT_FREEZING);
+ ceph_assert(dir->is_frozen_tree_root());
+ ceph_assert(dir->get_cum_auth_pins() == 0);
CInode *diri = dir->get_inode();
cache->show_subtrees();
// CDir::_freeze_tree() should have forced it into subtree.
- assert(dir->get_dir_auth() == mds_authority_t(mds->get_nodeid(), mds->get_nodeid()));
+ ceph_assert(dir->get_dir_auth() == mds_authority_t(mds->get_nodeid(), mds->get_nodeid()));
// note the bounds.
set<CDir*> bounds;
cache->get_subtree_bounds(dir, bounds);
inodes_added.insert(cur->inode->ino());
// prepend dentry + inode
- assert(cur->inode->is_auth());
+ ceph_assert(cur->inode->is_auth());
bufferlist bl;
cache->replicate_dentry(cur->inode->parent, it->second.peer, bl);
dout(7) << " added " << *cur->inode->parent << dendl;
assert (g_conf()->mds_kill_export_at != 4);
// make sure any new instantiations of caps are flushed out
- assert(it->second.warning_ack_waiting.empty());
+ ceph_assert(it->second.warning_ack_waiting.empty());
set<client_t> export_client_set;
get_export_client_set(dir, export_client_set);
for (auto& q : ls) {
if (!q->state_test(CDir::STATE_EXPORTBOUND)) {
// include nested dirfrag
- assert(q->get_dir_auth().first == CDIR_AUTH_PARENT);
+ ceph_assert(q->get_dir_auth().first == CDIR_AUTH_PARENT);
dfs.push_back(q); // it's ours, recurse (later)
}
}
{
CDir *dir = cache->get_dirfrag(m->get_dirfrag());
mds_rank_t dest(m->get_source().num());
- assert(dir);
+ ceph_assert(dir);
dout(7) << "export_prep_ack " << *dir << dendl;
dout(7) << "export must have aborted" << dendl;
return;
}
- assert(it->second.state == EXPORT_PREPPING);
+ ceph_assert(it->second.state == EXPORT_PREPPING);
if (!m->is_success()) {
dout(7) << "peer couldn't acquire all needed locks or wasn't active, canceling" << dendl;
set<CDir*> bounds;
cache->get_subtree_bounds(dir, bounds);
- assert(it->second.warning_ack_waiting.empty() ||
+ ceph_assert(it->second.warning_ack_waiting.empty() ||
(it->second.warning_ack_waiting.size() == 1 &&
it->second.warning_ack_waiting.count(MDS_RANK_NONE) > 0));
- assert(it->second.notify_ack_waiting.empty());
+ ceph_assert(it->second.notify_ack_waiting.empty());
for (const auto &p : dir->get_replicas()) {
if (p.first == it->second.peer) continue;
it->second.state = EXPORT_WARNING;
- assert(g_conf()->mds_kill_export_at != 6);
+ ceph_assert(g_conf()->mds_kill_export_at != 6);
// nobody to warn?
if (it->second.warning_ack_waiting.empty())
export_go(dir); // start export.
public:
C_M_ExportGo(Migrator *m, CDir *d, uint64_t t) :
MigratorContext(m), dir(d), tid(t) {
- assert(dir != NULL);
+ ceph_assert(dir != NULL);
}
void finish(int r) override {
mig->export_go_synced(dir, tid);
void Migrator::export_go(CDir *dir)
{
auto it = export_state.find(dir);
- assert(it != export_state.end());
+ ceph_assert(it != export_state.end());
dout(7) << "export_go " << *dir << " to " << it->second.peer << dendl;
// first sync log to flush out e.g. any cap imports
dout(7) << "export must have aborted on " << dir << dendl;
return;
}
- assert(it->second.state == EXPORT_WARNING);
+ ceph_assert(it->second.state == EXPORT_WARNING);
mds_rank_t dest = it->second.peer;
dout(7) << "export_go_synced " << *dir << " to " << dest << dendl;
cache->show_subtrees();
it->second.state = EXPORT_EXPORTING;
- assert(g_conf()->mds_kill_export_at != 7);
+ ceph_assert(g_conf()->mds_kill_export_at != 7);
- assert(dir->is_frozen_tree_root());
- assert(dir->get_cum_auth_pins() == 0);
+ ceph_assert(dir->is_frozen_tree_root());
+ ceph_assert(dir->get_cum_auth_pins() == 0);
// set ambiguous auth
cache->adjust_subtree_auth(dir, mds->get_nodeid(), dest);
// send
mds->send_message_mds(req, dest);
- assert(g_conf()->mds_kill_export_at != 8);
+ ceph_assert(g_conf()->mds_kill_export_at != 8);
mds->hit_export_target(dest, num_exported_inodes+1);
map<client_t,client_metadata_t>& exported_client_metadata_map)
{
dout(7) << "encode_export_inode " << *in << dendl;
- assert(!in->is_replica(mds->get_nodeid()));
+ ceph_assert(!in->is_replica(mds->get_nodeid()));
// relax locks?
if (!in->is_replicated()) {
cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier());
map<client_t,Capability::Import>::iterator q = peer_imported.find(p.first);
- assert(q != peer_imported.end());
+ ceph_assert(q != peer_imported.end());
m->set_cap_peer(q->second.cap_id, q->second.issue_seq, q->second.mseq,
(q->second.cap_id > 0 ? peer : -1), 0);
mds->send_message_client_counted(m, p.first);
in->policylock.export_twiddle();
// mark auth
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
in->state_clear(CInode::STATE_AUTH);
in->replica_nonce = CInode::EXPORT_NONCE;
dout(7) << "encode_export_dir " << *dir << " " << dir->get_num_head_items() << " head items" << dendl;
- assert(dir->get_projected_version() == dir->get_version());
+ ceph_assert(dir->get_projected_version() == dir->get_version());
#ifdef MDS_VERIFY_FRAGSTAT
if (dir->is_complete())
CDir *t = *p;
if (!t->state_test(CDir::STATE_EXPORTBOUND)) {
// include nested dirfrag
- assert(t->get_dir_auth().first == CDIR_AUTH_PARENT);
+ ceph_assert(t->get_dir_auth().first == CDIR_AUTH_PARENT);
subdirs.push_front(t); // it's ours, recurse (later)
}
}
dir->clear_replica_map();
// mark
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
dir->state_clear(CDir::STATE_AUTH);
dir->remove_bloom();
dir->replica_nonce = CDir::EXPORT_NONCE;
{
CDir *dir = cache->get_dirfrag(m->get_dirfrag());
mds_rank_t dest(m->get_source().num());
- assert(dir);
- assert(dir->is_frozen_tree_root()); // i'm exporting!
+ ceph_assert(dir);
+ ceph_assert(dir->is_frozen_tree_root()); // i'm exporting!
// yay!
dout(7) << "handle_export_ack " << *dir << dendl;
mds->hit_export_target(dest, -1);
map<CDir*,export_state_t>::iterator it = export_state.find(dir);
- assert(it != export_state.end());
- assert(it->second.state == EXPORT_EXPORTING);
- assert(it->second.tid == m->get_tid());
+ ceph_assert(it != export_state.end());
+ ceph_assert(it->second.state == EXPORT_EXPORTING);
+ ceph_assert(it->second.tid == m->get_tid());
auto bp = m->imported_caps.cbegin();
decode(it->second.peer_imported, bp);
{
dout(7) << "export_notify_abort " << *dir << dendl;
- assert(stat.state == EXPORT_CANCELLING);
+ ceph_assert(stat.state == EXPORT_CANCELLING);
if (stat.notify_ack_waiting.empty()) {
stat.state = EXPORT_CANCELLED;
{
CDir *dir = cache->get_dirfrag(m->get_dirfrag());
mds_rank_t dest(m->get_source().num());
- assert(dir);
+ ceph_assert(dir);
mds_rank_t from = mds_rank_t(m->get_source().num());
mds->hit_export_target(dest, -1);
// reversing import
dout(7) << "handle_export_notify_ack from " << m->get_source()
<< ": aborting import on " << *dir << dendl;
- assert(stat.bystanders.count(from));
+ ceph_assert(stat.bystanders.count(from));
stat.bystanders.erase(from);
if (stat.bystanders.empty())
import_reverse_unfreeze(dir);
} else {
dout(7) << "not sending MExportDirFinish last, dest has failed" << dendl;
}
- assert(g_conf()->mds_kill_export_at != 13);
+ ceph_assert(g_conf()->mds_kill_export_at != 13);
// finish export (adjust local cache state)
int num_dentries = 0;
finish_export_dir(dir, it->second.peer,
it->second.peer_imported, finished, &num_dentries);
- assert(!dir->is_auth());
+ ceph_assert(!dir->is_auth());
cache->adjust_subtree_auth(dir, it->second.peer);
// unpin bounds
total_exporting_size -= it->second.approx_size;
export_state.erase(it);
- assert(dir->state_test(CDir::STATE_EXPORTING));
+ ceph_assert(dir->state_test(CDir::STATE_EXPORTING));
dir->clear_exporting();
cache->show_subtrees();
void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bool started)
{
mds_rank_t from = m->get_source_mds();
- assert(from != mds->get_nodeid());
+ ceph_assert(from != mds->get_nodeid());
dout(7) << "handle_export_discover on " << m->get_path() << dendl;
import_state_t *p_state;
map<dirfrag_t,import_state_t>::iterator it = import_state.find(df);
if (!started) {
- assert(it == import_state.end());
+ ceph_assert(it == import_state.end());
p_state = &import_state[df];
p_state->state = IMPORT_DISCOVERING;
p_state->peer = from;
dout(7) << " dropping obsolete message" << dendl;
return;
}
- assert(it->second.state == IMPORT_DISCOVERING);
+ ceph_assert(it->second.state == IMPORT_DISCOVERING);
p_state = &it->second;
}
p_state->state = IMPORT_DISCOVERED;
// pin inode in the cache (for now)
- assert(in->is_dir());
+ ceph_assert(in->is_dir());
in->get(CInode::PIN_IMPORTING);
// reply
dirfrag_t df = m->get_dirfrag();
map<dirfrag_t,import_state_t>::iterator it = import_state.find(df);
if (it == import_state.end()) {
- assert(0 == "got export_cancel in weird state");
+ ceph_assert(0 == "got export_cancel in weird state");
} else if (it->second.state == IMPORT_DISCOVERING) {
import_reverse_discovering(df);
} else if (it->second.state == IMPORT_DISCOVERED) {
CInode *in = cache->get_inode(df.ino);
- assert(in);
+ ceph_assert(in);
import_reverse_discovered(df, in);
} else if (it->second.state == IMPORT_PREPPING) {
CDir *dir = mds->mdcache->get_dirfrag(df);
- assert(dir);
+ ceph_assert(dir);
import_reverse_prepping(dir, it->second);
} else if (it->second.state == IMPORT_PREPPED) {
CDir *dir = mds->mdcache->get_dirfrag(df);
- assert(dir);
+ ceph_assert(dir);
set<CDir*> bounds;
cache->get_subtree_bounds(dir, bounds);
import_remove_pins(dir, bounds);
cache->adjust_subtree_auth(dir, it->second.peer);
import_reverse_unfreeze(dir);
} else {
- assert(0 == "got export_cancel in weird state");
+ ceph_assert(0 == "got export_cancel in weird state");
}
}
void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_assim)
{
mds_rank_t oldauth = mds_rank_t(m->get_source().num());
- assert(oldauth != mds->get_nodeid());
+ ceph_assert(oldauth != mds->get_nodeid());
CDir *dir;
CInode *diri;
// assimilate root dir.
map<dirfrag_t,import_state_t>::iterator it = import_state.find(m->get_dirfrag());
if (!did_assim) {
- assert(it != import_state.end());
- assert(it->second.state == IMPORT_DISCOVERED);
- assert(it->second.peer == oldauth);
+ ceph_assert(it != import_state.end());
+ ceph_assert(it->second.state == IMPORT_DISCOVERED);
+ ceph_assert(it->second.peer == oldauth);
diri = cache->get_inode(m->get_dirfrag().ino);
- assert(diri);
+ ceph_assert(diri);
auto p = m->basedir.cbegin();
dir = cache->add_replica_dir(p, diri, oldauth, finished);
dout(7) << "handle_export_prep on " << *dir << " (first pass)" << dendl;
dout(7) << "handle_export_prep obsolete message, dropping" << dendl;
return;
}
- assert(it->second.state == IMPORT_PREPPING);
- assert(it->second.peer == oldauth);
+ ceph_assert(it->second.state == IMPORT_PREPPING);
+ ceph_assert(it->second.peer == oldauth);
dir = cache->get_dirfrag(m->get_dirfrag());
- assert(dir);
+ ceph_assert(dir);
dout(7) << "handle_export_prep on " << *dir << " (subsequent pass)" << dendl;
diri = dir->get_inode();
}
- assert(dir->is_auth() == false);
+ ceph_assert(dir->is_auth() == false);
cache->show_subtrees();
it->second.state = IMPORT_PREPPING;
it->second.bound_ls = m->get_bounds();
it->second.bystanders = m->get_bystanders();
- assert(g_conf()->mds_kill_import_at != 3);
+ ceph_assert(g_conf()->mds_kill_import_at != 3);
// bystander list
dout(7) << "bystanders are " << it->second.bystanders << dendl;
CDir *cur = 0;
if (start == 'd') {
cur = cache->get_dirfrag(df);
- assert(cur);
+ ceph_assert(cur);
dout(10) << " had " << *cur << dendl;
} else if (start == 'f') {
CInode *in = cache->get_inode(df.ino);
- assert(in);
+ ceph_assert(in);
dout(10) << " had " << *in << dendl;
cur = cache->add_replica_dir(q, in, oldauth, finished);
dout(10) << " added " << *cur << dendl;
} else if (start == '-') {
// nothing
} else
- assert(0 == "unrecognized start char");
+ ceph_assert(0 == "unrecognized start char");
while (!q.end()) {
CDentry *dn = cache->add_replica_dentry(q, cur, finished);
p != import_bound_fragset.end();
++p) {
CInode *in = cache->get_inode(p->first);
- assert(in);
+ ceph_assert(in);
in->get_stickydirs();
dout(7) << " set stickydirs on bound inode " << *in << dendl;
}
p != import_bound_fragset.end();
++p) {
CInode *in = cache->get_inode(p->first);
- assert(in);
+ ceph_assert(in);
// map fragset into a frag_t list, based on the inode fragtree
list<frag_t> fglist;
dout(7) << " sending export_prep_ack on " << *dir << dendl;
mds->send_message(MExportDirPrepAck::create(dir->dirfrag(), success, m->get_tid()), m->get_connection());
- assert(g_conf()->mds_kill_import_at != 4);
+ ceph_assert(g_conf()->mds_kill_import_at != 4);
}
{
assert (g_conf()->mds_kill_import_at != 5);
CDir *dir = cache->get_dirfrag(m->dirfrag);
- assert(dir);
+ ceph_assert(dir);
mds_rank_t oldauth = mds_rank_t(m->get_source().num());
dout(7) << "handle_export_dir importing " << *dir << " from " << oldauth << dendl;
- assert(!dir->is_auth());
+ ceph_assert(!dir->is_auth());
map<dirfrag_t,import_state_t>::iterator it = import_state.find(m->dirfrag);
- assert(it != import_state.end());
- assert(it->second.state == IMPORT_PREPPED);
- assert(it->second.tid == m->get_tid());
- assert(it->second.peer == oldauth);
+ ceph_assert(it != import_state.end());
+ ceph_assert(it->second.state == IMPORT_PREPPED);
+ ceph_assert(it->second.tid == m->get_tid());
+ ceph_assert(it->second.peer == oldauth);
if (!dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()))
dir->get_inode()->dirfragtree.force_to_leaf(g_ceph_context, dir->get_frag());
map<client_t,client_metadata_t> client_metadata_map;
decode(client_map, cmp);
decode(client_metadata_map, cmp);
- assert(cmp.end());
+ ceph_assert(cmp.end());
le->cmapv = mds->server->prepare_force_open_sessions(client_map, client_metadata_map,
onlogged->imported_session_map);
encode(client_map, le->client_map, mds->mdsmap->get_up_features());
set<CDir*> import_bounds;
for (const auto &bound : m->bounds) {
CDir *bd = cache->get_dirfrag(bound);
- assert(bd);
+ ceph_assert(bd);
le->metablob.add_dir(bd, false); // note that parent metadata is already in the event
import_bounds.insert(bd);
}
continue;
did.insert(p->ino);
CInode *in = cache->get_inode(p->ino);
- assert(in);
+ ceph_assert(in);
in->put_stickydirs();
}
} else if (stat.state >= IMPORT_PREPPED) {
// bounding dirfrags
for (auto bd : bounds) {
- assert(bd->state_test(CDir::STATE_IMPORTBOUND));
+ ceph_assert(bd->state_test(CDir::STATE_IMPORTBOUND));
bd->put(CDir::PIN_IMPORTBOUND);
bd->state_clear(CDir::STATE_IMPORTBOUND);
}
import_remove_pins(dir, bounds);
// update auth, with possible subtree merge.
- assert(dir->is_subtree_root());
+ ceph_assert(dir->is_subtree_root());
if (mds->is_resolve())
cache->trim_non_auth_subtree(dir);
++q) {
Capability *cap = in->get_client_cap(q->first);
if (!cap) {
- assert(!stat.session_map.count(q->first));
+ ceph_assert(!stat.session_map.count(q->first));
continue;
}
if (cap->is_importing())
void Migrator::import_reverse_unfreeze(CDir *dir)
{
dout(7) << "import_reverse_unfreeze " << *dir << dendl;
- assert(!dir->is_auth());
+ ceph_assert(!dir->is_auth());
cache->discard_delayed_expire(dir);
dir->unfreeze_tree();
if (dir->is_subtree_root())
// clean up
map<dirfrag_t, import_state_t>::iterator it = import_state.find(dir->dirfrag());
- assert(it != import_state.end());
+ ceph_assert(it != import_state.end());
MutationRef mut = it->second.mut;
import_state.erase(it);
void Migrator::handle_export_finish(const MExportDirFinish::const_ref &m)
{
CDir *dir = cache->get_dirfrag(m->get_dirfrag());
- assert(dir);
+ ceph_assert(dir);
dout(7) << "handle_export_finish on " << *dir << (m->is_last() ? " last" : "") << dendl;
map<dirfrag_t,import_state_t>::iterator it = import_state.find(m->get_dirfrag());
- assert(it != import_state.end());
- assert(it->second.tid == m->get_tid());
+ ceph_assert(it != import_state.end());
+ ceph_assert(it->second.tid == m->get_tid());
import_finish(dir, false, m->is_last());
}
dout(7) << "import_finish on " << *dir << dendl;
map<dirfrag_t,import_state_t>::iterator it = import_state.find(dir->dirfrag());
- assert(it != import_state.end());
- assert(it->second.state == IMPORT_ACKING || it->second.state == IMPORT_FINISHING);
+ ceph_assert(it != import_state.end());
+ ceph_assert(it->second.state == IMPORT_ACKING || it->second.state == IMPORT_FINISHING);
if (it->second.state == IMPORT_ACKING) {
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
cache->adjust_subtree_auth(dir, mds->get_nodeid(), mds->get_nodeid());
}
// log finish
- assert(g_conf()->mds_kill_import_at != 9);
+ ceph_assert(g_conf()->mds_kill_import_at != 9);
if (it->second.state == IMPORT_ACKING) {
for (map<CInode*, map<client_t,Capability::Export> >::iterator p = it->second.peer_exports.begin();
p != it->second.peer_exports.end();
++p) {
CInode *in = p->first;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
for (map<client_t,Capability::Export>::iterator q = p->second.begin();
q != p->second.end();
++q) {
Session *session = r->second.first;
Capability *cap = in->get_client_cap(q->first);
- assert(cap);
+ ceph_assert(cap);
cap->merge(q->second, true);
cap->clear_importing();
mds->mdcache->do_cap_import(session, in, cap, q->second.cap_id, q->second.seq,
}
if (!last) {
- assert(it->second.state == IMPORT_ACKING);
+ ceph_assert(it->second.state == IMPORT_ACKING);
it->second.state = IMPORT_FINISHING;
return;
}
// link before state -- or not! -sage
if (dn->get_linkage()->get_inode() != in) {
- assert(!dn->get_linkage()->get_inode());
+ ceph_assert(!dn->get_linkage()->get_inode());
dn->dir->link_primary_inode(dn, in);
}
decode(df, blp);
CInode *diri = cache->get_inode(df.ino);
- assert(diri);
+ ceph_assert(diri);
CDir *dir = diri->get_or_open_dirfrag(mds->mdcache, df.frag);
- assert(dir);
+ ceph_assert(dir);
dout(7) << "decode_import_dir " << *dir << dendl;
if (icode == 'N') {
// null dentry
- assert(dn->get_linkage()->is_null());
+ ceph_assert(dn->get_linkage()->is_null());
// fall thru
}
decode(ino, blp);
decode(d_type, blp);
if (dn->get_linkage()->is_remote()) {
- assert(dn->get_linkage()->get_remote_ino() == ino);
+ ceph_assert(dn->get_linkage()->get_remote_ino() == ino);
} else {
dir->link_remote_inode(dn, ino, d_type);
}
}
else if (icode == 'I') {
// inode
- assert(le);
+ ceph_assert(le);
decode_import_inode(dn, blp, oldauth, ls,
peer_exports, updated_scatterlocks);
}
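// decode_import_dir() consumes a tagged dentry stream mirroring what the
// exporter encoded: each entry carries a one-byte code ('N' for a null
// dentry, a remote-link code carrying <ino, d_type>, and 'I' for a primary
// inode, which is then decoded via decode_import_inode()).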
mds_rank_t dest = in->authority().first;
dout(7) << "export_caps to mds." << dest << " " << *in << dendl;
- assert(in->is_any_caps());
- assert(!in->is_auth());
- assert(!in->is_ambiguous_auth());
- assert(!in->state_test(CInode::STATE_EXPORTINGCAPS));
+ ceph_assert(in->is_any_caps());
+ ceph_assert(!in->is_auth());
+ ceph_assert(!in->is_ambiguous_auth());
+ ceph_assert(!in->state_test(CInode::STATE_EXPORTINGCAPS));
auto ex = MExportCaps::create();
ex->ino = in->ino();
mds_rank_t from = ack->get_source().num();
CInode *in = cache->get_inode(ack->ino);
if (in) {
- assert(!in->is_auth());
+ ceph_assert(!in->is_auth());
dout(10) << "handle_export_caps_ack " << *ack << " from "
<< ack->get_source() << " on " << *in << dendl;
dout(10) << "handle_export_caps " << *ex << " from " << ex->get_source() << dendl;
CInode *in = cache->get_inode(ex->ino);
- assert(in);
- assert(in->is_auth());
+ ceph_assert(in);
+ ceph_assert(in->is_auth());
// FIXME
if (!in->can_auth_pin()) {
// decode new caps
auto blp = ex->cap_bl.cbegin();
decode_import_inode_caps(in, false, blp, finish->peer_exports);
- assert(!finish->peer_exports.empty()); // thus, inode is pinned.
+ ceph_assert(!finish->peer_exports.empty()); // thus, inode is pinned.
// journal open client sessions
ESessions *le = new ESessions(pv, std::move(client_map),
{
dout(10) << "logged_import_caps on " << *in << dendl;
// see export_go() vs export_go_synced()
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
// force open client sessions and finish cap import
mds->server->finish_force_open_sessions(imported_session_map);
auto it = peer_exports.find(in);
- assert(it != peer_exports.end());
+ ceph_assert(it != peer_exports.end());
// clients will release caps from the exporter when they receive the cap import message.
map<client_t,Capability::Import> imported_caps;
int get_import_state(dirfrag_t df) const {
map<dirfrag_t, import_state_t>::const_iterator it = import_state.find(df);
- assert(it != import_state.end());
+ ceph_assert(it != import_state.end());
return it->second.state;
}
int get_import_peer(dirfrag_t df) const {
map<dirfrag_t, import_state_t>::const_iterator it = import_state.find(df);
- assert(it != import_state.end());
+ ceph_assert(it != import_state.end());
return it->second.peer;
}
int get_export_state(CDir *dir) const {
map<CDir*, export_state_t>::const_iterator it = export_state.find(dir);
- assert(it != export_state.end());
+ ceph_assert(it != export_state.end());
return it->second.state;
}
// this returns true if we are exporting @dir,
// only returns meaningful results during EXPORT_WARNING state.
bool export_has_warned(CDir *dir, mds_rank_t who) {
map<CDir*, export_state_t>::iterator it = export_state.find(dir);
- assert(it != export_state.end());
- assert(it->second.state == EXPORT_WARNING);
+ ceph_assert(it != export_state.end());
+ ceph_assert(it->second.state == EXPORT_WARNING);
return (it->second.warning_ack_waiting.count(who) == 0);
}
bool export_has_notified(CDir *dir, mds_rank_t who) const {
map<CDir*, export_state_t>::const_iterator it = export_state.find(dir);
- assert(it != export_state.end());
- assert(it->second.state == EXPORT_NOTIFYING);
+ ceph_assert(it != export_state.end());
+ ceph_assert(it->second.state == EXPORT_NOTIFYING);
return (it->second.notify_ack_waiting.count(who) == 0);
}
void export_freeze_inc_num_waiters(CDir *dir) {
map<CDir*, export_state_t>::iterator it = export_state.find(dir);
- assert(it != export_state.end());
+ ceph_assert(it != export_state.end());
it->second.num_remote_waiters++;
}
void find_stale_export_freeze();
void MutationImpl::unpin(MDSCacheObject *o)
{
- assert(pins.count(o));
+ ceph_assert(pins.count(o));
o->put(MDSCacheObject::PIN_REQUEST);
pins.erase(o);
}
void MutationImpl::start_locking(SimpleLock *lock, int target)
{
- assert(locking == NULL);
+ ceph_assert(locking == NULL);
pin(lock->get_parent());
locking = lock;
locking_target_mds = target;
void MutationImpl::finish_locking(SimpleLock *lock)
{
- assert(locking == lock);
+ ceph_assert(locking == lock);
locking = NULL;
locking_target_mds = -1;
}
void MutationImpl::auth_unpin(MDSCacheObject *object)
{
- assert(auth_pins.count(object));
+ ceph_assert(auth_pins.count(object));
object->auth_unpin(this);
auth_pins.erase(object);
}
for (set<MDSCacheObject*>::iterator it = auth_pins.begin();
it != auth_pins.end();
++it) {
- assert((*it)->is_auth());
+ ceph_assert((*it)->is_auth());
(*it)->auth_unpin(this);
}
auth_pins.clear();
bool MDRequestImpl::freeze_auth_pin(CInode *inode)
{
- assert(!more()->rename_inode || more()->rename_inode == inode);
+ ceph_assert(!more()->rename_inode || more()->rename_inode == inode);
more()->rename_inode = inode;
more()->is_freeze_authpin = true;
auth_pin(inode);
void MDRequestImpl::unfreeze_auth_pin(bool clear_inode)
{
- assert(more()->is_freeze_authpin);
+ ceph_assert(more()->is_freeze_authpin);
CInode *inode = more()->rename_inode;
if (inode->is_frozen_auth_pin())
inode->unfreeze_auth_pin();
void MDRequestImpl::set_ambiguous_auth(CInode *inode)
{
- assert(!more()->rename_inode || more()->rename_inode == inode);
- assert(!more()->is_ambiguous_auth);
+ ceph_assert(!more()->rename_inode || more()->rename_inode == inode);
+ ceph_assert(!more()->is_ambiguous_auth);
inode->set_ambiguous_auth();
more()->rename_inode = inode;
void MDRequestImpl::clear_ambiguous_auth()
{
CInode *inode = more()->rename_inode;
- assert(inode && more()->is_ambiguous_auth);
+ ceph_assert(inode && more()->is_ambiguous_auth);
inode->clear_ambiguous_auth();
more()->is_ambiguous_auth = false;
}
void MDRequestImpl::set_filepath(const filepath& fp)
{
- assert(!client_request);
+ ceph_assert(!client_request);
more()->filepath1 = fp;
}
void MDRequestImpl::set_filepath2(const filepath& fp)
{
- assert(!client_request);
+ ceph_assert(!client_request);
more()->filepath2 = fp;
}
reqid(ri), attempt(att),
slave_to_mds(slave_to) { }
~MutationImpl() override {
- assert(locking == NULL);
- assert(pins.empty());
- assert(auth_pins.empty());
- assert(xlocks.empty());
- assert(rdlocks.empty());
- assert(wrlocks.empty());
- assert(remote_wrlocks.empty());
+ ceph_assert(locking == NULL);
+ ceph_assert(pins.empty());
+ ceph_assert(auth_pins.empty());
+ ceph_assert(xlocks.empty());
+ ceph_assert(rdlocks.empty());
+ ceph_assert(wrlocks.empty());
+ ceph_assert(remote_wrlocks.empty());
}
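The destructor above turns resource bookkeeping into hard invariants: by the time a MutationImpl is destroyed, every pin and every lock it took must already have been returned. A toy illustration of the pin half of that pattern, with hypothetical Object/Mutation types standing in for the MDS ones:

#include <cassert>
#include <set>

struct Object { int ref = 0; };

struct Mutation {
  std::set<Object*> pins;

  void pin(Object *o) {
    if (pins.insert(o).second)
      o->ref++;                // one ref per distinct pinned object
  }
  void unpin(Object *o) {
    assert(pins.count(o));     // must have been pinned by *this* mutation
    o->ref--;
    pins.erase(o);
  }
  ~Mutation() {
    assert(pins.empty());      // anything still pinned here is a leak
  }
};

int main() {
  Object o;
  {
    Mutation m;
    m.pin(&o);
    m.unpin(&o);               // skipping this would abort in ~Mutation
  }
  assert(o.ref == 0);
}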
bool is_master() const { return slave_to_mds == MDS_RANK_NONE; }
do {
auto p = anchor_map.find(in->ino());
if (p != anchor_map.end()) {
- assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
- assert(p->second.nref > 0);
+ ceph_assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
+ ceph_assert(p->second.nref > 0);
p->second.nref++;
break;
}
auto ret = anchor_map.emplace(std::piecewise_construct, std::forward_as_tuple(in->ino()),
std::forward_as_tuple(in->ino(), (pin ? pin->ino() : inodeno_t(0)),
(dn ? dn->get_name() : string()), in->d_type(), 1));
- assert(ret.second == true);
+ ceph_assert(ret.second == true);
in->state_set(CInode::STATE_TRACKEDBYOFT);
auto ret1 = dirty_items.emplace(in->ino(), (int)DIRTY_NEW);
if (!ret1.second) {
int omap_idx = ret1.first->second;
- assert(omap_idx >= 0);
+ ceph_assert(omap_idx >= 0);
ret.first->second.omap_idx = omap_idx;
}
void OpenFileTable::put_ref(CInode *in)
{
do {
- assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
+ ceph_assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
auto p = anchor_map.find(in->ino());
- assert(p != anchor_map.end());
- assert(p->second.nref > 0);
+ ceph_assert(p != anchor_map.end());
+ ceph_assert(p->second.nref > 0);
if (p->second.nref > 1) {
p->second.nref--;
CDentry *dn = in->get_parent_dn();
CInode *pin = dn ? dn->get_dir()->get_inode() : nullptr;
if (dn) {
- assert(p->second.dirino == pin->ino());
- assert(p->second.d_name == dn->get_name());
+ ceph_assert(p->second.dirino == pin->ino());
+ ceph_assert(p->second.d_name == dn->get_name());
} else {
- assert(p->second.dirino == inodeno_t(0));
- assert(p->second.d_name == "");
+ ceph_assert(p->second.dirino == inodeno_t(0));
+ ceph_assert(p->second.d_name == "");
}
int omap_idx = p->second.omap_idx;
auto ret = dirty_items.emplace(in->ino(), omap_idx);
if (!ret.second) {
if (ret.first->second == DIRTY_NEW) {
- assert(omap_idx < 0);
+ ceph_assert(omap_idx < 0);
dirty_items.erase(ret.first);
} else {
- assert(omap_idx >= 0);
+ ceph_assert(omap_idx >= 0);
ret.first->second = omap_idx;
}
}
dout(10) << __func__ << " " << *in << dendl;
if (!in->is_dir()) {
auto p = anchor_map.find(in->ino());
- assert(p == anchor_map.end());
+ ceph_assert(p == anchor_map.end());
}
get_ref(in);
}
dout(10) << __func__ << " " << *in << dendl;
if (!in->is_dir()) {
auto p = anchor_map.find(in->ino());
- assert(p != anchor_map.end());
- assert(p->second.nref == 1);
+ ceph_assert(p != anchor_map.end());
+ ceph_assert(p->second.nref == 1);
}
put_ref(in);
}
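get_ref()/put_ref() implement a per-inode reference count inside anchor_map: the first reference inserts an anchor with nref == 1, later references only bump nref, and the final put erases the entry. A self-contained miniature of that shape (names and key type are illustrative):

#include <cassert>
#include <cstdint>
#include <map>

struct Anchor { uint32_t nref; };

std::map<uint64_t, Anchor> anchor_map;   // keyed by inode number

void get_ref(uint64_t ino) {
  auto p = anchor_map.find(ino);
  if (p != anchor_map.end()) {
    assert(p->second.nref > 0);
    p->second.nref++;
    return;
  }
  auto ret = anchor_map.emplace(ino, Anchor{1});
  assert(ret.second);                    // first ref must insert
}

void put_ref(uint64_t ino) {
  auto p = anchor_map.find(ino);
  assert(p != anchor_map.end());         // put without matching get is a bug
  assert(p->second.nref > 0);
  if (--p->second.nref == 0)
    anchor_map.erase(p);                 // last ref drops the anchor
}

int main() {
  get_ref(42); get_ref(42);
  put_ref(42); put_ref(42);
  assert(anchor_map.empty());
}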
void OpenFileTable::add_dirfrag(CDir *dir)
{
dout(10) << __func__ << " " << *dir << dendl;
- assert(!dir->state_test(CDir::STATE_TRACKEDBYOFT));
+ ceph_assert(!dir->state_test(CDir::STATE_TRACKEDBYOFT));
dir->state_set(CDir::STATE_TRACKEDBYOFT);
auto ret = dirfrags.insert(dir->dirfrag());
- assert(ret.second);
+ ceph_assert(ret.second);
get_ref(dir->get_inode());
dirty_items.emplace(dir->ino(), (int)DIRTY_UNDEF);
}
void OpenFileTable::remove_dirfrag(CDir *dir)
{
dout(10) << __func__ << " " << *dir << dendl;
- assert(dir->state_test(CDir::STATE_TRACKEDBYOFT));
+ ceph_assert(dir->state_test(CDir::STATE_TRACKEDBYOFT));
dir->state_clear(CDir::STATE_TRACKEDBYOFT);
auto p = dirfrags.find(dir->dirfrag());
- assert(p != dirfrags.end());
+ ceph_assert(p != dirfrags.end());
dirfrags.erase(p);
dirty_items.emplace(dir->ino(), (int)DIRTY_UNDEF);
put_ref(dir->get_inode());
{
dout(10) << __func__ << " " << *in << dendl;
auto p = anchor_map.find(in->ino());
- assert(p != anchor_map.end());
- assert(p->second.nref > 0);
- assert(p->second.dirino == inodeno_t(0));
- assert(p->second.d_name == "");
+ ceph_assert(p != anchor_map.end());
+ ceph_assert(p->second.nref > 0);
+ ceph_assert(p->second.dirino == inodeno_t(0));
+ ceph_assert(p->second.d_name == "");
CDentry *dn = in->get_parent_dn();
CInode *pin = dn->get_dir()->get_inode();
{
dout(10) << __func__ << " " << *in << dendl;
auto p = anchor_map.find(in->ino());
- assert(p != anchor_map.end());
- assert(p->second.nref > 0);
+ ceph_assert(p != anchor_map.end());
+ ceph_assert(p->second.nref > 0);
CDentry *dn = in->get_parent_dn();
CInode *pin = dn->get_dir()->get_inode();
- assert(p->second.dirino == pin->ino());
- assert(p->second.d_name == dn->get_name());
+ ceph_assert(p->second.dirino == pin->ino());
+ ceph_assert(p->second.d_name == dn->get_name());
p->second.dirino = inodeno_t(0);
p->second.d_name = "";
return;
}
- assert(log_seq <= committing_log_seq);
- assert(log_seq >= committed_log_seq);
+ ceph_assert(log_seq <= committing_log_seq);
+ ceph_assert(log_seq >= committed_log_seq);
committed_log_seq = log_seq;
num_pending_commit--;
{
dout(10) << __func__ << " log_seq " << log_seq << dendl;
- assert(num_pending_commit == 0);
+ ceph_assert(num_pending_commit == 0);
num_pending_commit++;
- assert(log_seq >= committing_log_seq);
+ ceph_assert(log_seq >= committing_log_seq);
committing_log_seq = log_seq;
omap_version++;
if (journal_state == JOURNAL_NONE)
journal_state = JOURNAL_START;
else
- assert(journal_state == JOURNAL_START);
+ ceph_assert(journal_state == JOURNAL_START);
bufferlist header;
_encode_header(header, journal_state);
if (first_commit) {
auto q = loaded_anchor_map.find(it.first);
if (q != loaded_anchor_map.end()) {
- assert(p != anchor_map.end());
+ ceph_assert(p != anchor_map.end());
p->second.omap_idx = q->second.omap_idx;
bool same = p->second == q->second;
if (same) {
if (p != anchor_map.end()) {
omap_idx = p->second.omap_idx;
if (omap_idx < 0) {
- assert(it.second == DIRTY_NEW);
+ ceph_assert(it.second == DIRTY_NEW);
// find omap object to store the key
for (unsigned i = first_free_idx; i < omap_num_objs; i++) {
if (omap_num_items[i] < MAX_ITEMS_PER_OBJ)
}
if (omap_idx < 0) {
++omap_num_objs;
- assert(omap_num_objs <= MAX_OBJECTS);
+ ceph_assert(omap_num_objs <= MAX_OBJECTS);
omap_num_items.resize(omap_num_objs);
omap_updates.resize(omap_num_objs);
omap_updates.back().clear = true;
} else {
omap_idx = it.second;
unsigned& count = omap_num_items.at(omap_idx);
- assert(count > 0);
+ ceph_assert(count > 0);
--count;
if ((unsigned)omap_idx < first_free_idx && count < MAX_ITEMS_PER_OBJ)
first_free_idx = omap_idx;
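The allocation logic here spreads table entries across up to MAX_OBJECTS omap objects, each capped at MAX_ITEMS_PER_OBJ items: a new key goes into the first object with free room, and the object count grows only when every existing object is full. A compact sketch of that placement policy, with made-up constants:

#include <cassert>
#include <vector>

const unsigned MAX_OBJECTS = 1024;        // illustrative limits only
const unsigned MAX_ITEMS_PER_OBJ = 1024 * 1024;

std::vector<unsigned> omap_num_items;     // item count per omap object

// Returns the omap object index chosen for a brand-new key.
int alloc_slot() {
  for (unsigned i = 0; i < omap_num_items.size(); i++) {
    if (omap_num_items[i] < MAX_ITEMS_PER_OBJ) {
      omap_num_items[i]++;
      return (int)i;                      // first object with free room
    }
  }
  // all existing objects full: grow, within the hard cap
  assert(omap_num_items.size() + 1 <= MAX_OBJECTS);
  omap_num_items.push_back(1);
  return (int)omap_num_items.size() - 1;
}

int main() {
  int idx = alloc_slot();
  assert(idx == 0 && omap_num_items[0] == 1);
}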
int omap_idx = it.second.omap_idx;
unsigned& count = omap_num_items.at(omap_idx);
- assert(count > 0);
+ ceph_assert(count > 0);
--count;
auto& ctl = omap_updates.at(omap_idx);
if (omap_num_items[i] > 0)
used_objs = i + 1;
}
- assert(total_items == anchor_map.size());
+ ceph_assert(total_items == anchor_map.size());
// adjust omap object count
if (used_objs < omap_num_objs) {
omap_num_objs = used_objs;
// does not change.
if (!journaled && old_num_objs == omap_num_objs &&
objs_to_write.size() <= 1) {
- assert(journal_state == JOURNAL_NONE);
- assert(!gather.has_subs());
+ ceph_assert(journal_state == JOURNAL_NONE);
+ ceph_assert(!gather.has_subs());
unsigned omap_idx = objs_to_write.empty() ? 0 : objs_to_write.front();
create_op_func(omap_idx, true);
}
if (journal_state == JOURNAL_START) {
- assert(gather.has_subs());
+ ceph_assert(gather.has_subs());
journal_state = JOURNAL_FINISH;
} else {
// only object count changes
- assert(journal_state == JOURNAL_NONE);
- assert(!gather.has_subs());
+ ceph_assert(journal_state == JOURNAL_NONE);
+ ceph_assert(!gather.has_subs());
}
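The journal_state checks encode a small state machine: a commit that needs journaling moves JOURNAL_NONE to JOURNAL_START when the header is first staged, then JOURNAL_START to JOURNAL_FINISH once the updates are ready; anything else is asserted to be a no-journal commit. Sketched as standalone code (enum values assumed):

#include <cassert>

enum JournalState { JOURNAL_NONE, JOURNAL_START, JOURNAL_FINISH };
JournalState journal_state = JOURNAL_NONE;

void start_journal() {
  if (journal_state == JOURNAL_NONE)
    journal_state = JOURNAL_START;
  else
    assert(journal_state == JOURNAL_START);  // re-entry allowed, skips are not
}

void finish_journal(bool journaled) {
  if (journaled) {
    assert(journal_state == JOURNAL_START);
    journal_state = JOURNAL_FINISH;
  } else {
    assert(journal_state == JOURNAL_NONE);   // nothing was journaled
  }
}

int main() {
  start_journal();
  finish_journal(true);
  assert(journal_state == JOURNAL_FINISH);
}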
for (unsigned omap_idx = 0; omap_idx < omap_updates.size(); omap_idx++) {
auto& ctl = omap_updates[omap_idx];
- assert(ctl.to_update.empty() && ctl.to_remove.empty());
+ ceph_assert(ctl.to_update.empty() && ctl.to_remove.empty());
if (ctl.journal_idx == 0)
- assert(ctl.journaled_update.empty() && ctl.journaled_remove.empty());
+ ceph_assert(ctl.journaled_update.empty() && ctl.journaled_remove.empty());
bool first = true;
for (auto& it : ctl.journaled_update) {
create_op_func(omap_idx, first);
}
- assert(!ops_map.empty());
+ ceph_assert(!ops_map.empty());
if (journal_state == JOURNAL_FINISH) {
gather.set_finisher(new C_OnFinisher(new C_IO_OFT_Journal(this, log_seq, c, ops_map),
mds->finisher));
std::make_tuple());
RecoveredAnchor& anchor = it->second;
decode(anchor, p);
- assert(ino == anchor.ino);
+ ceph_assert(ino == anchor.ino);
anchor.omap_idx = idx;
anchor.auth = MDS_RANK_NONE;
omap_num_items.resize(omap_num_objs);
journal_state = jstate;
} else if (version == omap_version) {
- assert(omap_num_objs == num_objs);
+ ceph_assert(omap_num_objs == num_objs);
if (jstate > journal_state)
journal_state = jstate;
}
for (auto& q : to_remove) {
inodeno_t ino;
sscanf(q.c_str(), "%llx", (unsigned long long*)&ino.val);
- assert(ino.val > 0);
+ ceph_assert(ino.val > 0);
if (loaded_anchor_map.erase(ino)) {
unsigned& count = omap_num_items[omap_idx];
- assert(count > 0);
+ ceph_assert(count > 0);
--count;
}
auto r = loaded_dirfrags.lower_bound(dirfrag_t(ino, 0));
void OpenFileTable::load(MDSInternalContextBase *onload)
{
dout(10) << __func__ << dendl;
- assert(!load_done);
+ ceph_assert(!load_done);
if (onload)
waiting_for_load.push_back(onload);
{
if (prefetch_state == DIR_INODES && r >= 0 && ino != inodeno_t(0)) {
auto p = loaded_anchor_map.find(ino);
- assert(p != loaded_anchor_map.end());
+ ceph_assert(p != loaded_anchor_map.end());
p->second.auth = mds_rank_t(r);
}
finish_contexts(g_ceph_context, waiting_for_prefetch);
waiting_for_prefetch.clear();
} else {
- assert(0);
+ ceph_assert(0);
}
}
}
void OpenFileTable::_prefetch_dirfrags()
{
dout(10) << __func__ << dendl;
- assert(prefetch_state == DIRFRAGS);
+ ceph_assert(prefetch_state == DIRFRAGS);
MDCache *mdcache = mds->mdcache;
list<CDir*> fetch_queue;
int num_opening_dirfrags = 0;
for (auto dir : fetch_queue) {
if (dir->state_test(CDir::STATE_REJOINUNDEF))
- assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()));
+ ceph_assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()));
dir->fetch(gather.new_sub());
if (!(++num_opening_dirfrags % 1000))
void OpenFileTable::_prefetch_inodes()
{
dout(10) << __func__ << " state " << prefetch_state << dendl;
- assert(!num_opening_inodes);
+ ceph_assert(!num_opening_inodes);
num_opening_inodes = 1;
int64_t pool;
else if (prefetch_state == FILE_INODES)
pool = mds->mdsmap->get_first_data_pool();
else
- assert(0);
+ ceph_assert(0);
MDCache *mdcache = mds->mdcache;
bool OpenFileTable::prefetch_inodes()
{
dout(10) << __func__ << dendl;
- assert(!prefetch_state);
+ ceph_assert(!prefetch_state);
prefetch_state = DIR_INODES;
if (!load_done) {
void load(MDSInternalContextBase *c);
bool is_loaded() const { return load_done; }
void wait_for_load(MDSInternalContextBase *c) {
- assert(!load_done);
+ ceph_assert(!load_done);
waiting_for_load.push_back(c);
}
bool prefetch_inodes();
bool is_prefetched() const { return prefetch_state == DONE; }
void wait_for_prefetch(MDSInternalContextBase *c) {
- assert(!is_prefetched());
+ ceph_assert(!is_prefetched());
waiting_for_prefetch.push_back(c);
}
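wait_for_load()/wait_for_prefetch() follow the usual MDS waiter-list pattern: a context may only be queued while the operation is still pending, and all queued contexts are completed in one batch when it finishes. Roughly, with a plain std::function standing in for MDSInternalContextBase:

#include <cassert>
#include <functional>
#include <vector>

bool load_done = false;
std::vector<std::function<void(int)>> waiting_for_load;

void wait_for_load(std::function<void(int)> c) {
  assert(!load_done);              // too late to queue once loaded
  waiting_for_load.push_back(std::move(c));
}

void _load_finish() {
  load_done = true;
  for (auto &c : waiting_for_load)
    c(0);                          // complete every queued waiter
  waiting_for_load.clear();
}

int main() {
  int fired = 0;
  wait_for_load([&](int r) { assert(r == 0); fired++; });
  _load_finish();
  assert(fired == 1);
}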
delayed_flush(nullptr),
recovered(false)
{
- assert(cct != nullptr);
- assert(on_error != nullptr);
- assert(objecter != nullptr);
+ ceph_assert(cct != nullptr);
+ ceph_assert(on_error != nullptr);
+ ceph_assert(objecter != nullptr);
journaler.set_write_error_handler(on_error);
}
{
Mutex::Locker l(lock);
- assert(logger != nullptr);
+ ceph_assert(logger != nullptr);
finisher.start();
timer.init();
void PurgeQueue::_recover()
{
- assert(lock.is_locked_by_me());
+ ceph_assert(lock.is_locked_by_me());
// Journaler::is_readable() adjusts write_pos if a partial entry is encountered
while (1) {
bufferlist bl;
bool readable = journaler.try_read_entry(bl);
- assert(readable); // we checked earlier
+ ceph_assert(readable); // we checked earlier
}
}
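_recover() asserts lock.is_locked_by_me() instead of taking the lock itself: internal helpers document, and enforce, that the caller already holds the queue mutex. A small sketch of a mutex wrapper offering that debug query (a hypothetical stand-in for the Mutex used here):

#include <cassert>
#include <mutex>
#include <thread>

class DebugMutex {
  std::mutex m;
  std::thread::id owner;
public:
  void lock()   { m.lock(); owner = std::this_thread::get_id(); }
  void unlock() { owner = {}; m.unlock(); }
  bool is_locked_by_me() const {
    return owner == std::this_thread::get_id();
  }
};

DebugMutex queue_lock;

void _recover() {
  assert(queue_lock.is_locked_by_me());  // precondition, not a lock attempt
  // ... consume journal entries under the lock ...
}

int main() {
  queue_lock.lock();
  _recover();
  queue_lock.unlock();
}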
Mutex::Locker l(lock);
// Callers should have waited for open() before using us
- assert(!journaler.is_readonly());
+ ceph_assert(!journaler.is_readonly());
bufferlist bl;
bool PurgeQueue::_consume()
{
- assert(lock.is_locked_by_me());
+ ceph_assert(lock.is_locked_by_me());
bool could_consume = false;
while (can_consume()) {
// The journaler is readable: consume an entry
bufferlist bl;
bool readable = journaler.try_read_entry(bl);
- assert(readable); // we checked earlier
+ ceph_assert(readable); // we checked earlier
dout(20) << " decoding entry" << dendl;
PurgeItem item;
const PurgeItem &item,
uint64_t expire_to)
{
- assert(lock.is_locked_by_me());
+ ceph_assert(lock.is_locked_by_me());
in_flight[expire_to] = item;
logger->set(l_pq_executing, in_flight.size());
logger->set(l_pq_executing, in_flight.size());
return;
}
- assert(gather.has_subs());
+ ceph_assert(gather.has_subs());
gather.set_finisher(new C_OnFinisher(
new FunctionContext([this, expire_to](int r){
void PurgeQueue::_execute_item_complete(
uint64_t expire_to)
{
- assert(lock.is_locked_by_me());
+ ceph_assert(lock.is_locked_by_me());
dout(10) << "complete at 0x" << std::hex << expire_to << std::dec << dendl;
- assert(in_flight.count(expire_to) == 1);
+ ceph_assert(in_flight.count(expire_to) == 1);
auto iter = in_flight.find(expire_to);
- assert(iter != in_flight.end());
+ ceph_assert(iter != in_flight.end());
if (iter == in_flight.begin()) {
uint64_t pos = expire_to;
if (!pending_expire.empty()) {
size_t *in_flight_count
)
{
- assert(progress != nullptr);
- assert(progress_total != nullptr);
- assert(in_flight_count != nullptr);
+ ceph_assert(progress != nullptr);
+ ceph_assert(progress_total != nullptr);
+ ceph_assert(in_flight_count != nullptr);
const bool done = in_flight.empty() && (
journaler.get_read_pos() == journaler.get_write_pos());
C_MDC_Recover(RecoveryQueue *rq_, CInode *i) :
MDSIOContextBase(false), rq(rq_), in(i), size(0) {
- assert(rq != NULL);
+ ceph_assert(rq != NULL);
}
void print(ostream& out) const override {
out << "file_recover(" << in->ino() << ")";
if (!in->item_recover_queue_front.is_on_list()) {
dout(20) << *in << dendl;
- assert(in->item_recover_queue.is_on_list());
+ ceph_assert(in->item_recover_queue.is_on_list());
in->item_recover_queue.remove_myself();
file_recover_queue_size--;
void RecoveryQueue::enqueue(CInode *in)
{
dout(15) << "RecoveryQueue::enqueue " << *in << dendl;
- assert(logger); // Caller should have done set_logger before using me
- assert(in->is_auth());
+ ceph_assert(logger); // Caller should have done set_logger before using me
+ ceph_assert(in->is_auth());
in->state_clear(CInode::STATE_NEEDSRECOVER);
if (!in->state_test(CInode::STATE_RECOVERING)) {
}
auto p = file_recovering.find(in);
- assert(p != file_recovering.end());
+ ceph_assert(p != file_recovering.end());
bool restart = p->second;
file_recovering.erase(p);
ScatterLock(MDSCacheObject *o, LockType *lt) :
SimpleLock(o, lt) {}
~ScatterLock() override {
- assert(!_more);
+ ceph_assert(!_more);
}
bool is_scatterlock() const override {
void set_xlock_snap_sync(MDSInternalContextBase *c)
{
- assert(get_type() == CEPH_LOCK_IFILE);
- assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE);
+ ceph_assert(get_type() == CEPH_LOCK_IFILE);
+ ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE);
state = LOCK_XLOCKSNAP;
add_waiter(WAIT_STABLE, c);
}
: tag(tag_), force(force_), recursive(recursive_), repair(repair_),
formatter(f_), origin(nullptr)
{
- assert(formatter != nullptr);
+ ceph_assert(formatter != nullptr);
}
// Set after construction because it won't be known until we've
{
dout(20) << "popping " << *in
<< " off of ScrubStack" << dendl;
- assert(in->item_scrub.is_on_list());
+ ceph_assert(in->item_scrub.is_on_list());
in->put(CInode::PIN_SCRUBQUEUE);
in->item_scrub.remove_myself();
stack_size--;
{
dout(10) << __func__ << " with {" << *in << "}"
<< ", on_finish=" << on_finish << ", top=" << top << dendl;
- assert(mdcache->mds->mds_lock.is_locked_by_me());
+ ceph_assert(mdcache->mds->mds_lock.is_locked_by_me());
in->scrub_initialize(parent, header, on_finish);
if (top)
push_inode(in);
bool all_frags_done = true;
ScrubHeaderRef header = in->get_scrub_header();
- assert(header != nullptr);
+ ceph_assert(header != nullptr);
if (header->get_recursive()) {
list<frag_t> scrubbing_frags;
dout(25) << "returning dir " << *new_dir << dendl;
return true;
}
- assert(r == ENOENT);
+ ceph_assert(r == ENOENT);
// there are no dirfrags left
*new_dir = NULL;
return true;
bool *added_children, bool *is_terminal,
bool *done)
{
- assert(dir != NULL);
+ ceph_assert(dir != NULL);
dout(20) << __func__ << " on " << *dir << dendl;
*added_children = false;
// scrub_dentry_next is defined to give only EAGAIN, ENOENT, or 0 -- we should
// never get random IO errors here.
- assert(r == 0);
+ ceph_assert(r == 0);
_enqueue_inode(dn->get_projected_inode(), dn, header, NULL, true);
scrub_kick(mdc, this),
mdcache(mdc) {}
~ScrubStack() {
- assert(inode_stack.empty());
- assert(!scrubs_in_progress);
+ ceph_assert(inode_stack.empty());
+ ceph_assert(!scrubs_in_progress);
}
/**
* Put an inode on top of the scrub stack, so it is the highest priority.
public:
explicit ServerContext(Server *s) : server(s) {
- assert(server != NULL);
+ ceph_assert(server != NULL);
}
};
}
public:
explicit ServerLogContext(Server *s) : server(s) {
- assert(server != NULL);
+ ceph_assert(server != NULL);
}
explicit ServerLogContext(Server *s, MDRequestRef& r) : server(s), mdr(r) {
- assert(server != NULL);
+ ceph_assert(server != NULL);
}
};
return;
default:
derr << "server unknown message " << m->get_type() << dendl;
- assert(0 == "server unknown message");
+ ceph_assert(0 == "server unknown message");
}
}
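ceph_assert(0 == "server unknown message") is a deliberate idiom: comparing the null pointer constant 0 against a string literal is always false, so the assertion always fires, and the literal text is carried into the stringified failure message. For instance:

#include <cassert>

void dispatch(int type) {
  switch (type) {
  case 1:
    break;  // handled
  default:
    // always-false comparison; the message text appears in the
    // assertion output: 0 == "unknown message type"
    assert(0 == "unknown message type");
  }
}

int main() { dispatch(1); }  // dispatch(2) would abort with the message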
C_MDS_session_finish(Server *srv, Session *se, uint64_t sseq, bool s, version_t mv, interval_set<inodeno_t>& i, version_t iv, Context *fin_ = NULL) :
ServerLogContext(srv), session(se), state_seq(sseq), open(s), cmapv(mv), inos(i), inotablev(iv), fin(fin_) { }
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
server->_session_logged(session, state_seq, open, cmapv, inos, inotablev);
if (fin) {
fin->complete(r);
Session *session = mds->get_session(m);
dout(3) << "handle_client_session " << *m << " from " << m->get_source() << dendl;
- assert(m->get_source().is_client()); // should _not_ come from an mds!
+ ceph_assert(m->get_source().is_client()); // should _not_ come from an mds!
if (!session) {
dout(0) << " ignoring sessionless msg " << *m << dendl;
dout(10) << "currently open|opening|stale|killing, dropping this req" << dendl;
return;
}
- assert(session->is_closed() ||
+ ceph_assert(session->is_closed() ||
session->is_closing());
if (mds->is_stopping()) {
dout(10) << "ignoring close req on importing session" << dendl;
return;
}
- assert(session->is_open() ||
+ ceph_assert(session->is_open() ||
session->is_stale() ||
session->is_opening());
if (m->get_seq() < session->get_push_seq()) {
{
for (set<client_t>::iterator p = client_set.begin(); p != client_set.end(); ++p) {
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->v));
- assert(session);
+ ceph_assert(session);
if (!session->is_open() ||
!session->get_connection() ||
!session->get_connection()->has_feature(CEPH_FEATURE_EXPORT_PEER))
<< " " << pv << dendl;
if (piv) {
- assert(session->is_closing() || session->is_killing() ||
+ ceph_assert(session->is_closing() || session->is_killing() ||
session->is_opening()); // re-open closing session
session->info.prealloc_inos.subtract(inos);
mds->inotable->apply_release_ids(inos);
- assert(mds->inotable->get_version() == piv);
+ ceph_assert(mds->inotable->get_version() == piv);
}
mds->sessionmap.mark_dirty(session);
<< ", noop" << dendl;
// close must have been canceled (by an import?), or any number of other things..
} else if (open) {
- assert(session->is_opening());
+ ceph_assert(session->is_opening());
mds->sessionmap.set_state(session, Session::STATE_OPEN);
mds->sessionmap.touch_session(session);
- assert(session->get_connection());
+ ceph_assert(session->get_connection());
auto reply = MClientSession::create(CEPH_SESSION_OPEN);
if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
reply->supported_features = supported_features;
if (q != cmm.end())
session->info.client_metadata.merge(q->second);
} else {
- assert(session->is_open() ||
+ ceph_assert(session->is_open() ||
session->is_opening() ||
session->is_stale());
sseq = 0;
}
} else {
dout(10) << "force_open_sessions skipping already-open " << session->info.inst << dendl;
- assert(session->is_open() || session->is_stale());
+ ceph_assert(session->is_open() || session->is_stale());
}
if (dec_import) {
return;
}
const auto &stale_sessions = sessions_p->second;
- assert(stale_sessions != nullptr);
+ ceph_assert(stale_sessions != nullptr);
for (const auto &session: *stale_sessions) {
auto last_cap_renew_span = std::chrono::duration<double>(now-session->last_cap_renew).count();
dout(10) << "stopping at importing session " << session->info.inst << dendl;
break;
}
- assert(session->is_stale());
+ ceph_assert(session->is_stale());
if (last_cap_renew_span < cutoff) {
dout(20) << "oldest stale session is " << session->info.inst << " and recently renewed caps " << last_cap_renew_span << "s ago" << dendl;
break;
*/
void Server::kill_session(Session *session, Context *on_safe)
{
- assert(mds->mds_lock.is_locked_by_me());
+ ceph_assert(mds->mds_lock.is_locked_by_me());
if ((session->is_opening() ||
session->is_open() ||
journal_close_session(session, Session::STATE_KILLING, on_safe);
} else {
dout(10) << "kill_session importing or already closing/killing " << session << dendl;
- assert(session->is_closing() ||
+ ceph_assert(session->is_closing() ||
session->is_closed() ||
session->is_killing() ||
session->is_importing());
dout(7) << "handle_client_reconnect " << m->get_source() << dendl;
client_t from = m->get_source().num();
Session *session = mds->get_session(m);
- assert(session);
+ ceph_assert(session);
if (!mds->is_reconnect() && mds->get_want_state() == CEPH_MDS_STATE_RECONNECT) {
dout(10) << " we're almost in reconnect state (mdsmap delivery race?); waiting" << dendl;
void Server::reconnect_gather_finish()
{
dout(7) << "reconnect_gather_finish. failed on " << failed_reconnects << " clients" << dendl;
- assert(reconnect_done);
+ ceph_assert(reconnect_done);
if (!mds->snapclient->is_synced()) {
// make sure snaptable cache is populated. snaprealms will be
p != client_reconnect_gather.end();
++p) {
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->v));
- assert(session);
+ ceph_assert(session);
dout(1) << "reconnect gave up on " << session->info.inst << dendl;
mds->clog->warn() << "evicting unresponsive client " << *session
void Server::journal_and_reply(MDRequestRef& mdr, CInode *in, CDentry *dn, LogEvent *le, MDSLogContextBase *fin)
{
dout(10) << "journal_and_reply tracei " << in << " tracedn " << dn << dendl;
- assert(!mdr->has_completed);
+ ceph_assert(!mdr->has_completed);
// note trace items for eventual reply.
mdr->tracei = in;
} else if (mdr->internal_op > -1) {
dout(10) << "respond_to_request on internal request " << mdr << dendl;
if (!mdr->internal_op_finish)
- assert(0 == "trying to respond to internal op without finisher");
+ ceph_assert(0 == "trying to respond to internal op without finisher");
mdr->internal_op_finish->complete(r);
mdcache->request_finish(mdr);
}
*/
void Server::reply_client_request(MDRequestRef& mdr, const MClientReply::ref &reply)
{
- assert(mdr.get());
+ ceph_assert(mdr.get());
const MClientRequest::const_ref &req = mdr->client_request;
dout(7) << "reply_client_request " << reply->get_result()
// completed request?
bool has_completed = false;
if (req->is_replay() || req->get_retry_attempt()) {
- assert(session);
+ ceph_assert(session);
inodeno_t created;
if (session->have_completed_request(req->get_reqid().tid, &created)) {
has_completed = true;
// trim completed_request list
if (req->get_oldest_client_tid() > 0) {
dout(15) << " oldest_client_tid=" << req->get_oldest_client_tid() << dendl;
- assert(session);
+ ceph_assert(session);
if (session->trim_completed_requests(req->get_oldest_client_tid())) {
// Session's 'completed_requests' was dirtied; mark it to be
// potentially flushed at segment expiry.
void Server::dispatch_client_request(MDRequestRef& mdr)
{
// we shouldn't be waiting on anyone.
- assert(!mdr->has_more() || mdr->more()->waiting_on_slave.empty());
+ ceph_assert(!mdr->has_more() || mdr->more()->waiting_on_slave.empty());
if (mdr->killed) {
dout(10) << "request " << *mdr << " was killed" << dendl;
CDentry *straydn = NULL;
if (m->straybl.length() > 0) {
straydn = mdcache->add_replica_stray(m->straybl, from);
- assert(straydn);
+ ceph_assert(straydn);
m->straybl.clear();
}
mdr->aborted = true;
if (mdr->slave_request) {
// only abort on-going xlock, wrlock and auth pin
- assert(!mdr->slave_did_prepare());
+ ceph_assert(!mdr->slave_did_prepare());
} else {
mdcache->request_finish(mdr);
}
mdr = mdcache->request_start_slave(m->get_reqid(), m->get_attempt(), m);
mdr->set_op_stamp(m->op_stamp);
}
- assert(mdr->slave_request == 0); // only one at a time, please!
+ ceph_assert(mdr->slave_request == 0); // only one at a time, please!
if (straydn) {
mdr->pin(straydn);
mdr->finish_locking(lock);
lock->get_xlock(mdr, mdr->get_client());
- assert(mdr->more()->waiting_on_slave.count(from));
+ ceph_assert(mdr->more()->waiting_on_slave.count(from));
mdr->more()->waiting_on_slave.erase(from);
- assert(mdr->more()->waiting_on_slave.empty());
+ ceph_assert(mdr->more()->waiting_on_slave.empty());
mdcache->dispatch_request(mdr);
}
break;
mdr->locks.insert(lock);
mdr->finish_locking(lock);
- assert(mdr->more()->waiting_on_slave.count(from));
+ ceph_assert(mdr->more()->waiting_on_slave.count(from));
mdr->more()->waiting_on_slave.erase(from);
- assert(mdr->more()->waiting_on_slave.empty());
+ ceph_assert(mdr->more()->waiting_on_slave.empty());
mdcache->dispatch_request(mdr);
}
break;
{
SimpleLock *lock = mds->locker->get_lock(mdr->slave_request->get_lock_type(),
mdr->slave_request->get_object_info());
- assert(lock);
+ ceph_assert(lock);
bool need_issue = false;
switch (op) {
case MMDSSlaveRequest::OP_UNXLOCK:
set<MDSCacheObject*> pinned;
for (const auto &oi : ack->get_authpins()) {
MDSCacheObject *object = mdcache->get_object(oi);
- assert(object); // we pinned it
+ ceph_assert(object); // we pinned it
dout(10) << " remote has pinned " << *object << dendl;
if (!mdr->is_auth_pinned(object))
mdr->remote_auth_pins[object] = from;
if (mdr->more()->is_remote_frozen_authpin &&
ack->get_authpin_freeze() == MDSCacheObjectInfo()) {
auto p = mdr->remote_auth_pins.find(mdr->more()->rename_inode);
- assert(p != mdr->remote_auth_pins.end());
+ ceph_assert(p != mdr->remote_auth_pins.end());
if (p->second == from) {
mdr->more()->is_remote_frozen_authpin = false;
}
mdr->more()->slaves.insert(from);
// clear from waiting list
- assert(mdr->more()->waiting_on_slave.count(from));
+ ceph_assert(mdr->more()->waiting_on_slave.count(from));
mdr->more()->waiting_on_slave.erase(from);
// go again?
CDentry* Server::prepare_null_dentry(MDRequestRef& mdr, CDir *dir, std::string_view dname, bool okexist)
{
dout(10) << "prepare_null_dentry " << dname << " in " << *dir << dendl;
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
client_t client = mdr->get_client();
if (straydn->get_name() == straydname)
return straydn;
- assert(!mdr->done_locking);
+ ceph_assert(!mdr->done_locking);
mdr->unpin(straydn);
}
mdr->session->get_num_projected_prealloc_inos() < g_conf()->mds_client_prealloc_inos / 2) {
int need = g_conf()->mds_client_prealloc_inos - mdr->session->get_num_projected_prealloc_inos();
mds->inotable->project_alloc_ids(mdr->prealloc_inos, need);
- assert(mdr->prealloc_inos.size()); // or else fix projected increment semantics
+ ceph_assert(mdr->prealloc_inos.size()); // or else fix projected increment semantics
mdr->session->pending_prealloc_inos.insert(mdr->prealloc_inos);
mds->sessionmap.mark_projected(mdr->session);
dout(10) << "prepare_new_inode prealloc " << mdr->prealloc_inos << dendl;
mds->inotable->apply_alloc_id(mdr->alloc_ino);
}
if (mdr->prealloc_inos.size()) {
- assert(session);
+ ceph_assert(session);
session->pending_prealloc_inos.subtract(mdr->prealloc_inos);
session->info.prealloc_inos.insert(mdr->prealloc_inos);
mds->sessionmap.mark_dirty(session);
mds->inotable->apply_alloc_ids(mdr->prealloc_inos);
}
if (mdr->used_prealloc_ino) {
- assert(session);
+ ceph_assert(session);
session->info.used_inos.erase(mdr->used_prealloc_ino);
mds->sessionmap.mark_dirty(session);
}
// not open and inode frozen?
if (!dir && diri->is_frozen()) {
dout(10) << "try_open_auth_dirfrag: dir inode is frozen, waiting " << *diri << dendl;
- assert(diri->get_parent_dir());
+ ceph_assert(diri->get_parent_dir());
diri->add_waiter(CInode::WAIT_UNFREEZE, new C_MDS_RetryRequest(mdcache, mdr));
return 0;
}
return;
if (cur->is_frozen() || cur->state_test(CInode::STATE_EXPORTINGCAPS)) {
- assert(!need_auth);
+ ceph_assert(!need_auth);
mdr->done_locking = false;
CInode *cur = rdlock_path_pin_ref(mdr, 0, rdlocks, true);
if (!cur)
// O_TRUNC
if ((flags & CEPH_O_TRUNC) && !mdr->has_completed) {
- assert(cur->is_auth());
+ ceph_assert(cur->is_auth());
xlocks.insert(&cur->filelock);
if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
CDentry *dn = 0;
if (req->get_dentry_wanted()) {
- assert(mdr->dn[0].size());
+ ceph_assert(mdr->dn[0].size());
dn = mdr->dn[0].back();
}
C_MDS_openc_finish(Server *s, MDRequestRef& r, CDentry *d, CInode *ni) :
ServerLogContext(s, r), dn(d), newi(ni) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
dn->pop_projected_linkage();
server->respond_to_request(mdr, 0);
- assert(g_conf()->mds_kill_openc_at != 1);
+ ceph_assert(g_conf()->mds_kill_openc_at != 1);
}
};
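C_MDS_openc_finish follows the journaling pattern used throughout Server: the change is projected in memory, written to the MDS log, and only the log-commit callback's finish(r) applies it, asserting the journal write succeeded. Schematically, with simplified hypothetical types:

#include <cassert>

struct Context {
  virtual ~Context() = default;
  virtual void finish(int r) = 0;
  void complete(int r) { finish(r); delete this; }
};

struct State { bool projected = false, applied = false; };

struct C_LogCommit : Context {
  State *s;
  explicit C_LogCommit(State *s) : s(s) {}
  void finish(int r) override {
    assert(r == 0);        // journal writes are not allowed to fail here
    s->applied = true;     // apply the projected update
  }
};

int main() {
  State s;
  s.projected = true;                 // project the change in memory
  Context *onsafe = new C_LogCommit(&s);
  onsafe->complete(0);                // invoked once the log entry is safe
  assert(s.applied);
}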
if (!dnl->is_null()) {
// it existed.
- assert(req->head.args.open.flags & CEPH_O_EXCL);
+ ceph_assert(req->head.args.open.flags & CEPH_O_EXCL);
dout(10) << "O_EXCL, target exists, failing with -EEXIST" << dendl;
mdr->tracei = dnl->get_inode();
mdr->tracedn = dn;
// create inode.
CInode *in = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino),
req->head.args.open.mode | S_IFREG, &layout);
- assert(in);
+ ceph_assert(in);
// it's a file.
dn->push_projected_linkage(in);
SnapRealm *realm = diri->find_snaprealm();
snapid_t follows = mdcache->get_global_snaprealm()->get_newest_seq();
- assert(follows >= realm->get_newest_seq());
+ ceph_assert(follows >= realm->get_newest_seq());
if (cmode & CEPH_FILE_MODE_WR) {
in->inode.client_ranges[client].range.first = 0;
}
in->inode.rstat.rfiles = 1;
- assert(dn->first == follows+1);
+ ceph_assert(dn->first == follows+1);
in->first = dn->first;
// prepare finisher
// ok!
dout(10) << "handle_client_readdir on " << *dir << dendl;
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
if (!dir->is_complete()) {
if (dir->is_frozen()) {
return;
}
}
- assert(in);
+ ceph_assert(in);
if ((int)(dnbl.length() + dn->get_name().length() + sizeof(__u32) + sizeof(LeaseStat)) > bytes_left) {
dout(10) << " ran out of room, stopping at " << dnbl.length() << " < " << bytes_left << dendl;
dnbl.swap(keep);
break;
}
- assert(r >= 0);
+ ceph_assert(r >= 0);
numfiles++;
// touch dn
ServerLogContext(s, r), in(i),
truncating_smaller(sm), changed_ranges(cr), new_realm(nr) { }
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
// apply
in->pop_and_dirty_projected_inode(mdr->ls);
respond_to_request(mdr, -EWOULDBLOCK);
} else {
dout(10) << " added to waiting list" << dendl;
- assert(lock_state->is_waiting(set_lock));
+ ceph_assert(lock_state->is_waiting(set_lock));
mdr->more()->flock_was_waiting = true;
mds->locker->drop_locks(mdr.get());
mdr->drop_local_auth_pins();
{
CInode *in = mdr->in[0];
client_t client = mdr->get_client();
- assert(in);
+ ceph_assert(in);
dout(10) << "do_open_truncate " << *in << dendl;
CDentry *dn = 0;
if (mdr->client_request->get_dentry_wanted()) {
- assert(mdr->dn[0].size());
+ ceph_assert(mdr->dn[0].size());
dn = mdr->dn[0].back();
}
epoch = osdmap.get_epoch();
});
- assert(epoch >= req_epoch); // otherwise wait_for_map() told a lie
+ ceph_assert(epoch >= req_epoch); // otherwise wait_for_map() told a lie
} else if (req_epoch == 0 && !mdr->waited_for_osdmap) {
C_MDS_inode_xattr_update_finish(Server *s, MDRequestRef& r, CInode *i) :
ServerLogContext(s, r), in(i) { }
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
// apply
in->pop_and_dirty_projected_inode(mdr->ls);
C_MDS_mknod_finish(Server *s, MDRequestRef& r, CDentry *d, CInode *ni) :
ServerLogContext(s, r), dn(d), newi(ni) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
// link the inode
dn->pop_projected_linkage();
// mkdir?
if (newi->inode.is_dir()) {
CDir *dir = newi->get_dirfrag(frag_t());
- assert(dir);
+ ceph_assert(dir);
dir->fnode.version--;
dir->mark_dirty(dir->fnode.version + 1, mdr->ls);
dir->mark_new(mdr->ls);
layout = mdcache->default_file_layout;
CInode *newi = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino), mode, &layout);
- assert(newi);
+ ceph_assert(newi);
dn->push_projected_linkage(newi);
snapid_t follows = mdcache->get_global_snaprealm()->get_newest_seq();
SnapRealm *realm = dn->get_dir()->inode->find_snaprealm();
- assert(follows >= realm->get_newest_seq());
+ ceph_assert(follows >= realm->get_newest_seq());
// if the client created a _regular_ file via MKNOD, it's highly likely they'll
// want to write to it (e.g., if they are reexporting NFS)
}
}
- assert(dn->first == follows + 1);
+ ceph_assert(dn->first == follows + 1);
newi->first = dn->first;
dout(10) << "mknod mode " << newi->inode.mode << " rdev " << newi->inode.rdev << dendl;
mode &= ~S_IFMT;
mode |= S_IFDIR;
CInode *newi = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino), mode);
- assert(newi);
+ ceph_assert(newi);
// it's a directory.
dn->push_projected_linkage(newi);
snapid_t follows = mdcache->get_global_snaprealm()->get_newest_seq();
SnapRealm *realm = dn->get_dir()->inode->find_snaprealm();
- assert(follows >= realm->get_newest_seq());
+ ceph_assert(follows >= realm->get_newest_seq());
dout(12) << " follows " << follows << dendl;
- assert(dn->first == follows + 1);
+ ceph_assert(dn->first == follows + 1);
newi->first = dn->first;
// ...and that new dir is empty.
unsigned mode = S_IFLNK | 0777;
CInode *newi = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino), mode);
- assert(newi);
+ ceph_assert(newi);
// it's a symlink
dn->push_projected_linkage(newi);
}
// go!
- assert(g_conf()->mds_kill_link_at != 1);
+ ceph_assert(g_conf()->mds_kill_link_at != 1);
// local or remote?
if (targeti->is_auth())
ServerLogContext(s, r), dn(d), targeti(ti),
dnpv(dnpv_), tipv(tipv_), adjust_realm(ar) { }
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
server->_link_local_finish(mdr, dn, targeti, dnpv, tipv, adjust_realm);
}
};
ServerLogContext(s, r), inc(i), dn(d), targeti(ti),
dpv(d->get_projected_version()) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
server->_link_remote_finish(mdr, inc, dn, targeti, dpv);
}
};
encode(*desti_srnode, req->desti_snapbl);
mds->send_message_mds(req, linkauth);
- assert(mdr->more()->waiting_on_slave.count(linkauth) == 0);
+ ceph_assert(mdr->more()->waiting_on_slave.count(linkauth) == 0);
mdr->more()->waiting_on_slave.insert(linkauth);
return;
}
dout(10) << " targeti auth has prepared nlink++/--" << dendl;
- assert(g_conf()->mds_kill_link_at != 2);
+ ceph_assert(g_conf()->mds_kill_link_at != 2);
if (auto& desti_srnode = mdr->more()->desti_srnode) {
delete desti_srnode;
<< (inc ? "link ":"unlink ")
<< *dn << " to " << *targeti << dendl;
- assert(g_conf()->mds_kill_link_at != 3);
+ ceph_assert(g_conf()->mds_kill_link_at != 3);
if (!mdr->more()->witnessed.empty())
mdcache->logged_master_update(mdr->reqid);
C_MDS_SlaveLinkPrep(Server *s, MDRequestRef& r, CInode *t, bool ar) :
ServerLogContext(s, r), targeti(t), adjust_realm(ar) { }
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
server->_logged_slave_link(mdr, targeti, adjust_realm);
}
};
<< " on " << mdr->slave_request->get_object_info()
<< dendl;
- assert(g_conf()->mds_kill_link_at != 4);
+ ceph_assert(g_conf()->mds_kill_link_at != 4);
CInode *targeti = mdcache->get_inode(mdr->slave_request->get_object_info().ino);
- assert(targeti);
+ ceph_assert(targeti);
dout(10) << "targeti " << *targeti << dendl;
CDentry *dn = targeti->get_parent_dn();
CDentry::linkage_t *dnl = dn->get_linkage();
- assert(dnl->is_primary());
+ ceph_assert(dnl->is_primary());
mdr->set_op_stamp(mdr->slave_request->op_stamp);
mdr->auth_pin(targeti);
//ceph_abort(); // test hack: make sure master can handle a slave that fails to prepare...
- assert(g_conf()->mds_kill_link_at != 5);
+ ceph_assert(g_conf()->mds_kill_link_at != 5);
// journal it
mdr->ls = mdlog->get_current_segment();
inc = false;
pi.inode.nlink--;
if (targeti->is_projected_snaprealm_global()) {
- assert(mdr->slave_request->desti_snapbl.length());
+ ceph_assert(mdr->slave_request->desti_snapbl.length());
auto p = mdr->slave_request->desti_snapbl.cbegin();
sr_t *newsnap = targeti->project_snaprealm();
decode(*newsnap, p);
if (pi.inode.nlink == 0)
- assert(!newsnap->is_parent_global());
+ ceph_assert(!newsnap->is_parent_global());
realm_projected = true;
} else {
- assert(mdr->slave_request->desti_snapbl.length() == 0);
+ ceph_assert(mdr->slave_request->desti_snapbl.length() == 0);
}
}
dout(10) << "_logged_slave_link " << *mdr
<< " " << *targeti << dendl;
- assert(g_conf()->mds_kill_link_at != 6);
+ ceph_assert(g_conf()->mds_kill_link_at != 6);
// update the target
targeti->pop_and_dirty_projected_inode(mdr->ls);
<< " r=" << r
<< " " << *targeti << dendl;
- assert(g_conf()->mds_kill_link_at != 7);
+ ceph_assert(g_conf()->mds_kill_link_at != 7);
if (r == 0) {
// drop our pins, etc.
{
dout(10) << "_committed_slave " << *mdr << dendl;
- assert(g_conf()->mds_kill_link_at != 8);
+ ceph_assert(g_conf()->mds_kill_link_at != 8);
auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
mds->send_message_mds(req, mdr->slave_to_mds);
<< " ino " << rollback.ino
<< dendl;
- assert(g_conf()->mds_kill_link_at != 9);
+ ceph_assert(g_conf()->mds_kill_link_at != 9);
mdcache->add_rollback(rollback.reqid, master); // need to finish this update before resolve finishes
- assert(mdr || mds->is_resolve());
+ ceph_assert(mdr || mds->is_resolve());
MutationRef mut(new MutationImpl(nullptr, utime_t(), rollback.reqid));
mut->ls = mds->mdlog->get_current_segment();
CInode *in = mdcache->get_inode(rollback.ino);
- assert(in);
+ ceph_assert(in);
dout(10) << " target is " << *in << dendl;
- assert(!in->is_projected()); // live slave request hold versionlock xlock.
+ ceph_assert(!in->is_projected()); // live slave requests hold the versionlock xlock.
auto &pi = in->project_inode();
pi.inode.version = in->pre_dirty();
{
dout(10) << "_link_rollback_finish" << dendl;
- assert(g_conf()->mds_kill_link_at != 10);
+ ceph_assert(g_conf()->mds_kill_link_at != 10);
mut->apply();
<< " " << *m << dendl;
mds_rank_t from = mds_rank_t(m->get_source().num());
- assert(g_conf()->mds_kill_link_at != 11);
+ ceph_assert(g_conf()->mds_kill_link_at != 11);
// note slave
mdr->more()->slaves.insert(from);
// witnessed!
- assert(mdr->more()->witnessed.count(from) == 0);
+ ceph_assert(mdr->more()->witnessed.count(from) == 0);
mdr->more()->witnessed.insert(from);
- assert(!m->is_not_journaled());
+ ceph_assert(!m->is_not_journaled());
mdr->more()->has_journaled_slaves = true;
// remove from waiting list
- assert(mdr->more()->waiting_on_slave.count(from));
+ ceph_assert(mdr->more()->waiting_on_slave.count(from));
mdr->more()->waiting_on_slave.erase(from);
- assert(mdr->more()->waiting_on_slave.empty());
+ ceph_assert(mdr->more()->waiting_on_slave.empty());
dispatch_client_request(mdr); // go again!
}
}
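The ack handler above maintains the master's slave bookkeeping: a rank is inserted into waiting_on_slave exactly once when the prep request is sent, and every ack must erase exactly that entry before the request can be dispatched again. In miniature (mds_rank_t simplified to int):

#include <cassert>
#include <set>

using mds_rank_t = int;

std::set<mds_rank_t> waiting_on_slave;

void send_slave_request(mds_rank_t who) {
  assert(waiting_on_slave.count(who) == 0);  // never double-send
  waiting_on_slave.insert(who);
  // ... send the prep request to 'who' ...
}

bool handle_slave_ack(mds_rank_t from) {
  assert(waiting_on_slave.count(from));      // ack must be expected
  waiting_on_slave.erase(from);
  return waiting_on_slave.empty();           // true: dispatch again
}

int main() {
  send_slave_request(3);
  assert(handle_slave_ack(3));               // all witnesses answered
}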
CDentry *dn = trace.back();
- assert(dn);
+ ceph_assert(dn);
if (!dn->is_auth()) {
mdcache->request_forward(mdr, dn->authority().first);
return;
CInode *diri = dn->get_dir()->get_inode();
CDentry::linkage_t *dnl = dn->get_linkage(client, mdr);
- assert(!dnl->is_null());
+ ceph_assert(!dnl->is_null());
if (rmdir) {
dout(7) << "handle_client_rmdir on " << *dn << dendl;
ServerLogContext(s, r), dn(d), straydn(sd),
dnpv(d->get_projected_version()) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
server->_unlink_local_finish(mdr, dn, straydn, dnpv);
}
};
}
if (straydn) {
- assert(dnl->is_primary());
+ ceph_assert(dnl->is_primary());
straydn->push_projected_linkage(in);
}
dn->push_projected_linkage();
if (straydn) {
- assert(in->first <= straydn->first);
+ ceph_assert(in->first <= straydn->first);
in->first = straydn->first;
}
if (in->is_dir()) {
- assert(straydn);
+ ceph_assert(straydn);
mdcache->project_subtree_rename(in, dn->get_dir(), straydn->get_dir());
in->maybe_export_pin(true);
req->op_stamp = mdr->get_op_stamp();
mds->send_message_mds(req, who);
- assert(mdr->more()->waiting_on_slave.count(who) == 0);
+ ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0);
mdr->more()->waiting_on_slave.insert(who);
return true;
}
mdr->slave_to_mds);
return;
}
- assert(r == 0);
+ ceph_assert(r == 0);
CDentry *dn = trace.back();
dout(10) << " dn " << *dn << dendl;
mdr->pin(dn);
- assert(mdr->straydn);
+ ceph_assert(mdr->straydn);
CDentry *straydn = mdr->straydn;
dout(10) << " straydn " << *straydn << dendl;
straydn->push_projected_linkage(in);
dn->push_projected_linkage();
- assert(straydn->first >= in->first);
+ ceph_assert(straydn->first >= in->first);
in->first = straydn->first;
if (!in->has_subtree_root_dirfrag(mds->get_nodeid())) {
if (mdr->slave_request->desti_snapbl.length()) {
new_realm = !in->snaprealm;
in->decode_snap_blob(mdr->slave_request->desti_snapbl);
- assert(in->snaprealm);
- assert(in->snaprealm->have_past_parents_open());
+ ceph_assert(in->snaprealm);
+ ceph_assert(in->snaprealm->have_past_parents_open());
} else {
new_realm = false;
}
mdr->more()->has_journaled_slaves = true;
// remove from waiting list
- assert(mdr->more()->waiting_on_slave.count(from));
+ ceph_assert(mdr->more()->waiting_on_slave.count(from));
mdr->more()->waiting_on_slave.erase(from);
if (mdr->more()->waiting_on_slave.empty())
dout(10) << "do_rmdir_rollback on " << rollback.reqid << dendl;
mdcache->add_rollback(rollback.reqid, master); // need to finish this update before resolve finishes
- assert(mdr || mds->is_resolve());
+ ceph_assert(mdr || mds->is_resolve());
CDir *dir = mdcache->get_dirfrag(rollback.src_dir);
if (!dir)
dir = mdcache->get_dirfrag(rollback.src_dir.ino, rollback.src_dname);
- assert(dir);
+ ceph_assert(dir);
CDentry *dn = dir->lookup(rollback.src_dname);
- assert(dn);
+ ceph_assert(dn);
dout(10) << " dn " << *dn << dendl;
CDir *straydir = mdcache->get_dirfrag(rollback.dest_dir);
- assert(straydir);
+ ceph_assert(straydir);
CDentry *straydn = straydir->lookup(rollback.dest_dname);
- assert(straydn);
+ ceph_assert(straydn);
dout(10) << " straydn " << *straydn << dendl;
CInode *in = straydn->get_linkage()->get_inode();
}
if (mdr && !mdr->more()->slave_update_journaled) {
- assert(!in->has_subtree_root_dirfrag(mds->get_nodeid()));
+ ceph_assert(!in->has_subtree_root_dirfrag(mds->get_nodeid()));
_rmdir_rollback_finish(mdr, rollback.reqid, dn, straydn);
return;
bool Server::_dir_is_nonempty_unlocked(MDRequestRef& mdr, CInode *in)
{
dout(10) << "dir_is_nonempty_unlocked " << *in << dendl;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
if (in->snaprealm && in->snaprealm->srnode.snaps.size())
return true; // in a snapshot!
bool Server::_dir_is_nonempty(MDRequestRef& mdr, CInode *in)
{
dout(10) << "dir_is_nonempty " << *in << dendl;
- assert(in->is_auth());
- assert(in->filelock.can_read(mdr->get_client()));
+ ceph_assert(in->is_auth());
+ ceph_assert(in->filelock.can_read(mdr->get_client()));
frag_info_t dirstat;
version_t dirstat_version = in->get_projected_inode()->dirstat.version;
ServerLogContext(s, r),
srcdn(sdn), destdn(ddn), straydn(stdn) { }
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
server->_rename_finish(mdr, srcdn, destdn, straydn);
}
};
}
CDentry::linkage_t *destdnl = destdn->get_projected_linkage();
CDir *destdir = destdn->get_dir();
- assert(destdir->is_auth());
+ ceph_assert(destdir->is_auth());
CF_MDS_MDRContextFactory cf(mdcache, mdr);
int r = mdcache->path_traverse(mdr, cf, srcpath, &srctrace, NULL, MDS_TRAVERSE_DISCOVER);
return;
}
- assert(!srctrace.empty());
+ ceph_assert(!srctrace.empty());
CDentry *srcdn = srctrace.back();
dout(10) << " srcdn " << *srcdn << dendl;
if (srcdn->last != CEPH_NOSNAP) {
return;
if (linkmerge)
- assert(srcdir->inode->is_stray() && srcdnl->is_primary() && destdnl->is_remote());
+ ceph_assert(srcdir->inode->is_stray() && srcdnl->is_primary() && destdnl->is_remote());
if ((!mdr->has_more() || mdr->more()->witnessed.empty())) {
if (!check_access(mdr, srcdir->get_inode(), MAY_WRITE))
}
*/
- assert(g_conf()->mds_kill_rename_at != 1);
+ ceph_assert(g_conf()->mds_kill_rename_at != 1);
// -- open all srcdn inode frags, if any --
// we need these open so that auth can properly delegate from inode to dirfrags
// are involved in the rename operation.
if (srcdnl->is_primary() && !mdr->more()->is_ambiguous_auth) {
dout(10) << " preparing ambiguous auth for srci" << dendl;
- assert(mdr->more()->is_remote_frozen_authpin);
- assert(mdr->more()->rename_inode == srci);
+ ceph_assert(mdr->more()->is_remote_frozen_authpin);
+ ceph_assert(mdr->more()->rename_inode == srci);
_rename_prepare_witness(mdr, last, witnesses, srctrace, desttrace, straydn);
return;
}
if (last != MDS_RANK_NONE && mdr->more()->witnessed.count(last) == 0) {
dout(10) << " preparing last witness (srcdn auth)" << dendl;
- assert(mdr->more()->waiting_on_slave.count(last) == 0);
+ ceph_assert(mdr->more()->waiting_on_slave.count(last) == 0);
_rename_prepare_witness(mdr, last, witnesses, srctrace, desttrace, straydn);
return;
}
// test hack: bail after the slave does prepare, so we can verify it's a _live_ rollback.
if (!mdr->more()->slaves.empty() && !srci->is_dir())
- assert(g_conf()->mds_kill_rename_at != 3);
+ ceph_assert(g_conf()->mds_kill_rename_at != 3);
if (!mdr->more()->slaves.empty() && srci->is_dir())
- assert(g_conf()->mds_kill_rename_at != 4);
+ ceph_assert(g_conf()->mds_kill_rename_at != 4);
// -- declare now --
mdr->set_mds_stamp(ceph_clock_now());
// test hack: test slave commit
if (!mdr->more()->slaves.empty() && !in->is_dir())
- assert(g_conf()->mds_kill_rename_at != 5);
+ ceph_assert(g_conf()->mds_kill_rename_at != 5);
if (!mdr->more()->slaves.empty() && in->is_dir())
- assert(g_conf()->mds_kill_rename_at != 6);
+ ceph_assert(g_conf()->mds_kill_rename_at != 6);
// bump popularity
mds->balancer->hit_dir(srcdn->get_dir(), META_POP_IWR);
// did we import srci? if so, explicitly ack that import before we unlock and reply.
- assert(g_conf()->mds_kill_rename_at != 7);
+ ceph_assert(g_conf()->mds_kill_rename_at != 7);
// reply
respond_to_request(mdr, 0);
req->op_stamp = mdr->get_op_stamp();
mds->send_message_mds(req, who);
- assert(mdr->more()->waiting_on_slave.count(who) == 0);
+ ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0);
mdr->more()->waiting_on_slave.insert(who);
return true;
}
// primary+remote link merge?
bool linkmerge = (srci == oldin);
if (linkmerge)
- assert(srcdnl->is_primary() && destdnl->is_remote());
+ ceph_assert(srcdnl->is_primary() && destdnl->is_remote());
bool silent = srcdn->get_dir()->inode->is_stray();
bool force_journal_dest = false;
// target inode
if (!linkmerge) {
if (destdnl->is_primary()) {
- assert(straydn); // moving to straydn.
+ ceph_assert(straydn); // moving to straydn.
// link--, and move.
if (destdn->is_auth()) {
auto &pi= oldin->project_inode(); //project_snaprealm
// guarantee stray dir is processed first during journal replay. unlink the old inode,
// then link the source inode to destdn
if (destdnl->is_primary()) {
- assert(straydn);
+ ceph_assert(straydn);
if (straydn->is_auth()) {
metablob->add_dir_context(straydn->get_dir());
metablob->add_dir(straydn->get_dir(), true);
mdcache->predirty_journal_parents(mdr, metablob, oldin, destdn->get_dir(),
(destdnl->is_primary() ? PREDIRTY_PRIMARY:0)|predirty_dir, -1);
if (destdnl->is_primary()) {
- assert(straydn);
+ ceph_assert(straydn);
mdcache->predirty_journal_parents(mdr, metablob, oldin, straydn->get_dir(),
PREDIRTY_PRIMARY|PREDIRTY_DIR, 1);
}
// target inode
if (!linkmerge) {
if (destdnl->is_primary()) {
- assert(straydn);
+ ceph_assert(straydn);
if (destdn->is_auth()) {
// project snaprealm, too
if (auto& desti_srnode = mdr->more()->desti_srnode) {
oldin->project_snaprealm(desti_srnode);
if (tpi->nlink == 0)
- assert(!desti_srnode->is_parent_global());
+ ceph_assert(!desti_srnode->is_parent_global());
desti_srnode = NULL;
}
straydn->first = mdcache->get_global_snaprealm()->get_newest_seq() + 1;
if (new_srnode) {
oldin->project_snaprealm(new_srnode);
if (tpi->nlink == 0)
- assert(!new_srnode->is_parent_global());
+ ceph_assert(!new_srnode->is_parent_global());
}
// auth for targeti
metablob->add_dir_context(oldin->get_projected_parent_dir());
// dest
if (srcdnl->is_remote()) {
- assert(!linkmerge);
+ ceph_assert(!linkmerge);
if (destdn->is_auth() && !destdnl->is_null())
mdcache->journal_cow_dentry(mdr.get(), metablob, destdn, CEPH_NOSNAP, 0, destdnl);
else
// make renamed inode first track the dn
if (srcdnl->is_primary() && destdn->is_auth()) {
- assert(srci->first <= destdn->first);
+ ceph_assert(srci->first <= destdn->first);
srci->first = destdn->first;
}
// make stray inode first track the straydn
if (straydn && straydn->is_auth()) {
- assert(oldin->first <= straydn->first);
+ ceph_assert(oldin->first <= straydn->first);
oldin->first = straydn->first;
}
if (oldin && oldin->is_dir()) {
- assert(straydn);
+ ceph_assert(straydn);
mdcache->project_subtree_rename(oldin, destdn->get_dir(), straydn->get_dir());
}
if (srci->is_dir())
// primary+remote link merge?
bool linkmerge = (srcdnl->get_inode() == oldin);
if (linkmerge)
- assert(srcdnl->is_primary() || destdnl->is_remote());
+ ceph_assert(srcdnl->is_primary() || destdnl->is_remote());
bool new_in_snaprealm = false;
bool new_oldin_snaprealm = false;
// target inode
if (!linkmerge) {
if (destdnl->is_primary()) {
- assert(straydn);
+ ceph_assert(straydn);
dout(10) << "straydn is " << *straydn << dendl;
// if there is newly created snaprealm, need to split old snaprealm's
oldin->early_pop_projected_snaprealm();
new_oldin_snaprealm = (oldin->snaprealm && !hadrealm);
} else {
- assert(mdr->slave_request);
+ ceph_assert(mdr->slave_request);
if (mdr->slave_request->desti_snapbl.length()) {
new_oldin_snaprealm = !oldin->snaprealm;
oldin->decode_snap_blob(mdr->slave_request->desti_snapbl);
- assert(oldin->snaprealm);
- assert(oldin->snaprealm->have_past_parents_open());
+ ceph_assert(oldin->snaprealm);
+ ceph_assert(oldin->snaprealm->have_past_parents_open());
}
}
straydn->pop_projected_linkage();
if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
- assert(!straydn->is_projected()); // no other projected
+ ceph_assert(!straydn->is_projected()); // no other projected
// nlink-- targeti
if (destdn->is_auth())
oldin->pop_and_dirty_projected_inode(mdr->ls);
} else if (mdr->slave_request) {
if (mdr->slave_request->desti_snapbl.length() > 0) {
- assert(oldin->snaprealm);
+ ceph_assert(oldin->snaprealm);
oldin->decode_snap_blob(mdr->slave_request->desti_snapbl);
}
} else if (auto& desti_srnode = mdr->more()->desti_srnode) {
// unlink src before we relink it at dest
CInode *in = srcdnl->get_inode();
- assert(in);
+ ceph_assert(in);
bool srcdn_was_remote = srcdnl->is_remote();
if (!srcdn_was_remote) {
in->early_pop_projected_snaprealm();
new_in_snaprealm = (in->snaprealm && !hadrealm);
} else {
- assert(mdr->slave_request);
+ ceph_assert(mdr->slave_request);
if (mdr->slave_request->srci_snapbl.length()) {
new_in_snaprealm = !in->snaprealm;
in->decode_snap_blob(mdr->slave_request->srci_snapbl);
- assert(in->snaprealm);
- assert(in->snaprealm->have_past_parents_open());
+ ceph_assert(in->snaprealm);
+ ceph_assert(in->snaprealm->have_past_parents_open());
}
}
}
// destdn
destdnl = destdn->pop_projected_linkage();
if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
- assert(!destdn->is_projected()); // no other projected
+ ceph_assert(!destdn->is_projected()); // no other projected
destdn->link_remote(destdnl, in);
if (destdn->is_auth())
in->pop_and_dirty_projected_inode(mdr->ls);
} else if (mdr->slave_request) {
if (mdr->slave_request->srci_snapbl.length() > 0) {
- assert(in->snaprealm);
+ ceph_assert(in->snaprealm);
in->decode_snap_blob(mdr->slave_request->srci_snapbl);
}
} else if (auto& srci_srnode = mdr->more()->srci_srnode) {
}
destdnl = destdn->pop_projected_linkage();
if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
- assert(!destdn->is_projected()); // no other projected
+ ceph_assert(!destdn->is_projected()); // no other projected
// srcdn inode import?
if (!srcdn->is_auth() && destdn->is_auth()) {
- assert(mdr->more()->inode_import.length() > 0);
+ ceph_assert(mdr->more()->inode_import.length() > 0);
map<client_t,Capability::Import> imported_caps;
srcdn->mark_dirty(mdr->more()->pvmap[srcdn], mdr->ls);
srcdn->pop_projected_linkage();
if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
- assert(!srcdn->is_projected()); // no other projected
+ ceph_assert(!srcdn->is_projected()); // no other projected
// apply remaining projected inodes (nested)
mdr->apply();
mdr->slave_to_mds);
return;
}
- assert(r == 0); // we shouldn't get an error here!
+ ceph_assert(r == 0); // we shouldn't get an error here!
CDentry *destdn = trace.back();
CDentry::linkage_t *destdnl = destdn->get_projected_linkage();
CInode *srci = nullptr;
r = mdcache->path_traverse(mdr, cf, srcpath, &trace, &srci, MDS_TRAVERSE_DISCOVERXLOCK);
if (r > 0) return;
- assert(r == 0);
+ ceph_assert(r == 0);
// srcpath must not point to a null dentry
- assert(srci != nullptr);
+ ceph_assert(srci != nullptr);
CDentry *srcdn = trace.back();
CDentry::linkage_t *srcdnl = srcdn->get_projected_linkage();
// stray?
bool linkmerge = srcdnl->get_inode() == destdnl->get_inode();
if (linkmerge)
- assert(srcdnl->is_primary() && destdnl->is_remote());
+ ceph_assert(srcdnl->is_primary() && destdnl->is_remote());
CDentry *straydn = mdr->straydn;
if (destdnl->is_primary() && !linkmerge)
- assert(straydn);
+ ceph_assert(straydn);
mdr->set_op_stamp(mdr->slave_request->op_stamp);
mdr->more()->srcdn_auth_mds = srcdn->authority().first;
}
if (reply_witness) {
- assert(!srcdnrep.empty());
+ ceph_assert(!srcdnrep.empty());
auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
reply->witnesses.swap(srcdnrep);
mds->send_message_mds(reply, mdr->slave_to_mds);
if (srcdnl->is_primary())
rollback.orig_src.ino = srcdnl->get_inode()->ino();
else {
- assert(srcdnl->is_remote());
+ ceph_assert(srcdnl->is_remote());
rollback.orig_src.remote_ino = srcdnl->get_remote_ino();
rollback.orig_src.remote_d_type = srcdnl->get_remote_d_type();
}
if (reply) {
mds->send_message_mds(reply, mdr->slave_to_mds);
} else {
- assert(mdr->aborted);
+ ceph_assert(mdr->aborted);
dout(10) << " abort flag set, finishing" << dendl;
mdcache->request_finish(mdr);
}
mds->queue_waiters(finished); // this includes SINGLEAUTH waiters.
// unfreeze
- assert(in->is_frozen_inode());
+ ceph_assert(in->is_frozen_inode());
in->unfreeze_inode(finished);
}
srcdn = srcdir->lookup(rollback.orig_src.dname);
if (srcdn) {
dout(10) << " srcdn " << *srcdn << dendl;
- assert(srcdn->get_linkage()->is_null());
+ ceph_assert(srcdn->get_linkage()->is_null());
} else
dout(10) << " srcdn not found" << dendl;
} else
if (rollback.orig_src.ino) {
in = mdcache->get_inode(rollback.orig_src.ino);
if (in && in->is_dir())
- assert(srcdn && destdn);
+ ceph_assert(srcdn && destdn);
} else
in = mdcache->get_inode(rollback.orig_src.remote_ino);
straydn = straydir->lookup(rollback.stray.dname);
if (straydn) {
dout(10) << " straydn " << *straydn << dendl;
- assert(straydn->get_linkage()->is_primary());
+ ceph_assert(straydn->get_linkage()->is_primary());
} else
dout(10) << " straydn not found" << dendl;
} else
if (rollback.orig_dest.ino) {
target = mdcache->get_inode(rollback.orig_dest.ino);
if (target)
- assert(destdn && straydn);
+ ceph_assert(destdn && straydn);
} else if (rollback.orig_dest.remote_ino)
target = mdcache->get_inode(rollback.orig_dest.remote_ino);
// can't use is_auth() in the resolve stage
mds_rank_t whoami = mds->get_nodeid();
// slave
- assert(!destdn || destdn->authority().first != whoami);
- assert(!straydn || straydn->authority().first != whoami);
+ ceph_assert(!destdn || destdn->authority().first != whoami);
+ ceph_assert(!straydn || straydn->authority().first != whoami);
bool force_journal_src = false;
bool force_journal_dest = false;
if (srcdn->authority().first == whoami)
srcdnpv = srcdn->pre_dirty();
if (rollback.orig_src.ino) {
- assert(in);
+ ceph_assert(in);
srcdn->push_projected_linkage(in);
} else
srcdn->push_projected_linkage(rollback.orig_src.remote_ino,
} else {
SnapRealm *realm;
if (rollback.orig_src.ino) {
- assert(srcdir);
+ ceph_assert(srcdir);
realm = srcdir->get_inode()->find_snaprealm();
} else {
realm = in->snaprealm->parent;
} else {
// the dentry will be trimmed soon; it's ok to have the wrong linkage
if (rollback.orig_dest.ino)
- assert(mds->is_resolve());
+ ceph_assert(mds->is_resolve());
destdn->push_projected_linkage();
}
}
ti->ctime = ti->rstat.rctime = rollback.orig_dest.old_ctime;
if (MDS_INO_IS_STRAY(rollback.orig_src.dirfrag.ino)) {
if (MDS_INO_IS_STRAY(rollback.orig_dest.dirfrag.ino))
- assert(!rollback.orig_dest.ino && !rollback.orig_dest.remote_ino);
+ ceph_assert(!rollback.orig_dest.ino && !rollback.orig_dest.remote_ino);
else
- assert(rollback.orig_dest.remote_ino &&
+ ceph_assert(rollback.orig_dest.remote_ino &&
rollback.orig_dest.remote_ino == rollback.orig_src.ino);
} else
ti->nlink++;
} else {
SnapRealm *realm;
if (rollback.orig_dest.ino) {
- assert(destdir);
+ ceph_assert(destdir);
realm = destdir->get_inode()->find_snaprealm();
} else {
realm = target->snaprealm->parent;
}
if (force_journal_dest) {
- assert(rollback.orig_dest.ino);
+ ceph_assert(rollback.orig_dest.ino);
le->commit.add_dir_context(destdir);
le->commit.add_primary_dentry(destdn, 0, true);
}
// slave: no need to journal straydn
if (target && target != in && target->authority().first == whoami) {
- assert(rollback.orig_dest.remote_ino);
+ ceph_assert(rollback.orig_dest.remote_ino);
le->commit.add_dir_context(target->get_projected_parent_dir());
le->commit.add_primary_dentry(target->get_projected_parent_dn(), target, true);
}
}
if (target && target->is_dir()) {
- assert(destdn);
+ ceph_assert(destdn);
mdcache->project_subtree_rename(target, straydir, destdir);
}
if (in && in->is_dir()) {
- assert(srcdn);
+ ceph_assert(srcdn);
mdcache->project_subtree_rename(in, destdir, srcdir);
}
if (mdr && !mdr->more()->slave_update_journaled) {
- assert(le->commit.empty());
+ ceph_assert(le->commit.empty());
mdlog->cancel_entry(le);
mut->ls = NULL;
_rename_rollback_finish(mut, mdr, srcdn, srcdnpv, destdn, straydn, splits, finish_mdr);
} else {
- assert(!le->commit.empty());
+ ceph_assert(!le->commit.empty());
if (mdr)
mdr->more()->slave_update_journaled = false;
MDSLogContextBase *fin = new C_MDS_LoggedRenameRollback(this, mut, mdr,
if (srcdn && srcdn->get_linkage()->is_primary()) {
CInode *in = srcdn->get_linkage()->get_inode();
if (in && in->is_dir()) {
- assert(destdn);
+ ceph_assert(destdn);
mdcache->adjust_subtree_after_rename(in, destdn->get_dir(), true);
}
}
CInode *oldin = destdn->get_linkage()->get_inode();
// update subtree map?
if (oldin && oldin->is_dir()) {
- assert(straydn);
+ ceph_assert(straydn);
mdcache->adjust_subtree_after_rename(oldin, straydn->get_dir(), true);
}
}
}
// witnessed? or add extra witnesses?
- assert(mdr->more()->witnessed.count(from) == 0);
+ ceph_assert(mdr->more()->witnessed.count(from) == 0);
if (ack->is_interrupted()) {
dout(10) << " slave request interrupted, noop" << dendl;
} else if (ack->witnesses.empty()) {
}
// remove from waiting list
- assert(mdr->more()->waiting_on_slave.count(from));
+ ceph_assert(mdr->more()->waiting_on_slave.count(from));
mdr->more()->waiting_on_slave.erase(from);
if (mdr->more()->waiting_on_slave.empty())
{
dout(10) << "handle_slave_rename_notify_ack " << *mdr << " from mds."
<< ack->get_source() << dendl;
- assert(mdr->is_slave());
+ ceph_assert(mdr->is_slave());
mds_rank_t from = mds_rank_t(ack->get_source().num());
if (mdr->more()->waiting_on_slave.count(from)) {
decode(snapid, p);
dout(10) << " stid " << stid << " snapid " << snapid << dendl;
- assert(mds->snapclient->get_cached_version() >= stid);
+ ceph_assert(mds->snapclient->get_cached_version() >= stid);
// journal
SnapInfo info;
decode(seq, p);
dout(10) << " stid is " << stid << ", seq is " << seq << dendl;
- assert(mds->snapclient->get_cached_version() >= stid);
+ ceph_assert(mds->snapclient->get_cached_version() >= stid);
// journal
auto &pi = diri->project_inode(false, true);
version_t stid = mdr->more()->stid;
dout(10) << " stid is " << stid << dendl;
- assert(mds->snapclient->get_cached_version() >= stid);
+ ceph_assert(mds->snapclient->get_cached_version() >= stid);
// journal
auto &pi = diri->project_inode(false, true);
// project the snaprealm
auto &newsnap = *pi.snapnode;
auto it = newsnap.snaps.find(snapid);
- assert(it != newsnap.snaps.end());
+ ceph_assert(it != newsnap.snaps.end());
it->second.name = dstname;
// journal the inode changes
MDSRank *get_mds() override {return sessionmap->mds;}
public:
explicit SessionMapIOContext(SessionMap *sessionmap_) : sessionmap(sessionmap_) {
- assert(sessionmap != NULL);
+ ceph_assert(sessionmap != NULL);
}
};
};
auto blp = bl.cbegin();
if (r < 0) {
derr << "_load_finish got " << cpp_strerror(r) << dendl;
- assert(0 == "failed to load sessionmap");
+ ceph_assert(0 == "failed to load sessionmap");
}
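// The assert(0 == "failed to load sessionmap") pattern above is the
// always-false-with-message idiom: 0 is a null pointer constant compared
// against the address of a string literal, so the expression is always
// false and the stringified message lands in the failure output. The wider
// sweep in this diff swaps assert for ceph_assert so the checks survive
// builds that define NDEBUG. A minimal self-contained sketch of the idiom
// (illustrative only, not Ceph's actual ceph_assert definition):
#include <cassert>

static void on_fatal_load_error(int r) {
  if (r < 0) {
    // Always fails; the message text is carried in the stringified
    // expression that the assert machinery prints.
    assert(0 == "failed to load sessionmap");
  }
}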
dump();
decode_legacy(blp); // note: this sets last_cap_renew = now()
dout(10) << __func__ << ": needv " << needv << ", v " << version << dendl;
if (needv && committing >= needv) {
- assert(committing > committed);
+ ceph_assert(committing > committed);
commit_waiters[committing].push_back(onsave);
return;
}
decode(pre, p);
if (pre == (uint64_t)-1) {
DECODE_START_LEGACY_COMPAT_LEN(3, 3, 3, p);
- assert(struct_v >= 2);
+ ceph_assert(struct_v >= 2);
decode(version, p);
{
dout(10) << __func__ << " s=" << s << " name=" << s->info.inst.name << dendl;
- assert(session_map.count(s->info.inst.name) == 0);
+ ceph_assert(session_map.count(s->info.inst.name) == 0);
session_map[s->info.inst.name] = s;
auto by_state_entry = by_state.find(s->state);
if (by_state_entry == by_state.end())
// Move to the back of the session list for this state (should
// already be on a list courtesy of add_session and set_state)
- assert(session->item_session_list.is_on_list());
+ ceph_assert(session->item_session_list.is_on_list());
auto by_state_entry = by_state.find(session->state);
if (by_state_entry == by_state.end())
by_state_entry = by_state.emplace(session->state,
void SessionMap::save_if_dirty(const std::set<entity_name_t> &tgt_sessions,
MDSGatherBuilder *gather_bld)
{
- assert(gather_bld != NULL);
+ ceph_assert(gather_bld != NULL);
std::vector<entity_name_t> write_sessions;
void SessionMap::hit_session(Session *session) {
uint64_t sessions = get_session_count_in_state(Session::STATE_OPEN) +
get_session_count_in_state(Session::STATE_STALE);
- assert(sessions != 0);
+ ceph_assert(sessions != 0);
double total_load = total_load_avg.hit();
double avg_load = total_load / sessions;
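// hit_session() above divides a decayed aggregate load by the number of
// open/stale sessions. A toy model of such a decay counter, assuming
// half-life based exponential decay (the real common/DecayCounter differs
// in detail):
#include <chrono>
#include <cmath>

class ToyDecayCounter {
  double value = 0.0;
  double half_life; // seconds until the stored value halves
  std::chrono::steady_clock::time_point last = std::chrono::steady_clock::now();
public:
  explicit ToyDecayCounter(double half_life_s) : half_life(half_life_s) {}
  // Decay the running total for elapsed time, add one event, return it.
  double hit(double delta = 1.0) {
    auto now = std::chrono::steady_clock::now();
    double dt = std::chrono::duration<double>(now - last).count();
    last = now;
    value = value * std::exp2(-dt / half_life) + delta;
    return value;
  }
};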
const std::vector<std::string> &args,
std::stringstream *ss)
{
- assert(ss != NULL);
+ ceph_assert(ss != NULL);
for (const auto &s : args) {
dout(20) << __func__ << " parsing filter '" << s << "'" << dendl;
*/
auto is_true = [](std::string_view bstr, bool *out) -> bool
{
- assert(out != nullptr);
+ ceph_assert(out != nullptr);
if (bstr == "true" || bstr == "1") {
*out = true;
void push_pv(version_t pv)
{
- assert(projected.empty() || projected.back() != pv);
+ ceph_assert(projected.empty() || projected.back() != pv);
projected.push_back(pv);
}
void pop_pv(version_t v)
{
- assert(!projected.empty());
- assert(projected.front() == v);
+ ceph_assert(!projected.empty());
+ ceph_assert(projected.front() == v);
projected.pop_front();
}
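// push_pv/pop_pv above keep a FIFO of projected versions: projections are
// appended at the back and must be retired from the front in order. A
// stand-alone sketch of the invariant those asserts protect (hypothetical
// type, not Session's actual layout):
#include <cassert>
#include <deque>

struct ProjectedVersions {
  using version_t = unsigned long long;
  std::deque<version_t> projected;
  void push_pv(version_t pv) {
    assert(projected.empty() || projected.back() != pv); // no duplicate tail
    projected.push_back(pv);
  }
  void pop_pv(version_t v) {
    assert(!projected.empty());
    assert(projected.front() == v); // retire strictly in FIFO order
    projected.pop_front();
  }
};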
return info.prealloc_inos.range_start();
}
inodeno_t take_ino(inodeno_t ino = 0) {
- assert(!info.prealloc_inos.empty());
+ ceph_assert(!info.prealloc_inos.empty());
if (ino) {
if (info.prealloc_inos.contains(ino))
++importing_count;
}
void dec_importing() {
- assert(importing_count > 0);
+ ceph_assert(importing_count > 0);
--importing_count;
}
bool is_importing() const { return importing_count > 0; }
void set_load_avg_decay_rate(double rate) {
- assert(is_open() || is_stale());
+ ceph_assert(is_open() || is_stale());
load_avg = DecayCounter(rate);
}
uint64_t get_load_avg() const {
if (state == STATE_CLOSED) {
item_session_list.remove_myself();
} else {
- assert(!item_session_list.is_on_list());
+ ceph_assert(!item_session_list.is_on_list());
}
preopen_out_queue.clear();
}
// helpers
entity_inst_t& get_inst(entity_name_t w) {
- assert(session_map.count(w));
+ ceph_assert(session_map.count(w));
return session_map[w]->info.inst;
}
version_t inc_push_seq(client_t client) {
}
void trim_completed_requests(entity_name_t c, ceph_tid_t tid) {
Session *session = get_session(c);
- assert(session);
+ ceph_assert(session);
session->trim_completed_requests(tid);
}
#include "Mutation.h"
void SimpleLock::dump(Formatter *f) const {
- assert(f != NULL);
+ ceph_assert(f != NULL);
if (is_sync_and_unlocked()) {
return;
}
return s;
}
void set_state_rejoin(int s, MDSInternalContextBase::vec& waiters, bool survivor) {
- assert(!get_parent()->is_auth());
+ ceph_assert(!get_parent()->is_auth());
// If the lock in the replica object was not in SYNC state when the auth mds of the object failed,
// the auth mds of the object may take xlock on the lock and change the object when replaying
return ++num_rdlock;
}
int put_rdlock() {
- assert(num_rdlock>0);
+ ceph_assert(num_rdlock>0);
--num_rdlock;
if (num_rdlock == 0)
parent->put(MDSCacheObject::PIN_LOCK);
// xlock
void get_xlock(MutationRef who, client_t client) {
- assert(get_xlock_by() == MutationRef());
- assert(state == LOCK_XLOCK || is_locallock() ||
+ ceph_assert(get_xlock_by() == MutationRef());
+ ceph_assert(state == LOCK_XLOCK || is_locallock() ||
state == LOCK_LOCK /* if we are a slave */);
parent->get(MDSCacheObject::PIN_LOCK);
more()->num_xlock++;
more()->xlock_by_client = client;
}
void set_xlock_done() {
- assert(more()->xlock_by);
- assert(state == LOCK_XLOCK || is_locallock() ||
+ ceph_assert(more()->xlock_by);
+ ceph_assert(state == LOCK_XLOCK || is_locallock() ||
state == LOCK_LOCK /* if we are a slave */);
if (!is_locallock())
state = LOCK_XLOCKDONE;
more()->xlock_by.reset();
}
void put_xlock() {
- assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE ||
+ ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE ||
state == LOCK_XLOCKSNAP || is_locallock() ||
state == LOCK_LOCK /* if we are a master of a slave */);
--more()->num_xlock;
return state_flags & LEASED;
}
void get_client_lease() {
- assert(!is_leased());
+ ceph_assert(!is_leased());
state_flags |= LEASED;
}
void put_client_lease() {
- assert(is_leased());
+ ceph_assert(is_leased());
state_flags &= ~LEASED;
}
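// get_client_lease/put_client_lease guard a single LEASED bit in
// state_flags, and the asserts force acquire and release to alternate
// strictly. The same pattern in isolation (sketch, hypothetical type):
#include <cassert>
#include <cstdint>

struct LeaseBit {
  static constexpr uint32_t LEASED = 1u << 0;
  uint32_t state_flags = 0;
  bool is_leased() const { return state_flags & LEASED; }
  void get_client_lease() {
    assert(!is_leased()); // double-acquire is a logic error
    state_flags |= LEASED;
  }
  void put_client_lease() {
    assert(is_leased()); // releasing an untaken lease is a logic error
    state_flags &= ~LEASED;
  }
};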
* called on first replica creation.
*/
void replicate_relax() {
- assert(parent->is_auth());
- assert(!parent->is_replicated());
+ ceph_assert(parent->is_auth());
+ ceph_assert(!parent->is_replicated());
if (state == LOCK_LOCK && !is_used())
state = LOCK_SYNC;
}
switch (type) {
case 'U': // uptodate
- assert(cached_version == m->get_tid());
+ ceph_assert(cached_version == m->get_tid());
break;
case 'F': // full
{
{
dout(10) << __func__ << " tid " << tid << dendl;
- assert(cached_version == 0 || cached_version >= tid);
+ ceph_assert(cached_version == 0 || cached_version >= tid);
if (cached_version == 0) {
committing_tids.insert(tid);
} else if (cached_pending_update.count(tid)) {
{
dout(10) << __func__ << " want " << want << dendl;
- assert(want >= cached_version);
+ ceph_assert(want >= cached_version);
if (onfinish)
waiting_for_version[want].push_back(onfinish);
void SnapClient::get_snaps(set<snapid_t>& result) const
{
- assert(cached_version > 0);
+ ceph_assert(cached_version > 0);
for (auto& p : cached_snaps)
result.insert(p.first);
set<snapid_t> SnapClient::filter(const set<snapid_t>& snaps) const
{
- assert(cached_version > 0);
+ ceph_assert(cached_version > 0);
if (snaps.empty())
return snaps;
const SnapInfo* SnapClient::get_snap_info(snapid_t snapid) const
{
- assert(cached_version > 0);
+ ceph_assert(cached_version > 0);
const SnapInfo* result = NULL;
auto it = cached_snaps.find(snapid);
void SnapClient::get_snap_infos(map<snapid_t, const SnapInfo*>& infomap,
const set<snapid_t>& snaps) const
{
- assert(cached_version > 0);
+ ceph_assert(cached_version > 0);
if (snaps.empty())
return;
bool is_synced() const { return synced; }
void wait_for_sync(MDSInternalContextBase *c) {
- assert(!synced);
+ ceph_assert(!synced);
waiting_for_version[std::max<version_t>(cached_version, 1)].push_back(c);
}
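// wait_for_sync and its siblings queue contexts under the table version
// they need; when an update raises cached_version past a key, those
// waiters fire. A rough sketch of the pattern (hypothetical Context type,
// not SnapClient's actual interface):
#include <cassert>
#include <functional>
#include <map>
#include <vector>

struct VersionWaiters {
  using version_t = unsigned long long;
  using Context = std::function<void()>;
  version_t cached_version = 0;
  std::map<version_t, std::vector<Context>> waiting_for_version;

  void wait_for(version_t want, Context c) {
    assert(want >= cached_version); // mirrors the precondition asserted above
    waiting_for_version[want].push_back(std::move(c));
  }
  void notify(version_t now_at) {
    cached_version = now_at;
    auto end = waiting_for_version.upper_bound(now_at);
    for (auto it = waiting_for_version.begin(); it != end; ++it)
      for (auto &c : it->second) c(); // fire every satisfied waiter
    waiting_for_version.erase(waiting_for_version.begin(), end);
  }
};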
{
auto p = open_past_parents.find(parent->inode->ino());
if (p != open_past_parents.end()) {
- assert(p->second.second.count(last) == 0);
+ ceph_assert(p->second.second.count(last) == 0);
p->second.second.insert(last);
} else {
open_past_parents[parent->inode->ino()].first = parent;
void SnapRealm::remove_open_past_parent(inodeno_t ino, snapid_t last)
{
auto p = open_past_parents.find(ino);
- assert(p != open_past_parents.end());
+ ceph_assert(p != open_past_parents.end());
auto q = p->second.second.find(last);
- assert(q != p->second.second.end());
+ ceph_assert(q != p->second.second.end());
p->second.second.erase(q);
--num_open_past_parents;
if (p->second.second.empty()) {
}
if (!srnode.past_parent_snaps.empty())
- assert(mdcache->mds->snapclient->get_cached_version() > 0);
+ ceph_assert(mdcache->mds->snapclient->get_cached_version() > 0);
if (!srnode.past_parents.empty() &&
mdcache->mds->allows_multimds_snaps()) {
}
// and my past parents too!
- assert(srnode.past_parents.size() >= num_open_past_parents);
+ ceph_assert(srnode.past_parents.size() >= num_open_past_parents);
if (srnode.past_parents.size() > num_open_past_parents) {
for (map<snapid_t, snaplink_t>::iterator p = srnode.past_parents.begin();
p != srnode.past_parents.end(); ) {
past_parents_dirty = true;
continue;
}
- assert(parent->snaprealm); // hmm!
+ ceph_assert(parent->snaprealm); // hmm!
if (!parent->snaprealm->_open_parents(finish, p->second.first, p->first))
return false;
auto q = open_past_parents.find(p->second.ino);
return true;
if (!srnode.past_parent_snaps.empty())
- assert(mdcache->mds->snapclient->get_cached_version() > 0);
+ ceph_assert(mdcache->mds->snapclient->get_cached_version() > 0);
if (!srnode.past_parents.empty() &&
mdcache->mds->allows_multimds_snaps()) {
// include snaps for parents
for (const auto& p : srnode.past_parents) {
const CInode *oldparent = mdcache->get_inode(p.second.ino);
- assert(oldparent); // call open_parents first!
- assert(oldparent->snaprealm);
+ ceph_assert(oldparent); // call open_parents first!
+ ceph_assert(oldparent->snaprealm);
const set<snapid_t>& snaps = oldparent->snaprealm->get_snaps();
snapid_t last = 0;
void SnapRealm::check_cache() const
{
- assert(have_past_parents_open());
+ ceph_assert(have_past_parents_open());
snapid_t seq;
snapid_t last_created;
snapid_t last_destroyed = mdcache->mds->snapclient->get_last_destroyed();
p != srnode.past_parents.end() && p->first >= first && p->second.first <= last;
++p) {
CInode *oldparent = mdcache->get_inode(p->second.ino);
- assert(oldparent); // call open_parents first!
- assert(oldparent->snaprealm);
+ ceph_assert(oldparent); // call open_parents first!
+ ceph_assert(oldparent->snaprealm);
oldparent->snaprealm->get_snap_info(infomap,
std::max(first, p->second.first),
std::min(last, p->first));
map<snapid_t,snaplink_t>::iterator p = srnode.past_parents.lower_bound(snapid);
if (p != srnode.past_parents.end() && p->second.first <= snapid) {
CInode *oldparent = mdcache->get_inode(p->second.ino);
- assert(oldparent); // call open_parents first!
- assert(oldparent->snaprealm);
+ ceph_assert(oldparent); // call open_parents first!
+ ceph_assert(oldparent->snaprealm);
return oldparent->snaprealm->get_snapname(snapid, atino);
}
}
- assert(srnode.current_parent_since <= snapid);
- assert(parent);
+ ceph_assert(srnode.current_parent_since <= snapid);
+ ceph_assert(parent);
return parent->get_snapname(snapid, atino);
}
p != srnode.past_parents.end() && p->first >= first && p->second.first <= last;
++p) {
CInode *oldparent = mdcache->get_inode(p->second.ino);
- assert(oldparent); // call open_parents first!
- assert(oldparent->snaprealm);
+ ceph_assert(oldparent); // call open_parents first!
+ ceph_assert(oldparent->snaprealm);
snapid_t r = oldparent->snaprealm->resolve_snapname(n, atino,
std::max(first, p->second.first),
std::min(last, p->first));
} else {
// no caps, nothing to move/split.
dout(20) << " split no-op, no caps to move on file " << *child->inode << dendl;
- assert(!child->inode->is_any_caps());
+ ceph_assert(!child->inode->is_any_caps());
}
return;
}
newparent = parent;
dout(10) << "merge to " << *newparent << " on " << *newparent->inode << dendl;
- assert(open_past_children.empty());
+ ceph_assert(open_past_children.empty());
dout(10) << " open_children are " << open_children << dendl;
for (auto realm : open_children) {
++p;
in->move_to_realm(newparent);
}
- assert(inodes_with_caps.empty());
+ ceph_assert(inodes_with_caps.empty());
// delete this
inode->close_snaprealm();
encode(pending_destroy, bl);
encode(last_created, bl);
encode(last_destroyed, bl);
- assert(version == tid);
+ ceph_assert(version == tid);
for (auto &p : active_clients) {
auto m = MMDSTableRequest::create(table, TABLESERVER_OP_NOTIFY_PREP, 0, version);
case 'F': // full
version_t have_version;
decode(have_version, p);
- assert(have_version <= version);
+ ceph_assert(have_version <= version);
if (have_version == version) {
char type = 'U';
encode(type, reply->bl);
bool upgrade_format() {
// upgraded from old filesystem
- assert(last_snap > 0);
+ ceph_assert(last_snap > 0);
bool upgraded = false;
if (get_version() == 0) {
// version 0 confuses snapclient code
else if (ino == MDS_INO_MDSDIR(rank))
mdsdir_scrubbed = true;
else
- assert(0);
+ ceph_assert(0);
}
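// assert(0) above marks a branch that should be unreachable: the ino must
// be either the root or this rank's MDSDIR. Elsewhere in this diff the
// same intent is spelled ceph_abort() ("Should be unreachable"). A generic
// sketch of the guard:
#include <cstdlib>

enum class ScrubTarget { Root, MdsDir };

static const char *scrub_target_name(ScrubTarget t) {
  switch (t) {
  case ScrubTarget::Root:   return "root";
  case ScrubTarget::MdsDir: return "mdsdir";
  }
  std::abort(); // unreachable if every enumerator is handled above
}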
bool can_allow_multimds_snaps() const {
return (root_scrubbed && mdsdir_scrubbed) ||
C_IO_PurgeStrayPurged(StrayManager *sm_, CDentry *d, bool oh) :
StrayManagerIOContext(sm_), dn(d), only_head(oh) { }
void finish(int r) override {
- assert(r == 0 || r == -ENOENT);
+ ceph_assert(r == 0 || r == -ENOENT);
sm->_purge_stray_purged(dn, only_head);
}
void print(ostream& out) const override {
CDentry::linkage_t *dnl = dn->get_projected_linkage();
CInode *in = dnl->get_inode();
dout(10) << __func__ << " " << *dn << " " << *in << dendl;
- assert(!dn->is_replicated());
+ ceph_assert(!dn->is_replicated());
// CHEAT. there's no real need to journal our intent to purge, since
// that is implicit in the dentry's presence and non-use in the stray
} else {
dout(10) << " NO realm, using null context" << dendl;
snapc = &nullsnapc;
- assert(in->last == CEPH_NOSNAP);
+ ceph_assert(in->last == CEPH_NOSNAP);
}
uint64_t to = 0;
// is being purged.
derr << "Rogue reference after purge to " << *dn << dendl;
- assert(0 == "rogue reference to purging inode");
+ ceph_assert(0 == "rogue reference to purging inode");
}
// kill dentry.
CInode *in = dn->get_linkage()->get_inode();
dout(10) << "_purge_stray_logged " << *dn << " " << *in << dendl;
- assert(!in->state_test(CInode::STATE_RECOVERING));
+ ceph_assert(!in->state_test(CInode::STATE_RECOVERING));
bool new_dn = dn->is_new();
// unlink
- assert(dn->get_projected_linkage()->is_null());
+ ceph_assert(dn->get_projected_linkage()->is_null());
dn->dir->unlink_inode(dn, !new_dn);
dn->pop_projected_linkage();
dn->mark_dirty(pdv, ls);
void StrayManager::enqueue(CDentry *dn, bool trunc)
{
CDentry::linkage_t *dnl = dn->get_projected_linkage();
- assert(dnl);
+ ceph_assert(dnl);
CInode *in = dnl->get_inode();
- assert(in);
+ ceph_assert(in);
/* We consider a stray to be purging as soon as it is enqueued, to avoid
* enqueueing it twice */
void StrayManager::_enqueue(CDentry *dn, bool trunc)
{
- assert(started);
+ ceph_assert(started);
CInode *in = dn->get_linkage()->get_inode();
if (in->snaprealm &&
void StrayManager::set_num_strays(uint64_t num)
{
- assert(!started);
+ ceph_assert(!started);
num_strays = num;
logger->set(l_mdc_num_strays, num_strays);
}
{
dout(10) << "eval_stray " << *dn << dendl;
CDentry::linkage_t *dnl = dn->get_projected_linkage();
- assert(dnl->is_primary());
+ ceph_assert(dnl->is_primary());
dout(10) << " inode is " << *dnl->get_inode() << dendl;
CInode *in = dnl->get_inode();
- assert(in);
- assert(!in->state_test(CInode::STATE_REJOINUNDEF));
+ ceph_assert(in);
+ ceph_assert(!in->state_test(CInode::STATE_REJOINUNDEF));
// The only dentries eligible for purging are those
// in the stray directories
- assert(dn->get_dir()->get_inode()->is_stray());
+ ceph_assert(dn->get_dir()->get_inode()->is_stray());
// Inode may not pass through this function if it
// was already identified for purging (i.e. cannot
// call eval_stray() after purge())
- assert(!dn->state_test(CDentry::STATE_PURGING));
+ ceph_assert(!dn->state_test(CDentry::STATE_PURGING));
if (!dn->is_auth()) {
return false;
for (auto it = in->remote_parents.begin(); it != in->remote_parents.end(); ) {
CDentry *remote_dn = *it;
++it;
- assert(remote_dn->last != CEPH_NOSNAP);
+ ceph_assert(remote_dn->last != CEPH_NOSNAP);
remote_dn->unlink_remote(remote_dn->get_linkage());
}
}
!in->old_inodes.empty()) {
// A file with snapshots: we will truncate the HEAD revision
// but leave the metadata intact.
- assert(!in->is_dir());
+ ceph_assert(!in->is_dir());
dout(20) << " file has past parents "
<< in->snaprealm << dendl;
if (in->is_file() && in->get_projected_inode()->size > 0) {
dout(10) << __func__ << " " << *remote_dn << dendl;
CDentry::linkage_t *dnl = remote_dn->get_projected_linkage();
- assert(dnl->is_remote());
+ ceph_assert(dnl->is_remote());
CInode *in = dnl->get_inode();
if (!in) {
// refers to stray?
CDentry *primary_dn = in->get_projected_parent_dn();
- assert(primary_dn != NULL);
+ ceph_assert(primary_dn != NULL);
if (primary_dn->get_dir()->get_inode()->is_stray()) {
_eval_stray_remote(primary_dn, remote_dn);
} else {
void StrayManager::_eval_stray_remote(CDentry *stray_dn, CDentry *remote_dn)
{
dout(20) << __func__ << " " << *stray_dn << dendl;
- assert(stray_dn != NULL);
- assert(stray_dn->get_dir()->get_inode()->is_stray());
+ ceph_assert(stray_dn != NULL);
+ ceph_assert(stray_dn->get_dir()->get_inode()->is_stray());
CDentry::linkage_t *stray_dnl = stray_dn->get_projected_linkage();
- assert(stray_dnl->is_primary());
+ ceph_assert(stray_dnl->is_primary());
CInode *stray_in = stray_dnl->get_inode();
- assert(stray_in->inode.nlink >= 1);
- assert(stray_in->last == CEPH_NOSNAP);
+ ceph_assert(stray_in->inode.nlink >= 1);
+ ceph_assert(stray_in->last == CEPH_NOSNAP);
/* If no remote_dn hinted, pick one arbitrarily */
if (remote_dn == NULL) {
return;
}
}
- assert(remote_dn->last == CEPH_NOSNAP);
+ ceph_assert(remote_dn->last == CEPH_NOSNAP);
// NOTE: we repeat this check in _rename(), since our submission path is racy.
if (!remote_dn->is_projected()) {
if (remote_dn->is_auth()) {
void StrayManager::migrate_stray(CDentry *dn, mds_rank_t to)
{
CInode *in = dn->get_projected_linkage()->get_inode();
- assert(in);
+ ceph_assert(in);
CInode *diri = dn->dir->get_inode();
- assert(diri->is_stray());
+ ceph_assert(diri->is_stray());
dout(10) << "migrate_stray from mds." << MDS_INO_STRAY_OWNER(diri->inode.ino)
<< " to mds." << to
<< " " << *dn << " " << *in << dendl;
// rename it to another mds.
filepath src;
dn->make_path(src);
- assert(src.depth() == 2);
+ ceph_assert(src.depth() == 2);
filepath dst(MDS_INO_MDSDIR(to));
dst.push_dentry(src[0]);
num_strays_delayed(0), num_strays_enqueuing(0),
purge_queue(purge_queue_)
{
- assert(mds != NULL);
+ ceph_assert(mds != NULL);
}
void StrayManager::truncate(CDentry *dn)
{
const CDentry::linkage_t *dnl = dn->get_projected_linkage();
const CInode *in = dnl->get_inode();
- assert(in);
+ ceph_assert(in);
dout(10) << __func__ << ": " << *dn << " " << *in << dendl;
- assert(!dn->is_replicated());
+ ceph_assert(!dn->is_replicated());
const SnapRealm *realm = in->find_snaprealm();
- assert(realm);
+ ceph_assert(realm);
dout(10) << " realm " << *realm << dendl;
const SnapContext *snapc = &realm->get_snap_context();
// the file has ever been.
to = std::max(in->inode.max_size_ever, to);
- assert(to > 0);
+ ceph_assert(to > 0);
PurgeItem item;
item.action = PurgeItem::TRUNCATE_FILE;
}
void add_opened_ino(inodeno_t ino) {
- assert(!opened_ino);
+ ceph_assert(!opened_ino);
opened_ino = ino;
}
add_null_dentry(dn, dirty);
return;
}
- assert(dn->get_projected_linkage()->is_primary());
+ ceph_assert(dn->get_projected_linkage()->is_primary());
add_primary_dentry(dn, 0, dirty, dirty_parent, dirty_pool);
}
dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire" << dendl;
- assert(g_conf()->mds_kill_journal_expire_at != 1);
+ ceph_assert(g_conf()->mds_kill_journal_expire_at != 1);
// commit dirs
for (elist<CDir*>::iterator p = new_dirfrags.begin(); !p.end(); ++p) {
dout(20) << " new_dirfrag " << **p << dendl;
- assert((*p)->is_auth());
+ ceph_assert((*p)->is_auth());
commit.insert(*p);
}
for (elist<CDir*>::iterator p = dirty_dirfrags.begin(); !p.end(); ++p) {
dout(20) << " dirty_dirfrag " << **p << dendl;
- assert((*p)->is_auth());
+ ceph_assert((*p)->is_auth());
commit.insert(*p);
}
for (elist<CDentry*>::iterator p = dirty_dentries.begin(); !p.end(); ++p) {
dout(20) << " dirty_dentry " << **p << dendl;
- assert((*p)->is_auth());
+ ceph_assert((*p)->is_auth());
commit.insert((*p)->get_dir());
}
for (elist<CInode*>::iterator p = dirty_inodes.begin(); !p.end(); ++p) {
dout(20) << " dirty_inode " << **p << dendl;
- assert((*p)->is_auth());
+ ceph_assert((*p)->is_auth());
if ((*p)->is_base()) {
(*p)->store(gather_bld.new_sub());
} else
p != commit.end();
++p) {
CDir *dir = *p;
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
if (dir->can_auth_pin()) {
dout(15) << "try_to_expire committing " << *dir << dendl;
dir->commit(0, gather_bld.new_sub(), false, op_prio);
mds->locker->scatter_nudge(&in->nestlock, gather_bld.new_sub());
}
- assert(g_conf()->mds_kill_journal_expire_at != 2);
+ ceph_assert(g_conf()->mds_kill_journal_expire_at != 2);
// open files and snap inodes
if (!open_files.empty()) {
- assert(!mds->mdlog->is_capped()); // hmm FIXME
+ ceph_assert(!mds->mdlog->is_capped()); // hmm FIXME
EOpen *le = 0;
LogSegment *ls = mds->mdlog->get_current_segment();
- assert(ls != this);
+ ceph_assert(ls != this);
elist<CInode*>::iterator p = open_files.begin(member_offset(CInode, item_open_file));
while (!p.end()) {
CInode *in = *p;
}
}
- assert(g_conf()->mds_kill_journal_expire_at != 3);
+ ceph_assert(g_conf()->mds_kill_journal_expire_at != 3);
// backtraces to be stored/updated
for (elist<CInode*>::iterator p = dirty_parent_inodes.begin(); !p.end(); ++p) {
CInode *in = *p;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
if (in->can_auth_pin()) {
dout(15) << "try_to_expire waiting for storing backtrace on " << *in << dendl;
in->store_backtrace(gather_bld.new_sub(), op_prio);
}
}
- assert(g_conf()->mds_kill_journal_expire_at != 4);
+ ceph_assert(g_conf()->mds_kill_journal_expire_at != 4);
// slave updates
for (elist<MDSlaveUpdate*>::iterator p = slave_updates.begin(member_offset(MDSlaveUpdate, item));
     !p.end(); ++p) {
MDSlaveUpdate *su = *p;
dout(10) << "try_to_expire waiting on slave update " << su << dendl;
- assert(su->waiter == 0);
+ ceph_assert(su->waiter == 0);
su->waiter = gather_bld.new_sub();
}
p != pending_commit_tids.end();
++p) {
MDSTableClient *client = mds->get_table_client(p->first);
- assert(client);
+ ceph_assert(client);
for (ceph::unordered_set<version_t>::iterator q = p->second.begin();
q != p->second.end();
++q) {
dout(10) << "try_to_expire " << get_mdstable_name(p->first) << " transaction " << *q
<< " pending commit (not yet acked), waiting" << dendl;
- assert(!client->has_committed(*q));
+ ceph_assert(!client->has_committed(*q));
client->wait_for_ack(*q, gather_bld.new_sub());
}
}
p != tablev.end();
++p) {
MDSTableServer *server = mds->get_table_server(p->first);
- assert(server);
+ ceph_assert(server);
if (p->second > server->get_committed_version()) {
dout(10) << "try_to_expire waiting for " << get_mdstable_name(p->first)
<< " to save, need " << p->second << dendl;
dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire waiting" << dendl;
mds->mdlog->flush();
} else {
- assert(g_conf()->mds_kill_journal_expire_at != 5);
+ ceph_assert(g_conf()->mds_kill_journal_expire_at != 5);
dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire success" << dendl;
}
}
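// The ceph_assert(g_conf()->mds_kill_journal_expire_at != N) checks
// threaded through try_to_expire are fault-injection kill points: the
// option defaults to 0 so they pass in normal operation, and a test sets
// it to N to crash the MDS at exactly stage N. A sketch of the mechanism
// (hypothetical config plumbing):
#include <cassert>

static int kill_journal_expire_at = 0; // 0 disables injection

static void try_to_expire_sketch() {
  assert(kill_journal_expire_at != 1); // kill point 1: before committing dirs
  // ... commit dirty dirfrags ...
  assert(kill_journal_expire_at != 2); // kill point 2: before open files
  // ... each numbered point lets a test abort at a precise stage ...
}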
!dir->state_test(CDir::STATE_AUXSUBTREE) &&
!diri->state_test(CInode::STATE_AMBIGUOUSAUTH)) {
dout(0) << "EMetaBlob::add_dir_context unexpected subtree " << *dir << dendl;
- assert(0);
+ ceph_assert(0);
}
dout(20) << "EMetaBlob::add_dir_context(" << dir << ") ambiguous or transient subtree " << dendl;
} else {
dout(20) << "EMetaBlob::add_dir_context final: " << parents << dendl;
for (list<CDentry*>::iterator p = parents.begin(); p != parents.end(); ++p) {
- assert((*p)->get_projected_linkage()->is_primary());
+ ceph_assert((*p)->get_projected_linkage()->is_primary());
add_dentry(*p, false);
}
}
{
dout(10) << "EMetaBlob.replay " << lump_map.size() << " dirlumps by " << client_name << dendl;
- assert(logseg);
- assert(g_conf()->mds_kill_journal_replay_at != 1);
+ ceph_assert(logseg);
+ ceph_assert(g_conf()->mds_kill_journal_replay_at != 1);
for (list<std::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
CInode *in = mds->mdcache->get_inode((*p)->inode.ino);
nnull += lump.nnull;
}
}
- assert(nnull <= 1);
+ ceph_assert(nnull <= 1);
}
// keep track of any inodes we unlink and don't relink elsewhere
CInode *diri = mds->mdcache->get_inode((*lp).ino);
if (!diri) {
if (MDS_INO_IS_MDSDIR(lp->ino)) {
- assert(MDS_INO_MDSDIR(mds->get_nodeid()) != lp->ino);
+ ceph_assert(MDS_INO_MDSDIR(mds->get_nodeid()) != lp->ino);
diri = mds->mdcache->create_system_inode(lp->ino, S_IFDIR|0755);
diri->state_clear(CInode::STATE_AUTH);
dout(10) << "EMetaBlob.replay created base " << *diri << dendl;
if (p->is_dirty()) dn->_mark_dirty(logseg);
dout(10) << "EMetaBlob.replay for [" << p->dnfirst << "," << p->dnlast << "] had " << *dn << dendl;
dn->first = p->dnfirst;
- assert(dn->last == p->dnlast);
+ ceph_assert(dn->last == p->dnlast);
}
if (lump.is_importing())
dn->state_set(CDentry::STATE_AUTH);
} else {
dout(10) << "EMetaBlob.replay for [" << p->dnfirst << "," << p->dnlast << "] had " << *in << dendl;
}
- assert(in->first == p->dnfirst ||
+ ceph_assert(in->first == p->dnfirst ||
(in->is_multiversion() && in->first > p->dnfirst));
}
if (p->is_dirty())
in->state_set(CInode::STATE_AUTH);
else
in->state_clear(CInode::STATE_AUTH);
- assert(g_conf()->mds_kill_journal_replay_at != 2);
+ ceph_assert(g_conf()->mds_kill_journal_replay_at != 2);
}
// remote dentries
if (p->dirty) dn->_mark_dirty(logseg);
dout(10) << "EMetaBlob.replay for [" << p->dnfirst << "," << p->dnlast << "] had " << *dn << dendl;
dn->first = p->dnfirst;
- assert(dn->last == p->dnlast);
+ ceph_assert(dn->last == p->dnlast);
}
if (lump.is_importing())
dn->state_set(CDentry::STATE_AUTH);
dn->set_version(p->dnv);
if (p->dirty) dn->_mark_dirty(logseg);
dout(10) << "EMetaBlob.replay had " << *dn << dendl;
- assert(dn->last == p->dnlast);
+ ceph_assert(dn->last == p->dnlast);
}
olddir = dir;
if (lump.is_importing())
}
}
- assert(g_conf()->mds_kill_journal_replay_at != 3);
+ ceph_assert(g_conf()->mds_kill_journal_replay_at != 3);
if (renamed_dirino) {
if (renamed_diri) {
- assert(unlinked.count(renamed_diri));
- assert(linked.count(renamed_diri));
+ ceph_assert(unlinked.count(renamed_diri));
+ ceph_assert(linked.count(renamed_diri));
olddir = unlinked[renamed_diri];
} else {
// we imported a diri we haven't seen before
renamed_diri = mds->mdcache->get_inode(renamed_dirino);
- assert(renamed_diri); // it was in the metablob
+ ceph_assert(renamed_diri); // it was in the metablob
}
if (olddir) {
if (olddir->authority() != CDIR_AUTH_UNDEF &&
renamed_diri->authority() == CDIR_AUTH_UNDEF) {
- assert(slaveup); // auth to non-auth, must be slave prepare
+ ceph_assert(slaveup); // auth to non-auth, must be slave prepare
list<frag_t> leaves;
renamed_diri->dirfragtree.get_leaves(leaves);
for (list<frag_t>::iterator p = leaves.begin(); p != leaves.end(); ++p) {
CDir *dir = renamed_diri->get_dirfrag(*p);
- assert(dir);
+ ceph_assert(dir);
if (dir->get_dir_auth() == CDIR_AUTH_UNDEF)
// preserve subtree bound until slave commit
slaveup->olddirs.insert(dir->inode);
if (dir) {
// we already had the inode before, and we already adjusted this subtree accordingly.
dout(10) << " already had+adjusted rename import bound " << *dir << dendl;
- assert(olddir);
+ ceph_assert(olddir);
continue;
}
dir = renamed_diri->get_or_open_dirfrag(mds->mdcache, *p);
for (map<CInode*, CDir*>::iterator p = unlinked.begin(); p != unlinked.end(); ++p) {
if (!linked.count(p->first))
continue;
- assert(p->first->is_dir());
+ ceph_assert(p->first->is_dir());
mds->mdcache->adjust_subtree_after_rename(p->first, p->second, false);
}
}
// opened ino?
if (opened_ino) {
CInode *in = mds->mdcache->get_inode(opened_ino);
- assert(in);
+ ceph_assert(in);
dout(10) << "EMetaBlob.replay noting opened inode " << *in << dendl;
logseg->open_files.push_back(&in->item_open_file);
}
mds->inotable->force_replay_version(inotablev);
}
- assert(inotablev == mds->inotable->get_version());
+ ceph_assert(inotablev == mds->inotable->get_version());
}
}
if (sessionmapv) {
if (next != i)
mds->clog->warn() << " replayed op " << client_reqs << " used ino " << i
<< " but session next is " << next;
- assert(i == used_preallocated_ino);
+ ceph_assert(i == used_preallocated_ino);
session->info.used_inos.clear();
}
mds->sessionmap.replay_dirty_session(session);
if (!preallocated_inos.empty())
mds->sessionmap.replay_advance_version();
}
- assert(sessionmapv == mds->sessionmap.get_version());
+ ceph_assert(sessionmapv == mds->sessionmap.get_version());
} else {
mds->clog->error() << "journal replay sessionmap v " << sessionmapv
<< " -(1|2) > table " << mds->sessionmap.get_version();
- assert(g_conf()->mds_wipe_sessions);
+ ceph_assert(g_conf()->mds_wipe_sessions);
mds->sessionmap.wipe();
mds->sessionmap.set_version(sessionmapv);
}
p != truncate_start.end();
++p) {
CInode *in = mds->mdcache->get_inode(*p);
- assert(in);
+ ceph_assert(in);
mds->mdcache->add_recovered_truncate(in, logseg);
}
for (map<inodeno_t,uint64_t>::iterator p = truncate_finish.begin();
LogSegment *ls = mds->mdlog->get_segment(p->second);
if (ls) {
CInode *in = mds->mdcache->get_inode(p->first);
- assert(in);
+ ceph_assert(in);
mds->mdcache->remove_recovered_truncate(in, ls);
}
}
mds->mdcache->remove_inode(in);
if (parent) {
dout(10) << "EMetaBlob.replay unlinked from dentry " << *parent << dendl;
- assert(parent->get_linkage()->is_null());
+ ceph_assert(parent->get_linkage()->is_null());
}
} else {
dout(10) << "EMetaBlob.replay destroyed " << *p << ", not in cache" << dendl;
dout(10) << "EMetaBlob.replay request " << p->first << " trim_to " << p->second << dendl;
inodeno_t created = allocated_ino ? allocated_ino : used_preallocated_ino;
// if we allocated an inode, there should be exactly one client request id.
- assert(created == inodeno_t() || client_reqs.size() == 1);
+ ceph_assert(created == inodeno_t() || client_reqs.size() == 1);
Session *session = mds->sessionmap.get_session(p->first.name);
if (session) {
// update segment
update_segment(logseg);
- assert(g_conf()->mds_kill_journal_replay_at != 4);
+ ceph_assert(g_conf()->mds_kill_journal_replay_at != 4);
}
// -----------------------
} else {
mds->sessionmap.replay_advance_version();
}
- assert(mds->sessionmap.get_version() == cmapv);
+ ceph_assert(mds->sessionmap.get_version() == cmapv);
}
if (inos.size() && inotablev) {
} else {
dout(10) << "ESession.replay inotable " << mds->inotable->get_version()
<< " < " << inotablev << " " << (open ? "add":"remove") << dendl;
- assert(!open); // for now
+ ceph_assert(!open); // for now
mds->inotable->replay_release_ids(inos);
- assert(mds->inotable->get_version() == inotablev);
+ ceph_assert(mds->inotable->get_version() == inotablev);
}
}
dout(10) << "ESessions.replay sessionmap " << mds->sessionmap.get_version()
<< " < " << cmapv << dendl;
mds->sessionmap.replay_open_sessions(client_map, client_metadata_map);
- assert(mds->sessionmap.get_version() == cmapv);
+ ceph_assert(mds->sessionmap.get_version() == cmapv);
}
update_segment();
}
dout(10) << " ETableServer.replay " << get_mdstable_name(table)
<< " " << get_mdstableserver_opname(op)
<< " event " << version << " - 1 == table " << server->get_version() << dendl;
- assert(version-1 == server->get_version());
+ ceph_assert(version-1 == server->get_version());
switch (op) {
case TABLESERVER_OP_PREPARE: {
ceph_abort(); // Should be unreachable because damaged() calls respawn()
}
- assert(version == server->get_version());
+ ceph_assert(version == server->get_version());
update_segment();
}
if (!client)
return;
- assert(op == TABLESERVER_OP_ACK);
+ ceph_assert(op == TABLESERVER_OP_ACK);
client->got_journaled_ack(tid);
}
dout(10) << " ESnap.replay event " << version
<< " - 1 == table " << mds->snaptable->get_version() << dendl;
- assert(version-1 == mds->snaptable->get_version());
+ ceph_assert(version-1 == mds->snaptable->get_version());
if (create) {
version_t v;
snapid_t s = mds->snaptable->create(snap.dirino, snap.name, snap.stamp, &v);
- assert(s == snap.snapid);
+ ceph_assert(s == snap.snapid);
} else {
mds->snaptable->remove(snap.snapid);
}
- assert(version == mds->snaptable->get_version());
+ ceph_assert(version == mds->snaptable->get_version());
}
*/
decode(cmm, blp);
mds->sessionmap.replay_open_sessions(cm, cmm);
- assert(mds->sessionmap.get_version() == cmapv);
+ ceph_assert(mds->sessionmap.get_version() == cmapv);
}
}
update_segment();
CInode *in = mds->mdcache->get_inode(ino);
if (!in) {
dout(0) << "EOpen.replay ino " << ino << " not in metablob" << dendl;
- assert(in);
+ ceph_assert(in);
}
_segment->open_files.push_back(&in->item_open_file);
}
CInode *in = mds->mdcache->get_inode(vino);
if (!in) {
dout(0) << "EOpen.replay ino " << vino << " not in metablob" << dendl;
- assert(in);
+ ceph_assert(in);
}
_segment->open_files.push_back(&in->item_open_file);
}
dout(0) << "journal subtrees: " << subtrees << dendl;
dout(0) << "journal ambig_subtrees: " << ambiguous_subtrees << dendl;
mds->mdcache->show_subtrees();
- assert(!g_conf()->mds_debug_subtrees || errors == 0);
+ ceph_assert(!g_conf()->mds_debug_subtrees || errors == 0);
}
return;
}
p != subtrees.end();
++p) {
CDir *dir = mds->mdcache->get_dirfrag(p->first);
- assert(dir);
+ ceph_assert(dir);
if (ambiguous_subtrees.count(p->first)) {
// ambiguous!
mds->mdcache->add_ambiguous_import(p->first, p->second);
metablob.replay(mds, _segment);
CDir *dir = mds->mdcache->get_dirfrag(base);
- assert(dir);
+ ceph_assert(dir);
set<CDir*> realbounds;
for (set<dirfrag_t>::iterator p = bounds.begin();
p != bounds.end();
++p) {
CDir *bd = mds->mdcache->get_dirfrag(*p);
- assert(bd);
+ ceph_assert(bd);
realbounds.insert(bd);
}
// set auth partially to us so we don't trim it
CDir *dir = mds->mdcache->get_dirfrag(base);
- assert(dir);
+ ceph_assert(dir);
set<CDir*> realbounds;
for (vector<dirfrag_t>::iterator p = bounds.begin();
p != bounds.end();
++p) {
CDir *bd = mds->mdcache->get_dirfrag(*p);
- assert(bd);
+ ceph_assert(bd);
if (!bd->is_subtree_root())
bd->state_clear(CDir::STATE_AUTH);
realbounds.insert(bd);
mds->mdcache->finish_ambiguous_import(base);
} else {
CDir *dir = mds->mdcache->get_dirfrag(base);
- assert(dir);
+ ceph_assert(dir);
vector<dirfrag_t> bounds;
mds->mdcache->get_ambiguous_import_bounds(base, bounds);
mds->mdcache->adjust_bounded_subtree_auth(dir, bounds, CDIR_AUTH_UNDEF);
if (bit > last)
last = bit;
else
- assert(bit == last);
+ ceph_assert(bit == last);
_vec[bit / bits_per_block] |= (block_type)1 << (bit % bits_per_block);
}
}
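// The divide/modulo pair above is standard packed-bitmap addressing:
// bit / bits_per_block selects the word and bit % bits_per_block the
// position within it. A self-contained sketch:
#include <cstdint>
#include <vector>

struct ToyBitmap {
  static constexpr unsigned bits_per_block = 64;
  std::vector<uint64_t> vec;
  void set(unsigned bit) {
    unsigned word = bit / bits_per_block;
    if (word >= vec.size())
      vec.resize(word + 1, 0);
    vec[word] |= uint64_t(1) << (bit % bits_per_block);
  }
  bool test(unsigned bit) const {
    unsigned word = bit / bits_per_block;
    return word < vec.size() && ((vec[word] >> (bit % bits_per_block)) & 1);
  }
};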
bool is_truncating() const { return (truncate_pending > 0); }
void truncate(uint64_t old_size, uint64_t new_size) {
- assert(new_size < old_size);
+ ceph_assert(new_size < old_size);
if (old_size > max_size_ever)
max_size_ever = old_size;
truncate_from = old_size;
template<template<typename> class Allocator>
int inode_t<Allocator>::compare(const inode_t<Allocator> &other, bool *divergent) const
{
- assert(ino == other.ino);
+ ceph_assert(ino == other.ino);
*divergent = false;
if (version == other.version) {
if (rdev != other.rdev ||
*divergent = !older_is_consistent(other);
return 1;
} else {
- assert(version < other.version);
+ ceph_assert(version < other.version);
*divergent = !other.older_is_consistent(*this);
return -1;
}
}
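// compare() returns a three-way result ordered by version, and *divergent
// reports replicas whose histories conflict rather than merely lag. A
// hypothetical caller (sketch; the real inode_t is templated on an
// allocator):
#include <iostream>

template <typename Inode>
void reconcile(Inode &mine, const Inode &theirs) {
  bool divergent = false;
  int c = mine.compare(theirs, &divergent);
  if (divergent)
    std::cerr << "replica histories diverge; flag for repair\n";
  else if (c < 0)
    mine = theirs; // other side is strictly newer; adopt it
}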
static void decode_helper(std::string_view key, string& nm, snapid_t& sn) {
size_t i = key.find_last_of('_');
- assert(i != string::npos);
+ ceph_assert(i != string::npos);
if (key.compare(i+1, std::string_view::npos, "head") == 0) {
// name_head
sn = CEPH_NOSNAP;