From 37760083de29ee1a3cdb2830f4e9ee687f9f9f02 Mon Sep 17 00:00:00 2001
From: "Adam C. Emerson"
Date: Thu, 23 Aug 2018 11:25:33 -0400
Subject: [PATCH] mds: Use ceph_assert for asserts.

Signed-off-by: Adam C. Emerson
---
 src/mds/Beacon.cc          |   8 +-
 src/mds/CDentry.cc         |  42 +--
 src/mds/CDentry.h          |   2 +-
 src/mds/CDir.cc            | 254 ++++++-------
 src/mds/CDir.h             |   4 +-
 src/mds/CInode.cc          | 182 ++++-----
 src/mds/CInode.h           |  30 +-
 src/mds/Capability.h       |   2 +-
 src/mds/DamageTable.cc     |   2 +-
 src/mds/DamageTable.h      |   2 +-
 src/mds/FSMap.cc           |  92 ++---
 src/mds/FSMap.h            |   4 +-
 src/mds/InoTable.cc        |  10 +-
 src/mds/JournalPointer.cc  |   8 +-
 src/mds/LocalLock.h        |   2 +-
 src/mds/Locker.cc          | 198 +++++-----
 src/mds/LogEvent.cc        |   2 +-
 src/mds/LogSegment.h       |   2 +-
 src/mds/MDBalancer.cc      |  16 +-
 src/mds/MDCache.cc         | 732 ++++++++++++++++++-------------------
 src/mds/MDCache.h          |  16 +-
 src/mds/MDLog.cc           |  96 ++---
 src/mds/MDLog.h            |   6 +-
 src/mds/MDSCacheObject.h   |  10 +-
 src/mds/MDSContext.cc      |   6 +-
 src/mds/MDSContext.h       |   6 +-
 src/mds/MDSDaemon.cc       |  76 ++--
 src/mds/MDSMap.cc          |   4 +-
 src/mds/MDSMap.h           |   8 +-
 src/mds/MDSRank.cc         |  82 ++---
 src/mds/MDSRank.h          |   2 +-
 src/mds/MDSTable.cc        |  12 +-
 src/mds/MDSTableClient.cc  |  26 +-
 src/mds/MDSTableServer.cc  |  24 +-
 src/mds/Mantle.cc          |   2 +-
 src/mds/Migrator.cc        | 292 +++++++--------
 src/mds/Migrator.h         |  16 +-
 src/mds/Mutation.cc        |  24 +-
 src/mds/Mutation.h         |  14 +-
 src/mds/OpenFileTable.cc   | 118 +++---
 src/mds/OpenFileTable.h    |   4 +-
 src/mds/PurgeQueue.cc      |  34 +-
 src/mds/RecoveryQueue.cc   |  10 +-
 src/mds/ScatterLock.h      |   6 +-
 src/mds/ScrubHeader.h      |   2 +-
 src/mds/ScrubStack.cc      |  12 +-
 src/mds/ScrubStack.h       |   4 +-
 src/mds/Server.cc          | 396 ++++++++++----------
 src/mds/SessionMap.cc      |  20 +-
 src/mds/SessionMap.h       |  18 +-
 src/mds/SimpleLock.cc      |   2 +-
 src/mds/SimpleLock.h       |  22 +-
 src/mds/SnapClient.cc      |  14 +-
 src/mds/SnapClient.h       |   2 +-
 src/mds/SnapRealm.cc       |  42 +--
 src/mds/SnapServer.cc      |   4 +-
 src/mds/SnapServer.h       |   4 +-
 src/mds/StrayManager.cc    |  66 ++--
 src/mds/events/EMetaBlob.h |   4 +-
 src/mds/journal.cc         | 132 +++----
 src/mds/mdstypes.cc        |   2 +-
 src/mds/mdstypes.h         |   8 +-
 62 files changed, 1621 insertions(+), 1621 deletions(-)
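Note on semantics: unlike the C library assert(), which the preprocessor removes
entirely when NDEBUG is defined, ceph_assert() always evaluates its expression
and fails through Ceph's own abort/backtrace path, so these checks stay active
in release builds. A minimal sketch of the idea, assuming the general shape of
include/ceph_assert.h (the real macro additionally records assertion context
for crash reporting; the exact form shown here is illustrative, not verbatim):

    // Illustrative sketch only -- see include/ceph_assert.h for the
    // real definition this patch relies on.
    #define ceph_assert(expr)                                       \
      do {                                                          \
        if (!(expr))                                                \
          ceph::__ceph_assert_fail(#expr, __FILE__, __LINE__,       \
                                   __CEPH_ASSERT_FUNCTION);         \
      } while (false)

Because the conversion below is purely mechanical (assert -> ceph_assert, one
call per line), each hunk should only change behavior in builds where a plain
assert() would have been compiled out.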
diff --git a/src/mds/Beacon.cc b/src/mds/Beacon.cc
index 45763273b7ed3..94c5befba020d 100644
--- a/src/mds/Beacon.cc
+++ b/src/mds/Beacon.cc
@@ -96,7 +96,7 @@ bool Beacon::ms_can_fast_dispatch2(const Message::const_ref& m) const
 void Beacon::ms_fast_dispatch2(const Message::ref& m)
 {
   bool handled = ms_dispatch2(m);
-  assert(handled);
+  ceph_assert(handled);
 }

 bool Beacon::ms_dispatch2(const Message::ref& m)
@@ -196,7 +196,7 @@ void Beacon::_send()

   seq_stamp[last_seq] = now;

-  assert(want_state != MDSMap::STATE_NULL);
+  ceph_assert(want_state != MDSMap::STATE_NULL);

   auto beacon = MMDSBeacon::create(
     monc->get_fsid(), mds_gid_t(monc->get_global_id()),
@@ -235,7 +235,7 @@ void Beacon::notify_mdsmap(const MDSMap &mdsmap)

 void Beacon::_notify_mdsmap(const MDSMap &mdsmap)
 {
-  assert(mdsmap.get_epoch() >= epoch);
+  ceph_assert(mdsmap.get_epoch() >= epoch);

   if (mdsmap.get_epoch() != epoch) {
     epoch = mdsmap.get_epoch();
@@ -302,7 +302,7 @@ void Beacon::notify_health(MDSRank const *mds)
   }

   // I'm going to touch this MDS, so it must be locked
-  assert(mds->mds_lock.is_locked_by_me());
+  ceph_assert(mds->mds_lock.is_locked_by_me());

   health.metrics.clear();

diff --git a/src/mds/CDentry.cc b/src/mds/CDentry.cc
index 3dfb070469b2c..93e07e04c269f 100644
--- a/src/mds/CDentry.cc
+++ b/src/mds/CDentry.cc
@@ -170,7 +170,7 @@ void CDentry::_mark_dirty(LogSegment *ls)
     get(PIN_DIRTY);
     dir->inc_num_dirty();
     dir->dirty_dentries.push_back(&item_dir_dirty);
-    assert(ls);
+    ceph_assert(ls);
   }
   if (ls)
     ls->dirty_dentries.push_back(&item_dirty);
@@ -181,7 +181,7 @@ void CDentry::mark_dirty(version_t pv, LogSegment *ls)
   dout(10) << __func__ << " " << *this << dendl;

   // i now live in this new dir version
-  assert(pv <= projected_version);
+  ceph_assert(pv <= projected_version);
   version = pv;
   _mark_dirty(ls);

@@ -193,7 +193,7 @@ void CDentry::mark_dirty(version_t pv, LogSegment *ls)
 void CDentry::mark_clean()
 {
   dout(10) << __func__ << " " << *this << dendl;
-  assert(is_dirty());
+  ceph_assert(is_dirty());

   // not always true for recalc_auth_bits during resolve finish
   //assert(dir->get_version() == 0 || version <= dir->get_version()); // hmm?
@@ -226,7 +226,7 @@ void CDentry::make_path_string(string& s, bool projected) const

 void CDentry::make_path(filepath& fp, bool projected) const
 {
-  assert(dir);
+  ceph_assert(dir);
   dir->inode->make_path(fp, projected);
   fp.push_dentry(get_name());
 }
@@ -238,8 +238,8 @@ void CDentry::make_path(filepath& fp, bool projected) const
  */
 void CDentry::link_remote(CDentry::linkage_t *dnl, CInode *in)
 {
-  assert(dnl->is_remote());
-  assert(in->ino() == dnl->get_remote_ino());
+  ceph_assert(dnl->is_remote());
+  ceph_assert(in->ino() == dnl->get_remote_ino());
   dnl->inode = in;

   if (dnl == &linkage)
@@ -248,8 +248,8 @@ void CDentry::link_remote(CDentry::linkage_t *dnl, CInode *in)

 void CDentry::unlink_remote(CDentry::linkage_t *dnl)
 {
-  assert(dnl->is_remote());
-  assert(dnl->inode);
+  ceph_assert(dnl->is_remote());
+  ceph_assert(dnl->inode);

   if (dnl == &linkage)
     dnl->inode->remove_remote_parent(this);
@@ -291,7 +291,7 @@ void CDentry::push_projected_linkage(CInode *inode)

 CDentry::linkage_t *CDentry::pop_projected_linkage()
 {
-  assert(projected.size());
+  ceph_assert(projected.size());

   linkage_t& n = projected.front();

@@ -312,9 +312,9 @@ CDentry::linkage_t *CDentry::pop_projected_linkage()
       n.inode->pop_projected_parent();
   }

-  assert(n.inode == linkage.inode);
-  assert(n.remote_ino == linkage.remote_ino);
-  assert(n.remote_d_type == linkage.remote_d_type);
+  ceph_assert(n.inode == linkage.inode);
+  ceph_assert(n.remote_ino == linkage.remote_ino);
+  ceph_assert(n.remote_d_type == linkage.remote_d_type);

   projected.pop_front();

@@ -328,7 +328,7 @@ CDentry::linkage_t *CDentry::pop_projected_linkage()

 int CDentry::get_num_dir_auth_pins() const
 {
-  assert(!is_projected());
+  ceph_assert(!is_projected());
   if (get_linkage()->is_primary())
     return auth_pins + get_linkage()->get_inode()->get_num_auth_pins();
   return auth_pins;
@@ -336,7 +336,7 @@ int CDentry::get_num_dir_auth_pins() const

 bool CDentry::can_auth_pin(int *err_ret) const
 {
-  assert(dir);
+  ceph_assert(dir);
   return dir->can_auth_pin(err_ret);
 }

@@ -362,7 +362,7 @@ void CDentry::auth_unpin(void *by)
   auth_pins--;

 #ifdef MDS_AUTHPIN_SET
-  assert(auth_pin_set.count(by));
+  ceph_assert(auth_pin_set.count(by));
   auth_pin_set.erase(auth_pin_set.find(by));
 #endif

@@ -372,7 +372,7 @@ void CDentry::auth_unpin(void *by)
   dout(10) << "auth_unpin by " << by << " on " << *this
           << " now " << auth_pins << "+" << nested_auth_pins << dendl;
-  assert(auth_pins >= 0);
+  ceph_assert(auth_pins >= 0);

   dir->adjust_nested_auth_pins(-1, -1, by);
 }
@@ -385,7 +385,7 @@ void CDentry::adjust_nested_auth_pins(int adjustment, int diradj, void *by)
           << ", change " << adjustment << " yields "
           << auth_pins << "+" << nested_auth_pins << dendl;
-  assert(nested_auth_pins >= 0);
+  ceph_assert(nested_auth_pins >= 0);

   dir->adjust_nested_auth_pins(adjustment, diradj, by);
 }

@@ -466,13 +466,13 @@ void CDentry::decode_lock_state(int type, const bufferlist& bl)
     if (!is_auth() && newfirst != first) {
       dout(10) << __func__ << " first " << first << " -> " << newfirst << dendl;
-      assert(newfirst > first);
+      ceph_assert(newfirst > first);
       first = newfirst;
     }

     if (p.end()) {
       // null
-      assert(linkage.is_null());
+      ceph_assert(linkage.is_null());
       return;
     }

@@ -521,7 +521,7 @@ ClientLease *CDentry::add_client_lease(client_t c, Session *session)

 void CDentry::remove_client_lease(ClientLease *l, Locker *locker)
 {
-  assert(l->parent == this);
+  ceph_assert(l->parent == this);

   bool gather = false;

@@ -562,7 +562,7 @@ void CDentry::_put()

 void CDentry::dump(Formatter *f) const
 {
-  assert(f != NULL);
+  ceph_assert(f != NULL);

   filepath path;
   make_path(path);
diff --git a/src/mds/CDentry.h b/src/mds/CDentry.h
index 63bc3c2c40b5d..0958c8044ac84 100644
--- a/src/mds/CDentry.h
+++ b/src/mds/CDentry.h
@@ -303,7 +303,7 @@ public:

   // -- locking --
   SimpleLock* get_lock(int type) override {
-    assert(type == CEPH_LOCK_DN);
+    ceph_assert(type == CEPH_LOCK_DN);
     return &lock;
   }
   void set_object_info(MDSCacheObjectInfo &info) override;
diff --git a/src/mds/CDir.cc b/src/mds/CDir.cc
index 8947a49932ef8..8fe2fbe483614 100644
--- a/src/mds/CDir.cc
+++ b/src/mds/CDir.cc
@@ -54,7 +54,7 @@ protected:

 public:
   explicit CDirContext(CDir *d) : dir(d) {
-    assert(dir != NULL);
+    ceph_assert(dir != NULL);
   }
 };

@@ -67,7 +67,7 @@ protected:

 public:
   explicit CDirIOContext(CDir *d) : dir(d) {
-    assert(dir != NULL);
+    ceph_assert(dir != NULL);
   }
 };

@@ -210,7 +210,7 @@ CDir::CDir(CInode *in, frag_t fg, MDCache *mdcache, bool auth) :
   dir_auth(CDIR_AUTH_DEFAULT)
 {
   // auth
-  assert(in->is_dir());
+  ceph_assert(in->is_dir());
   if (auth)
     state_set(STATE_AUTH);
 }
@@ -226,7 +226,7 @@ bool CDir::check_rstats(bool scrub)
   dout(25) << "check_rstats on " << this << dendl;
   if (!is_complete() || !is_auth() || is_frozen()) {
-    assert(!scrub);
+    ceph_assert(!scrub);
     dout(10) << "check_rstats bailing out -- incomplete or non-auth or frozen dir!"
             << dendl;
     return true;
   }
@@ -285,11 +285,11 @@ bool CDir::check_rstats(bool scrub)
       }
     }

-    assert(frag_info.nfiles == fnode.fragstat.nfiles);
-    assert(frag_info.nsubdirs == fnode.fragstat.nsubdirs);
-    assert(nest_info.rbytes == fnode.rstat.rbytes);
-    assert(nest_info.rfiles == fnode.rstat.rfiles);
-    assert(nest_info.rsubdirs == fnode.rstat.rsubdirs);
+    ceph_assert(frag_info.nfiles == fnode.fragstat.nfiles);
+    ceph_assert(frag_info.nsubdirs == fnode.fragstat.nsubdirs);
+    ceph_assert(nest_info.rbytes == fnode.rstat.rbytes);
+    ceph_assert(nest_info.rfiles == fnode.rstat.rfiles);
+    ceph_assert(nest_info.rsubdirs == fnode.rstat.rsubdirs);
   }
 }
 dout(10) << "check_rstats complete on " << this << dendl;
@@ -305,7 +305,7 @@ void CDir::adjust_num_inodes_with_caps(int d)
     cache->open_file_table.remove_dirfrag(this);

   num_inodes_with_caps += d;
-  assert(num_inodes_with_caps >= 0);
+  ceph_assert(num_inodes_with_caps >= 0);
 }

 CDentry *CDir::lookup(std::string_view name, snapid_t snap)
@@ -340,7 +340,7 @@ CDentry* CDir::add_null_dentry(std::string_view dname,
                               snapid_t first, snapid_t last)
 {
   // foreign
-  assert(lookup_exact_snap(dname, last) == 0);
+  ceph_assert(lookup_exact_snap(dname, last) == 0);

   // create dentry
   CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), first, last);
@@ -354,7 +354,7 @@ CDentry* CDir::add_null_dentry(std::string_view dname,
   dn->version = get_projected_version();

   // add to dir
-  assert(items.count(dn->key()) == 0);
+  ceph_assert(items.count(dn->key()) == 0);
   //assert(null_items.count(dn->get_name()) == 0);

   items[dn->key()] = dn;
@@ -374,7 +374,7 @@ CDentry* CDir::add_null_dentry(std::string_view dname,
   if (get_num_any() == 1)
     get(PIN_CHILD);

-  assert(get_num_any() == items.size());
+  ceph_assert(get_num_any() == items.size());
   return dn;
 }

@@ -383,7 +383,7 @@ CDentry* CDir::add_primary_dentry(std::string_view dname, CInode *in,
                                  snapid_t first, snapid_t last)
 {
   // primary
-  assert(lookup_exact_snap(dname, last) == 0);
+  ceph_assert(lookup_exact_snap(dname, last) == 0);

   // create dentry
   CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), first, last);
@@ -400,7 +400,7 @@ CDentry* CDir::add_primary_dentry(std::string_view dname, CInode *in,
   dn->version = get_projected_version();

   // add to dir
-  assert(items.count(dn->key()) == 0);
+  ceph_assert(items.count(dn->key()) == 0);
   //assert(null_items.count(dn->get_name()) == 0);

   items[dn->key()] = dn;
@@ -424,7 +424,7 @@ CDentry* CDir::add_primary_dentry(std::string_view dname, CInode *in,
   // pin?
   if (get_num_any() == 1)
     get(PIN_CHILD);

-  assert(get_num_any() == items.size());
+  ceph_assert(get_num_any() == items.size());
   return dn;
 }

@@ -432,7 +432,7 @@ CDentry* CDir::add_remote_dentry(std::string_view dname, inodeno_t ino, unsigned
                                 snapid_t first, snapid_t last)
 {
   // foreign
-  assert(lookup_exact_snap(dname, last) == 0);
+  ceph_assert(lookup_exact_snap(dname, last) == 0);

   // create dentry
   CDentry* dn = new CDentry(dname, inode->hash_dentry_name(dname), ino, d_type, first, last);
@@ -444,7 +444,7 @@ CDentry* CDir::add_remote_dentry(std::string_view dname, inodeno_t ino, unsigned
   dn->version = get_projected_version();

   // add to dir
-  assert(items.count(dn->key()) == 0);
+  ceph_assert(items.count(dn->key()) == 0);
   //assert(null_items.count(dn->get_name()) == 0);

   items[dn->key()] = dn;
@@ -464,7 +464,7 @@ CDentry* CDir::add_remote_dentry(std::string_view dname, inodeno_t ino, unsigned
   if (get_num_any() == 1)
     get(PIN_CHILD);

-  assert(get_num_any() == items.size());
+  ceph_assert(get_num_any() == items.size());
   return dn;
 }

@@ -475,7 +475,7 @@ void CDir::remove_dentry(CDentry *dn)
   dout(12) << __func__ << " " << *dn << dendl;

   // there should be no client leases at this point!
-  assert(dn->client_lease_map.empty());
+  ceph_assert(dn->client_lease_map.empty());

   if (state_test(CDir::STATE_DNPINNEDFRAG)) {
     dn->put(CDentry::PIN_FRAGMENTING);
@@ -499,7 +499,7 @@ void CDir::remove_dentry(CDentry *dn)
     unlink_inode_work(dn);

   // remove from list
-  assert(items.count(dn->key()) == 1);
+  ceph_assert(items.count(dn->key()) == 1);
   items.erase(dn->key());

   // clean?
@@ -515,7 +515,7 @@ void CDir::remove_dentry(CDentry *dn)
   // unpin?
   if (get_num_any() == 0)
     put(PIN_CHILD);
-  assert(get_num_any() == items.size());
+  ceph_assert(get_num_any() == items.size());
 }

 void CDir::link_remote_inode(CDentry *dn, CInode *in)
@@ -526,7 +526,7 @@ void CDir::link_remote_inode(CDentry *dn, CInode *in)
 void CDir::link_remote_inode(CDentry *dn, inodeno_t ino, unsigned char d_type)
 {
   dout(12) << __func__ << " " << *dn << " remote " << ino << dendl;
-  assert(dn->get_linkage()->is_null());
+  ceph_assert(dn->get_linkage()->is_null());

   dn->get_linkage()->set_remote(ino, d_type);

@@ -543,13 +543,13 @@ void CDir::link_remote_inode(CDentry *dn, inodeno_t ino, unsigned char d_type)
     num_snap_items++;
     num_snap_null--;
   }
-  assert(get_num_any() == items.size());
+  ceph_assert(get_num_any() == items.size());
 }

 void CDir::link_primary_inode(CDentry *dn, CInode *in)
 {
   dout(12) << __func__ << " " << *dn << " " << *in << dendl;
-  assert(dn->get_linkage()->is_null());
+  ceph_assert(dn->get_linkage()->is_null());

   dn->get_linkage()->inode = in;

@@ -570,12 +570,12 @@ void CDir::link_primary_inode(CDentry *dn, CInode *in)
     num_snap_null--;
   }

-  assert(get_num_any() == items.size());
+  ceph_assert(get_num_any() == items.size());
 }

 void CDir::link_inode_work( CDentry *dn, CInode *in)
 {
-  assert(dn->get_linkage()->get_inode() == in);
+  ceph_assert(dn->get_linkage()->get_inode() == in);
   in->set_primary_parent(dn);

   // set inode version
@@ -624,14 +624,14 @@ void CDir::unlink_inode(CDentry *dn, bool adjust_lru)
     num_snap_items--;
     num_snap_null++;
   }
-  assert(get_num_any() == items.size());
+  ceph_assert(get_num_any() == items.size());
 }

 void CDir::try_remove_unlinked_dn(CDentry *dn)
 {
-  assert(dn->dir == this);
-  assert(dn->get_linkage()->is_null());
+  ceph_assert(dn->dir == this);
+  ceph_assert(dn->get_linkage()->is_null());

   // no pins (besides dirty)?
   if (dn->get_num_ref() != dn->is_dirty())
@@ -681,13 +681,13 @@ void CDir::unlink_inode_work( CDentry *dn )
     in->item_pop_lru.remove_myself();
     dn->get_linkage()->inode = 0;
   } else {
-    assert(!dn->get_linkage()->is_null());
+    ceph_assert(!dn->get_linkage()->is_null());
   }
 }

 void CDir::add_to_bloom(CDentry *dn)
 {
-  assert(dn->last == CEPH_NOSNAP);
+  ceph_assert(dn->last == CEPH_NOSNAP);
   if (!bloom) {
     /* not create bloom filter for incomplete dir that was added by log replay */
     if (!is_complete())
@@ -725,9 +725,9 @@ void CDir::remove_null_dentries() {
     remove_dentry(dn);
   }

-  assert(num_snap_null == 0);
-  assert(num_head_null == 0);
-  assert(get_num_any() == items.size());
+  ceph_assert(num_snap_null == 0);
+  ceph_assert(num_head_null == 0);
+  ceph_assert(get_num_any() == items.size());
 }

 /** remove dirty null dentries for deleted directory. the dirfrag will be
@@ -740,7 +740,7 @@ void CDir::remove_null_dentries() {
 void CDir::try_remove_dentries_for_stray()
 {
   dout(10) << __func__ << dendl;
-  assert(get_parent_dir()->inode->is_stray());
+  ceph_assert(get_parent_dir()->inode->is_stray());

   // clear dirty only when the directory was not snapshotted
   bool clear_dirty = !inode->snaprealm;
@@ -750,8 +750,8 @@ void CDir::try_remove_dentries_for_stray()
     CDentry *dn = p->second;
     ++p;
     if (dn->last == CEPH_NOSNAP) {
-      assert(!dn->is_projected());
-      assert(dn->get_linkage()->is_null());
+      ceph_assert(!dn->is_projected());
+      ceph_assert(dn->get_linkage()->is_null());
       if (clear_dirty && dn->is_dirty())
        dn->mark_clean();
       // It's OK to remove lease prematurely because we will never link
@@ -761,7 +761,7 @@ void CDir::try_remove_dentries_for_stray()
       if (dn->get_num_ref() == 0)
        remove_dentry(dn);
     } else {
-      assert(!dn->is_projected());
+      ceph_assert(!dn->is_projected());
       CDentry::linkage_t *dnl= dn->get_linkage();
       CInode *in = NULL;
       if (dnl->is_primary()) {
@@ -785,7 +785,7 @@ void CDir::try_remove_dentries_for_stray()

 bool CDir::try_trim_snap_dentry(CDentry *dn, const set<snapid_t>& snaps)
 {
-  assert(dn->last != CEPH_NOSNAP);
+  ceph_assert(dn->last != CEPH_NOSNAP);
   set<snapid_t>::const_iterator p = snaps.lower_bound(dn->first);
   CDentry::linkage_t *dnl= dn->get_linkage();
   CInode *in = 0;
@@ -892,7 +892,7 @@ void CDir::steal_dentry(CDentry *dn)
     // use the helpers here to maintain the auth_pin invariants on the dir inode
     int ap = dn->get_num_auth_pins() + dn->get_num_nested_auth_pins();
     int dap = dn->get_num_dir_auth_pins();
-    assert(dap <= ap);
+    ceph_assert(dap <= ap);
     adjust_nested_auth_pins(ap, dap, NULL);
     dn->dir->adjust_nested_auth_pins(-ap, -dap, NULL);
   }
@@ -940,14 +940,14 @@ void CDir::finish_old_fragment(MDSInternalContextBase::vec& waiters, bool replay
     take_waiting(WAIT_ANY_MASK, waiters);
     if (is_auth()) {
       auth_unpin(this);  // pinned in prepare_old_fragment
-      assert(is_frozen_dir());
+      ceph_assert(is_frozen_dir());
       unfreeze_dir();
     }
   }

-  assert(nested_auth_pins == 0);
-  assert(dir_auth_pins == 0);
-  assert(auth_pins == 0);
+  ceph_assert(nested_auth_pins == 0);
+  ceph_assert(dir_auth_pins == 0);
+  ceph_assert(auth_pins == 0);

   num_head_items = num_head_null = 0;
   num_snap_items = num_snap_null = 0;
@@ -968,7 +968,7 @@ void CDir::finish_old_fragment(MDSInternalContextBase::vec& waiters, bool replay
   if (auth_pins > 0)
     put(PIN_AUTHPIN);

-  assert(get_num_ref() == (state_test(STATE_STICKY) ? 1:0));
+  ceph_assert(get_num_ref() == (state_test(STATE_STICKY) ? 1:0));
 }

 void CDir::init_fragment_pins()
@@ -989,7 +989,7 @@ void CDir::split(int bits, list<CDir*>& subs, MDSInternalContextBase::vec& waite
 {
   dout(10) << "split by " << bits << " bits on " << *this << dendl;

-  assert(replay || is_complete() || !is_auth());
+  ceph_assert(replay || is_complete() || !is_auth());

   list<frag_t> frags;
   frag.split(bits, frags);
@@ -1094,7 +1094,7 @@ void CDir::merge(list<CDir*>& subs, MDSInternalContextBase::vec& waiters, bool r
   for (auto dir : subs) {
     if (dir->get_dir_auth() != CDIR_AUTH_DEFAULT &&
        dir->get_dir_auth() != new_auth) {
-      assert(new_auth == CDIR_AUTH_DEFAULT);
+      ceph_assert(new_auth == CDIR_AUTH_DEFAULT);
       new_auth = dir->get_dir_auth();
     }
   }
@@ -1112,7 +1112,7 @@ void CDir::merge(list<CDir*>& subs, MDSInternalContextBase::vec& waiters, bool r

   for (auto dir : subs) {
     dout(10) << " subfrag " << dir->get_frag() << " " << *dir << dendl;
-    assert(!dir->is_auth() || dir->is_complete() || replay);
+    ceph_assert(!dir->is_auth() || dir->is_complete() || replay);

     if (dir->fnode.accounted_rstat.version == rstat_version)
       rstatdiff.add_delta(dir->fnode.accounted_rstat, dir->fnode.rstat);
@@ -1206,7 +1206,7 @@ void CDir::assimilate_dirty_rstat_inodes()
   for (elist<CInode*>::iterator p = dirty_rstat_inodes.begin_use_current();
        !p.end(); ++p) {
     CInode *in = *p;
-    assert(in->is_auth());
+    ceph_assert(in->is_auth());
     if (in->is_frozen())
       continue;

@@ -1329,7 +1329,7 @@ void CDir::add_waiter(uint64_t tag, MDSInternalContextBase *c)
     }
   }

-  assert(!(tag & WAIT_CREATED) || state_test(STATE_CREATING));
+  ceph_assert(!(tag & WAIT_CREATED) || state_test(STATE_CREATING));

   MDSCacheObject::add_waiter(tag, c);
 }
@@ -1375,7 +1375,7 @@ void CDir::finish_waiting(uint64_t mask, int result)

 fnode_t *CDir::project_fnode()
 {
-  assert(get_version() != 0);
+  ceph_assert(get_version() != 0);
   auto &p = projected_fnode.emplace_back(*get_projected_fnode());

   if (scrub_infop && scrub_infop->last_scrub_dirty) {
@@ -1393,7 +1393,7 @@ fnode_t *CDir::project_fnode()

 void CDir::pop_and_dirty_projected_fnode(LogSegment *ls)
 {
-  assert(!projected_fnode.empty());
+  ceph_assert(!projected_fnode.empty());
   auto &front = projected_fnode.front();
   dout(15) << __func__ << " " << &front << " v" << front.version << dendl;
   fnode = front;
@@ -1413,8 +1413,8 @@ version_t CDir::pre_dirty(version_t min)

 void CDir::mark_dirty(version_t pv, LogSegment *ls)
 {
-  assert(get_version() < pv);
-  assert(pv <= projected_version);
+  ceph_assert(get_version() < pv);
+  ceph_assert(pv <= projected_version);
   fnode.version = pv;
   _mark_dirty(ls);
 }
@@ -1424,7 +1424,7 @@ void CDir::_mark_dirty(LogSegment *ls)
   if (!state_test(STATE_DIRTY)) {
     dout(10) << __func__ << " (was clean) " << *this << " version " << get_version() << dendl;
     _set_dirty_flag();
-    assert(ls);
+    ceph_assert(ls);
   } else {
     dout(10) << __func__ << " (already dirty) " << *this << " version " << get_version() << dendl;
   }
@@ -1502,8 +1502,8 @@ void CDir::fetch(MDSInternalContextBase *c, std::string_view want_dn, bool ignor
 {
   dout(10) << "fetch on " << *this << dendl;

-  assert(is_auth());
-  assert(!is_complete());
+  ceph_assert(is_auth());
+  ceph_assert(!is_complete());

   if (!can_auth_pin() && !ignore_authpinnability) {
     if (c) {
@@ -1519,11 +1519,11 @@ void CDir::fetch(MDSInternalContextBase *c, std::string_view want_dn, bool ignor
       !inode->snaprealm) {
     dout(7) << "fetch dirfrag for unlinked directory, mark complete" << dendl;
     if (get_version() == 0) {
-      assert(inode->is_auth());
+      ceph_assert(inode->is_auth());
       set_version(1);

       if (state_test(STATE_REJOINUNDEF)) {
-        assert(cache->mds->is_rejoin());
+        ceph_assert(cache->mds->is_rejoin());
         state_clear(STATE_REJOINUNDEF);
         cache->opened_undef_dirfrag(this);
       }
@@ -1557,8 +1557,8 @@ void CDir::fetch(MDSInternalContextBase *c, const std::set<dentry_key_t>& keys)
 {
   dout(10) << "fetch " << keys.size() << " keys on " << *this << dendl;

-  assert(is_auth());
-  assert(!is_complete());
+  ceph_assert(is_auth());
+  ceph_assert(!is_complete());

   if (!can_auth_pin()) {
     dout(7) << "fetch keys waiting for authpinnable" << dendl;
@@ -1645,11 +1645,11 @@ void CDir::_omap_fetch(MDSInternalContextBase *c, const std::set<dentry_key_t>&
   ObjectOperation rd;
   rd.omap_get_header(&fin->hdrbl, &fin->ret1);
   if (keys.empty()) {
-    assert(!c);
+    ceph_assert(!c);
     rd.omap_get_vals("", "", g_conf()->mds_dir_keys_per_op,
                     &fin->omap, &fin->more, &fin->ret2);
   } else {
-    assert(c);
+    ceph_assert(c);
     std::set<std::string> str_keys;
     for (auto p : keys) {
       string str;
@@ -1898,9 +1898,9 @@ void CDir::_omap_fetched(bufferlist& hdrbl, map<string, bufferlist>& omap,
   dout(10) << "_fetched header " << hdrbl.length() << " bytes "
           << omap.size() << " keys for " << *this << dendl;

-  assert(r == 0 || r == -ENOENT || r == -ENODATA);
-  assert(is_auth());
-  assert(!is_frozen());
+  ceph_assert(r == 0 || r == -ENOENT || r == -ENODATA);
+  ceph_assert(is_auth());
+  ceph_assert(!is_frozen());

   if (hdrbl.length() == 0) {
     dout(0) << "_fetched missing object for " << *this << dendl;
@@ -1939,13 +1939,13 @@ void CDir::_omap_fetched(bufferlist& hdrbl, map<string, bufferlist>& omap,
   // take the loaded fnode?
   // only if we are a fresh CDir* with no prior state.
   if (get_version() == 0) {
-    assert(!is_projected());
-    assert(!state_test(STATE_COMMITTING));
+    ceph_assert(!is_projected());
+    ceph_assert(!state_test(STATE_COMMITTING));
     fnode = got_fnode;
     projected_version = committing_version = committed_version = got_fnode.version;

     if (state_test(STATE_REJOINUNDEF)) {
-      assert(cache->mds->is_rejoin());
+      ceph_assert(cache->mds->is_rejoin());
       state_clear(STATE_REJOINUNDEF);
       cache->opened_undef_dirfrag(this);
     }
@@ -2108,10 +2108,10 @@ void CDir::commit(version_t want, MDSInternalContextBase *c, bool ignore_authpin
   if (want == 0) want = get_version();

   // preconditions
-  assert(want <= get_version() || get_version() == 0);    // can't commit the future
-  assert(want > committed_version); // the caller is stupid
-  assert(is_auth());
-  assert(ignore_authpinnability || can_auth_pin());
+  ceph_assert(want <= get_version() || get_version() == 0);    // can't commit the future
+  ceph_assert(want > committed_version); // the caller is stupid
+  ceph_assert(is_auth());
+  ceph_assert(ignore_authpinnability || can_auth_pin());

   // note: queue up a noop if necessary, so that we always
   // get an auth_pin.
@@ -2301,7 +2301,7 @@ void CDir::_encode_dentry(CDentry *dn, bufferlist& bl,
   } else if (dn->linkage.is_primary()) {
     // primary link
     CInode *in = dn->linkage.get_inode();
-    assert(in);
+    ceph_assert(in);

     dout(14) << " pos " << bl.length() << " dn '" << dn->get_name() << "' inode " << *in << dendl;

@@ -2321,7 +2321,7 @@ void CDir::_encode_dentry(CDentry *dn, bufferlist& bl,
     in->encode_snap_blob(snap_blob);
     in->encode_bare(bl, cache->mds->mdsmap->get_up_features(), &snap_blob);
   } else {
-    assert(!dn->linkage.is_null());
+    ceph_assert(!dn->linkage.is_null());
   }
 }

@@ -2331,10 +2331,10 @@ void CDir::_commit(version_t want, int op_prio)

   // we can't commit things in the future.
   // (even the projected future.)
-  assert(want <= get_version() || get_version() == 0);
+  ceph_assert(want <= get_version() || get_version() == 0);

   // check pre+postconditions.
-  assert(is_auth());
+  ceph_assert(is_auth());

   // already committed?
   if (committed_version >= want) {
@@ -2344,7 +2344,7 @@ void CDir::_commit(version_t want, int op_prio)
   // already committing >= want?
   if (committing_version >= want) {
     dout(10) << "already committing " << committing_version << " >= " << want << dendl;
-    assert(state_test(STATE_COMMITTING));
+    ceph_assert(state_test(STATE_COMMITTING));
     return;
   }

@@ -2394,13 +2394,13 @@ void CDir::_committed(int r, version_t v)
   }

   dout(10) << "_committed v " << v << " on " << *this << dendl;
-  assert(is_auth());
+  ceph_assert(is_auth());

   bool stray = inode->is_stray();

   // take note.
-  assert(v > committed_version);
-  assert(v <= committing_version);
+  ceph_assert(v > committed_version);
+  ceph_assert(v <= committing_version);
   committed_version = v;

   // _all_ commits done?
@@ -2422,8 +2422,8 @@ void CDir::_committed(int r, version_t v)
       // inode?
       if (dn->linkage.is_primary()) {
        CInode *in = dn->linkage.get_inode();
-       assert(in);
-       assert(in->is_auth());
+       ceph_assert(in);
+       ceph_assert(in->is_auth());

        if (committed_version >= in->get_version()) {
          if (in->is_dirty()) {
@@ -2432,7 +2432,7 @@ void CDir::_committed(int r, version_t v)
          }
        } else {
          dout(15) << " dir " << committed_version << " < inode " << in->get_version() << " still dirty " << *in << dendl;
-         assert(in->is_dirty() || in->last < CEPH_NOSNAP); // special case for cow snap items (not predirtied)
+         ceph_assert(in->is_dirty() || in->last < CEPH_NOSNAP); // special case for cow snap items (not predirtied)
        }
       }

@@ -2449,7 +2449,7 @@ void CDir::_committed(int r, version_t v)
        remove_dentry(dn);
       } else {
        dout(15) << " dir " << committed_version << " < dn " << dn->get_version() << " still dirty " << *dn << dendl;
-       assert(dn->is_dirty());
+       ceph_assert(dn->is_dirty());
       }
     }

@@ -2491,7 +2491,7 @@ void CDir::_committed(int r, version_t v)

 void CDir::encode_export(bufferlist& bl)
 {
-  assert(!is_projected());
+  ceph_assert(!is_projected());
   encode(first, bl);
   encode(fnode, bl);
   encode(dirty_old_rstat, bl);
@@ -2576,7 +2576,7 @@ void CDir::decode_import(bufferlist::const_iterator& blp, LogSegment *ls)

 void CDir::abort_import()
 {
-  assert(is_auth());
+  ceph_assert(is_auth());
   state_clear(CDir::STATE_AUTH);
   remove_bloom();
   clear_replica_map();
@@ -2688,7 +2688,7 @@ void CDir::set_dir_auth(const mds_authority_t &a)

     // unpin parent of frozen dir/tree?
     if (inode->is_auth()) {
-      assert(!is_frozen_tree_root());
+      ceph_assert(!is_frozen_tree_root());
       if (is_frozen_dir())
        inode->auth_unpin(this);
     }
@@ -2704,7 +2704,7 @@ void CDir::set_dir_auth(const mds_authority_t &a)

     // pin parent of frozen dir/tree?
     if (inode->is_auth()) {
-      assert(!is_frozen_tree_root());
+      ceph_assert(!is_frozen_tree_root());
       if (is_frozen_dir())
        inode->auth_pin(this);
     }
@@ -2759,7 +2759,7 @@ void CDir::auth_unpin(void *by)
   auth_pins--;

 #ifdef MDS_AUTHPIN_SET
-  assert(auth_pin_set.count(by));
+  ceph_assert(auth_pin_set.count(by));
   auth_pin_set.erase(auth_pin_set.find(by));
 #endif
   if (auth_pins == 0)
@@ -2768,7 +2768,7 @@ void CDir::auth_unpin(void *by)
   dout(10) << "auth_unpin by " << by << " on " << *this
           << " count now " << auth_pins << " + " << nested_auth_pins << dendl;
-  assert(auth_pins >= 0);
+  ceph_assert(auth_pins >= 0);

   int newcum = get_cum_auth_pins();

@@ -2782,15 +2782,15 @@ void CDir::auth_unpin(void *by)

 void CDir::adjust_nested_auth_pins(int inc, int dirinc, void *by)
 {
-  assert(inc);
+  ceph_assert(inc);
   nested_auth_pins += inc;
   dir_auth_pins += dirinc;

   dout(15) << __func__ << " " << inc << "/" << dirinc << " on " << *this
           << " by " << by << " count now "
           << auth_pins << " + " << nested_auth_pins << dendl;
-  assert(nested_auth_pins >= 0);
-  assert(dir_auth_pins >= 0);
+  ceph_assert(nested_auth_pins >= 0);
+  ceph_assert(dir_auth_pins >= 0);

   int newcum = get_cum_auth_pins();

@@ -2808,7 +2808,7 @@ void CDir::adjust_nested_auth_pins(int inc, int dirinc, void *by)
 #ifdef MDS_VERIFY_FRAGSTAT
 void CDir::verify_fragstat()
 {
-  assert(is_complete());
+  ceph_assert(is_complete());
   if (inode->is_stray())
     return;

@@ -2859,8 +2859,8 @@ void CDir::verify_fragstat()

 bool CDir::freeze_tree()
 {
-  assert(!is_frozen());
-  assert(!is_freezing());
+  ceph_assert(!is_frozen());
+  ceph_assert(!is_freezing());

   auth_pin(this);
   if (is_freezeable(true)) {
@@ -2878,7 +2878,7 @@ bool CDir::freeze_tree()
 void CDir::_freeze_tree()
 {
   dout(10) << __func__ << " " << *this << dendl;
-  assert(is_freezeable(true));
+  ceph_assert(is_freezeable(true));

   // twiddle state
   if (state_test(STATE_FREEZINGTREE)) {
@@ -2898,8 +2898,8 @@ void CDir::_freeze_tree()
       auth = authority();
     }

-    assert(auth.first >= 0);
-    assert(auth.second == CDIR_AUTH_UNKNOWN);
+    ceph_assert(auth.first >= 0);
+    ceph_assert(auth.second == CDIR_AUTH_UNKNOWN);
     auth.second = auth.first;
     inode->mdcache->adjust_subtree_auth(this, auth);
     if (!was_subtree)
@@ -2924,11 +2924,11 @@ void CDir::unfreeze_tree()

     if (is_auth()) {
       // must be subtree
-      assert(is_subtree_root());
+      ceph_assert(is_subtree_root());
       // for debug purpose, caller should ensure 'dir_auth.second == dir_auth.first'
       mds_authority_t auth = get_dir_auth();
-      assert(auth.first >= 0);
-      assert(auth.second == auth.first);
+      ceph_assert(auth.first >= 0);
+      ceph_assert(auth.second == auth.first);
       auth.second = CDIR_AUTH_UNKNOWN;
       inode->mdcache->adjust_subtree_auth(this, auth);
     }
@@ -2939,7 +2939,7 @@ void CDir::unfreeze_tree()
     finish_waiting(WAIT_FROZEN, -1);

     // freezing. stop it.
-    assert(state_test(STATE_FREEZINGTREE));
+    ceph_assert(state_test(STATE_FREEZINGTREE));
     state_clear(STATE_FREEZINGTREE);
     --num_freezing_trees;
     auth_unpin(this);
@@ -3009,7 +3009,7 @@ CDir *CDir::get_freezing_tree_root()

 CDir *CDir::get_frozen_tree_root()
 {
-  assert(is_frozen());
+  ceph_assert(is_frozen());
   CDir *dir = this;
   while (1) {
     if (dir->is_frozen_tree_root())
@@ -3066,8 +3066,8 @@ void CDir::maybe_finish_freeze()

 bool CDir::freeze_dir()
 {
-  assert(!is_frozen());
-  assert(!is_freezing());
+  ceph_assert(!is_frozen());
+  ceph_assert(!is_freezing());

   auth_pin(this);
   if (is_freezeable_dir(true)) {
@@ -3114,7 +3114,7 @@ void CDir::unfreeze_dir()
     finish_waiting(WAIT_FROZEN, -1);

     // still freezing. stop.
-    assert(state_test(STATE_FREEZINGDIR));
+    ceph_assert(state_test(STATE_FREEZINGDIR));
     state_clear(STATE_FREEZINGDIR);
     auth_unpin(this);

@@ -3129,7 +3129,7 @@ void CDir::unfreeze_dir()
  */
 void CDir::dump(Formatter *f, int flags) const
 {
-  assert(f != NULL);
+  ceph_assert(f != NULL);
   if (flags & DUMP_PATH) {
     f->dump_stream("path") << get_path();
   }
@@ -3213,7 +3213,7 @@ void CDir::dump_load(Formatter *f)

 void CDir::scrub_info_create() const
 {
-  assert(!scrub_infop);
+  ceph_assert(!scrub_infop);

   // break out of const-land to set up implicit initial state
   CDir *me = const_cast<CDir*>(this);
@@ -3235,13 +3235,13 @@ void CDir::scrub_info_create() const
 void CDir::scrub_initialize(const ScrubHeaderRefConst& header)
 {
   dout(20) << __func__ << dendl;
-  assert(is_complete());
-  assert(header != nullptr);
+  ceph_assert(is_complete());
+  ceph_assert(header != nullptr);

   // FIXME: weird implicit construction, is someone else meant
   // to be calling scrub_info_create first?
   scrub_info();
-  assert(scrub_infop && !scrub_infop->directory_scrubbing);
+  ceph_assert(scrub_infop && !scrub_infop->directory_scrubbing);

   scrub_infop->recursive_start.version = get_projected_version();
   scrub_infop->recursive_start.time = ceph_clock_now();
@@ -3277,13 +3277,13 @@ void CDir::scrub_initialize(const ScrubHeaderRefConst& header)
 void CDir::scrub_finished()
 {
   dout(20) << __func__ << dendl;
-  assert(scrub_infop && scrub_infop->directory_scrubbing);
+  ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);

-  assert(scrub_infop->directories_to_scrub.empty());
-  assert(scrub_infop->directories_scrubbing.empty());
+  ceph_assert(scrub_infop->directories_to_scrub.empty());
+  ceph_assert(scrub_infop->directories_scrubbing.empty());
   scrub_infop->directories_scrubbed.clear();
-  assert(scrub_infop->others_to_scrub.empty());
-  assert(scrub_infop->others_scrubbing.empty());
+  ceph_assert(scrub_infop->others_to_scrub.empty());
+  ceph_assert(scrub_infop->others_scrubbing.empty());
   scrub_infop->others_scrubbed.clear();
   scrub_infop->directory_scrubbing = false;

@@ -3346,7 +3346,7 @@ int CDir::_next_dentry_on_set(dentry_key_set &dns, bool missing_okay,
 int CDir::scrub_dentry_next(MDSInternalContext *cb, CDentry **dnout)
 {
   dout(20) << __func__ << dendl;
-  assert(scrub_infop && scrub_infop->directory_scrubbing);
+  ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);

   dout(20) << "trying to scrub directories underneath us" << dendl;
   int rval = _next_dentry_on_set(scrub_infop->directories_to_scrub, true,
@@ -3358,7 +3358,7 @@ int CDir::scrub_dentry_next(MDSInternalContext *cb, CDentry **dnout)
   } else if (rval == EAGAIN) {
     // we don't need to do anything else
   } else { // we emptied out the directory scrub set
-    assert(rval == ENOENT);
+    ceph_assert(rval == ENOENT);
     dout(20) << "no directories left, moving on to other kinds of dentries"
             << dendl;

@@ -3376,21 +3376,21 @@ int CDir::scrub_dentry_next(MDSInternalContext *cb, CDentry **dnout)
 void CDir::scrub_dentries_scrubbing(list<CDentry*> *out_dentries)
 {
   dout(20) << __func__ << dendl;
-  assert(scrub_infop && scrub_infop->directory_scrubbing);
+  ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);

   for (set<dentry_key_t>::iterator i = scrub_infop->directories_scrubbing.begin();
       i != scrub_infop->directories_scrubbing.end();
      ++i) {
     CDentry *d = lookup(i->name, i->snapid);
-    assert(d);
+    ceph_assert(d);
     out_dentries->push_back(d);
   }
   for (set<dentry_key_t>::iterator i = scrub_infop->others_scrubbing.begin();
      i != scrub_infop->others_scrubbing.end();
      ++i) {
     CDentry *d = lookup(i->name, i->snapid);
-    assert(d);
+    ceph_assert(d);
     out_dentries->push_back(d);
   }
 }
@@ -3398,12 +3398,12 @@ void CDir::scrub_dentries_scrubbing(list<CDentry*> *out_dentries)
 void CDir::scrub_dentry_finished(CDentry *dn)
 {
   dout(20) << __func__ << " on dn " << *dn << dendl;
-  assert(scrub_infop && scrub_infop->directory_scrubbing);
+  ceph_assert(scrub_infop && scrub_infop->directory_scrubbing);
   dentry_key_t dn_key = dn->key();
   if (scrub_infop->directories_scrubbing.erase(dn_key)) {
     scrub_infop->directories_scrubbed.insert(dn_key);
   } else {
-    assert(scrub_infop->others_scrubbing.count(dn_key));
+    ceph_assert(scrub_infop->others_scrubbing.count(dn_key));
     scrub_infop->others_scrubbing.erase(dn_key);
     scrub_infop->others_scrubbed.insert(dn_key);
   }
@@ -3423,7 +3423,7 @@ void CDir::scrub_maybe_delete_info()

 bool CDir::scrub_local()
 {
-  assert(is_complete());
+  ceph_assert(is_complete());
   bool rval = check_rstats(true);

   scrub_info();
diff --git a/src/mds/CDir.h b/src/mds/CDir.h
index 423b7f77f500a..33a5fa9b01580 100644
--- a/src/mds/CDir.h
+++ b/src/mds/CDir.h
@@ -207,7 +207,7 @@ public:
 public:
   version_t get_version() const { return fnode.version; }
   void set_version(version_t v) {
-    assert(projected_fnode.empty());
+    ceph_assert(projected_fnode.empty());
     projected_version = fnode.version = v;
   }
   version_t get_projected_version() const { return projected_version; }
@@ -454,7 +454,7 @@ protected:

   void inc_num_dirty() { num_dirty++; }
   void dec_num_dirty() {
-    assert(num_dirty > 0);
+    ceph_assert(num_dirty > 0);
     num_dirty--;
   }
   int get_num_dirty() const {
diff --git a/src/mds/CInode.cc b/src/mds/CInode.cc
index ce9f7da32390c..7b7f2bdfaa348 100644
--- a/src/mds/CInode.cc
+++ b/src/mds/CInode.cc
@@ -59,7 +59,7 @@ protected:
   MDSRank *get_mds() override {return in->mdcache->mds;}
 public:
   explicit CInodeIOContext(CInode *in_) : in(in_) {
-    assert(in != NULL);
+    ceph_assert(in != NULL);
   }
 };

@@ -341,7 +341,7 @@ bool CInode::split_need_snapflush(CInode *cowin, CInode *in)
   bool need_flush = false;
   for (auto it = client_need_snapflush.lower_bound(cowin->first);
        it != client_need_snapflush.end() && it->first < in->first; ) {
-    assert(!it->second.empty());
+    ceph_assert(!it->second.empty());
     if (cowin->last >= it->first) {
       cowin->auth_pin(this);
       need_flush = true;
@@ -368,7 +368,7 @@ void CInode::mark_dirty_rstat()
     } else {
       // under cross-MDS rename.
       // DIRTYRSTAT flag will get cleared when rename finishes
-      assert(state_test(STATE_AMBIGUOUSAUTH));
+      ceph_assert(state_test(STATE_AMBIGUOUSAUTH));
     }
   }
 }
@@ -410,7 +410,7 @@ CInode::projected_inode &CInode::project_inode(bool xattr, bool snap)

 void CInode::pop_and_dirty_projected_inode(LogSegment *ls)
 {
-  assert(!projected_nodes.empty());
+  ceph_assert(!projected_nodes.empty());
   auto &front = projected_nodes.front();
   dout(15) << __func__ << " " << front.inode.ino
           << " v" << front.inode.version << dendl;
@@ -444,7 +444,7 @@ sr_t *CInode::prepare_new_srnode(snapid_t snapid)
     new_srnode = new sr_t(*cur_srnode);
     if (!new_srnode->past_parents.empty()) {
       // convert past_parents to past_parent_snaps
-      assert(snaprealm);
+      ceph_assert(snaprealm);
       auto& snaps = snaprealm->get_snaps();
       for (auto p : snaps) {
        if (p >= new_srnode->current_parent_since)
@@ -471,14 +471,14 @@ sr_t *CInode::prepare_new_srnode(snapid_t snapid)
 void CInode::project_snaprealm(sr_t *new_srnode)
 {
   dout(10) << __func__ << " " << new_srnode << dendl;
-  assert(projected_nodes.back().snapnode == projected_inode::UNDEF_SRNODE);
+  ceph_assert(projected_nodes.back().snapnode == projected_inode::UNDEF_SRNODE);
   projected_nodes.back().snapnode = new_srnode;
   ++num_projected_srnodes;
 }

 void CInode::mark_snaprealm_global(sr_t *new_srnode)
 {
-  assert(!is_dir());
+  ceph_assert(!is_dir());
   // 'last_destroyed' is no longer used, use it to store origin 'current_parent_since'
   new_srnode->last_destroyed = new_srnode->current_parent_since;
   new_srnode->current_parent_since = mdcache->get_global_snaprealm()->get_newest_seq() + 1;
@@ -513,7 +513,7 @@ void CInode::project_snaprealm_past_parent(SnapRealm *newparent)
    if parent DNE, we need to find what the parent actually is and fill that in */
 void CInode::record_snaprealm_past_parent(sr_t *new_snap, SnapRealm *newparent)
 {
-  assert(!new_snap->is_parent_global());
+  ceph_assert(!new_snap->is_parent_global());
   SnapRealm *oldparent;
   if (!snaprealm) {
     oldparent = find_snaprealm();
@@ -539,7 +539,7 @@ void CInode::record_snaprealm_past_parent(sr_t *new_snap, SnapRealm *newparent)
 void CInode::record_snaprealm_parent_dentry(sr_t *new_snap, SnapRealm *newparent,
                                            CDentry *dn, bool primary_dn)
 {
-  assert(new_snap->is_parent_global());
+  ceph_assert(new_snap->is_parent_global());
   SnapRealm *oldparent = dn->get_dir()->inode->find_snaprealm();
   auto& snaps = oldparent->get_snaps();

@@ -558,7 +558,7 @@ void CInode::record_snaprealm_parent_dentry(sr_t *new_snap, SnapRealm *newparent

 void CInode::early_pop_projected_snaprealm()
 {
-  assert(!projected_nodes.empty());
+  ceph_assert(!projected_nodes.empty());
   if (projected_nodes.front().snapnode != projected_inode::UNDEF_SRNODE) {
     pop_projected_snaprealm(projected_nodes.front().snapnode, true);
     projected_nodes.front().snapnode = projected_inode::UNDEF_SRNODE;
@@ -593,7 +593,7 @@ void CInode::pop_projected_snaprealm(sr_t *next_snaprealm, bool early)
     // we should be able to open these up (or have them already be open).
     bool ok = snaprealm->_open_parents(NULL);
-    assert(ok);
+    ceph_assert(ok);

     if (invalidate_cached_snaps)
       snaprealm->invalidate_cached_snaps();
@@ -602,7 +602,7 @@ void CInode::pop_projected_snaprealm(sr_t *next_snaprealm, bool early)
       dout(10) << " realm " << *snaprealm << " parent " << *snaprealm->parent << dendl;
   } else {
     dout(10) << __func__ << (early ? " (early) null" : " null") << dendl;
" (early) null" : " null") << dendl; - assert(snaprealm); + ceph_assert(snaprealm); snaprealm->merge_to(NULL); } } @@ -617,7 +617,7 @@ __u32 InodeStoreBase::hash_dentry_name(std::string_view dn) int which = inode.dir_layout.dl_dir_hash; if (!which) which = CEPH_STR_HASH_LINUX; - assert(ceph_str_hash_valid(which)); + ceph_assert(ceph_str_hash_valid(which)); return ceph_str_hash(which, dn.data(), dn.length()); } @@ -674,7 +674,7 @@ void CInode::verify_dirfrags() bad = true; } } - assert(!bad); + ceph_assert(!bad); } void CInode::force_dirfrags() @@ -720,13 +720,13 @@ CDir *CInode::get_approx_dirfrag(frag_t fg) CDir *CInode::get_or_open_dirfrag(MDCache *mdcache, frag_t fg) { - assert(is_dir()); + ceph_assert(is_dir()); // have it? CDir *dir = get_dirfrag(fg); if (!dir) { // create it. - assert(is_auth() || mdcache->mds->is_any_replay()); + ceph_assert(is_auth() || mdcache->mds->is_any_replay()); dir = new CDir(this, fg, mdcache, is_auth()); add_dirfrag(dir); } @@ -735,7 +735,7 @@ CDir *CInode::get_or_open_dirfrag(MDCache *mdcache, frag_t fg) CDir *CInode::add_dirfrag(CDir *dir) { - assert(dirfrags.count(dir->dirfrag().frag) == 0); + ceph_assert(dirfrags.count(dir->dirfrag().frag) == 0); dirfrags[dir->dirfrag().frag] = dir; if (stickydir_ref > 0) { @@ -751,7 +751,7 @@ CDir *CInode::add_dirfrag(CDir *dir) void CInode::close_dirfrag(frag_t fg) { dout(14) << __func__ << " " << fg << dendl; - assert(dirfrags.count(fg)); + ceph_assert(dirfrags.count(fg)); CDir *dir = dirfrags[fg]; dir->remove_null_dentries(); @@ -772,7 +772,7 @@ void CInode::close_dirfrag(frag_t fg) for (const auto &p : dir->items) dout(14) << __func__ << " LEFTOVER dn " << *p.second << dendl; - assert(dir->get_num_ref() == 0); + ceph_assert(dir->get_num_ref() == 0); delete dir; dirfrags.erase(fg); } @@ -818,7 +818,7 @@ void CInode::get_stickydirs() void CInode::put_stickydirs() { - assert(stickydir_ref > 0); + ceph_assert(stickydir_ref > 0); stickydir_ref--; if (stickydir_ref == 0) { put(PIN_STICKYDIRS); @@ -898,7 +898,7 @@ bool CInode::is_ancestor_of(const CInode *other) const return true; const CDentry *pdn = other->get_oldest_parent_dn(); if (!pdn) { - assert(other->is_base()); + ceph_assert(other->is_base()); break; } other = pdn->get_dir()->get_inode(); @@ -913,7 +913,7 @@ bool CInode::is_projected_ancestor_of(const CInode *other) const return true; const CDentry *pdn = other->get_projected_parent_dn(); if (!pdn) { - assert(other->is_base()); + ceph_assert(other->is_base()); break; } other = pdn->get_dir()->get_inode(); @@ -958,7 +958,7 @@ void CInode::make_path(filepath& fp, bool projected) const { const CDentry *use_parent = projected ? get_projected_parent_dn() : parent; if (use_parent) { - assert(!is_base()); + ceph_assert(!is_base()); use_parent->make_path(fp, projected); } else { fp = filepath(ino()); @@ -980,7 +980,7 @@ version_t CInode::pre_dirty() pv = _cdentry->pre_dirty(get_projected_version()); dout(10) << "pre_dirty " << pv << " (current v " << inode.version << ")" << dendl; } else { - assert(is_base()); + ceph_assert(is_base()); pv = get_projected_version() + 1; } // force update backtrace for old format inode (see mempool_inode::decode) @@ -997,7 +997,7 @@ void CInode::_mark_dirty(LogSegment *ls) if (!state_test(STATE_DIRTY)) { state_set(STATE_DIRTY); get(PIN_DIRTY); - assert(ls); + ceph_assert(ls); } // move myself to this segment's dirty list @@ -1017,10 +1017,10 @@ void CInode::mark_dirty(version_t pv, LogSegment *ls) { // only auth can get dirty. 
"dirty" async data in replicas is relative to // filelock state, not the dirty flag. - assert(is_auth()); + ceph_assert(is_auth()); // touch my private version - assert(inode.version < pv); + ceph_assert(inode.version < pv); inode.version = pv; _mark_dirty(ls); @@ -1069,7 +1069,7 @@ object_t InodeStoreBase::get_object_name(inodeno_t ino, frag_t fg, const char *s void CInode::store(MDSInternalContextBase *fin) { dout(10) << __func__ << " " << get_version() << dendl; - assert(is_base()); + ceph_assert(is_base()); if (snaprealm) purge_stale_snap_data(snaprealm->get_snaps()); @@ -1118,7 +1118,7 @@ void CInode::_stored(int r, version_t v, Context *fin) void CInode::flush(MDSInternalContextBase *fin) { dout(10) << __func__ << " " << *this << dendl; - assert(is_auth() && can_auth_pin()); + ceph_assert(is_auth() && can_auth_pin()); MDSGatherBuilder gather(g_ceph_context); @@ -1249,7 +1249,7 @@ struct C_IO_Inode_StoredBacktrace : public CInodeIOContext { void CInode::store_backtrace(MDSInternalContextBase *fin, int op_prio) { dout(10) << __func__ << " on " << *this << dendl; - assert(is_dirty_parent()); + ceph_assert(is_dirty_parent()); if (op_prio < 0) op_prio = CEPH_MSG_PRIO_DEFAULT; @@ -1365,7 +1365,7 @@ void CInode::mark_dirty_parent(LogSegment *ls, bool dirty_pool) dout(10) << __func__ << dendl; state_set(STATE_DIRTYPARENT); get(PIN_DIRTYPARENT); - assert(ls); + ceph_assert(ls); } if (dirty_pool) state_set(STATE_DIRTYPOOL); @@ -1405,7 +1405,7 @@ void CInode::verify_diri_backtrace(bufferlist &bl, int err) if (err) { MDSRank *mds = mdcache->mds; mds->clog->error() << "bad backtrace on directory inode " << ino(); - assert(!"bad backtrace" == (g_conf()->mds_verify_backtrace > 1)); + ceph_assert(!"bad backtrace" == (g_conf()->mds_verify_backtrace > 1)); mark_dirty_parent(mds->mdlog->get_current_segment(), false); mds->mdlog->flush(); @@ -1804,7 +1804,7 @@ void CInode::decode_lock_state(int type, const bufferlist& bl) CDir *dir = get_dirfrag(fg); if (is_auth()) { - assert(dir); // i am auth; i had better have this dir open + ceph_assert(dir); // i am auth; i had better have this dir open dout(10) << fg << " first " << dir->first << " -> " << fgfirst << " on " << *dir << dendl; dir->first = fgfirst; @@ -1867,7 +1867,7 @@ void CInode::decode_lock_state(int type, const bufferlist& bl) CDir *dir = get_dirfrag(fg); if (is_auth()) { - assert(dir); // i am auth; i had better have this dir open + ceph_assert(dir); // i am auth; i had better have this dir open dout(10) << fg << " first " << dir->first << " -> " << fgfirst << " on " << *dir << dendl; dir->first = fgfirst; @@ -1950,7 +1950,7 @@ void CInode::clear_scatter_dirty() void CInode::clear_dirty_scattered(int type) { dout(10) << __func__ << " " << type << " on " << *this << dendl; - assert(is_dir()); + ceph_assert(is_dir()); switch (type) { case CEPH_LOCK_IFILE: item_dirty_dirfrag_dir.remove_myself(); @@ -1978,7 +1978,7 @@ void CInode::clear_dirty_scattered(int type) void CInode::start_scatter(ScatterLock *lock) { dout(10) << __func__ << " " << *lock << " on " << *this << dendl; - assert(is_auth()); + ceph_assert(is_auth()); mempool_inode *pi = get_projected_inode(); for (const auto &p : dirfrags) { @@ -2025,7 +2025,7 @@ void CInode::finish_scatter_update(ScatterLock *lock, CDir *dir, version_t inode_version, version_t dir_accounted_version) { frag_t fg = dir->get_frag(); - assert(dir->is_auth()); + ceph_assert(dir->is_auth()); if (dir->is_frozen()) { dout(10) << __func__ << " " << fg << " frozen, marking " << *lock << " stale " << *dir << dendl; @@ 
       le->metablob.add_dir_context(dir);
       le->metablob.add_dir(dir, true);

-      assert(!dir->is_frozen());
+      ceph_assert(!dir->is_frozen());
       mut->auth_pin(dir);

       if (lock->get_type() == CEPH_LOCK_INEST &&
@@ -2132,7 +2132,7 @@ void CInode::finish_scatter_gather_update(int type)
   LogChannelRef clog = mdcache->mds->clog;

   dout(10) << __func__ << " " << type << " on " << *this << dendl;
-  assert(is_auth());
+  ceph_assert(is_auth());

   switch (type) {
   case CEPH_LOCK_IFILE:
@@ -2142,7 +2142,7 @@ void CInode::finish_scatter_gather_update(int type)
       bool dirstat_valid = true;

       // adjust summation
-      assert(is_auth());
+      ceph_assert(is_auth());
       mempool_inode *pi = get_projected_inode();

       bool touched_mtime = false, touched_chattr = false;
@@ -2177,7 +2177,7 @@ void CInode::finish_scatter_gather_update(int type)
              pf->fragstat.nsubdirs < 0) {
            clog->error() << "bad/negative dir size on "
                          << dir->dirfrag() << " " << pf->fragstat;
-           assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
+           ceph_assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);

            if (pf->fragstat.nfiles < 0)
              pf->fragstat.nfiles = 0;
@@ -2214,7 +2214,7 @@ void CInode::finish_scatter_gather_update(int type)
        } else {
          clog->error() << "unmatched fragstat on " << ino() << ", inode has "
                        << pi->dirstat << ", dirfrags have " << dirstat;
-         assert(!"unmatched fragstat" == g_conf()->mds_verify_scatter);
+         ceph_assert(!"unmatched fragstat" == g_conf()->mds_verify_scatter);
        }
        // trust the dirfrags for now
        version_t v = pi->dirstat.version;
@@ -2233,7 +2233,7 @@ void CInode::finish_scatter_gather_update(int type)
        make_path_string(path);
        clog->error() << "Inconsistent statistics detected: fragstat on inode "
                      << ino() << " (" << path << "), inode has " << pi->dirstat;
-       assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);
+       ceph_assert(!"bad/negative fragstat" == g_conf()->mds_verify_scatter);

        if (pi->dirstat.nfiles < 0)
          pi->dirstat.nfiles = 0;
@@ -2246,7 +2246,7 @@ void CInode::finish_scatter_gather_update(int type)
   case CEPH_LOCK_INEST:
     {
       // adjust summation
-      assert(is_auth());
+      ceph_assert(is_auth());

       fragtree_t tmpdft = dirfragtree;
       nest_info_t rstat;
@@ -2325,7 +2325,7 @@ void CInode::finish_scatter_gather_update(int type)
          clog->error() << "inconsistent rstat on inode " << ino()
                        << ", inode has " << pi->rstat
                        << ", directory fragments have " << rstat;
-         assert(!"unmatched rstat" == g_conf()->mds_verify_scatter);
+         ceph_assert(!"unmatched rstat" == g_conf()->mds_verify_scatter);
        }
        // trust the dirfrag for now
        version_t v = pi->rstat.version;
@@ -2351,7 +2351,7 @@ void CInode::finish_scatter_gather_update(int type)
 void CInode::finish_scatter_gather_update_accounted(int type, MutationRef& mut, EMetaBlob *metablob)
 {
   dout(10) << __func__ << " " << type << " on " << *this << dendl;
-  assert(is_auth());
+  ceph_assert(is_auth());

   for (const auto &p : dirfrags) {
     CDir *dir = p.second;
@@ -2362,7 +2362,7 @@ void CInode::finish_scatter_gather_update_accounted(int type, MutationRef& mut,
       continue;  // nothing to do.
dout(10) << " journaling updated frag accounted_ on " << *dir << dendl; - assert(dir->is_projected()); + ceph_assert(dir->is_projected()); fnode_t *pf = dir->get_projected_fnode(); pf->version = dir->pre_dirty(); mut->add_projected_fnode(dir); @@ -2461,8 +2461,8 @@ void CInode::take_waiting(uint64_t mask, MDSInternalContextBase::vec& ls) bool CInode::freeze_inode(int auth_pin_allowance) { - assert(auth_pin_allowance > 0); // otherwise we need to adjust parent's nested_auth_pins - assert(auth_pins >= auth_pin_allowance); + ceph_assert(auth_pin_allowance > 0); // otherwise we need to adjust parent's nested_auth_pins + ceph_assert(auth_pins >= auth_pin_allowance); if (auth_pins > auth_pin_allowance) { dout(10) << "freeze_inode - waiting for auth_pins to drop to " << auth_pin_allowance << dendl; auth_pin_freeze_allowance = auth_pin_allowance; @@ -2472,7 +2472,7 @@ bool CInode::freeze_inode(int auth_pin_allowance) } dout(10) << "freeze_inode - frozen" << dendl; - assert(auth_pins == auth_pin_allowance); + ceph_assert(auth_pins == auth_pin_allowance); if (!state_test(STATE_FROZEN)) { get(PIN_FROZEN); state_set(STATE_FROZEN); @@ -2503,13 +2503,13 @@ void CInode::unfreeze_inode() void CInode::freeze_auth_pin() { - assert(state_test(CInode::STATE_FROZEN)); + ceph_assert(state_test(CInode::STATE_FROZEN)); state_set(CInode::STATE_FROZENAUTHPIN); } void CInode::unfreeze_auth_pin() { - assert(state_test(CInode::STATE_FROZENAUTHPIN)); + ceph_assert(state_test(CInode::STATE_FROZENAUTHPIN)); state_clear(CInode::STATE_FROZENAUTHPIN); if (!state_test(STATE_FREEZING|STATE_FROZEN)) { MDSInternalContextBase::vec finished; @@ -2520,7 +2520,7 @@ void CInode::unfreeze_auth_pin() void CInode::clear_ambiguous_auth(MDSInternalContextBase::vec& finished) { - assert(state_test(CInode::STATE_AMBIGUOUSAUTH)); + ceph_assert(state_test(CInode::STATE_AMBIGUOUSAUTH)); state_clear(CInode::STATE_AMBIGUOUSAUTH); take_waiting(CInode::WAIT_SINGLEAUTH, finished); } @@ -2572,7 +2572,7 @@ void CInode::auth_unpin(void *by) auth_pins--; #ifdef MDS_AUTHPIN_SET - assert(auth_pin_set.count(by)); + ceph_assert(auth_pin_set.count(by)); auth_pin_set.erase(auth_pin_set.find(by)); #endif @@ -2583,7 +2583,7 @@ void CInode::auth_unpin(void *by) << " now " << auth_pins << "+" << nested_auth_pins << dendl; - assert(auth_pins >= 0); + ceph_assert(auth_pins >= 0); if (parent) parent->adjust_nested_auth_pins(-1, -1, by); @@ -2601,12 +2601,12 @@ void CInode::auth_unpin(void *by) void CInode::adjust_nested_auth_pins(int a, void *by) { - assert(a); + ceph_assert(a); nested_auth_pins += a; dout(35) << __func__ << " by " << by << " change " << a << " yields " << auth_pins << "+" << nested_auth_pins << dendl; - assert(nested_auth_pins >= 0); + ceph_assert(nested_auth_pins >= 0); if (g_conf()->mds_debug_auth_pins) { // audit @@ -2616,7 +2616,7 @@ void CInode::adjust_nested_auth_pins(int a, void *by) if (!dir->is_subtree_root() && dir->get_cum_auth_pins()) s++; } - assert(s == nested_auth_pins); + ceph_assert(s == nested_auth_pins); } if (parent) @@ -2655,7 +2655,7 @@ snapid_t CInode::get_oldest_snap() CInode::mempool_old_inode& CInode::cow_old_inode(snapid_t follows, bool cow_head) { - assert(follows >= first); + ceph_assert(follows >= first); mempool_inode *pi = cow_head ? get_projected_inode() : get_previous_projected_inode(); mempool_xattr_map *px = cow_head ? 
@@ -2688,7 +2688,7 @@ CInode::mempool_old_inode& CInode::cow_old_inode(snapid_t follows, bool cow_head
 void CInode::split_old_inode(snapid_t snap)
 {
   auto it = old_inodes.lower_bound(snap);
-  assert(it != old_inodes.end() && it->second.first < snap);
+  ceph_assert(it != old_inodes.end() && it->second.first < snap);

   mempool_old_inode &old = old_inodes[snap - 1];
   old = it->second;
@@ -2815,7 +2815,7 @@ void CInode::decode_snap_blob(const bufferlist& snapbl)
     decode(snaprealm->srnode, p);
     if (is_base()) {
       bool ok = snaprealm->_open_parents(NULL);
-      assert(ok);
+      ceph_assert(ok);
     } else {
       if ((snaprealm->srnode.flags ^ old_flags) & sr_t::PARENT_GLOBAL) {
        snaprealm->close_parents();
@@ -2824,7 +2824,7 @@ void CInode::decode_snap_blob(const bufferlist& snapbl)
     }
     dout(20) << __func__ << " " << *snaprealm << dendl;
   } else if (snaprealm) {
-    assert(mdcache->mds->is_any_replay());
+    ceph_assert(mdcache->mds->is_any_replay());
     snaprealm->merge_to(NULL);
   }
 }
@@ -2886,14 +2886,14 @@ bool CInode::choose_ideal_loner()
       set_loner_cap(want_loner_cap);
       changed = true;
     } else
-      assert(loner_cap == want_loner_cap);
+      ceph_assert(loner_cap == want_loner_cap);
   }
   return changed;
 }

 bool CInode::try_set_loner()
 {
-  assert(want_loner_cap >= 0);
+  ceph_assert(want_loner_cap >= 0);
   if (loner_cap >= 0 && loner_cap != want_loner_cap)
     return false;
   set_loner_cap(want_loner_cap);
@@ -2949,7 +2949,7 @@ void CInode::choose_lock_state(SimpleLock *lock, int allissued)
   } else {
     // our states have already been chosen during rejoin.
     if (lock->is_xlocked())
-      assert(lock->get_state() == LOCK_LOCK);
+      ceph_assert(lock->get_state() == LOCK_LOCK);
   }
 }

@@ -3000,12 +3000,12 @@ void CInode::adjust_num_caps_wanted(int d)
     mdcache->open_file_table.remove_inode(this);

   num_caps_wanted +=d;
-  assert(num_caps_wanted >= 0);
+  ceph_assert(num_caps_wanted >= 0);
 }

 Capability *CInode::add_client_cap(client_t client, Session *session, SnapRealm *conrealm)
 {
-  assert(last == CEPH_NOSNAP);
+  ceph_assert(last == CEPH_NOSNAP);
   if (client_caps.empty()) {
     get(PIN_CAPS);
     if (conrealm)
@@ -3023,7 +3023,7 @@ Capability *CInode::add_client_cap(client_t client, Session *session, SnapRealm
   uint64_t cap_id = ++mdcache->last_cap_id;
   auto ret = client_caps.emplace(std::piecewise_construct, std::forward_as_tuple(client),
                                 std::forward_as_tuple(this, cap_id, client));
-  assert(ret.second == true);
+  ceph_assert(ret.second == true);
   Capability *cap = &ret.first->second;

   session->add_cap(cap);
@@ -3038,7 +3038,7 @@ Capability *CInode::add_client_cap(client_t client, Session *session, SnapRealm
 void CInode::remove_client_cap(client_t client)
 {
   auto it = client_caps.find(client);
-  assert(it != client_caps.end());
+  ceph_assert(it != client_caps.end());
   Capability *cap = &it->second;

   cap->item_session_caps.remove_myself();
@@ -3274,8 +3274,8 @@ bool CInode::issued_caps_need_gather(SimpleLock *lock)
 void CInode::replicate_relax_locks()
 {
   //dout(10) << " relaxing locks on " << *this << dendl;
-  assert(is_auth());
-  assert(!is_replicated());
+  ceph_assert(is_auth());
+  ceph_assert(!is_replicated());

   authlock.replicate_relax();
   linklock.replicate_relax();
@@ -3299,7 +3299,7 @@ int CInode::encode_inodestat(bufferlist& bl, Session *session,
                             int getattr_caps)
 {
   client_t client = session->get_client();
-  assert(snapid);
+  ceph_assert(snapid);

   bool valid = true;

@@ -3500,7 +3500,7 @@ int CInode::encode_inodestat(bufferlist& bl, Session *session,
             << " seq " << cap->get_last_seq() << dendl;
   } else if (cap && cap->is_new() && !dir_realm) {
!dir_realm) { // alway issue new caps to client, otherwise the caps get lost
- assert(cap->is_stale());
+ ceph_assert(cap->is_stale());
issue = cap->pending() | CEPH_CAP_PIN;
cap->issue_norevoke(issue);
dout(10) << "encode_inodestat issuing " << ccap_string(issue)
@@ -3604,7 +3604,7 @@ int CInode::encode_inodestat(bufferlist& bl, Session *session,
ENCODE_FINISH(bl);
} else {
- assert(session->get_connection());
+ ceph_assert(session->get_connection());
encode(oi->ino, bl);
encode(snapid, bl);
@@ -3663,7 +3663,7 @@ int CInode::encode_inodestat(bufferlist& bl, Session *session,
void CInode::encode_cap_message(const MClientCaps::ref &m, Capability *cap)
{
- assert(cap);
+ ceph_assert(cap);
client_t client = cap->get_client();
@@ -3956,7 +3956,7 @@ void CInode::decode_import(bufferlist::const_iterator& p,
frag_t fg;
decode(fg, q);
CDir *dir = get_dirfrag(fg);
- assert(dir); // we should have all bounds open
+ ceph_assert(dir); // we should have all bounds open
// Only take the remote's fragstat/rstat if we are non-auth for
// this dirfrag AND the lock is NOT in a scattered (MIX) state.
@@ -4230,7 +4230,7 @@ next:
}
bool validate_directory_data() {
- assert(in->is_dir());
+ ceph_assert(in->is_dir());
if (in->is_base()) {
if (!shadow_in) {
@@ -4319,7 +4319,7 @@ next:
// check each dirfrag...
for (const auto &p : in->dirfrags) {
CDir *dir = p.second;
- assert(dir->get_version() > 0);
+ ceph_assert(dir->get_version() > 0);
nest_info.add(dir->fnode.accounted_rstat);
dir_info.add(dir->fnode.accounted_fragstat);
if (dir->scrub_infop->pending_scrub_error) {
@@ -4610,7 +4610,7 @@ void CInode::dump(Formatter *f, int flags) const
void CInode::scrub_info_create() const
{
dout(25) << __func__ << dendl;
- assert(!scrub_infop);
+ ceph_assert(!scrub_infop);
// break out of const-land to set up implicit initial state
CInode *me = const_cast<CInode*>(this);
@@ -4641,11 +4641,11 @@ void CInode::scrub_initialize(CDentry *scrub_parent,
if (scrub_is_in_progress()) {
dout(20) << __func__ << " inode moved during scrub, reinitializing " << dendl;
- assert(scrub_infop->scrub_parent);
+ ceph_assert(scrub_infop->scrub_parent);
CDentry *dn = scrub_infop->scrub_parent;
CDir *dir = dn->dir;
dn->put(CDentry::PIN_SCRUBPARENT);
- assert(dir->scrub_infop && dir->scrub_infop->directory_scrubbing);
+ ceph_assert(dir->scrub_infop && dir->scrub_infop->directory_scrubbing);
dir->scrub_infop->directories_scrubbing.erase(dn->key());
dir->scrub_infop->others_scrubbing.erase(dn->key());
}
@@ -4683,7 +4683,7 @@ void CInode::scrub_initialize(CDentry *scrub_parent,
int CInode::scrub_dirfrag_next(frag_t* out_dirfrag)
{
dout(20) << __func__ << dendl;
- assert(scrub_is_in_progress());
+ ceph_assert(scrub_is_in_progress());
if (!is_dir()) {
return -ENOTDIR;
}
@@ -4709,8 +4709,8 @@ int CInode::scrub_dirfrag_next(frag_t* out_dirfrag)
void CInode::scrub_dirfrags_scrubbing(list<frag_t>* out_dirfrags)
{
- assert(out_dirfrags != NULL);
- assert(scrub_infop != NULL);
+ ceph_assert(out_dirfrags != NULL);
+ ceph_assert(scrub_infop != NULL);
out_dirfrags->clear();
std::map<frag_t, scrub_stamp_info_t>::iterator i =
@@ -4731,11 +4731,11 @@ void CInode::scrub_dirfrags_scrubbing(list<frag_t>* out_dirfrags)
void CInode::scrub_dirfrag_finished(frag_t dirfrag)
{
dout(20) << __func__ << " on frag " << dirfrag << dendl;
- assert(scrub_is_in_progress());
+ ceph_assert(scrub_is_in_progress());
std::map<frag_t, scrub_stamp_info_t>::iterator i =
scrub_infop->dirfrag_stamps.find(dirfrag);
- assert(i != scrub_infop->dirfrag_stamps.end());
+ ceph_assert(i != scrub_infop->dirfrag_stamps.end());
scrub_stamp_info_t &si = i->second;
si.last_scrub_stamp =
si.scrub_start_stamp;
@@ -4744,7 +4744,7 @@ void CInode::scrub_dirfrag_finished(frag_t dirfrag)
void CInode::scrub_finished(MDSInternalContextBase **c) {
dout(20) << __func__ << dendl;
- assert(scrub_is_in_progress());
+ ceph_assert(scrub_is_in_progress());
for (std::map<frag_t, scrub_stamp_info_t>::iterator i =
scrub_infop->dirfrag_stamps.begin();
i != scrub_infop->dirfrag_stamps.end();
@@ -4753,7 +4753,7 @@ void CInode::scrub_finished(MDSInternalContextBase **c) {
derr << i->second.last_scrub_version << " != "
<< i->second.scrub_start_version << dendl;
}
- assert(i->second.last_scrub_version == i->second.scrub_start_version);
+ ceph_assert(i->second.last_scrub_version == i->second.scrub_start_version);
}
scrub_infop->last_scrub_version = scrub_infop->scrub_start_version;
@@ -4790,7 +4790,7 @@ int64_t CInode::get_backtrace_pool() const
} else {
// Files are required to have an explicit layout that specifies
// a pool
- assert(inode.layout.pool_id != -1);
+ ceph_assert(inode.layout.pool_id != -1);
return inode.layout.pool_id;
}
}
@@ -4838,8 +4838,8 @@ void CInode::maybe_export_pin(bool update)
void CInode::set_export_pin(mds_rank_t rank)
{
- assert(is_dir());
- assert(is_projected());
+ ceph_assert(is_dir());
+ ceph_assert(is_projected());
get_projected_inode()->export_pin = rank;
maybe_export_pin(true);
}
diff --git a/src/mds/CInode.h b/src/mds/CInode.h
index a9ac0045de84f..87a7cca09dbd0 100644
--- a/src/mds/CInode.h
+++ b/src/mds/CInode.h
@@ -365,7 +365,7 @@ class CInode : public MDSCacheObject, public InodeStoreBase, public Counter<CIno
scrub_infop->children_scrubbed = true;
}
void scrub_set_finisher(MDSInternalContextBase *c) {
- assert(!scrub_infop->on_finish);
+ ceph_assert(!scrub_infop->on_finish);
scrub_infop->on_finish = c;
}
@@ -470,7 +470,7 @@ public:
return &projected_nodes.back().inode;
}
mempool_inode *get_previous_projected_inode() {
- assert(!projected_nodes.empty());
+ ceph_assert(!projected_nodes.empty());
auto it = projected_nodes.rbegin();
++it;
if (it != projected_nodes.rend())
@@ -721,11 +721,11 @@ public:
close_dirfrags();
close_snaprealm();
clear_file_locks();
- assert(num_projected_xattrs == 0);
- assert(num_projected_srnodes == 0);
- assert(num_caps_wanted == 0);
- assert(num_subtree_roots == 0);
- assert(num_exporting_dirs == 0);
+ ceph_assert(num_projected_xattrs == 0);
+ ceph_assert(num_projected_srnodes == 0);
+ ceph_assert(num_caps_wanted == 0);
+ ceph_assert(num_subtree_roots == 0);
+ ceph_assert(num_exporting_dirs == 0);
}
@@ -833,7 +833,7 @@ public:
void decode_store(bufferlist::const_iterator& bl);
void encode_replica(mds_rank_t rep, bufferlist& bl, uint64_t features, bool need_recover) {
- assert(is_auth());
+ ceph_assert(is_auth());
// relax locks?
if (!is_replicated())
@@ -884,7 +884,7 @@ public:
void finish_export();
void abort_export() {
put(PIN_TEMPEXPORTING);
- assert(state_test(STATE_EXPORTINGCAPS));
+ ceph_assert(state_test(STATE_EXPORTINGCAPS));
state_clear(STATE_EXPORTINGCAPS);
put(PIN_EXPORTINGCAPS);
}
@@ -1090,9 +1090,9 @@ public:
#endif
<< dendl;
#ifdef MDS_REF_SET
- assert(ref_map[by] > 0);
+ ceph_assert(ref_map[by] > 0);
#endif
- assert(ref > 0);
+ ceph_assert(ref > 0);
}
void bad_get(int by) override {
generic_dout(0) << " bad get " << *this << " by " << by << " " << pin_name(by) << " was " << ref
@@ -1101,7 +1101,7 @@ public:
#endif
<< dendl;
#ifdef MDS_REF_SET
- assert(ref_map[by] >= 0);
+ ceph_assert(ref_map[by] >= 0);
#endif
}
void first_get() override;
@@ -1112,12 +1112,12 @@ public:
// -- hierarchy stuff --
public:
void set_primary_parent(CDentry *p) {
- assert(parent == 0 ||
+ ceph_assert(parent == 0 ||
g_conf().get_val<bool>("mds_hack_allow_loading_invalid_metadata"));
parent = p;
}
void remove_primary_parent(CDentry *dn) {
- assert(dn == parent);
+ ceph_assert(dn == parent);
parent = 0;
}
void add_remote_parent(CDentry *p);
@@ -1130,7 +1130,7 @@ public:
projected_parent.push_back(dn);
}
void pop_projected_parent() {
- assert(projected_parent.size());
+ ceph_assert(projected_parent.size());
parent = projected_parent.front();
projected_parent.pop_front();
}
diff --git a/src/mds/Capability.h b/src/mds/Capability.h
index a2a5d5b9feae1..bd3fcd448cd21 100644
--- a/src/mds/Capability.h
+++ b/src/mds/Capability.h
@@ -155,7 +155,7 @@ public:
_revokes.pop_back();
} else {
// no change.
- assert(_pending == c);
+ ceph_assert(_pending == c);
}
//last_issue = ++last_sent;
diff --git a/src/mds/DamageTable.cc b/src/mds/DamageTable.cc
index c726af3fb3db5..c474b078b21d1 100644
--- a/src/mds/DamageTable.cc
+++ b/src/mds/DamageTable.cc
@@ -258,7 +258,7 @@ void DamageTable::erase(damage_entry_id_t damage_id)
}
DamageEntryRef entry = by_id_entry->second;
- assert(entry->id == damage_id); // Sanity
+ ceph_assert(entry->id == damage_id); // Sanity
const auto type = entry->get_type();
if (type == DAMAGE_ENTRY_DIRFRAG) {
diff --git a/src/mds/DamageTable.h b/src/mds/DamageTable.h
index fee50644f900f..a408036cb21ed 100644
--- a/src/mds/DamageTable.h
+++ b/src/mds/DamageTable.h
@@ -195,7 +195,7 @@ public:
explicit DamageTable(const mds_rank_t rank_)
: rank(rank_)
{
- assert(rank_ != MDS_RANK_NONE);
+ ceph_assert(rank_ != MDS_RANK_NONE);
}
void dump(Formatter *f) const;
diff --git a/src/mds/FSMap.cc b/src/mds/FSMap.cc
index 43ac91e3d1ba7..87a6a5362d9de 100644
--- a/src/mds/FSMap.cc
+++ b/src/mds/FSMap.cc
@@ -243,11 +243,11 @@ std::shared_ptr<Filesystem> FSMap::create_filesystem(std::string_view name,
fs->fscid = next_filesystem_id++;
// ANONYMOUS is only for upgrades from legacy mdsmaps, we should
// have initialized next_filesystem_id such that it's never used here.
- assert(fs->fscid != FS_CLUSTER_ID_ANONYMOUS);
+ ceph_assert(fs->fscid != FS_CLUSTER_ID_ANONYMOUS);
} else {
// Use anon fscid because this will get thrown away when encoding
// as legacy MDSMap for legacy mons.
- assert(filesystems.empty());
+ ceph_assert(filesystems.empty());
fs->fscid = FS_CLUSTER_ID_ANONYMOUS;
}
filesystems[fs->fscid] = fs;
@@ -400,7 +400,7 @@ void FSMap::encode(bufferlist& bl, uint64_t features) const
} else {
// MDSMonitor should never have created multiple filesystems
// until the quorum features indicated Jewel
- assert(filesystems.size() == 1);
+ ceph_assert(filesystems.size() == 1);
auto fs = filesystems.begin()->second;
// Take the MDSMap for the enabled filesystem, and populated its
@@ -511,7 +511,7 @@ void FSMap::decode(bufferlist::const_iterator& p)
decode(legacy_mds_map.inline_data_enabled, p);
if (ev >= 8) {
- assert(struct_v >= 5);
+ ceph_assert(struct_v >= 5);
decode(legacy_mds_map.enabled, p);
decode(legacy_mds_map.fs_name, p);
} else {
@@ -684,8 +684,8 @@ mds_gid_t FSMap::find_standby_for(mds_role_t role, std::string_view name) const
for (const auto &i : standby_daemons) {
const auto &gid = i.first;
const auto &info = i.second;
- assert(info.state == MDSMap::STATE_STANDBY);
- assert(info.rank == MDS_RANK_NONE);
+ ceph_assert(info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(info.rank == MDS_RANK_NONE);
if (info.laggy()) {
continue;
@@ -720,7 +720,7 @@ mds_gid_t FSMap::find_unused_for(mds_role_t role,
for (const auto &i : standby_daemons) {
const auto &gid = i.first;
const auto &info = i.second;
- assert(info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(info.state == MDSMap::STATE_STANDBY);
if (info.laggy() || info.rank >= 0)
continue;
@@ -754,53 +754,53 @@ mds_gid_t FSMap::find_replacement_for(mds_role_t role, std::string_view name,
void FSMap::sanity() const
{
if (legacy_client_fscid != FS_CLUSTER_ID_NONE) {
- assert(filesystems.count(legacy_client_fscid) == 1);
+ ceph_assert(filesystems.count(legacy_client_fscid) == 1);
}
for (const auto &i : filesystems) {
auto fs = i.second;
- assert(fs->mds_map.compat.compare(compat) == 0);
- assert(fs->fscid == i.first);
+ ceph_assert(fs->mds_map.compat.compare(compat) == 0);
+ ceph_assert(fs->fscid == i.first);
for (const auto &j : fs->mds_map.mds_info) {
- assert(j.second.rank != MDS_RANK_NONE);
- assert(mds_roles.count(j.first) == 1);
- assert(standby_daemons.count(j.first) == 0);
- assert(standby_epochs.count(j.first) == 0);
- assert(mds_roles.at(j.first) == i.first);
+ ceph_assert(j.second.rank != MDS_RANK_NONE);
+ ceph_assert(mds_roles.count(j.first) == 1);
+ ceph_assert(standby_daemons.count(j.first) == 0);
+ ceph_assert(standby_epochs.count(j.first) == 0);
+ ceph_assert(mds_roles.at(j.first) == i.first);
if (j.second.state != MDSMap::STATE_STANDBY_REPLAY) {
- assert(fs->mds_map.up.at(j.second.rank) == j.first);
- assert(fs->mds_map.failed.count(j.second.rank) == 0);
- assert(fs->mds_map.damaged.count(j.second.rank) == 0);
+ ceph_assert(fs->mds_map.up.at(j.second.rank) == j.first);
+ ceph_assert(fs->mds_map.failed.count(j.second.rank) == 0);
+ ceph_assert(fs->mds_map.damaged.count(j.second.rank) == 0);
}
}
for (const auto &j : fs->mds_map.up) {
mds_rank_t rank = j.first;
- assert(fs->mds_map.in.count(rank) == 1);
+ ceph_assert(fs->mds_map.in.count(rank) == 1);
mds_gid_t gid = j.second;
- assert(fs->mds_map.mds_info.count(gid) == 1);
+ ceph_assert(fs->mds_map.mds_info.count(gid) == 1);
}
}
for (const auto &i : standby_daemons) {
- assert(i.second.state == MDSMap::STATE_STANDBY);
- assert(i.second.rank == MDS_RANK_NONE);
- assert(i.second.global_id == i.first);
- assert(standby_epochs.count(i.first) == 1);
- assert(mds_roles.count(i.first) == 1);
- assert(mds_roles.at(i.first) == FS_CLUSTER_ID_NONE);
+ ceph_assert(i.second.state == MDSMap::STATE_STANDBY);
+ ceph_assert(i.second.rank == MDS_RANK_NONE);
+ ceph_assert(i.second.global_id == i.first);
+ ceph_assert(standby_epochs.count(i.first) == 1);
+ ceph_assert(mds_roles.count(i.first) == 1);
+ ceph_assert(mds_roles.at(i.first) == FS_CLUSTER_ID_NONE);
}
for (const auto &i : standby_epochs) {
- assert(standby_daemons.count(i.first) == 1);
+ ceph_assert(standby_daemons.count(i.first) == 1);
}
for (const auto &i : mds_roles) {
if (i.second == FS_CLUSTER_ID_NONE) {
- assert(standby_daemons.count(i.first) == 1);
+ ceph_assert(standby_daemons.count(i.first) == 1);
} else {
- assert(filesystems.count(i.second) == 1);
- assert(filesystems.at(i.second)->mds_map.mds_info.count(i.first) == 1);
+ ceph_assert(filesystems.count(i.second) == 1);
+ ceph_assert(filesystems.at(i.second)->mds_map.mds_info.count(i.first) == 1);
}
}
}
@@ -810,11 +810,11 @@ void FSMap::promote(
const std::shared_ptr<Filesystem> &filesystem,
mds_rank_t assigned_rank)
{
- assert(gid_exists(standby_gid));
+ ceph_assert(gid_exists(standby_gid));
bool is_standby_replay = mds_roles.at(standby_gid) != FS_CLUSTER_ID_NONE;
if (!is_standby_replay) {
- assert(standby_daemons.count(standby_gid));
- assert(standby_daemons.at(standby_gid).state == MDSMap::STATE_STANDBY);
+ ceph_assert(standby_daemons.count(standby_gid));
+ ceph_assert(standby_daemons.at(standby_gid).state == MDSMap::STATE_STANDBY);
}
MDSMap &mds_map = filesystem->mds_map;
@@ -823,9 +823,9 @@ void FSMap::promote(
if (!is_standby_replay) {
mds_map.mds_info[standby_gid] = standby_daemons.at(standby_gid);
} else {
- assert(mds_map.mds_info.count(standby_gid));
- assert(mds_map.mds_info.at(standby_gid).state == MDSMap::STATE_STANDBY_REPLAY);
- assert(mds_map.mds_info.at(standby_gid).rank == assigned_rank);
+ ceph_assert(mds_map.mds_info.count(standby_gid));
+ ceph_assert(mds_map.mds_info.at(standby_gid).state == MDSMap::STATE_STANDBY_REPLAY);
+ ceph_assert(mds_map.mds_info.at(standby_gid).rank == assigned_rank);
}
MDSMap::mds_info_t &info = mds_map.mds_info[standby_gid];
@@ -863,10 +863,10 @@ void FSMap::assign_standby_replay(
const fs_cluster_id_t leader_ns,
const mds_rank_t leader_rank)
{
- assert(mds_roles.at(standby_gid) == FS_CLUSTER_ID_NONE);
- assert(gid_exists(standby_gid));
- assert(!gid_has_rank(standby_gid));
- assert(standby_daemons.count(standby_gid));
+ ceph_assert(mds_roles.at(standby_gid) == FS_CLUSTER_ID_NONE);
+ ceph_assert(gid_exists(standby_gid));
+ ceph_assert(!gid_has_rank(standby_gid));
+ ceph_assert(standby_daemons.count(standby_gid));
// Insert to the filesystem
auto fs = filesystems.at(leader_ns);
@@ -902,7 +902,7 @@ void FSMap::erase(mds_gid_t who, epoch_t blacklist_epoch)
// STANDBY will pick it up.
fs->mds_map.failed.insert(info.rank);
}
- assert(fs->mds_map.up.at(info.rank) == info.global_id);
+ ceph_assert(fs->mds_map.up.at(info.rank) == info.global_id);
fs->mds_map.up.erase(info.rank);
}
fs->mds_map.mds_info.erase(who);
@@ -915,7 +915,7 @@ void FSMap::erase(mds_gid_t who, epoch_t blacklist_epoch)
void FSMap::damaged(mds_gid_t who, epoch_t blacklist_epoch)
{
- assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
+ ceph_assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
auto fs = filesystems.at(mds_roles.at(who));
mds_rank_t rank = fs->mds_map.mds_info[who].rank;
@@ -923,7 +923,7 @@ void FSMap::damaged(mds_gid_t who, epoch_t blacklist_epoch)
fs->mds_map.failed.erase(rank);
fs->mds_map.damaged.insert(rank);
- assert(fs->mds_map.epoch == epoch);
+ ceph_assert(fs->mds_map.epoch == epoch);
}
/**
@@ -945,8 +945,8 @@ bool FSMap::undamaged(const fs_cluster_id_t fscid, const mds_rank_t rank)
void FSMap::insert(const MDSMap::mds_info_t &new_info)
{
- assert(new_info.state == MDSMap::STATE_STANDBY);
- assert(new_info.rank == MDS_RANK_NONE);
+ ceph_assert(new_info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(new_info.rank == MDS_RANK_NONE);
mds_roles[new_info.global_id] = FS_CLUSTER_ID_NONE;
standby_daemons[new_info.global_id] = new_info;
standby_epochs[new_info.global_id] = epoch;
@@ -954,7 +954,7 @@ void FSMap::insert(const MDSMap::mds_info_t &new_info)
std::list<mds_gid_t> FSMap::stop(mds_gid_t who)
{
- assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
+ ceph_assert(mds_roles.at(who) != FS_CLUSTER_ID_NONE);
auto fs = filesystems.at(mds_roles.at(who));
const auto &info = fs->mds_map.mds_info.at(who);
fs->mds_map.up.erase(info.rank);
diff --git a/src/mds/FSMap.h b/src/mds/FSMap.h
index b6d98b978b664..a60b7076ac6b5 100644
--- a/src/mds/FSMap.h
+++ b/src/mds/FSMap.h
@@ -156,7 +156,7 @@ public:
void set_legacy_client_fscid(fs_cluster_id_t fscid)
{
- assert(fscid == FS_CLUSTER_ID_NONE || filesystems.count(fscid));
+ ceph_assert(fscid == FS_CLUSTER_ID_NONE || filesystems.count(fscid));
legacy_client_fscid = fscid;
}
@@ -337,7 +337,7 @@ public:
if (mds_roles.at(who) == FS_CLUSTER_ID_NONE) {
auto &info = standby_daemons.at(who);
fn(&info);
- assert(info.state == MDSMap::STATE_STANDBY);
+ ceph_assert(info.state == MDSMap::STATE_STANDBY);
standby_epochs[who] = epoch;
} else {
const auto &fs = filesystems[mds_roles.at(who)];
diff --git a/src/mds/InoTable.cc b/src/mds/InoTable.cc
index 7508ffa28919f..803d5431c0d76 100644
--- a/src/mds/InoTable.cc
+++ b/src/mds/InoTable.cc
@@ -44,7 +44,7 @@ void InoTable::reset_state()
inodeno_t InoTable::project_alloc_id(inodeno_t id)
{
dout(10) << "project_alloc_id " << id << " to " << projected_free << "/" << free << dendl;
- assert(is_active());
+ ceph_assert(is_active());
if (!id)
id = projected_free.range_start();
projected_free.erase(id);
@@ -60,7 +60,7 @@ void InoTable::apply_alloc_id(inodeno_t id)
void InoTable::project_alloc_ids(interval_set<inodeno_t>& ids, int want)
{
- assert(is_active());
+ ceph_assert(is_active());
while (want > 0) {
inodeno_t start = projected_free.range_start();
inodeno_t end = projected_free.end_after(start);
@@ -100,7 +100,7 @@ void InoTable::apply_release_ids(interval_set<inodeno_t>& ids)
void InoTable::replay_alloc_id(inodeno_t id)
{
- assert(mds); // Only usable in online mode
+ ceph_assert(mds); // Only usable in online mode
dout(10) << "replay_alloc_id " << id << dendl;
if (free.contains(id)) {
@@ -114,7 +114,7 @@ void InoTable::replay_alloc_id(inodeno_t id)
}
void InoTable::replay_alloc_ids(interval_set<inodeno_t>& ids)
{
- assert(mds); // Only usable in online mode
+ ceph_assert(mds); // Only usable in online mode
dout(10) << "replay_alloc_ids " << ids << dendl;
interval_set<inodeno_t> is;
@@ -215,7 +215,7 @@ bool InoTable::repair(inodeno_t id)
return false;
}
- assert(is_marked_free(id));
+ ceph_assert(is_marked_free(id));
dout(10) << "repair: before status. ino = " << id << " pver =" << projected_version << " ver= " << version << dendl;
free.erase(id);
projected_free.erase(id);
diff --git a/src/mds/JournalPointer.cc b/src/mds/JournalPointer.cc
index d11847f846c22..797798aacb3f2 100644
--- a/src/mds/JournalPointer.cc
+++ b/src/mds/JournalPointer.cc
@@ -44,7 +44,7 @@ std::string JournalPointer::get_object_id() const
*/
int JournalPointer::load(Objecter *objecter)
{
- assert(objecter != NULL);
+ ceph_assert(objecter != NULL);
// Blocking read of data
std::string const object_id = get_object_id();
@@ -77,9 +77,9 @@ int JournalPointer::load(Objecter *objecter)
*/
int JournalPointer::save(Objecter *objecter) const
{
- assert(objecter != NULL);
+ ceph_assert(objecter != NULL);
// It is not valid to persist a null pointer
- assert(!is_null());
+ ceph_assert(!is_null());
// Serialize JournalPointer object
bufferlist data;
@@ -109,7 +109,7 @@ int JournalPointer::save(Objecter *objecter) const
*/
void JournalPointer::save(Objecter *objecter, Context *completion) const
{
- assert(objecter != NULL);
+ ceph_assert(objecter != NULL);
bufferlist data;
encode(data);
diff --git a/src/mds/LocalLock.h b/src/mds/LocalLock.h
index 6609827165e78..d405a6b33bb2e 100644
--- a/src/mds/LocalLock.h
+++ b/src/mds/LocalLock.h
@@ -39,7 +39,7 @@ public:
return !is_xlocked();
}
void get_wrlock(client_t client) {
- assert(can_wrlock());
+ ceph_assert(can_wrlock());
SimpleLock::get_wrlock();
last_wrlock_client = client;
}
diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc
index affb7ae2d1257..e0950815e8fa1 100644
--- a/src/mds/Locker.cc
+++ b/src/mds/Locker.cc
@@ -67,7 +67,7 @@ protected:
public:
explicit LockerContext(Locker *locker_) : locker(locker_) {
- assert(locker != NULL);
+ ceph_assert(locker != NULL);
}
};
@@ -81,7 +81,7 @@ protected:
public:
explicit LockerLogContext(Locker *locker_) : locker(locker_) {
- assert(locker != NULL);
+ ceph_assert(locker != NULL);
}
};
@@ -113,7 +113,7 @@ void Locker::dispatch(const Message::const_ref &m)
break;
default:
derr << "locker unknown message " << m->get_type() << dendl;
- assert(0 == "locker unknown message");
+ ceph_assert(0 == "locker unknown message");
}
}
@@ -438,7 +438,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
p != mdr->remote_auth_pins.end();
++p) {
if (mustpin.count(p->first)) {
- assert(p->second == p->first->authority().first);
+ ceph_assert(p->second == p->first->authority().first);
map<mds_rank_t, set<MDSCacheObject*> >::iterator q = mustpin_remote.find(p->second);
if (q != mustpin_remote.end())
q->second.insert(p->first);
@@ -475,7 +475,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
mds->send_message_mds(req, p->first);
// put in waiting list
- assert(mdr->more()->waiting_on_slave.count(p->first) == 0);
+ ceph_assert(mdr->more()->waiting_on_slave.count(p->first) == 0);
mdr->more()->waiting_on_slave.insert(p->first);
}
return false;
@@ -529,7 +529,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
// hose any stray locks
if (existing != mdr->locks.end() && *existing == *p) {
- assert(need_wrlock || need_remote_wrlock);
+ ceph_assert(need_wrlock || need_remote_wrlock);
SimpleLock *lock = *existing;
if (mdr->wrlocks.count(lock)) {
if (!need_wrlock)
@@ -594,7 +594,7 @@ bool Locker::acquire_locks(MDRequestRef& mdr,
dout(10) << " got wrlock on " << **p << " "
<< *(*p)->get_parent() << dendl;
}
} else {
- assert(mdr->is_master());
+ ceph_assert(mdr->is_master());
if ((*p)->needs_recover()) {
if (mds->is_cluster_degraded()) {
if (!mdr->is_queued_for_replay()) {
@@ -660,7 +660,7 @@ void Locker::notify_freeze_waiter(MDSCacheObject *o)
dir = dn->get_dir();
} else {
dir = dynamic_cast<CDir*>(o);
- assert(dir);
+ ceph_assert(dir);
}
if (dir) {
if (dir->is_freezing_dir())
@@ -679,7 +679,7 @@ void Locker::set_xlocks_done(MutationImpl *mut, bool skip_dentry)
p != mut->xlocks.end();
++p) {
MDSCacheObject *object = (*p)->get_parent();
- assert(object->is_auth());
+ ceph_assert(object->is_auth());
if (skip_dentry &&
((*p)->get_type() == CEPH_LOCK_DN || (*p)->get_type() == CEPH_LOCK_DVERSION))
continue;
@@ -707,7 +707,7 @@ void Locker::_drop_non_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue)
SimpleLock *lock = *mut->xlocks.begin();
MDSCacheObject *p = lock->get_parent();
if (!p->is_auth()) {
- assert(lock->get_sm()->can_remote_xlock);
+ ceph_assert(lock->get_sm()->can_remote_xlock);
slaves.insert(p->authority().first);
lock->put_xlock();
mut->locks.erase(lock);
@@ -749,7 +749,7 @@ void Locker::_drop_non_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue)
void Locker::cancel_locking(MutationImpl *mut, set<CInode*> *pneed_issue)
{
SimpleLock *lock = mut->locking;
- assert(lock);
+ ceph_assert(lock);
dout(10) << "cancel_locking " << *lock << " on " << *mut << dendl;
if (lock->get_parent()->is_auth()) {
@@ -821,7 +821,7 @@ void Locker::drop_rdlocks_for_early_reply(MutationImpl *mut)
void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInternalContextBase::vec *pfinishers)
{
dout(10) << "eval_gather " << *lock << " on " << *lock->get_parent() << dendl;
- assert(!lock->is_stable());
+ ceph_assert(!lock->is_stable());
int next = lock->get_next_state();
@@ -833,7 +833,7 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt
bool need_issue = false;
int loner_issued = 0, other_issued = 0, xlocker_issued = 0;
- assert(!caps || in != NULL);
+ ceph_assert(!caps || in != NULL);
if (caps && in->is_head()) {
in->get_caps_issued(&loner_issued, &other_issued, &xlocker_issued,
lock->get_cap_shift(), lock->get_cap_mask());
@@ -870,7 +870,7 @@ void Locker::eval_gather(SimpleLock *lock, bool first, bool *pneed_issue, MDSInt
<< " on " << *lock->get_parent() << dendl;
if (lock->get_sm() == &sm_filelock) {
- assert(in);
+ ceph_assert(in);
if (in->state_test(CInode::STATE_RECOVERING)) {
dout(7) << "eval_gather finished gather, but still recovering" << dendl;
return;
@@ -1081,7 +1081,7 @@ bool Locker::eval(CInode *in, int mask, bool caps_imported)
if (in->get_wanted_loner() >= 0) {
dout(10) << "eval end set loner to client." << in->get_loner() << dendl;
bool ok = in->try_set_loner();
- assert(ok);
+ ceph_assert(ok);
mask = -1;
goto retry;
}
@@ -1104,7 +1104,7 @@ public:
C_Locker_Eval(Locker *l, MDSCacheObject *pp, int m) : LockerContext(l), p(pp), mask(m) {
// We are used as an MDSCacheObject waiter, so should
// only be invoked by someone already holding the big lock.
- assert(locker->mds->mds_lock.is_locked_by_me());
+ ceph_assert(locker->mds->mds_lock.is_locked_by_me());
p->get(MDSCacheObject::PIN_PTRWAITER);
}
void finish(int r) override {
@@ -1129,7 +1129,7 @@ void Locker::try_eval(MDSCacheObject *p, int mask)
}
if (mask & CEPH_LOCK_DN) {
- assert(mask == CEPH_LOCK_DN);
+ ceph_assert(mask == CEPH_LOCK_DN);
bool need_issue = false; // ignore this, no caps on dentries
CDentry *dn = static_cast<CDentry*>(p);
eval_any(&dn->lock, &need_issue);
@@ -1365,7 +1365,7 @@ bool Locker::rdlock_start(SimpleLock *lock, MDRequestRef& mut, bool as_anon)
lock->get_state() == LOCK_SNAP_SYNC) {
// okay, we actually need to kick the head's lock to get ourselves synced up.
CInode *head = mdcache->get_inode(in->ino());
- assert(head);
+ ceph_assert(head);
SimpleLock *hlock = head->get_lock(CEPH_LOCK_IFILE);
if (hlock->get_state() == LOCK_SYNC)
hlock = head->get_lock(lock->get_type());
@@ -1586,7 +1586,7 @@ void Locker::remote_wrlock_start(SimpleLock *lock, mds_rank_t target, MDRequestR
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, target);
- assert(mut->more()->waiting_on_slave.count(target) == 0);
+ ceph_assert(mut->more()->waiting_on_slave.count(target) == 0);
mut->more()->waiting_on_slave.insert(target);
}
@@ -1660,8 +1660,8 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut)
return false;
} else {
// replica
- assert(lock->get_sm()->can_remote_xlock);
- assert(!mut->slave_request);
+ ceph_assert(lock->get_sm()->can_remote_xlock);
+ ceph_assert(!mut->slave_request);
// wait for single auth
if (lock->get_parent()->is_ambiguous_auth()) {
@@ -1688,7 +1688,7 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut)
lock->get_parent()->set_object_info(r->get_object_info());
mds->send_message_mds(r, auth);
- assert(mut->more()->waiting_on_slave.count(auth) == 0);
+ ceph_assert(mut->more()->waiting_on_slave.count(auth) == 0);
mut->more()->waiting_on_slave.insert(auth);
return false;
@@ -1697,7 +1697,7 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequestRef& mut)
void Locker::_finish_xlock(SimpleLock *lock, client_t xlocker, bool *pneed_issue)
{
- assert(!lock->is_stable());
+ ceph_assert(!lock->is_stable());
if (lock->get_type() != CEPH_LOCK_DN &&
lock->get_type() != CEPH_LOCK_ISNAP &&
lock->get_num_rdlocks() == 0 &&
@@ -1734,7 +1734,7 @@ void Locker::xlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue
// drop ref
lock->put_xlock();
- assert(mut);
+ ceph_assert(mut);
mut->xlocks.erase(lock);
mut->locks.erase(lock);
@@ -1742,7 +1742,7 @@ void Locker::xlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue
// remote xlock?
if (!lock->get_parent()->is_auth()) {
- assert(lock->get_sm()->can_remote_xlock);
+ ceph_assert(lock->get_sm()->can_remote_xlock);
// tell auth
dout(7) << "xlock_finish releasing remote xlock on " << *lock->get_parent() << dendl;
@@ -1786,7 +1786,7 @@ void Locker::xlock_export(SimpleLock *lock, MutationImpl *mut)
mut->locks.erase(lock);
MDSCacheObject *p = lock->get_parent();
- assert(p->state_test(CInode::STATE_AMBIGUOUSAUTH)); // we are exporting this (inode)
+ ceph_assert(p->state_test(CInode::STATE_AMBIGUOUSAUTH)); // we are exporting this (inode)
if (!lock->is_stable())
lock->get_parent()->auth_unpin(lock);
@@ -1877,7 +1877,7 @@ void Locker::file_update_finish(CInode *in, MutationRef& mut, unsigned flags,
auto q = p->second.find(client);
if (q != p->second.end()) {
SimpleLock *lock = in->get_lock(p->first);
- assert(lock);
+ ceph_assert(lock);
dout(10) << " completing client_snap_caps for " << ccap_string(p->first)
<< " lock " << *lock << " on " << *in << dendl;
lock->put_wrlock();
@@ -1922,7 +1922,7 @@ Capability* Locker::issue_new_caps(CInode *in,
}
// my needs
- assert(session->info.inst.name.is_client());
+ ceph_assert(session->info.inst.name.is_client());
client_t my_client = session->get_client();
int my_want = ceph_caps_for_mode(mode);
@@ -1995,7 +1995,7 @@ bool Locker::issue_caps(CInode *in, Capability *only_cap)
<< " on " << *in << dendl;
}
- assert(in->is_head());
+ ceph_assert(in->is_head());
// count conflicts with
int nissued = 0;
@@ -2177,7 +2177,7 @@ void Locker::resume_stale_caps(Session *session)
for (xlist<Capability*>::iterator p = session->caps.begin(); !p.end(); ++p) {
Capability *cap = *p;
CInode *in = cap->get_inode();
- assert(in->is_head());
+ ceph_assert(in->is_head());
if (cap->is_stale()) {
dout(10) << " clearing stale flag on " << *in << dendl;
cap->clear_stale();
@@ -2224,7 +2224,7 @@ public:
void Locker::request_inode_file_caps(CInode *in)
{
- assert(!in->is_auth());
+ ceph_assert(!in->is_auth());
int wanted = in->get_caps_wanted() & ~CEPH_CAP_PIN;
if (wanted != in->replica_caps_wanted) {
@@ -2257,14 +2257,14 @@ void Locker::request_inode_file_caps(CInode *in)
void Locker::handle_inode_file_caps(const MInodeFileCaps::const_ref &m)
{
// nobody should be talking to us during recovery.
- assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
+ ceph_assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
// ok
CInode *in = mdcache->get_inode(m->get_ino());
mds_rank_t from = mds_rank_t(m->get_source().num());
- assert(in);
- assert(in->is_auth());
+ ceph_assert(in);
+ ceph_assert(in->is_auth());
dout(7) << "handle_inode_file_caps replica mds."
<< from << " wants caps " << ccap_string(m->get_caps()) << " on " << *in << dendl; @@ -2344,8 +2344,8 @@ bool Locker::check_inode_max_size(CInode *in, bool force_wrlock, uint64_t new_max_size, uint64_t new_size, utime_t new_mtime) { - assert(in->is_auth()); - assert(in->is_file()); + ceph_assert(in->is_auth()); + ceph_assert(in->is_file()); CInode::mempool_inode *latest = in->get_projected_inode(); CInode::mempool_inode::client_range_map new_ranges; @@ -2551,7 +2551,7 @@ void Locker::adjust_cap_wanted(Capability *cap, int wanted, int issue_seq) } if (mdcache->open_file_table.should_log_open(cur)) { - assert(cur->last == CEPH_NOSNAP); + ceph_assert(cur->last == CEPH_NOSNAP); EOpen *le = new EOpen(mds->mdlog); mds->mdlog->start_entry(le); le->add_clean_inode(cur); @@ -2562,13 +2562,13 @@ void Locker::adjust_cap_wanted(Capability *cap, int wanted, int issue_seq) void Locker::snapflush_nudge(CInode *in) { - assert(in->last != CEPH_NOSNAP); + ceph_assert(in->last != CEPH_NOSNAP); if (in->client_snap_caps.empty()) return; CInode *head = mdcache->get_inode(in->ino()); - assert(head); - assert(head->is_auth()); + ceph_assert(head); + ceph_assert(head->is_auth()); if (head->client_need_snapflush.empty()) return; @@ -2593,7 +2593,7 @@ void Locker::snapflush_nudge(CInode *in) void Locker::mark_need_snapflush_inode(CInode *in) { - assert(in->last != CEPH_NOSNAP); + ceph_assert(in->last != CEPH_NOSNAP); if (!in->item_caps.is_on_list()) { need_snapflush_inodes.push_back(&in->item_caps); utime_t now = ceph_clock_now(); @@ -2614,8 +2614,8 @@ void Locker::_do_null_snapflush(CInode *head_in, client_t client, snapid_t last) if (clients.count(client)) { dout(10) << " doing async NULL snapflush on " << snapid << " from client." << client << dendl; CInode *sin = mdcache->pick_inode_snap(head_in, snapid - 1); - assert(sin); - assert(sin->first <= snapid); + ceph_assert(sin); + ceph_assert(sin->first <= snapid); _do_snap_update(sin, snapid, 0, sin->first - 1, client, MClientCaps::ref(), MClientCaps::ref()); head_in->remove_need_snapflush(sin, snapid, client); } @@ -2759,7 +2759,7 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m) dout(7) << "handle_client_caps no cap for client." << client << " on " << *head_in << dendl; return; } - assert(cap); + ceph_assert(cap); // freezing|frozen? if (should_defer_client_cap_frozen(head_in)) { @@ -2841,7 +2841,7 @@ void Locker::handle_client_caps(const MClientCaps::const_ref &m) in = mdcache->pick_inode_snap(head_in, follows); // intermediate snap inodes while (in != head_in) { - assert(in->last != CEPH_NOSNAP); + ceph_assert(in->last != CEPH_NOSNAP); if (in->is_auth() && dirty) { dout(10) << " updating intermediate snapped inode " << *in << dendl; _do_cap_update(in, NULL, dirty, follows, m, MClientCaps::ref()); @@ -3187,7 +3187,7 @@ void Locker::_update_cap_fields(CInode *in, int dirty, const MClientCaps::const_ return; /* m must be valid if there are dirty caps */ - assert(m); + ceph_assert(m); uint64_t features = m->get_connection()->get_features(); if (m->get_ctime() > pi->ctime) { @@ -3287,7 +3287,7 @@ bool Locker::_do_cap_update(CInode *in, Capability *cap, << " issued " << ccap_string(cap ? cap->issued() : 0) << " wanted " << ccap_string(cap ? 
cap->wanted() : 0)
<< " on " << *in << dendl;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
client_t client = m->get_source().num();
CInode::mempool_inode *latest = in->get_projected_inode();
@@ -3666,7 +3666,7 @@ void Locker::handle_client_lease(const MClientLease::const_ref &m)
{
dout(10) << "handle_client_lease " << *m << dendl;
- assert(m->get_source().is_client());
+ ceph_assert(m->get_source().is_client());
client_t client = m->get_source().num();
CInode *in = mdcache->get_inode(m->get_ino(), m->get_last());
@@ -3774,7 +3774,7 @@ void Locker::revoke_client_leases(SimpleLock *lock)
ClientLease *l = p->second;
n++;
- assert(lock->get_type() == CEPH_LOCK_DN);
+ ceph_assert(lock->get_type() == CEPH_LOCK_DN);
CDentry *dn = static_cast<CDentry*>(lock->get_parent());
int mask = 1 | CEPH_LOCK_DN; // old and new bits
@@ -3868,7 +3868,7 @@ SimpleLock *Locker::get_lock(int lock_type, const MDSCacheObjectInfo &info)
void Locker::handle_lock(const MLock::const_ref &m)
{
// nobody should be talking to us during recovery.
- assert(mds->is_rejoin() || mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
+ ceph_assert(mds->is_rejoin() || mds->is_clientreplay() || mds->is_active() || mds->is_stopping());
SimpleLock *lock = get_lock(m->get_lock_type(), m->get_object_info());
if (!lock) {
@@ -3920,7 +3920,7 @@ void Locker::handle_reqrdlock(SimpleLock *lock, const MLock::const_ref &m)
!parent->is_frozen()) {
dout(7) << "handle_reqrdlock got rdlock request on " << *lock
<< " on " << *parent << dendl;
- assert(parent->is_auth()); // replica auth pinned if they're doing this!
+ ceph_assert(parent->is_auth()); // replica auth pinned if they're doing this!
if (lock->is_stable()) {
simple_sync(lock);
} else {
@@ -3953,14 +3953,14 @@ void Locker::handle_simple_lock(SimpleLock *lock, const MLock::const_ref &m)
switch (m->get_action()) {
// -- replica --
case LOCK_AC_SYNC:
- assert(lock->get_state() == LOCK_LOCK);
+ ceph_assert(lock->get_state() == LOCK_LOCK);
lock->decode_locked_state(m->get_data());
lock->set_state(LOCK_SYNC);
lock->finish_waiters(SimpleLock::WAIT_RD|SimpleLock::WAIT_STABLE);
break;
case LOCK_AC_LOCK:
- assert(lock->get_state() == LOCK_SYNC);
+ ceph_assert(lock->get_state() == LOCK_SYNC);
lock->set_state(LOCK_SYNC_LOCK);
if (lock->is_leased())
revoke_client_leases(lock);
@@ -3972,9 +3972,9 @@ void Locker::handle_simple_lock(SimpleLock *lock, const MLock::const_ref &m)
// -- auth --
case LOCK_AC_LOCKACK:
- assert(lock->get_state() == LOCK_SYNC_LOCK ||
+ ceph_assert(lock->get_state() == LOCK_SYNC_LOCK ||
lock->get_state() == LOCK_SYNC_EXCL);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
if (lock->is_gathering()) {
@@ -4039,8 +4039,8 @@ void Locker::simple_eval(SimpleLock *lock, bool *need_issue)
{
dout(10) << "simple_eval " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
if (lock->get_parent()->is_freezing_or_frozen()) {
// dentry/snap lock in unreadable state can block path traverse
@@ -4092,8 +4092,8 @@ void Locker::simple_eval(SimpleLock *lock, bool *need_issue)
bool Locker::simple_sync(SimpleLock *lock, bool *need_issue)
{
dout(7) << "simple_sync on " << *lock << " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
CInode *in = 0;
if
(lock->get_cap_shift()) @@ -4133,7 +4133,7 @@ bool Locker::simple_sync(SimpleLock *lock, bool *need_issue) bool need_recover = false; if (lock->get_type() == CEPH_LOCK_IFILE) { - assert(in); + ceph_assert(in); if (in->state_test(CInode::STATE_NEEDSRECOVER)) { mds->mdcache->queue_file_recover(in); need_recover = true; @@ -4175,8 +4175,8 @@ bool Locker::simple_sync(SimpleLock *lock, bool *need_issue) void Locker::simple_excl(SimpleLock *lock, bool *need_issue) { dout(7) << "simple_excl on " << *lock << " on " << *lock->get_parent() << dendl; - assert(lock->get_parent()->is_auth()); - assert(lock->is_stable()); + ceph_assert(lock->get_parent()->is_auth()); + ceph_assert(lock->is_stable()); CInode *in = 0; if (lock->get_cap_shift()) @@ -4230,9 +4230,9 @@ void Locker::simple_excl(SimpleLock *lock, bool *need_issue) void Locker::simple_lock(SimpleLock *lock, bool *need_issue) { dout(7) << "simple_lock on " << *lock << " on " << *lock->get_parent() << dendl; - assert(lock->get_parent()->is_auth()); - assert(lock->is_stable()); - assert(lock->get_state() != LOCK_LOCK); + ceph_assert(lock->get_parent()->is_auth()); + ceph_assert(lock->is_stable()); + ceph_assert(lock->get_state() != LOCK_LOCK); CInode *in = 0; if (lock->get_cap_shift()) @@ -4270,7 +4270,7 @@ void Locker::simple_lock(SimpleLock *lock, bool *need_issue) bool need_recover = false; if (lock->get_type() == CEPH_LOCK_IFILE) { - assert(in); + ceph_assert(in); if(in->state_test(CInode::STATE_NEEDSRECOVER)) { mds->mdcache->queue_file_recover(in); need_recover = true; @@ -4316,9 +4316,9 @@ void Locker::simple_lock(SimpleLock *lock, bool *need_issue) void Locker::simple_xlock(SimpleLock *lock) { dout(7) << "simple_xlock on " << *lock << " on " << *lock->get_parent() << dendl; - assert(lock->get_parent()->is_auth()); + ceph_assert(lock->get_parent()->is_auth()); //assert(lock->is_stable()); - assert(lock->get_state() != LOCK_XLOCK); + ceph_assert(lock->get_state() != LOCK_XLOCK); CInode *in = 0; if (lock->get_cap_shift()) @@ -4475,8 +4475,8 @@ void Locker::scatter_eval(ScatterLock *lock, bool *need_issue) { dout(10) << "scatter_eval " << *lock << " on " << *lock->get_parent() << dendl; - assert(lock->get_parent()->is_auth()); - assert(lock->is_stable()); + ceph_assert(lock->get_parent()->is_auth()); + ceph_assert(lock->is_stable()); if (lock->get_parent()->is_freezing_or_frozen()) { dout(20) << " freezing|frozen" << dendl; @@ -4636,7 +4636,7 @@ void Locker::scatter_nudge(ScatterLock *lock, MDSInternalContextBase *c, bool fo // handle_file_lock due to AC_NUDGE, because the rest of the // time we are replicated or have dirty data and won't get // called. bailing here avoids an infinite loop. 
- assert(!c);
+ ceph_assert(!c);
break;
}
} else {
@@ -4696,10 +4696,10 @@ void Locker::scatter_tempsync(ScatterLock *lock, bool *need_issue)
{
dout(10) << "scatter_tempsync " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
- assert(0 == "not fully implemented, at least not for filelock");
+ ceph_assert(0 == "not fully implemented, at least not for filelock");
CInode *in = static_cast<CInode*>(lock->get_parent());
@@ -4756,9 +4756,9 @@ void Locker::local_wrlock_grab(LocalLock *lock, MutationRef& mut)
dout(7) << "local_wrlock_grab on " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->can_wrlock());
- assert(!mut->wrlocks.count(lock));
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->can_wrlock());
+ ceph_assert(!mut->wrlocks.count(lock));
lock->get_wrlock(mut->get_client());
mut->wrlocks.insert(lock);
mut->locks.insert(lock);
@@ -4769,9 +4769,9 @@ bool Locker::local_wrlock_start(LocalLock *lock, MDRequestRef& mut)
dout(7) << "local_wrlock_start on " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->get_parent()->is_auth());
if (lock->can_wrlock()) {
- assert(!mut->wrlocks.count(lock));
+ ceph_assert(!mut->wrlocks.count(lock));
lock->get_wrlock(mut->get_client());
mut->wrlocks.insert(lock);
mut->locks.insert(lock);
@@ -4801,7 +4801,7 @@ bool Locker::local_xlock_start(LocalLock *lock, MDRequestRef& mut)
dout(7) << "local_xlock_start on " << *lock
<< " on " << *lock->get_parent() << dendl;
- assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->get_parent()->is_auth());
if (!lock->can_xlock_local()) {
lock->add_waiter(SimpleLock::WAIT_WR|SimpleLock::WAIT_STABLE, new C_MDS_RetryRequest(mdcache, mut));
return false;
}
@@ -4843,8 +4843,8 @@ void Locker::file_eval(ScatterLock *lock, bool *need_issue)
<< " filelock=" << *lock << " on " << *lock->get_parent()
<< dendl;
- assert(lock->get_parent()->is_auth());
- assert(lock->is_stable());
+ ceph_assert(lock->get_parent()->is_auth());
+ ceph_assert(lock->is_stable());
if (lock->get_parent()->is_freezing_or_frozen())
return;
@@ -4940,8 +4940,8 @@ void Locker::scatter_mix(ScatterLock *lock, bool *need_issue)
dout(7) << "scatter_mix " << *lock << " on " << *lock->get_parent() << dendl;
CInode *in = static_cast<CInode*>(lock->get_parent());
- assert(in->is_auth());
- assert(lock->is_stable());
+ ceph_assert(in->is_auth());
+ ceph_assert(lock->is_stable());
if (lock->get_state() == LOCK_LOCK) {
in->start_scatter(lock);
@@ -5032,10 +5032,10 @@ void Locker::file_excl(ScatterLock *lock, bool *need_issue)
CInode *in = static_cast<CInode*>(lock->get_parent());
dout(7) << "file_excl " << *lock << " on " << *lock->get_parent() << dendl;
- assert(in->is_auth());
- assert(lock->is_stable());
+ ceph_assert(in->is_auth());
+ ceph_assert(lock->is_stable());
- assert((in->get_loner() >= 0 && in->get_mds_caps_wanted().empty()) ||
+ ceph_assert((in->get_loner() >= 0 && in->get_mds_caps_wanted().empty()) ||
(lock->get_state() == LOCK_XSYN)); // must do xsyn -> excl -> <anything else>
switch (lock->get_state()) {
@@ -5095,8 +5095,8 @@ void Locker::file_xsyn(SimpleLock *lock, bool *need_issue)
{
dout(7) << "file_xsyn on " << *lock << " on " << *lock->get_parent() << dendl;
CInode *in = static_cast<CInode*>(lock->get_parent());
- assert(in->is_auth());
- assert(in->get_loner() >= 0 && in->get_mds_caps_wanted().empty());
+ ceph_assert(in->is_auth());
+ ceph_assert(in->get_loner() >= 0 && in->get_mds_caps_wanted().empty());
switch (lock->get_state()) {
case LOCK_EXCL: lock->set_state(LOCK_EXCL_XSYN); break;
@@ -5133,9 +5133,9 @@ void Locker::file_recover(ScatterLock *lock)
CInode *in = static_cast<CInode*>(lock->get_parent());
dout(7) << "file_recover " << *lock << " on " << *in << dendl;
- assert(in->is_auth());
+ ceph_assert(in->is_auth());
//assert(lock->is_stable());
- assert(lock->get_state() == LOCK_PRE_SCAN); // only called from MDCache::start_files_to_recover()
+ ceph_assert(lock->get_state() == LOCK_PRE_SCAN); // only called from MDCache::start_files_to_recover()
int gather = 0;
@@ -5185,7 +5185,7 @@ void Locker::handle_file_lock(ScatterLock *lock, const MLock::const_ref &m)
switch (m->get_action()) {
// -- replica --
case LOCK_AC_SYNC:
- assert(lock->get_state() == LOCK_LOCK ||
+ ceph_assert(lock->get_state() == LOCK_LOCK ||
lock->get_state() == LOCK_MIX ||
lock->get_state() == LOCK_MIX_SYNC2);
@@ -5233,7 +5233,7 @@ void Locker::handle_file_lock(ScatterLock *lock, const MLock::const_ref &m)
break;
case LOCK_AC_MIX:
- assert(lock->get_state() == LOCK_SYNC ||
+ ceph_assert(lock->get_state() == LOCK_SYNC ||
lock->get_state() == LOCK_LOCK ||
lock->get_state() == LOCK_SYNC_MIX2);
@@ -5259,14 +5259,14 @@ void Locker::handle_file_lock(ScatterLock *lock, const MLock::const_ref &m)
// -- auth --
case LOCK_AC_LOCKACK:
- assert(lock->get_state() == LOCK_SYNC_LOCK ||
+ ceph_assert(lock->get_state() == LOCK_SYNC_LOCK ||
lock->get_state() == LOCK_MIX_LOCK ||
lock->get_state() == LOCK_MIX_LOCK2 ||
lock->get_state() == LOCK_MIX_EXCL ||
lock->get_state() == LOCK_SYNC_EXCL ||
lock->get_state() == LOCK_SYNC_MIX ||
lock->get_state() == LOCK_MIX_TSYN);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
if (lock->get_state() == LOCK_MIX_LOCK ||
@@ -5290,8 +5290,8 @@ void Locker::handle_file_lock(ScatterLock *lock, const MLock::const_ref &m)
break;
case LOCK_AC_SYNCACK:
- assert(lock->get_state() == LOCK_MIX_SYNC);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->get_state() == LOCK_MIX_SYNC);
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
lock->decode_locked_state(m->get_data());
@@ -5307,8 +5307,8 @@ void Locker::handle_file_lock(ScatterLock *lock, const MLock::const_ref &m)
break;
case LOCK_AC_MIXACK:
- assert(lock->get_state() == LOCK_SYNC_MIX);
- assert(lock->is_gathering(from));
+ ceph_assert(lock->get_state() == LOCK_SYNC_MIX);
+ ceph_assert(lock->is_gathering(from));
lock->remove_gather(from);
if (lock->is_gathering()) {
diff --git a/src/mds/LogEvent.cc b/src/mds/LogEvent.cc
index 494aa112fabc1..9a6e26beee673 100644
--- a/src/mds/LogEvent.cc
+++ b/src/mds/LogEvent.cc
@@ -175,7 +175,7 @@ LogEvent *LogEvent::decode_event(bufferlist& bl, bufferlist::const_iterator& p,
return NULL;
}
- assert(p.end());
+ ceph_assert(p.end());
return le;
}
diff --git a/src/mds/LogSegment.h b/src/mds/LogSegment.h
index cff5dea0df758..732588417572d 100644
--- a/src/mds/LogSegment.h
+++ b/src/mds/LogSegment.h
@@ -78,7 +78,7 @@ class LogSegment {
void wait_for_expiry(MDSInternalContextBase *c)
{
- assert(c != NULL);
+ ceph_assert(c != NULL);
expiry_waiters.push_back(c);
}
diff --git a/src/mds/MDBalancer.cc b/src/mds/MDBalancer.cc
index 7e5062502d77f..26c3177deac84 100644
--- a/src/mds/MDBalancer.cc
+++ b/src/mds/MDBalancer.cc
@@ -69,7 +69,7 @@ int MDBalancer::proc_message(const Message::const_ref &m)
default:
derr << " balancer unknown message " << m->get_type() <<
dendl_impl;
- assert(0 == "balancer unknown message");
+ ceph_assert(0 == "balancer unknown message");
}
return 0;
@@ -100,7 +100,7 @@ void MDBalancer::handle_export_pins(void)
while (it != q.end()) {
auto cur = it++;
CInode *in = *cur;
- assert(in->is_dir());
+ ceph_assert(in->is_dir());
mds_rank_t export_pin = in->get_export_pin(false);
bool remove = true;
@@ -548,7 +548,7 @@ void MDBalancer::queue_merge(CDir *dir)
{
const auto frag = dir->dirfrag();
auto callback = [this, frag](int r) {
- assert(frag.frag != frag_t());
+ ceph_assert(frag.frag != frag_t());
// frag must be in this set because only one context is in flight
// for a given frag at a time (because merge_pending is checked before
@@ -560,7 +560,7 @@ void MDBalancer::queue_merge(CDir *dir)
dout(10) << "drop merge on " << frag << " because not in cache" << dendl;
return;
}
- assert(dir->dirfrag() == frag);
+ ceph_assert(dir->dirfrag() == frag);
if(!dir->is_auth()) {
dout(10) << "drop merge on " << *dir << " because lost auth" << dendl;
@@ -900,7 +900,7 @@ void MDBalancer::try_rebalance(balance_state_t& state)
if (dir->inode->is_base())
continue;
- assert(dir->inode->authority().first == target); // cuz that's how i put it in the map, dummy
+ ceph_assert(dir->inode->authority().first == target); // cuz that's how i put it in the map, dummy
if (pop <= amount-have) {
dout(5) << "reexporting " << *dir << " pop " << pop
@@ -1010,7 +1010,7 @@ void MDBalancer::find_exports(CDir *dir,
return;
}
- assert(dir->is_auth());
+ ceph_assert(dir->is_auth());
double need = amount - have;
if (need < amount * g_conf()->mds_bal_min_start)
@@ -1033,8 +1033,8 @@ void MDBalancer::find_exports(CDir *dir,
CInode *in = *it;
++it;
- assert(in->is_dir());
- assert(in->get_parent_dir() == dir);
+ ceph_assert(in->is_dir());
+ ceph_assert(in->get_parent_dir() == dir);
list<CDir*> dfls;
in->get_nested_dirfrags(dfls);
diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc
index e961cad2ce7bb..28ac5a41a397b 100644
--- a/src/mds/MDCache.cc
+++ b/src/mds/MDCache.cc
@@ -93,7 +93,7 @@ protected:
MDCache *mdcache;
MDSRank *get_mds() override
{
- assert(mdcache != NULL);
+ ceph_assert(mdcache != NULL);
return mdcache->mds;
}
public:
@@ -113,7 +113,7 @@ protected:
MDCache *mdcache;
MDSRank *get_mds() override
{
- assert(mdcache != NULL);
+ ceph_assert(mdcache != NULL);
return mdcache->mds;
}
public:
@@ -126,7 +126,7 @@ protected:
MDCache *mdcache;
MDSRank *get_mds() override
{
- assert(mdcache != NULL);
+ ceph_assert(mdcache != NULL);
return mdcache->mds;
}
public:
@@ -251,11 +251,11 @@ void MDCache::add_inode(CInode *in)
// add to lru, inode map
if (in->last == CEPH_NOSNAP) {
auto &p = inode_map[in->ino()];
- assert(!p); // should be no dup inos!
+ ceph_assert(!p); // should be no dup inos!
p = in;
} else {
auto &p = snap_inode_map[in->vino()];
- assert(!p); // should be no dup inos!
+ ceph_assert(!p); // should be no dup inos!
p = in;
}
@@ -285,7 +285,7 @@ void MDCache::remove_inode(CInode *o)
if (o->get_parent_dn()) {
// FIXME: multiple parents?
CDentry *dn = o->get_parent_dn();
- assert(!dn->is_dirty());
+ ceph_assert(!dn->is_dirty());
dn->dir->unlink_inode(dn); // leave dentry ... FIXME?
} @@ -322,7 +322,7 @@ void MDCache::remove_inode(CInode *o) } // delete it - assert(o->get_num_ref() == 0); + ceph_assert(o->get_num_ref() == 0); delete o; } @@ -382,7 +382,7 @@ void MDCache::create_unlinked_system_inode(CInode *in, inodeno_t ino, else in->inode_auth = mds_authority_t(mds_rank_t(in->ino() - MDS_INO_MDSDIR_OFFSET), CDIR_AUTH_UNKNOWN); in->open_snaprealm(); // empty snaprealm - assert(!in->snaprealm->parent); // created its own + ceph_assert(!in->snaprealm->parent); // created its own in->snaprealm->srnode.seq = 1; } } @@ -554,7 +554,7 @@ void MDCache::_create_system_file_finish(MutationRef& mut, CDentry *dn, version_ if (in->inode.is_dir()) { CDir *dir = in->get_dirfrag(frag_t()); - assert(dir); + ceph_assert(dir); dir->mark_dirty(1, mut->ls); dir->mark_new(mut->ls); } @@ -615,7 +615,7 @@ void MDCache::open_mydir_frag(MDSInternalContextBase *c) return; } CDir *mydir = myin->get_or_open_dirfrag(this, frag_t()); - assert(mydir); + ceph_assert(mydir); adjust_subtree_auth(mydir, mds->get_nodeid()); mydir->fetch(c); }) @@ -632,9 +632,9 @@ void MDCache::open_root() return; } if (mds->get_nodeid() == mds->mdsmap->get_root()) { - assert(root->is_auth()); + ceph_assert(root->is_auth()); CDir *rootdir = root->get_or_open_dirfrag(this, frag_t()); - assert(rootdir); + ceph_assert(rootdir); if (!rootdir->is_subtree_root()) adjust_subtree_auth(rootdir, mds->get_nodeid()); if (!rootdir->is_complete()) { @@ -642,7 +642,7 @@ void MDCache::open_root() return; } } else { - assert(!root->is_auth()); + ceph_assert(!root->is_auth()); CDir *rootdir = root->get_dirfrag(frag_t()); if (!rootdir) { open_remote_dirfrag(root, frag_t(), new C_MDS_RetryOpenRoot(this)); @@ -656,7 +656,7 @@ void MDCache::open_root() return; } CDir *mydir = myin->get_or_open_dirfrag(this, frag_t()); - assert(mydir); + ceph_assert(mydir); adjust_subtree_auth(mydir, mds->get_nodeid()); populate_mydir(); @@ -664,9 +664,9 @@ void MDCache::open_root() void MDCache::populate_mydir() { - assert(myin); + ceph_assert(myin); CDir *mydir = myin->get_or_open_dirfrag(this, frag_t()); - assert(mydir); + ceph_assert(mydir); dout(10) << "populate_mydir " << *mydir << dendl; @@ -702,8 +702,8 @@ void MDCache::populate_mydir() new C_MDS_RetryOpenRoot(this)); return; } - assert(straydn); - assert(strays[i]); + ceph_assert(straydn); + ceph_assert(strays[i]); // we make multiple passes through this method; make sure we only pin each stray once. if (!strays[i]->state_test(CInode::STATE_STRAYPINNED)) { strays[i]->get(CInode::PIN_STRAY); @@ -724,7 +724,7 @@ void MDCache::populate_mydir() // DamageTable applies special handling to strays: it will // have damaged() us out if one is damaged. - assert(!dir->state_test(CDir::STATE_BADFRAG)); + ceph_assert(!dir->state_test(CDir::STATE_BADFRAG)); if (dir->get_version() == 0) { dir->fetch(new C_MDS_RetryOpenRoot(this)); @@ -740,7 +740,7 @@ void MDCache::populate_mydir() // okay! 
dout(10) << "populate_mydir done" << dendl; - assert(!open); + ceph_assert(!open); open = true; mds->queue_waiters(waiting_for_open); @@ -758,10 +758,10 @@ CDir *MDCache::get_stray_dir(CInode *in) in->name_stray_dentry(straydname); CInode *strayi = get_stray(); - assert(strayi); + ceph_assert(strayi); frag_t fg = strayi->pick_dirfrag(straydname); CDir *straydir = strayi->get_dirfrag(fg); - assert(straydir); + ceph_assert(straydir); return straydir; } @@ -775,7 +775,7 @@ CDentry *MDCache::get_or_create_stray_dentry(CInode *in) straydn = straydir->add_null_dentry(straydname); straydn->mark_new(); } else { - assert(straydn->get_projected_linkage()->is_null()); + ceph_assert(straydn->get_projected_linkage()->is_null()); } straydn->state_set(CDentry::STATE_STRAY); @@ -836,8 +836,8 @@ void MDCache::adjust_subtree_auth(CDir *dir, mds_authority_t auth, bool adjust_p } else { root = get_subtree_root(dir); // subtree root } - assert(root); - assert(subtrees.count(root)); + ceph_assert(root); + ceph_assert(subtrees.count(root)); dout(7) << " current root is " << *root << dendl; if (root == dir) { @@ -846,7 +846,7 @@ void MDCache::adjust_subtree_auth(CDir *dir, mds_authority_t auth, bool adjust_p } else { // i am a new subtree. dout(10) << " new subtree at " << *dir << dendl; - assert(subtrees.count(dir) == 0); + ceph_assert(subtrees.count(dir) == 0); subtrees[dir]; // create empty subtree bounds list for me. dir->get(CDir::PIN_SUBTREE); @@ -928,7 +928,7 @@ void MDCache::try_subtree_merge_at(CDir *dir, set *to_eval, bool adjust return; auto it = subtrees.find(dir); - assert(it != subtrees.end()); + ceph_assert(it != subtrees.end()); // merge with parent? CDir *parent = dir; @@ -985,7 +985,7 @@ void MDCache::eval_subtree_root(CInode *diri) { // evaluate subtree inode filelock? // (we should scatter the filelock on subtree bounds) - assert(diri->is_auth()); + ceph_assert(diri->is_auth()); mds->locker->try_eval(diri, CEPH_LOCK_IFILE | CEPH_LOCK_INEST); } @@ -1009,8 +1009,8 @@ void MDCache::adjust_bounded_subtree_auth(CDir *dir, const set& bounds, m } else { root = get_subtree_root(dir); // subtree root } - assert(root); - assert(subtrees.count(root)); + ceph_assert(root); + ceph_assert(subtrees.count(root)); dout(7) << " current root is " << *root << dendl; mds_authority_t oldauth = dir->authority(); @@ -1021,7 +1021,7 @@ void MDCache::adjust_bounded_subtree_auth(CDir *dir, const set& bounds, m } else { // i am a new subtree. dout(10) << " new subtree at " << *dir << dendl; - assert(subtrees.count(dir) == 0); + ceph_assert(subtrees.count(dir) == 0); subtrees[dir]; // create empty subtree bounds list for me. 
dir->get(CDir::PIN_SUBTREE);
@@ -1066,7 +1066,7 @@ void MDCache::adjust_bounded_subtree_auth(CDir *dir, const set<CDir*>& bounds, m
dout(10) << " want bound " << *bound << dendl;
CDir *t = get_subtree_root(bound->get_parent_dir());
if (subtrees[t].count(bound) == 0) {
- assert(t != dir);
+ ceph_assert(t != dir);
dout(10) << " new bound " << *bound << dendl;
adjust_subtree_auth(bound, t->authority());
}
@@ -1243,20 +1243,20 @@ CDir *MDCache::get_projected_subtree_root(CDir *dir)
void MDCache::remove_subtree(CDir *dir)
{
dout(10) << "remove_subtree " << *dir << dendl;
- assert(subtrees.count(dir));
- assert(subtrees[dir].empty());
+ ceph_assert(subtrees.count(dir));
+ ceph_assert(subtrees[dir].empty());
subtrees.erase(dir);
dir->put(CDir::PIN_SUBTREE);
if (dir->get_parent_dir()) {
CDir *p = get_subtree_root(dir->get_parent_dir());
- assert(subtrees[p].count(dir));
+ ceph_assert(subtrees[p].count(dir));
subtrees[p].erase(dir);
}
}
void MDCache::get_subtree_bounds(CDir *dir, set<CDir*>& bounds)
{
- assert(subtrees.count(dir));
+ ceph_assert(subtrees.count(dir));
bounds = subtrees[dir];
}
@@ -1274,7 +1274,7 @@ void MDCache::get_wouldbe_subtree_bounds(CDir *dir, set<CDir*>& bounds)
CDir *t = *p;
while (t != root) {
t = t->get_parent_dir();
- assert(t);
+ ceph_assert(t);
if (t == dir) {
bounds.insert(*p);
continue;
}
@@ -1287,7 +1287,7 @@ void MDCache::verify_subtree_bounds(CDir *dir, const set<CDir*>& bounds)
{
// for debugging only.
- assert(subtrees.count(dir));
+ ceph_assert(subtrees.count(dir));
if (bounds != subtrees[dir]) {
dout(0) << "verify_subtree_bounds failed" << dendl;
set<CDir*> b = bounds;
@@ -1301,13 +1301,13 @@ void MDCache::verify_subtree_bounds(CDir *dir, const set<CDir*>& bounds)
for (const auto &cd : b)
dout(0) << " extra bound " << *cd << dendl;
}
- assert(bounds == subtrees[dir]);
+ ceph_assert(bounds == subtrees[dir]);
}
void MDCache::verify_subtree_bounds(CDir *dir, const list<dirfrag_t>& bounds)
{
// for debugging only.
- assert(subtrees.count(dir));
+ ceph_assert(subtrees.count(dir));
// make sure that any bounds i do have are properly noted as such.
int failed = 0;
@@ -1319,7 +1319,7 @@ void MDCache::verify_subtree_bounds(CDir *dir, const list<dirfrag_t>& bounds)
failed++;
}
}
- assert(failed == 0);
+ ceph_assert(failed == 0);
}
void MDCache::project_subtree_rename(CInode *diri, CDir *olddir, CDir *newdir)
@@ -1337,10 +1337,10 @@ void MDCache::adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop)
if (pop) {
map<CInode*, list<pair<CDir*,CDir*> > >::iterator p = projected_subtree_renames.find(diri);
- assert(p != projected_subtree_renames.end());
- assert(!p->second.empty());
- assert(p->second.front().first == olddir);
- assert(p->second.front().second == newdir);
+ ceph_assert(p != projected_subtree_renames.end());
+ ceph_assert(!p->second.empty());
+ ceph_assert(p->second.front().first == olddir);
+ ceph_assert(p->second.front().second == newdir);
p->second.pop_front();
if (p->second.empty())
projected_subtree_renames.erase(p);
@@ -1368,9 +1368,9 @@ void MDCache::adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop)
} else if (dir->is_subtree_root()) {
// children are fine. change parent.
dout(10) << "moving " << *dir << " from " << *oldparent << " to " << *newparent << dendl; - assert(subtrees[oldparent].count(dir)); + ceph_assert(subtrees[oldparent].count(dir)); subtrees[oldparent].erase(dir); - assert(subtrees.count(newparent)); + ceph_assert(subtrees.count(newparent)); subtrees[newparent].insert(dir); // caller is responsible for 'eval diri' try_subtree_merge_at(dir, NULL, false); @@ -1385,7 +1385,7 @@ void MDCache::adjust_subtree_after_rename(CInode *diri, CDir *olddir, bool pop) CDir *bound = *p; CDir *broot = get_subtree_root(bound->get_parent_dir()); if (broot != oldparent) { - assert(broot == newparent); + ceph_assert(broot == newparent); tomove.push_back(bound); } } @@ -1479,7 +1479,7 @@ int MDCache::num_subtrees_fullnonauth() CInode *MDCache::pick_inode_snap(CInode *in, snapid_t follows) { dout(10) << "pick_inode_snap follows " << follows << " on " << *in << dendl; - assert(in->last == CEPH_NOSNAP); + ceph_assert(in->last == CEPH_NOSNAP); auto p = snap_inode_map.upper_bound(vinodeno_t(in->ino(), follows)); if (p != snap_inode_map.end() && p->second->ino() == in->ino()) { @@ -1501,7 +1501,7 @@ CInode *MDCache::pick_inode_snap(CInode *in, snapid_t follows) */ CInode *MDCache::cow_inode(CInode *in, snapid_t last) { - assert(last >= in->first); + ceph_assert(last >= in->first); CInode *oldin = new CInode(this, true, in->first, last); oldin->inode = *in->get_previous_projected_inode(); @@ -1519,12 +1519,12 @@ CInode *MDCache::cow_inode(CInode *in, snapid_t last) if (in->last != CEPH_NOSNAP) { CInode *head_in = get_inode(in->ino()); - assert(head_in); + ceph_assert(head_in); if (head_in->split_need_snapflush(oldin, in)) { oldin->client_snap_caps = in->client_snap_caps; for (const auto &p : in->client_snap_caps) { SimpleLock *lock = oldin->get_lock(p.first); - assert(lock); + ceph_assert(lock); for (const auto &q : p.second) { oldin->auth_pin(lock); lock->set_state(LOCK_SNAP_SYNC); // gathering @@ -1550,7 +1550,7 @@ CInode *MDCache::cow_inode(CInode *in, snapid_t last) if (issued & cinode_lock_info[i].wr_caps) { int lockid = cinode_lock_info[i].lock; SimpleLock *lock = oldin->get_lock(lockid); - assert(lock); + ceph_assert(lock); oldin->client_snap_caps[lockid].insert(client); oldin->auth_pin(lock); lock->set_state(LOCK_SNAP_SYNC); // gathering @@ -1585,17 +1585,17 @@ void MDCache::journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, return; } dout(10) << "journal_cow_dentry follows " << follows << " on " << *dn << dendl; - assert(dn->is_auth()); + ceph_assert(dn->is_auth()); // nothing to cow on a null dentry, fix caller if (!dnl) dnl = dn->get_projected_linkage(); - assert(!dnl->is_null()); + ceph_assert(!dnl->is_null()); CInode *in = dnl->is_primary() ? 
dnl->get_inode() : NULL; bool cow_head = false; if (in && in->state_test(CInode::STATE_AMBIGUOUSAUTH)) { - assert(in->is_frozen_inode()); + ceph_assert(in->is_frozen_inode()); cow_head = true; } if (in && (in->is_multiversion() || cow_head)) { @@ -1603,10 +1603,10 @@ void MDCache::journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, SnapRealm *realm = NULL; if (in->get_projected_parent_dn() != dn) { - assert(follows == CEPH_NOSNAP); + ceph_assert(follows == CEPH_NOSNAP); realm = dn->dir->inode->find_snaprealm(); snapid_t dir_follows = get_global_snaprealm()->get_newest_seq(); - assert(dir_follows >= realm->get_newest_seq()); + ceph_assert(dir_follows >= realm->get_newest_seq()); if (dir_follows+1 > dn->first) { snapid_t oldfirst = dn->first; @@ -1628,13 +1628,13 @@ void MDCache::journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, follows = dir_follows; if (in->snaprealm) { realm = in->snaprealm; - assert(follows >= realm->get_newest_seq()); + ceph_assert(follows >= realm->get_newest_seq()); } } else { realm = in->find_snaprealm(); if (follows == CEPH_NOSNAP) { follows = get_global_snaprealm()->get_newest_seq(); - assert(follows >= realm->get_newest_seq()); + ceph_assert(follows >= realm->get_newest_seq()); } } @@ -1656,7 +1656,7 @@ void MDCache::journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, SnapRealm *realm = dn->dir->inode->find_snaprealm(); if (follows == CEPH_NOSNAP) { follows = get_global_snaprealm()->get_newest_seq(); - assert(follows >= realm->get_newest_seq()); + ceph_assert(follows >= realm->get_newest_seq()); } // already cloned? @@ -1693,7 +1693,7 @@ void MDCache::journal_cow_dentry(MutationImpl *mut, EMetaBlob *metablob, metablob->add_primary_dentry(olddn, 0, true, false, false, need_snapflush); mut->add_cow_dentry(olddn); } else { - assert(dnl->is_remote()); + ceph_assert(dnl->is_remote()); CDentry *olddn = dn->dir->add_remote_dentry(dn->get_name(), dnl->get_remote_ino(), dnl->get_remote_d_type(), oldfirst, follows); olddn->pre_dirty(); @@ -1765,7 +1765,7 @@ void MDCache::project_rstat_inode_to_frag(CInode *cur, CDir *parent, snapid_t fi const set snaps = prealm->get_snaps(); if (cur->last != CEPH_NOSNAP) { - assert(cur->dirty_old_rstats.empty()); + ceph_assert(cur->dirty_old_rstats.empty()); set::const_iterator q = snaps.lower_bound(std::max(first, floor)); if (q == snaps.end() || *q > cur->last) return; @@ -1776,8 +1776,8 @@ void MDCache::project_rstat_inode_to_frag(CInode *cur, CDir *parent, snapid_t fi if (cur->state_test(CInode::STATE_AMBIGUOUSAUTH) && cur->is_auth()) { // rename src inode is not projected in the slave rename prep case. so we should // avoid updateing the inode. 
- assert(linkunlink < 0); - assert(cur->is_frozen_inode()); + ceph_assert(linkunlink < 0); + ceph_assert(cur->is_frozen_inode()); update = false; } _project_rstat_inode_to_frag(*curi, std::max(first, floor), cur->last, parent, @@ -1908,7 +1908,7 @@ void MDCache::_project_rstat_inode_to_frag(CInode::mempool_inode& inode, snapid_ // apply dout(20) << " project to [" << first << "," << last << "] " << *prstat << dendl; - assert(last >= first); + ceph_assert(last >= first); prstat->add(delta); if (update_inode) inode.accounted_rstat = inode.rstat; @@ -2093,7 +2093,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, bool do_parent_mtime = flags & PREDIRTY_DIR; bool shallow = flags & PREDIRTY_SHALLOW; - assert(mds->mdlog->entry_is_open()); + ceph_assert(mds->mdlog->entry_is_open()); // make sure stamp is set if (mut->get_mds_stamp() == utime_t()) @@ -2111,7 +2111,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, << " " << *in << dendl; if (!parent) { - assert(primary_dn); + ceph_assert(primary_dn); parent = in->get_projected_parent_dn()->get_dir(); } @@ -2128,7 +2128,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, bool first = true; while (parent) { //assert(cur->is_auth() || !primary_dn); // this breaks the rename auth twiddle hack - assert(parent->is_auth()); + ceph_assert(parent->is_auth()); // opportunistically adjust parent dirfrag CInode *pin = parent->get_inode(); @@ -2141,9 +2141,9 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, pf->version = parent->pre_dirty(); if (do_parent_mtime || linkunlink) { - assert(mut->wrlocks.count(&pin->filelock)); - assert(mut->wrlocks.count(&pin->nestlock)); - assert(cfollows == CEPH_NOSNAP); + ceph_assert(mut->wrlocks.count(&pin->filelock)); + ceph_assert(mut->wrlocks.count(&pin->nestlock)); + ceph_assert(cfollows == CEPH_NOSNAP); // update stale fragstat/rstat? parent->resync_accounted_fragstat(); @@ -2185,7 +2185,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, // because we are about to write into the dirfrag fnode and that needs // to commit before the lock can cycle. 
if (linkunlink) { - assert(pin->nestlock.get_num_wrlocks() || mut->is_slave()); + ceph_assert(pin->nestlock.get_num_wrlocks() || mut->is_slave()); } if (mut->wrlocks.count(&pin->nestlock) == 0) { @@ -2257,7 +2257,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, if (!mut->wrlocks.count(&pin->versionlock)) mds->locker->local_wrlock_grab(&pin->versionlock, mut); - assert(mut->wrlocks.count(&pin->nestlock) || + ceph_assert(mut->wrlocks.count(&pin->nestlock) || mut->is_slave()); pin->last_dirstat_prop = mut->get_mds_stamp(); @@ -2287,7 +2287,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, if (parent->get_frag() == frag_t()) { // i.e., we are the only frag if (pi.inode.dirstat.size() < 0) - assert(!"negative dirstat size" == g_conf()->mds_verify_scatter); + ceph_assert(!"negative dirstat size" == g_conf()->mds_verify_scatter); if (pi.inode.dirstat.size() != pf->fragstat.size()) { mds->clog->error() << "unmatched fragstat size on single dirfrag " << parent->dirfrag() << ", inode has " << pi.inode.dirstat @@ -2296,7 +2296,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, // trust the dirfrag for now pi.inode.dirstat = pf->fragstat; - assert(!"unmatched fragstat size" == g_conf()->mds_verify_scatter); + ceph_assert(!"unmatched fragstat size" == g_conf()->mds_verify_scatter); } } } @@ -2315,7 +2315,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, if (pin->is_base()) break; parentdn = pin->get_projected_parent_dn(); - assert(parentdn); + ceph_assert(parentdn); // rstat dout(10) << "predirty_journal_parents frag->inode on " << *parent << dendl; @@ -2343,7 +2343,7 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, // trust the dirfrag for now pi.inode.rstat = pf->rstat; - assert(!"unmatched rstat rbytes" == g_conf()->mds_verify_scatter); + ceph_assert(!"unmatched rstat rbytes" == g_conf()->mds_verify_scatter); } } @@ -2359,8 +2359,8 @@ void MDCache::predirty_journal_parents(MutationRef mut, EMetaBlob *blob, } // now, stick it in the blob - assert(parent); - assert(parent->is_auth()); + ceph_assert(parent); + ceph_assert(parent->is_auth()); blob->add_dir_context(parent); blob->add_dir(parent, true); for (list::iterator p = lsi.begin(); @@ -2405,7 +2405,7 @@ void MDCache::log_master_commit(metareqid_t reqid) void MDCache::_logged_master_commit(metareqid_t reqid) { dout(10) << "_logged_master_commit " << reqid << dendl; - assert(uncommitted_masters.count(reqid)); + ceph_assert(uncommitted_masters.count(reqid)); uncommitted_masters[reqid].ls->uncommitted_masters.erase(reqid); mds->queue_waiters(uncommitted_masters[reqid].waiters); uncommitted_masters.erase(reqid); @@ -2416,7 +2416,7 @@ void MDCache::_logged_master_commit(metareqid_t reqid) void MDCache::committed_master_slave(metareqid_t r, mds_rank_t from) { dout(10) << "committed_master_slave mds." 
<< from << " on " << r << dendl; - assert(uncommitted_masters.count(r)); + ceph_assert(uncommitted_masters.count(r)); uncommitted_masters[r].slaves.erase(from); if (!uncommitted_masters[r].recovering && uncommitted_masters[r].slaves.empty()) log_master_commit(r); @@ -2425,7 +2425,7 @@ void MDCache::committed_master_slave(metareqid_t r, mds_rank_t from) void MDCache::logged_master_update(metareqid_t reqid) { dout(10) << "logged_master_update " << reqid << dendl; - assert(uncommitted_masters.count(reqid)); + ceph_assert(uncommitted_masters.count(reqid)); uncommitted_masters[reqid].safe = true; auto p = pending_masters.find(reqid); if (p != pending_masters.end()) { @@ -2680,7 +2680,7 @@ void MDCache::dump_resolve_status(Formatter *f) const void MDCache::resolve_start(MDSInternalContext *resolve_done_) { dout(10) << "resolve_start" << dendl; - assert(!resolve_done); + ceph_assert(!resolve_done); resolve_done.reset(resolve_done_); if (mds->mdsmap->get_root() != mds->get_nodeid()) { @@ -2946,7 +2946,7 @@ void MDCache::handle_mds_failure(mds_rank_t who) remove_ambiguous_slave_update(p->first, mdr->slave_to_mds); if (!mdr->more()->waiting_on_slave.empty()) { - assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid()); + ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid()); // will rollback, no need to wait mdr->reset_slave_request(); mdr->more()->waiting_on_slave.clear(); @@ -2962,7 +2962,7 @@ void MDCache::handle_mds_failure(mds_rank_t who) if (mdr->is_slave() && mdr->slave_did_prepare()) { if (mdr->more()->waiting_on_slave.count(who)) { - assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid()); + ceph_assert(mdr->more()->srcdn_auth_mds == mds->get_nodeid()); dout(10) << " slave request " << *mdr << " no longer need rename notity ack from mds." << who << dendl; mdr->more()->waiting_on_slave.erase(who); @@ -2990,7 +2990,7 @@ void MDCache::handle_mds_failure(mds_rank_t who) if (mdr->more()->srcdn_auth_mds == who) { dout(10) << " master request " << *mdr << " waiting for rename srcdn's auth mds." << who << " to recover" << dendl; - assert(mdr->more()->witnessed.count(who) == 0); + ceph_assert(mdr->more()->witnessed.count(who) == 0); if (mdr->more()->is_ambiguous_auth) mdr->clear_ambiguous_auth(); // rename srcdn's auth mds failed, all witnesses will rollback @@ -3005,13 +3005,13 @@ void MDCache::handle_mds_failure(mds_rank_t who) << mdr->more()->srcdn_auth_mds << " to reply" << dendl; // waiting for the slave (rename srcdn's auth mds), delay sending resolve ack // until either the request is committing or the slave also fails. - assert(mdr->more()->waiting_on_slave.size() == 1); + ceph_assert(mdr->more()->waiting_on_slave.size() == 1); pending_masters.insert(p->first); } else { dout(10) << " master request " << *mdr << " no longer witnessed by slave mds." 
<< who << " to recover" << dendl; if (srcdn_auth >= 0) - assert(mdr->more()->witnessed.count(srcdn_auth) == 0); + ceph_assert(mdr->more()->witnessed.count(srcdn_auth) == 0); // discard this peer's prepare (if any) mdr->more()->witnessed.erase(who); @@ -3095,7 +3095,7 @@ void MDCache::handle_mds_recovery(mds_rank_t who) if (dir->authority().first != who || dir->authority().second == mds->get_nodeid()) continue; - assert(!dir->is_auth()); + ceph_assert(!dir->is_auth()); // wake any waiters list q; @@ -3171,7 +3171,7 @@ void MDCache::handle_resolve(const MMDSResolve::const_ref &m) if (mds->is_clientreplay() || mds->is_active() || mds->is_stopping()) { for (auto p = m->slave_requests.begin(); p != m->slave_requests.end(); ++p) { if (uncommitted_masters.count(p->first) && !uncommitted_masters[p->first].safe) { - assert(!p->second.committing); + ceph_assert(!p->second.committing); pending_masters.insert(p->first); } } @@ -3198,7 +3198,7 @@ void MDCache::handle_resolve(const MMDSResolve::const_ref &m) if (p.second.inode_caps.length() > 0) { // slave wants to export caps (rename) - assert(mds->is_resolve()); + ceph_assert(mds->is_resolve()); inodeno_t ino; map cap_exports; @@ -3206,7 +3206,7 @@ void MDCache::handle_resolve(const MMDSResolve::const_ref &m) decode(ino, q); decode(cap_exports, q); - assert(get_inode(ino)); + ceph_assert(get_inode(ino)); for (map::iterator q = cap_exports.begin(); q != cap_exports.end(); @@ -3231,7 +3231,7 @@ void MDCache::handle_resolve(const MMDSResolve::const_ref &m) } else { // ABORT dout(10) << " ambiguous slave request " << p << " will ABORT" << dendl; - assert(!p.second.committing); + ceph_assert(!p.second.committing); ack->add_abort(p.first); } } @@ -3255,11 +3255,11 @@ void MDCache::handle_resolve(const MMDSResolve::const_ref &m) map >::iterator next = p; ++next; CDir *dir = get_dirfrag(p->first); - assert(dir); + ceph_assert(dir); dout(10) << "checking ambiguous import " << *dir << dendl; if (migrator->is_importing(dir->dirfrag()) && migrator->get_import_peer(dir->dirfrag()) == from) { - assert(migrator->get_import_state(dir->dirfrag()) == Migrator::IMPORT_ACKING); + ceph_assert(migrator->get_import_state(dir->dirfrag()) == Migrator::IMPORT_ACKING); // check if sender claims the subtree bool claimed_by_sender = false; @@ -3349,8 +3349,8 @@ void MDCache::discard_delayed_resolve(mds_rank_t who) void MDCache::maybe_resolve_finish() { - assert(resolve_ack_gather.empty()); - assert(resolve_need_rollback.empty()); + ceph_assert(resolve_ack_gather.empty()); + ceph_assert(resolve_need_rollback.empty()); if (!resolve_gather.empty()) { dout(10) << "maybe_resolve_finish still waiting for resolves (" @@ -3363,7 +3363,7 @@ void MDCache::maybe_resolve_finish() finish_committed_masters(); if (resolve_done) { - assert(mds->is_resolve()); + ceph_assert(mds->is_resolve()); trim_unlinked_inodes(); recalc_auth_bits(false); resolve_done.release()->complete(0); @@ -3384,8 +3384,8 @@ void MDCache::handle_resolve_ack(const MMDSResolveAck::const_ref &ack) } if (ambiguous_slave_updates.count(from)) { - assert(mds->mdsmap->is_clientreplay_or_active_or_stopping(from)); - assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping()); + ceph_assert(mds->mdsmap->is_clientreplay_or_active_or_stopping(from)); + ceph_assert(mds->is_clientreplay() || mds->is_active() || mds->is_stopping()); } for (const auto &p : ack->commit) { @@ -3399,7 +3399,7 @@ void MDCache::handle_resolve_ack(const MMDSResolveAck::const_ref &ack) if (mds->is_resolve()) { // replay MDSlaveUpdate *su = 
get_uncommitted_slave_update(p.first, from); - assert(su); + ceph_assert(su); // log commit mds->mdlog->start_submit_entry(new ESlaveUpdate(mds->mdlog, "unknown", p.first, from, @@ -3414,7 +3414,7 @@ void MDCache::handle_resolve_ack(const MMDSResolveAck::const_ref &ack) if (p.second.length() > 0) mdr->more()->inode_import.share(p.second); - assert(mdr->slave_request == 0); // shouldn't be doing anything! + ceph_assert(mdr->slave_request == 0); // shouldn't be doing anything! request_finish(mdr); } } @@ -3424,7 +3424,7 @@ void MDCache::handle_resolve_ack(const MMDSResolveAck::const_ref &ack) if (mds->is_resolve()) { MDSlaveUpdate *su = get_uncommitted_slave_update(metareq, from); - assert(su); + ceph_assert(su); // perform rollback (and journal a rollback entry) // note: this will hold up the resolve a bit, until the rollback entries journal. @@ -3462,7 +3462,7 @@ void MDCache::handle_resolve_ack(const MMDSResolveAck::const_ref &ack) void MDCache::add_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master, MDSlaveUpdate *su) { - assert(uncommitted_slave_updates[master].count(reqid) == 0); + ceph_assert(uncommitted_slave_updates[master].count(reqid) == 0); uncommitted_slave_updates[master][reqid] = su; for(set<CInode*>::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p) uncommitted_slave_rename_olddir[*p]++; @@ -3472,7 +3472,7 @@ void MDCache::add_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master, void MDCache::finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t master) { - assert(uncommitted_slave_updates[master].count(reqid)); + ceph_assert(uncommitted_slave_updates[master].count(reqid)); MDSlaveUpdate* su = uncommitted_slave_updates[master][reqid]; uncommitted_slave_updates[master].erase(reqid); @@ -3482,7 +3482,7 @@ void MDCache::finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t mast for(set<CInode*>::iterator p = su->olddirs.begin(); p != su->olddirs.end(); ++p) { CInode *diri = *p; map<CInode*, int>::iterator it = uncommitted_slave_rename_olddir.find(diri); - assert(it != uncommitted_slave_rename_olddir.end()); + ceph_assert(it != uncommitted_slave_rename_olddir.end()); it->second--; if (it->second == 0) { uncommitted_slave_rename_olddir.erase(it); @@ -3497,20 +3497,20 @@ void MDCache::finish_uncommitted_slave_update(metareqid_t reqid, mds_rank_t mast } } } else - assert(it->second > 0); + ceph_assert(it->second > 0); } // removed the inodes that were unlinked by slave update for(set<CInode*>::iterator p = su->unlinked.begin(); p != su->unlinked.end(); ++p) { CInode *in = *p; map<CInode*, int>::iterator it = uncommitted_slave_unlink.find(in); - assert(it != uncommitted_slave_unlink.end()); + ceph_assert(it != uncommitted_slave_unlink.end()); it->second--; if (it->second == 0) { uncommitted_slave_unlink.erase(it); if (!in->get_projected_parent_dn()) mds->mdcache->remove_inode_recursive(in); } else - assert(it->second > 0); + ceph_assert(it->second > 0); } delete su; } @@ -3522,14 +3522,14 @@ MDSlaveUpdate* MDCache::get_uncommitted_slave_update(metareqid_t reqid, mds_rank if (uncommitted_slave_updates.count(master) && uncommitted_slave_updates[master].count(reqid)) { su = uncommitted_slave_updates[master][reqid]; - assert(su); + ceph_assert(su); } return su; } void MDCache::finish_rollback(metareqid_t reqid) { auto p = resolve_need_rollback.find(reqid); - assert(p != resolve_need_rollback.end()); + ceph_assert(p != resolve_need_rollback.end()); if (mds->is_resolve()) finish_uncommitted_slave_update(reqid, p->second); resolve_need_rollback.erase(p); @@ -3574,7 +3574,7 @@ void
MDCache::disambiguate_my_imports() dout(10) << "disambiguate_my_imports" << dendl; if (!mds->is_resolve()) { - assert(my_ambiguous_imports.empty()); + ceph_assert(my_ambiguous_imports.empty()); return; } @@ -3586,7 +3586,7 @@ void MDCache::disambiguate_my_imports() map >::iterator q = my_ambiguous_imports.begin(); CDir *dir = get_dirfrag(q->first); - assert(dir); + ceph_assert(dir); if (dir->authority() != me_ambig) { dout(10) << "ambiguous import auth known, must not be me " << *dir << dendl; @@ -3599,7 +3599,7 @@ void MDCache::disambiguate_my_imports() CDir *root = get_subtree_root(dir); if (root != dir) dout(10) << " subtree root is " << *root << dendl; - assert(root->dir_auth.first != mds->get_nodeid()); // no us! + ceph_assert(root->dir_auth.first != mds->get_nodeid()); // no us! try_trim_non_auth_subtree(root); } else { dout(10) << "ambiguous import auth unclaimed, must be me " << *dir << dendl; @@ -3607,7 +3607,7 @@ void MDCache::disambiguate_my_imports() mds->mdlog->start_submit_entry(new EImportFinish(dir, true)); } } - assert(my_ambiguous_imports.empty()); + ceph_assert(my_ambiguous_imports.empty()); mds->mdlog->flush(); // verify all my subtrees are unambiguous! @@ -3618,7 +3618,7 @@ void MDCache::disambiguate_my_imports() if (dir->is_ambiguous_dir_auth()) { dout(0) << "disambiguate_imports uh oh, dir_auth is still ambiguous for " << *dir << dendl; } - assert(!dir->is_ambiguous_dir_auth()); + ceph_assert(!dir->is_ambiguous_dir_auth()); } show_subtrees(); @@ -3627,7 +3627,7 @@ void MDCache::disambiguate_my_imports() void MDCache::add_ambiguous_import(dirfrag_t base, const vector& bounds) { - assert(my_ambiguous_imports.count(base) == 0); + ceph_assert(my_ambiguous_imports.count(base) == 0); my_ambiguous_imports[base] = bounds; } @@ -3651,7 +3651,7 @@ void MDCache::add_ambiguous_import(CDir *base, const set& bounds) void MDCache::cancel_ambiguous_import(CDir *dir) { dirfrag_t df = dir->dirfrag(); - assert(my_ambiguous_imports.count(df)); + ceph_assert(my_ambiguous_imports.count(df)); dout(10) << "cancel_ambiguous_import " << df << " bounds " << my_ambiguous_imports[df] << " " << *dir @@ -3661,7 +3661,7 @@ void MDCache::cancel_ambiguous_import(CDir *dir) void MDCache::finish_ambiguous_import(dirfrag_t df) { - assert(my_ambiguous_imports.count(df)); + ceph_assert(my_ambiguous_imports.count(df)); vector bounds; bounds.swap(my_ambiguous_imports[df]); my_ambiguous_imports.erase(df); @@ -3670,7 +3670,7 @@ void MDCache::finish_ambiguous_import(dirfrag_t df) << " bounds " << bounds << dendl; CDir *dir = get_dirfrag(df); - assert(dir); + ceph_assert(dir); // adjust dir_auth, import maps adjust_bounded_subtree_auth(dir, bounds, mds->get_nodeid()); @@ -3709,7 +3709,7 @@ void MDCache::remove_inode_recursive(CInode *in) bool MDCache::expire_recursive(CInode *in, expiremap &expiremap) { - assert(!in->is_auth()); + ceph_assert(!in->is_auth()); dout(10) << __func__ << ":" << *in << dendl; @@ -3934,7 +3934,7 @@ void MDCache::dump_rejoin_status(Formatter *f) const void MDCache::rejoin_start(MDSInternalContext *rejoin_done_) { dout(10) << "rejoin_start" << dendl; - assert(!rejoin_done); + ceph_assert(!rejoin_done); rejoin_done.reset(rejoin_done_); rejoin_gather = recovery_set; @@ -3970,8 +3970,8 @@ void MDCache::rejoin_send_rejoins() return; } - assert(!migrator->is_importing()); - assert(!migrator->is_exporting()); + ceph_assert(!migrator->is_importing()); + ceph_assert(!migrator->is_exporting()); if (!mds->is_rejoin()) { disambiguate_other_imports(); @@ -4019,11 +4019,11 @@ void 
MDCache::rejoin_send_rejoins() p != subtrees.end(); ++p) { CDir *dir = p->first; - assert(dir->is_subtree_root()); + ceph_assert(dir->is_subtree_root()); if (dir->is_ambiguous_dir_auth()) { // exporter is recovering, importer is survivor. - assert(rejoins.count(dir->authority().first)); - assert(!rejoins.count(dir->authority().second)); + ceph_assert(rejoins.count(dir->authority().first)); + ceph_assert(!rejoins.count(dir->authority().second)); continue; } @@ -4032,7 +4032,7 @@ void MDCache::rejoin_send_rejoins() continue; // skip my own regions! mds_rank_t auth = dir->get_dir_auth().first; - assert(auth >= 0); + ceph_assert(auth >= 0); if (rejoins.count(auth) == 0) continue; // don't care about this node's subtrees @@ -4096,7 +4096,7 @@ void MDCache::rejoin_send_rejoins() q != mdr->remote_auth_pins.end(); ++q) { if (!q->first->is_auth()) { - assert(q->second == q->first->authority().first); + ceph_assert(q->second == q->first->authority().first); if (rejoins.count(q->second) == 0) continue; const MMDSCacheRejoin::ref &rejoin = rejoins[q->second]; @@ -4146,7 +4146,7 @@ void MDCache::rejoin_send_rejoins() << " " << q->first->get_parent() << dendl; MDSCacheObjectInfo i; q->first->get_parent()->set_object_info(i); - assert(i.ino); + ceph_assert(i.ino); rejoin->add_inode_wrlock(vinodeno_t(i.ino, i.snapid), q->first->get_type(), mdr->reqid, mdr->attempt); } @@ -4155,8 +4155,8 @@ void MDCache::rejoin_send_rejoins() // send the messages for (auto &p : rejoins) { - assert(rejoin_sent.count(p.first) == 0); - assert(rejoin_ack_gather.count(p.first) == 0); + ceph_assert(rejoin_sent.count(p.first) == 0); + ceph_assert(rejoin_ack_gather.count(p.first) == 0); rejoin_sent.insert(p.first); rejoin_ack_gather.insert(p.first); mds->send_message_mds(p.second, p.first); @@ -4198,12 +4198,12 @@ void MDCache::rejoin_walk(CDir *dir, const MMDSCacheRejoin::ref &rejoin) rejoin->add_weak_dirfrag(dir->dirfrag()); for (auto &p : dir->items) { CDentry *dn = p.second; - assert(dn->last == CEPH_NOSNAP); + ceph_assert(dn->last == CEPH_NOSNAP); CDentry::linkage_t *dnl = dn->get_linkage(); dout(15) << " add_weak_primary_dentry " << *dn << dendl; - assert(dnl->is_primary()); + ceph_assert(dnl->is_primary()); CInode *in = dnl->get_inode(); - assert(dnl->get_inode()->is_dir()); + ceph_assert(dnl->get_inode()->is_dir()); rejoin->add_weak_primary_dentry(dir->ino(), dn->get_name(), dn->first, dn->last, in->ino()); in->get_nested_dirfrags(nested); if (in->is_dirty_scattered()) { @@ -4232,7 +4232,7 @@ void MDCache::rejoin_walk(CDir *dir, const MMDSCacheRejoin::ref &rejoin) for (auto it2 = in->remote_parents.begin(); it2 != in->remote_parents.end(); ) { CDentry *remote_dn = *it2; ++it2; - assert(remote_dn->last != CEPH_NOSNAP); + ceph_assert(remote_dn->last != CEPH_NOSNAP); remote_dn->unlink_remote(remote_dn->get_linkage()); } } @@ -4245,7 +4245,7 @@ void MDCache::rejoin_walk(CDir *dir, const MMDSCacheRejoin::ref &rejoin) continue; } else { // Inventing null/remote dentry shouldn't cause problem - assert(!dnl->is_primary()); + ceph_assert(!dnl->is_primary()); } } @@ -4347,7 +4347,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) // check cap exports for (auto p = weak->cap_exports.begin(); p != weak->cap_exports.end(); ++p) { CInode *in = get_inode(p->first); - assert(!in || in->is_auth()); + ceph_assert(!in || in->is_auth()); for (auto q = p->second.begin(); q != p->second.end(); ++q) { dout(10) << " claiming cap import " << p->first << " client." 
<< q->first << " on " << *in << dendl; Capability *cap = rejoin_import_cap(in, q->first, q->second, from); @@ -4365,11 +4365,11 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) encode(imported_caps, ack->imported_caps); } else { - assert(mds->is_rejoin()); + ceph_assert(mds->is_rejoin()); // we may have already received a strong rejoin from the sender. rejoin_scour_survivor_replicas(from, NULL, acked_inodes, gather_locks); - assert(gather_locks.empty()); + ceph_assert(gather_locks.empty()); // check cap exports. rejoin_client_map.insert(weak->client_map.begin(), weak->client_map.end()); @@ -4378,7 +4378,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) for (auto p = weak->cap_exports.begin(); p != weak->cap_exports.end(); ++p) { CInode *in = get_inode(p->first); - assert(!in || in->is_auth()); + ceph_assert(!in || in->is_auth()); // note for (auto q = p->second.begin(); q != p->second.end(); ++q) { dout(10) << " claiming cap import " << p->first << " client." << q->first << dendl; @@ -4390,7 +4390,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) // assimilate any potentially dirty scatterlock state for (const auto &p : weak->inode_scatterlocks) { CInode *in = get_inode(p.first); - assert(in); + ceph_assert(in); in->decode_lock_state(CEPH_LOCK_IFILE, p.second.file); in->decode_lock_state(CEPH_LOCK_INEST, p.second.nest); in->decode_lock_state(CEPH_LOCK_IDFT, p.second.dft); @@ -4408,7 +4408,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) CInode *diri = get_inode(p.ino); if (!diri) dout(0) << " missing dir ino " << p.ino << dendl; - assert(diri); + ceph_assert(diri); list ls; if (diri->dirfragtree.is_leaf(p.frag)) { @@ -4425,7 +4425,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) dout(0) << " missing dir for " << p.frag << " (which maps to " << fg << ") on " << *diri << dendl; continue; } - assert(dir); + ceph_assert(dir); if (dirs_to_share.count(dir)) { dout(10) << " already have " << p.frag << " -> " << fg << " " << *dir << dendl; } else { @@ -4444,7 +4444,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) CInode *diri = get_inode(p.first); if (!diri) dout(0) << " missing dir ino " << p.first << dendl; - assert(diri); + ceph_assert(diri); // weak dentries CDir *dir = 0; @@ -4456,15 +4456,15 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) dir = diri->get_dirfrag(fg); if (!dir) dout(0) << " missing dir frag " << fg << " on " << *diri << dendl; - assert(dir); - assert(dirs_to_share.count(dir)); + ceph_assert(dir); + ceph_assert(dirs_to_share.count(dir)); } // and dentry CDentry *dn = dir->lookup(q.first.name, q.first.snapid); - assert(dn); + ceph_assert(dn); CDentry::linkage_t *dnl = dn->get_linkage(); - assert(dnl->is_primary()); + ceph_assert(dnl->is_primary()); if (survivor && dn->is_replica(from)) dentry_remove_replica(dn, from, gather_locks); @@ -4477,7 +4477,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) // inode CInode *in = dnl->get_inode(); - assert(in); + ceph_assert(in); if (survivor && in->is_replica(from)) inode_remove_replica(in, from, true, gather_locks); @@ -4503,7 +4503,7 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) p != weak->weak_inodes.end(); ++p) { CInode *in = get_inode(*p); - assert(in); // hmm fixme wrt stray? + ceph_assert(in); // hmm fixme wrt stray? 
if (survivor && in->is_replica(from)) inode_remove_replica(in, from, true, gather_locks); unsigned inonce = in->add_replica(from); @@ -4518,13 +4518,13 @@ void MDCache::handle_cache_rejoin_weak(const MMDSCacheRejoin::const_ref &weak) } } - assert(rejoin_gather.count(from)); + ceph_assert(rejoin_gather.count(from)); rejoin_gather.erase(from); if (survivor) { // survivor. do everything now. for (const auto &p : weak->inode_scatterlocks) { CInode *in = get_inode(p.first); - assert(in); + ceph_assert(in); dout(10) << " including base inode (due to potential scatterlock update) " << *in << dendl; acked_inodes.insert(in->vino()); ack->add_inode_base(in, mds->mdsmap->get_up_features()); @@ -4628,7 +4628,7 @@ CDir *MDCache::rejoin_invent_dirfrag(dirfrag_t df) if (!in) in = rejoin_invent_inode(df.ino, CEPH_NOSNAP); if (!in->is_dir()) { - assert(in->state_test(CInode::STATE_REJOINUNDEF)); + ceph_assert(in->state_test(CInode::STATE_REJOINUNDEF)); in->inode.mode = S_IFDIR; in->inode.dir_layout.dl_dir_hash = g_conf()->mds_default_dir_hash; } @@ -4644,12 +4644,12 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron mds_rank_t from = mds_rank_t(strong->get_source().num()); // only a recovering node will get a strong rejoin. - assert(mds->is_rejoin()); + ceph_assert(mds->is_rejoin()); // assimilate any potentially dirty scatterlock state for (const auto &p : strong->inode_scatterlocks) { CInode *in = get_inode(p.first); - assert(in); + ceph_assert(in); in->decode_lock_state(CEPH_LOCK_IFILE, p.second.file); in->decode_lock_state(CEPH_LOCK_INEST, p.second.nest); in->decode_lock_state(CEPH_LOCK_IDFT, p.second.dft); @@ -4715,7 +4715,7 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron else { frag_t fg = diri->pick_dirfrag(ss.name); dir = diri->get_dirfrag(fg); - assert(dir); + ceph_assert(dir); dn = dir->lookup(ss.name, ss.snapid); } if (!dn) { @@ -4759,9 +4759,9 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron const MMDSCacheRejoin::slave_reqid& r = ss_req_it->second; dout(10) << " dn xlock by " << r << " on " << *dn << dendl; MDRequestRef mdr = request_get(r.reqid); // should have this from auth_pin above. - assert(mdr->is_auth_pinned(dn)); + ceph_assert(mdr->is_auth_pinned(dn)); if (!mdr->xlocks.count(&dn->versionlock)) { - assert(dn->versionlock.can_xlock_local()); + ceph_assert(dn->versionlock.can_xlock_local()); dn->versionlock.get_xlock(mdr, mdr->get_client()); mdr->xlocks.insert(&dn->versionlock); mdr->locks.insert(&dn->versionlock); @@ -4782,24 +4782,24 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron if (d.is_primary()) { if (vinodeno_t(d.ino, ss.snapid) != dnl->get_inode()->vino()) { // the survivor missed MDentryUnlink+MDentryLink messages ? - assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0); + ceph_assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0); CInode *in = get_inode(d.ino, ss.snapid); - assert(in); - assert(in->get_parent_dn()); + ceph_assert(in); + ceph_assert(in->get_parent_dn()); rejoin_unlinked_inodes[from].insert(in); dout(7) << " sender has primary dentry but wrong inode" << dendl; } } else { // the survivor missed MDentryLink message ? - assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0); + ceph_assert(strong->strong_inodes.count(dnl->get_inode()->vino()) == 0); dout(7) << " sender doesn't have primay dentry" << dendl; } } else { if (d.is_primary()) { // the survivor missed MDentryUnlink message ? 
CInode *in = get_inode(d.ino, ss.snapid); - assert(in); - assert(in->get_parent_dn()); + ceph_assert(in); + ceph_assert(in->get_parent_dn()); rejoin_unlinked_inodes[from].insert(in); dout(7) << " sender has primary dentry but we don't" << dendl; } @@ -4810,7 +4810,7 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron for (const auto &p : strong->strong_inodes) { CInode *in = get_inode(p.first); - assert(in); + ceph_assert(in); in->add_replica(from, p.second.nonce); dout(10) << " have " << *in << dendl; @@ -4844,10 +4844,10 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron else mdr = request_start_slave(r.reqid, r.attempt, strong); if (strong->frozen_authpin_inodes.count(in->vino())) { - assert(!in->get_num_auth_pins()); + ceph_assert(!in->get_num_auth_pins()); mdr->freeze_auth_pin(in); } else { - assert(!in->is_frozen_auth_pin()); + ceph_assert(!in->is_frozen_auth_pin()); } mdr->auth_pin(in); } @@ -4859,9 +4859,9 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron SimpleLock *lock = in->get_lock(q.first); dout(10) << " inode xlock by " << q.second << " on " << *lock << " on " << *in << dendl; MDRequestRef mdr = request_get(q.second.reqid); // should have this from auth_pin above. - assert(mdr->is_auth_pinned(in)); + ceph_assert(mdr->is_auth_pinned(in)); if (!mdr->xlocks.count(&in->versionlock)) { - assert(in->versionlock.can_xlock_local()); + ceph_assert(in->versionlock.can_xlock_local()); in->versionlock.get_xlock(mdr, mdr->get_client()); mdr->xlocks.insert(&in->versionlock); mdr->locks.insert(&in->versionlock); @@ -4886,7 +4886,7 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron dout(10) << " inode wrlock by " << r << " on " << *lock << " on " << *in << dendl; MDRequestRef mdr = request_get(r.reqid); // should have this from auth_pin above. if (in->is_auth()) - assert(mdr->is_auth_pinned(in)); + ceph_assert(mdr->is_auth_pinned(in)); lock->set_state(LOCK_MIX); if (lock == &in->filelock) in->loner_cap = -1; @@ -4898,7 +4898,7 @@ void MDCache::handle_cache_rejoin_strong(const MMDSCacheRejoin::const_ref &stron } // done? 
- assert(rejoin_gather.count(from)); + ceph_assert(rejoin_gather.count(from)); rejoin_gather.erase(from); if (rejoin_gather.empty() && rejoin_ack_gather.count(mds->get_nodeid())) { rejoin_gather_finish(); @@ -4912,7 +4912,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) dout(7) << "handle_cache_rejoin_ack from " << ack->get_source() << dendl; mds_rank_t from = mds_rank_t(ack->get_source().num()); - assert(mds->get_state() >= MDSMap::STATE_REJOIN); + ceph_assert(mds->get_state() >= MDSMap::STATE_REJOIN); bool survivor = !mds->is_rejoin(); // for sending cache expire message @@ -4971,7 +4971,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) CDentry::linkage_t *dnl = dn->get_linkage(); - assert(dn->last == q.first.snapid); + ceph_assert(dn->last == q.first.snapid); if (dn->first != q.second.first) { dout(10) << " adjust dn.first " << dn->first << " -> " << q.second.first << " on " << *dn << dendl; dn->first = q.second.first; @@ -5037,7 +5037,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) for (list::iterator q = ls.begin(); q != ls.end(); ++q) { if ((*q)->is_auth() || ack->strong_dirfrags.count((*q)->dirfrag())) continue; - assert((*q)->get_num_any() == 0); + ceph_assert((*q)->get_num_any() == 0); (*p)->close_dirfrag((*q)->get_frag()); } } @@ -5045,7 +5045,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) // full dirfrags for (const auto &p : ack->dirfrag_bases) { CDir *dir = get_dirfrag(p.first); - assert(dir); + ceph_assert(dir); auto q = p.second.cbegin(); dir->_decode_base(q); dout(10) << " got dir replica " << *dir << dendl; @@ -5061,7 +5061,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) decode(last, p); decode(basebl, p); CInode *in = get_inode(ino, last); - assert(in); + ceph_assert(in); auto q = basebl.cbegin(); snapid_t sseq = 0; if (in->snaprealm) @@ -5088,7 +5088,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) decode(lockbl, p); CInode *in = get_inode(ino, last); - assert(in); + ceph_assert(in); in->set_replica_nonce(nonce); auto q = lockbl.cbegin(); in->_decode_locks_rejoin(q, rejoin_waiters, rejoin_eval_locks, survivor); @@ -5098,7 +5098,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) // FIXME: This can happen if entire subtree, together with the inode subtree root // belongs to, were trimmed between sending cache rejoin and receiving rejoin ack. - assert(isolated_inodes.empty()); + ceph_assert(isolated_inodes.empty()); map > peer_imported; auto bp = ack->imported_caps.cbegin(); @@ -5108,12 +5108,12 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) p != peer_imported.end(); ++p) { auto& ex = cap_exports.at(p->first); - assert(ex.first == from); + ceph_assert(ex.first == from); for (map::iterator q = p->second.begin(); q != p->second.end(); ++q) { auto r = ex.second.find(q->first); - assert(r != ex.second.end()); + ceph_assert(r != ex.second.end()); dout(10) << " exporting caps for client." 
<< q->first << " ino " << p->first << dendl; Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(q->first.v)); @@ -5133,7 +5133,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) ex.second.erase(r); } - assert(ex.second.empty()); + ceph_assert(ex.second.empty()); } for (auto p : updated_realms) { @@ -5153,7 +5153,7 @@ void MDCache::handle_cache_rejoin_ack(const MMDSCacheRejoin::const_ref &ack) } // done? - assert(rejoin_ack_gather.count(from)); + ceph_assert(rejoin_ack_gather.count(from)); rejoin_ack_gather.erase(from); if (!survivor) { if (rejoin_gather.empty()) { @@ -5231,14 +5231,14 @@ void MDCache::rejoin_trim_undef_inodes() } } - assert(rejoin_undef_inodes.empty()); + ceph_assert(rejoin_undef_inodes.empty()); } void MDCache::rejoin_gather_finish() { dout(10) << "rejoin_gather_finish" << dendl; - assert(mds->is_rejoin()); - assert(rejoin_ack_gather.count(mds->get_nodeid())); + ceph_assert(mds->is_rejoin()); + ceph_assert(rejoin_ack_gather.count(mds->get_nodeid())); if (open_undef_inodes_dirfrags()) return; @@ -5277,19 +5277,19 @@ void MDCache::rejoin_open_ino_finish(inodeno_t ino, int ret) if (ret < 0) { cap_imports_missing.insert(ino); } else if (ret == mds->get_nodeid()) { - assert(get_inode(ino)); + ceph_assert(get_inode(ino)); } else { auto p = cap_imports.find(ino); - assert(p != cap_imports.end()); + ceph_assert(p != cap_imports.end()); for (auto q = p->second.begin(); q != p->second.end(); ++q) { - assert(q->second.count(MDS_RANK_NONE)); - assert(q->second.size() == 1); + ceph_assert(q->second.count(MDS_RANK_NONE)); + ceph_assert(q->second.size() == 1); rejoin_export_caps(p->first, q->first, q->second[MDS_RANK_NONE], ret); } cap_imports.erase(p); } - assert(cap_imports_num_opening > 0); + ceph_assert(cap_imports_num_opening > 0); cap_imports_num_opening--; if (cap_imports_num_opening == 0) { @@ -5305,7 +5305,7 @@ public: map > session_map; C_MDC_RejoinSessionsOpened(MDCache *c) : MDCacheLogContext(c) {} void finish(int r) override { - assert(r == 0); + ceph_assert(r == 0); mdcache->rejoin_open_sessions_finish(session_map); } }; @@ -5328,8 +5328,8 @@ void MDCache::rejoin_prefetch_ino_finish(inodeno_t ino, int ret) cap_imports_missing.insert(ino); } else if (ret != mds->get_nodeid()) { for (auto q = p->second.begin(); q != p->second.end(); ++q) { - assert(q->second.count(MDS_RANK_NONE)); - assert(q->second.size() == 1); + ceph_assert(q->second.count(MDS_RANK_NONE)); + ceph_assert(q->second.size() == 1); rejoin_export_caps(p->first, q->first, q->second[MDS_RANK_NONE], ret); } cap_imports.erase(p); @@ -5346,7 +5346,7 @@ bool MDCache::process_imported_caps() open_file_table.wait_for_prefetch( new MDSInternalContextWrapper(mds, new FunctionContext([this](int r) { - assert(rejoin_gather.count(mds->get_nodeid())); + ceph_assert(rejoin_gather.count(mds->get_nodeid())); process_imported_caps(); }) ) @@ -5357,7 +5357,7 @@ bool MDCache::process_imported_caps() for (auto p = cap_imports.begin(); p != cap_imports.end(); ++p) { CInode *in = get_inode(p->first); if (in) { - assert(in->is_auth()); + ceph_assert(in->is_auth()); cap_imports_missing.erase(p->first); continue; } @@ -5396,7 +5396,7 @@ bool MDCache::process_imported_caps() p != rejoin_slave_exports.end(); ++p) { CInode *in = get_inode(p->first); - assert(in); + ceph_assert(in); for (map::iterator q = p->second.second.begin(); q != p->second.second.end(); ++q) { @@ -5414,8 +5414,8 @@ bool MDCache::process_imported_caps() cap->merge(q->second, true); Capability::Import& im = 
rejoin_imported_caps[p->second.first][p->first][q->first]; - assert(cap->get_last_seq() == im.issue_seq); - assert(cap->get_mseq() == im.mseq); + ceph_assert(cap->get_last_seq() == im.issue_seq); + ceph_assert(cap->get_mseq() == im.mseq); cap->set_cap_id(im.cap_id); // send cap import because we assigned a new cap ID do_cap_import(session, in, cap, q->second.cap_id, q->second.seq, q->second.mseq - 1, @@ -5435,7 +5435,7 @@ bool MDCache::process_imported_caps() ++p; continue; } - assert(in->is_auth()); + ceph_assert(in->is_auth()); for (auto q = p->second.begin(); q != p->second.end(); ++q) { Session *session; { @@ -5469,9 +5469,9 @@ bool MDCache::process_imported_caps() } else { trim_non_auth(); - assert(rejoin_gather.count(mds->get_nodeid())); + ceph_assert(rejoin_gather.count(mds->get_nodeid())); rejoin_gather.erase(mds->get_nodeid()); - assert(!rejoin_ack_gather.count(mds->get_nodeid())); + ceph_assert(!rejoin_ack_gather.count(mds->get_nodeid())); maybe_send_pending_rejoins(); } return false; @@ -5511,7 +5511,7 @@ void MDCache::rebuild_need_snapflush(CInode *head_in, SnapRealm *realm, for (int i = 0; i < num_cinode_locks; i++) { int lockid = cinode_lock_info[i].lock; SimpleLock *lock = in->get_lock(lockid); - assert(lock); + ceph_assert(lock); in->client_snap_caps[lockid].insert(client); in->auth_pin(lock); lock->set_state(LOCK_SNAP_SYNC); @@ -5576,7 +5576,7 @@ void MDCache::prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t i void MDCache::prepare_realm_merge(SnapRealm *realm, SnapRealm *parent_realm, map& splits) { - assert(parent_realm); + ceph_assert(parent_realm); vector split_inos; vector split_realms; @@ -5591,7 +5591,7 @@ void MDCache::prepare_realm_merge(SnapRealm *realm, SnapRealm *parent_realm, split_realms.push_back((*p)->inode->ino()); for (const auto& p : realm->client_caps) { - assert(!p.second->empty()); + ceph_assert(!p.second->empty()); auto em = splits.emplace(std::piecewise_construct, std::forward_as_tuple(p.first), std::forward_as_tuple()); if (em.second) { auto update = MClientSnap::create(CEPH_SNAP_OP_SPLIT); @@ -5791,7 +5791,7 @@ void MDCache::do_cap_import(Session *session, CInode *in, Capability *cap, reap->set_cap_peer(p_cap_id, p_seq, p_mseq, peer, p_flags); mds->send_message_client_counted(reap, session); } else { - assert(0); + ceph_assert(0); } } @@ -5799,7 +5799,7 @@ void MDCache::do_delayed_cap_imports() { dout(10) << "do_delayed_cap_imports" << dendl; - assert(delayed_imported_caps.empty()); + ceph_assert(delayed_imported_caps.empty()); } struct C_MDC_OpenSnapRealms : public MDCacheContext { @@ -5819,7 +5819,7 @@ void MDCache::open_snaprealms() while (it != rejoin_pending_snaprealms.end()) { CInode *in = *it; SnapRealm *realm = in->snaprealm; - assert(realm); + ceph_assert(realm); if (realm->have_past_parents_open() || realm->open_parents(gather.new_sub())) { dout(10) << " past parents now open on " << *in << dendl; @@ -5837,7 +5837,7 @@ void MDCache::open_snaprealms() !p.end(); ++p) { CInode *child = *p; auto q = reconnected_caps.find(child->ino()); - assert(q != reconnected_caps.end()); + ceph_assert(q != reconnected_caps.end()); for (auto r = q->second.begin(); r != q->second.end(); ++r) { if (r->second.snap_follows > 0) { if (r->second.snap_follows < child->first - 1) { @@ -5875,7 +5875,7 @@ void MDCache::open_snaprealms() gather.activate(); } else { // for multimds, must succeed the first time - assert(recovery_set.empty()); + ceph_assert(recovery_set.empty()); dout(10) << "open_snaprealms - waiting for " << gather.num_subs_remaining() 
<< dendl; @@ -5902,12 +5902,12 @@ void MDCache::open_snaprealms() dout(5) << warn_str.str() << dendl; } } - assert(rejoin_waiters.empty()); - assert(rejoin_pending_snaprealms.empty()); + ceph_assert(rejoin_waiters.empty()); + ceph_assert(rejoin_pending_snaprealms.empty()); dout(10) << "open_snaprealms - all open" << dendl; do_delayed_cap_imports(); - assert(rejoin_done); + ceph_assert(rejoin_done); rejoin_done.release()->complete(0); reconnected_caps.clear(); } @@ -5924,7 +5924,7 @@ bool MDCache::open_undef_inodes_dirfrags() p != rejoin_undef_inodes.end(); ++p) { CInode *in = *p; - assert(!in->is_base()); + ceph_assert(!in->is_base()); fetch_queue.insert(in->get_parent_dir()); } @@ -5948,10 +5948,10 @@ bool MDCache::open_undef_inodes_dirfrags() if (diri->state_test(CInode::STATE_REJOINUNDEF)) continue; if (dir->state_test(CDir::STATE_REJOINUNDEF)) - assert(diri->dirfragtree.is_leaf(dir->get_frag())); + ceph_assert(diri->dirfragtree.is_leaf(dir->get_frag())); dir->fetch(gather.new_sub()); } - assert(gather.has_subs()); + ceph_assert(gather.has_subs()); gather.activate(); return true; } @@ -5961,10 +5961,10 @@ void MDCache::opened_undef_inode(CInode *in) { rejoin_undef_inodes.erase(in); if (in->is_dir()) { // FIXME: re-hash dentries if necessary - assert(in->inode.dir_layout.dl_dir_hash == g_conf()->mds_default_dir_hash); + ceph_assert(in->inode.dir_layout.dl_dir_hash == g_conf()->mds_default_dir_hash); if (in->has_dirfrags() && !in->dirfragtree.is_leaf(frag_t())) { CDir *dir = in->get_dirfrag(frag_t()); - assert(dir); + ceph_assert(dir); rejoin_undef_dirfrags.erase(dir); in->force_dirfrags(); list ls; @@ -6205,7 +6205,7 @@ struct C_MDC_QueuedCow : public MDCacheContext { void MDCache::queue_file_recover(CInode *in) { dout(10) << "queue_file_recover " << *in << dendl; - assert(in->is_auth()); + ceph_assert(in->is_auth()); // cow? 
/* @@ -6231,7 +6231,7 @@ void MDCache::queue_file_recover(CInode *in) snapid_t snapid = *s.begin(); CInode *cow_inode = 0; journal_cow_inode(mut, &le->metablob, in, snapid-1, &cow_inode); - assert(cow_inode); + ceph_assert(cow_inode); recovery_queue.enqueue(cow_inode); s.erase(*s.begin()); } @@ -6291,7 +6291,7 @@ void MDCache::identify_files_to_recover() if (in->filelock.is_stable()) { in->auth_pin(&in->filelock); } else { - assert(in->filelock.get_state() == LOCK_XLOCKSNAP); + ceph_assert(in->filelock.get_state() == LOCK_XLOCKSNAP); } in->filelock.set_state(LOCK_PRE_SCAN); rejoin_recover_q.push_back(in); @@ -6354,7 +6354,7 @@ void MDCache::truncate_inode(CInode *in, LogSegment *ls) if (!in->client_need_snapflush.empty() && (in->get_caps_issued() & CEPH_CAP_FILE_BUFFER)) { - assert(in->filelock.is_xlocked()); + ceph_assert(in->filelock.is_xlocked()); in->filelock.set_xlock_snap_sync(new C_MDC_RetryTruncate(this, in, ls)); mds->locker->issue_caps(in); return; @@ -6370,7 +6370,7 @@ struct C_IO_MDC_TruncateFinish : public MDCacheIOContext { MDCacheIOContext(c, false), in(i), ls(l) { } void finish(int r) override { - assert(r == 0 || r == -ENOENT); + ceph_assert(r == 0 || r == -ENOENT); mdcache->truncate_inode_finish(in, ls); } void print(ostream& out) const override { @@ -6385,10 +6385,10 @@ void MDCache::_truncate_inode(CInode *in, LogSegment *ls) << pi->truncate_from << " -> " << pi->truncate_size << " on " << *in << dendl; - assert(pi->is_truncating()); - assert(pi->truncate_size < (1ULL << 63)); - assert(pi->truncate_from < (1ULL << 63)); - assert(pi->truncate_size < pi->truncate_from); + ceph_assert(pi->is_truncating()); + ceph_assert(pi->truncate_size < (1ULL << 63)); + ceph_assert(pi->truncate_from < (1ULL << 63)); + ceph_assert(pi->truncate_size < pi->truncate_from); SnapRealm *realm = in->find_snaprealm(); @@ -6400,7 +6400,7 @@ void MDCache::_truncate_inode(CInode *in, LogSegment *ls) } else { dout(10) << " NO realm, using null context" << dendl; snapc = &nullsnap; - assert(in->last == CEPH_NOSNAP); + ceph_assert(in->last == CEPH_NOSNAP); } dout(10) << "_truncate_inode snapc " << snapc << " on " << *in << dendl; filer.truncate(in->inode.ino, &in->inode.layout, *snapc, @@ -6425,7 +6425,7 @@ void MDCache::truncate_inode_finish(CInode *in, LogSegment *ls) dout(10) << "truncate_inode_finish " << *in << dendl; set::iterator p = ls->truncating_inodes.find(in); - assert(p != ls->truncating_inodes.end()); + ceph_assert(p != ls->truncating_inodes.end()); ls->truncating_inodes.erase(p); // update @@ -6484,7 +6484,7 @@ void MDCache::remove_recovered_truncate(CInode *in, LogSegment *ls) << ls->seq << "/" << ls->offset << dendl; // if we have the logseg the truncate started in, it must be in our list. 
set::iterator p = ls->truncating_inodes.find(in); - assert(p != ls->truncating_inodes.end()); + ceph_assert(p != ls->truncating_inodes.end()); ls->truncating_inodes.erase(p); in->put(CInode::PIN_TRUNCATING); } @@ -6504,7 +6504,7 @@ void MDCache::start_recovered_truncates() if (!in->client_need_snapflush.empty() && (in->get_caps_issued() & CEPH_CAP_FILE_BUFFER)) { - assert(in->filelock.is_stable()); + ceph_assert(in->filelock.is_stable()); in->filelock.set_state(LOCK_XLOCKDONE); in->auth_pin(&in->filelock); in->filelock.set_xlock_snap_sync(new C_MDC_RetryTruncate(this, in, ls)); @@ -6725,14 +6725,14 @@ bool MDCache::trim_dentry(CDentry *dn, expiremap& expiremap) CDentry::linkage_t *dnl = dn->get_linkage(); CDir *dir = dn->get_dir(); - assert(dir); + ceph_assert(dir); CDir *con = get_subtree_root(dir); if (con) dout(12) << " in container " << *con << dendl; else { dout(12) << " no container; under a not-yet-linked dir" << dendl; - assert(dn->is_auth()); + ceph_assert(dn->is_auth()); } // If replica dentry is not readable, it's likely we will receive @@ -6760,11 +6760,11 @@ bool MDCache::trim_dentry(CDentry *dn, expiremap& expiremap) } else if (dnl->is_primary()) { // expire the inode, too. CInode *in = dnl->get_inode(); - assert(in); + ceph_assert(in); if (trim_inode(dn, in, con, expiremap)) return true; // purging stray instead of trimming } else { - assert(dnl->is_null()); + ceph_assert(dnl->is_null()); } if (!dn->is_auth()) { @@ -6780,7 +6780,7 @@ bool MDCache::trim_dentry(CDentry *dn, expiremap& expiremap) if (a == mds->get_nodeid()) continue; // on export, ignore myself. dout(12) << " sending expire to mds." << a << " on " << *dn << dendl; - assert(a != mds->get_nodeid()); + ceph_assert(a != mds->get_nodeid()); auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple()); if (em.second) em.first->second = MCacheExpire::create(mds->get_nodeid()); @@ -6806,11 +6806,11 @@ void MDCache::trim_dirfrag(CDir *dir, CDir *con, expiremap& expiremap) dout(15) << "trim_dirfrag " << *dir << dendl; if (dir->is_subtree_root()) { - assert(!dir->is_auth() || + ceph_assert(!dir->is_auth() || (!dir->is_replicated() && dir->inode->is_base())); remove_subtree(dir); // remove from subtree map } - assert(dir->get_num_ref() == 0); + ceph_assert(dir->get_num_ref() == 0); CInode *in = dir->get_inode(); @@ -6836,7 +6836,7 @@ void MDCache::trim_dirfrag(CDir *dir, CDir *con, expiremap& expiremap) if (a == mds->get_nodeid()) continue; // on export, ignore myself. dout(12) << " sending expire to mds." << a << " on " << *dir << dendl; - assert(a != mds->get_nodeid()); + ceph_assert(a != mds->get_nodeid()); auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple()); if (em.second) em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */ @@ -6855,7 +6855,7 @@ void MDCache::trim_dirfrag(CDir *dir, CDir *con, expiremap& expiremap) bool MDCache::trim_inode(CDentry *dn, CInode *in, CDir *con, expiremap& expiremap) { dout(15) << "trim_inode " << *in << dendl; - assert(in->get_num_ref() == 0); + ceph_assert(in->get_num_ref() == 0); if (in->is_dir()) { // If replica inode's dirfragtreelock is not readable, it's likely @@ -6874,7 +6874,7 @@ bool MDCache::trim_inode(CDentry *dn, CInode *in, CDir *con, expiremap& expirema in->get_dirfrags(dfls); for (list::iterator p = dfls.begin(); p != dfls.end(); ++p) { CDir *dir = *p; - assert(!dir->is_subtree_root()); + ceph_assert(!dir->is_subtree_root()); trim_dirfrag(dir, con ? 
con:dir, expiremap); // if no container (e.g. root dirfrag), use *p } } @@ -6905,7 +6905,7 @@ bool MDCache::trim_inode(CDentry *dn, CInode *in, CDir *con, expiremap& expirema if (a == mds->get_nodeid()) continue; // on export, ignore myself. dout(12) << " sending expire to mds." << a << " on " << *in << dendl; - assert(a != mds->get_nodeid()); + ceph_assert(a != mds->get_nodeid()); auto em = expiremap.emplace(std::piecewise_construct, std::forward_as_tuple(a), std::forward_as_tuple()); if (em.second) em.first->second = MCacheExpire::create(mds->get_nodeid()); /* new */ @@ -6981,7 +6981,7 @@ void MDCache::trim_non_auth() } else { // non-auth. expire. CDir *dir = dn->get_dir(); - assert(dir); + ceph_assert(dir); // unlink the dentry dout(10) << " removing " << *dn << dendl; @@ -6995,17 +6995,17 @@ void MDCache::trim_non_auth() in->get_dirfrags(ls); for (list::iterator p = ls.begin(); p != ls.end(); ++p) { CDir *subdir = *p; - assert(!subdir->is_subtree_root()); + ceph_assert(!subdir->is_subtree_root()); in->close_dirfrag(subdir->dirfrag().frag); } dir->unlink_inode(dn, false); remove_inode(in); } else { - assert(dnl->is_null()); + ceph_assert(dnl->is_null()); } - assert(!dir->has_bloom()); + ceph_assert(!dir->has_bloom()); dir->remove_dentry(dn); // adjust the dir state dir->state_clear(CDir::STATE_COMPLETE); // dir incomplete! @@ -7045,13 +7045,13 @@ void MDCache::trim_non_auth() p != ls.end(); ++p) { dout(10) << " removing " << **p << dendl; - assert((*p)->get_num_ref() == 1); // SUBTREE + ceph_assert((*p)->get_num_ref() == 1); // SUBTREE remove_subtree((*p)); in->close_dirfrag((*p)->dirfrag().frag); } dout(10) << " removing " << *in << dendl; - assert(!in->get_parent_dn()); - assert(in->get_num_ref() == 0); + ceph_assert(!in->get_parent_dn()); + ceph_assert(in->get_num_ref() == 0); remove_inode(in); } } @@ -7107,7 +7107,7 @@ bool MDCache::trim_non_auth_subtree(CDir *dir) dout(20) << "trim_non_auth_subtree(" << dir << ") removing inode " << in << " with dentry" << dn << dendl; dir->unlink_inode(dn, false); remove_inode(in); - assert(!dir->has_bloom()); + ceph_assert(!dir->has_bloom()); dir->remove_dentry(dn); } else { dout(20) << "trim_non_auth_subtree(" << dir << ") keeping inode " << in << " with dentry " << dn <close_dirfrag(dir->get_frag()); dout(10) << " removing " << *diri << dendl; - assert(!diri->get_parent_dn()); - assert(diri->get_num_ref() == 0); + ceph_assert(!diri->get_parent_dn()); + ceph_assert(diri->get_num_ref() == 0); remove_inode(diri); } break; @@ -7250,14 +7250,14 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) // check container? if (p.first.ino > 0) { CInode *expired_inode = get_inode(p.first.ino); - assert(expired_inode); // we had better have this. + ceph_assert(expired_inode); // we had better have this. CDir *parent_dir = expired_inode->get_approx_dirfrag(p.first.frag); - assert(parent_dir); + ceph_assert(parent_dir); int export_state = -1; if (parent_dir->is_auth() && parent_dir->is_exporting()) { export_state = migrator->get_export_state(parent_dir); - assert(export_state >= 0); + ceph_assert(export_state >= 0); } if (!parent_dir->is_auth() || @@ -7271,7 +7271,7 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) // not auth. 
dout(7) << "delaying nonauth|warned expires for " << *parent_dir << dendl; - assert(parent_dir->is_frozen_tree_root()); + ceph_assert(parent_dir->is_frozen_tree_root()); // make a message container @@ -7283,7 +7283,7 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) em.first->second->add_realm(p.first, p.second); continue; } - assert(export_state <= Migrator::EXPORT_PREPPING || + ceph_assert(export_state <= Migrator::EXPORT_PREPPING || (export_state == Migrator::EXPORT_WARNING && !migrator->export_has_warned(parent_dir, from))); @@ -7300,9 +7300,9 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) if (!in) { dout(0) << " inode expire on " << q.first << " from " << from << ", don't have it" << dendl; - assert(in); + ceph_assert(in); } - assert(in->is_auth()); + ceph_assert(in->is_auth()); dout(20) << __func__ << ": expiring inode " << *in << dendl; // check nonce @@ -7354,11 +7354,11 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) } dout(0) << " dir expire on " << q.first << " from " << from << ", don't have it" << dendl; - assert(dir); + ceph_assert(dir); } dout(20) << __func__ << ": expiring dirfrag " << *dir << dendl; - assert(dir->is_auth()); + ceph_assert(dir->is_auth()); // check nonce if (nonce == dir->get_replica_nonce(from)) { @@ -7379,14 +7379,14 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) for (const auto &pd : p.second.dentries) { dout(10) << " dn expires in dir " << pd.first << dendl; CInode *diri = get_inode(pd.first.ino); - assert(diri); + ceph_assert(diri); CDir *dir = diri->get_dirfrag(pd.first.frag); if (!dir) { dout(0) << " dn expires on " << pd.first << " from " << from << ", must have refragmented" << dendl; } else { - assert(dir->is_auth()); + ceph_assert(dir->is_auth()); } for (const auto &p : pd.second) { @@ -7398,8 +7398,8 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) } else { // which dirfrag for this dentry? CDir *dir = diri->get_dirfrag(diri->pick_dirfrag(p.first.first)); - assert(dir); - assert(dir->is_auth()); + ceph_assert(dir); + ceph_assert(dir->is_auth()); dn = dir->lookup(p.first.first, p.first.second); } @@ -7409,7 +7409,7 @@ void MDCache::handle_cache_expire(const MCacheExpire::const_ref &m) else dout(0) << " missing dentry for " << p.first.first << " snap " << p.first.second << dendl; } - assert(dn); + ceph_assert(dn); if (nonce == dn->get_replica_nonce(from)) { dout(7) << " dentry_expire on " << *dn << " from mds." 
<< from << dendl; @@ -7516,7 +7516,7 @@ void MDCache::check_memory_usage() static MemoryModel::snap baseline = last; // check client caps - assert(CInode::count() == inode_map.size() + snap_inode_map.size() + num_shadow_inodes); + ceph_assert(CInode::count() == inode_map.size() + snap_inode_map.size() + num_shadow_inodes); double caps_per_inode = 0.0; if (CInode::count()) caps_per_inode = (double)Capability::count() / (double)CInode::count(); @@ -7665,7 +7665,7 @@ bool MDCache::shutdown_pass() } if (num_auth_subtree > 0) { - assert(mds->get_nodeid() > 0); + ceph_assert(mds->get_nodeid() > 0); dout(7) << "still have " << num_auth_subtree << " auth subtrees" << dendl; show_subtrees(); return false; @@ -7711,8 +7711,8 @@ bool MDCache::shutdown_pass() show_cache(); return false; } - assert(!migrator->is_exporting()); - assert(!migrator->is_importing()); + ceph_assert(!migrator->is_exporting()); + ceph_assert(!migrator->is_importing()); if ((myin && myin->is_auth_pinned()) || (mydir && mydir->is_auth_pinned())) { @@ -7736,7 +7736,7 @@ bool MDCache::shutdown_pass() if (!did_shutdown_log_cap) { // flush journal header dout(7) << "writing header for (now-empty) journal" << dendl; - assert(mds->mdlog->empty()); + ceph_assert(mds->mdlog->empty()); mds->mdlog->write_head(0); // NOTE: filer active checker below will block us until this completes. did_shutdown_log_cap = true; @@ -7769,11 +7769,11 @@ bool MDCache::shutdown_pass() remove_subtree(mydir); myin->close_dirfrag(mydir->get_frag()); } - assert(subtrees.empty()); + ceph_assert(subtrees.empty()); if (myin) { remove_inode(myin); - assert(!myin); + ceph_assert(!myin); } if (global_snaprealm) { @@ -7968,7 +7968,7 @@ void MDCache::dispatch(const Message::const_ref &m) default: derr << "cache unknown message " << m->get_type() << dendl; - assert(0 == "cache unknown message"); + ceph_assert(0 == "cache unknown message"); } } @@ -7982,7 +7982,7 @@ int MDCache::path_traverse(MDRequestRef& mdr, MDSContextFactory& cf, // who bool null_okay = (onfail == MDS_TRAVERSE_DISCOVERXLOCK); bool forward = (onfail == MDS_TRAVERSE_FORWARD); - assert(!forward || mdr); // forward requires a request + ceph_assert(!forward || mdr); // forward requires a request snapid_t snapid = CEPH_NOSNAP; if (mdr) @@ -8086,7 +8086,7 @@ int MDCache::path_traverse(MDRequestRef& mdr, MDSContextFactory& cf, // who return 1; } } - assert(curdir); + ceph_assert(curdir); #ifdef MDS_VERIFY_FRAGSTAT if (curdir->is_complete()) @@ -8163,7 +8163,7 @@ int MDCache::path_traverse(MDRequestRef& mdr, MDSContextFactory& cf, // who // do we have inode? if (!in) { - assert(dnl->is_remote()); + ceph_assert(dnl->is_remote()); // do i have it? in = get_inode(dnl->get_remote_ino()); if (in) { @@ -8171,7 +8171,7 @@ int MDCache::path_traverse(MDRequestRef& mdr, MDSContextFactory& cf, // who dn->link_remote(dnl, in); } else { dout(7) << "remote link to " << dnl->get_remote_ino() << ", which i don't have" << dendl; - assert(mdr); // we shouldn't hit non-primary dentries doing a non-mdr traversal! + ceph_assert(mdr); // we shouldn't hit non-primary dentries doing a non-mdr traversal! 
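The ceph_assert(0 == "cache unknown message") form converted above is an idiom worth noting: a string literal decays to a non-null pointer, so the comparison is always false, and the literal shows up verbatim in the printed condition. A minimal illustration of the same shape:

// Always-false assert-with-message: fires whenever control reaches it, and
// the message text is carried inside the stringized condition.
#include <cassert>

void handle_unknown(int type) {
  switch (type) {
    case 1: /* ... known message type, handle it ... */ break;
    default:
      assert(0 == "unknown message type");  // unconditional failure with text
  }
}

int main() { handle_unknown(1); }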
if (mds->damage_table.is_remote_damaged(dnl->get_remote_ino())) { dout(4) << "traverse: remote dentry points to damaged ino " << *dn << dendl; @@ -8298,7 +8298,7 @@ int MDCache::path_traverse(MDRequestRef& mdr, MDSContextFactory& cf, // who if (mds->logger) mds->logger->inc(l_mds_traverse_hit); dout(10) << "path_traverse finish on snapid " << snapid << dendl; if (mdr) - assert(mdr->snapid == snapid); + ceph_assert(mdr->snapid == snapid); return 0; } @@ -8343,9 +8343,9 @@ CInode *MDCache::cache_traverse(const filepath& fp) void MDCache::open_remote_dirfrag(CInode *diri, frag_t approxfg, MDSInternalContextBase *fin) { dout(10) << "open_remote_dir on " << *diri << dendl; - assert(diri->is_dir()); - assert(!diri->is_auth()); - assert(diri->get_dirfrag(approxfg) == 0); + ceph_assert(diri->is_dir()); + ceph_assert(!diri->is_auth()); + ceph_assert(diri->get_dirfrag(approxfg) == 0); discover_dir_frag(diri, approxfg, fin); } @@ -8368,12 +8368,12 @@ CInode *MDCache::get_dentry_inode(CDentry *dn, MDRequestRef& mdr, bool projected else dnl = dn->get_linkage(); - assert(!dnl->is_null()); + ceph_assert(!dnl->is_null()); if (dnl->is_primary()) return dnl->inode; - assert(dnl->is_remote()); + ceph_assert(dnl->is_remote()); CInode *in = get_inode(dnl->get_remote_ino()); if (in) { dout(7) << "get_dentry_inode linking in remote in " << *in << dendl; @@ -8448,7 +8448,7 @@ void MDCache::make_trace(vector& trace, CInode *in) return; CInode *parent = in->get_parent_inode(); - assert(parent); + ceph_assert(parent); make_trace(trace, parent); CDentry *dn = in->get_parent_dn(); @@ -8632,7 +8632,7 @@ void MDCache::_open_ino_traverse_dir(inodeno_t ino, open_ino_info_t& info, int r void MDCache::_open_ino_fetch_dir(inodeno_t ino, const MMDSOpenIno::const_ref &m, CDir *dir, bool parent) { if (dir->state_test(CDir::STATE_REJOINUNDEF)) - assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag())); + ceph_assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag())); dir->fetch(new C_MDC_OpenInoTraverseDir(this, ino, m, parent)); if (mds->logger) mds->logger->inc(l_mds_openino_dir_fetch); @@ -8779,7 +8779,7 @@ void MDCache::do_open_ino(inodeno_t ino, open_ino_info_t& info, int err) fetch_backtrace(ino, info.pool, fin->bl, new C_OnFinisher(fin, mds->finisher)); } else { - assert(!info.ancestors.empty()); + ceph_assert(!info.ancestors.empty()); info.checking = mds->get_nodeid(); open_ino(info.ancestors[0].dirino, mds->mdsmap->get_metadata_pool(), new C_MDC_OpenInoParentOpened(this, ino), info.want_replica); @@ -8997,7 +8997,7 @@ void MDCache::find_ino_peers(inodeno_t ino, MDSInternalContextBase *c, mds_rank_ c->complete(-ESTALE); return; } - assert(!in); + ceph_assert(!in); ceph_tid_t tid = ++find_ino_peer_last_tid; find_ino_peer_info_t& fip = find_ino_peer[tid]; @@ -9142,7 +9142,7 @@ MDRequestRef MDCache::request_start(const MClientRequest::const_ref& req) // did we win a forward race against a slave? 
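A pattern that recurs through these MDCache hunks (expiremap in trim_dentry/trim_dirfrag, updates in do_realm_invalidate_and_update_notify) is emplace(std::piecewise_construct, ...) followed by if (em.second): one lookup both finds and, if absent, default-constructs the slot, and the bool says whether this caller created it. A self-contained sketch with stand-in types, not Ceph's expiremap or MCacheExpire:

// One-lookup get-or-create: emplace returns {iterator, inserted?}; only the
// first caller for a given key pays for building the payload.
#include <cstdio>
#include <map>
#include <memory>
#include <string>
#include <tuple>

struct Expire { std::string from; };  // stand-in for a per-peer message

using rank_t = int;
using expiremap = std::map<rank_t, std::unique_ptr<Expire>>;

void note_expire(expiremap &m, rank_t peer, const std::string &me) {
  auto em = m.emplace(std::piecewise_construct,
                      std::forward_as_tuple(peer),  // key constructor args
                      std::forward_as_tuple());     // value args (empty)
  if (em.second)                                    // newly inserted slot
    em.first->second = std::make_unique<Expire>(Expire{me});
}

int main() {
  expiremap m;
  note_expire(m, 3, "mds.0");
  note_expire(m, 3, "mds.0");  // second call reuses the existing entry
  std::printf("%zu message(s) queued\n", m.size());  // prints 1
}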
if (active_requests.count(req->get_reqid())) { MDRequestRef& mdr = active_requests[req->get_reqid()]; - assert(mdr); + ceph_assert(mdr); if (mdr->is_slave()) { dout(10) << "request_start already had " << *mdr << ", waiting for finish" << dendl; mdr->more()->waiting_for_finish.push_back(new C_MDS_RetryMessage(mds, req)); @@ -9184,7 +9184,7 @@ MDRequestRef MDCache::request_start_slave(metareqid_t ri, __u32 attempt, const M params.dispatched = m->get_dispatch_stamp(); MDRequestRef mdr = mds->op_tracker.create_request(¶ms); - assert(active_requests.count(mdr->reqid) == 0); + ceph_assert(active_requests.count(mdr->reqid) == 0); active_requests[mdr->reqid] = mdr; dout(7) << "request_start_slave " << *mdr << " by mds." << by << dendl; return mdr; @@ -9200,7 +9200,7 @@ MDRequestRef MDCache::request_start_internal(int op) MDRequestRef mdr = mds->op_tracker.create_request(¶ms); - assert(active_requests.count(mdr->reqid) == 0); + ceph_assert(active_requests.count(mdr->reqid) == 0); active_requests[mdr->reqid] = mdr; dout(7) << "request_start_internal " << *mdr << " op " << op << dendl; return mdr; @@ -9209,7 +9209,7 @@ MDRequestRef MDCache::request_start_internal(int op) MDRequestRef MDCache::request_get(metareqid_t rid) { ceph::unordered_map::iterator p = active_requests.find(rid); - assert(p != active_requests.end()); + ceph_assert(p != active_requests.end()); dout(7) << "request_get " << rid << " " << *p->second << dendl; return p->second; } @@ -9428,15 +9428,15 @@ void MDCache::request_kill(MDRequestRef& mdr) if (mdr->has_more() && (!mdr->more()->witnessed.empty() || !mdr->more()->waiting_on_slave.empty())) { if (!mdr->done_locking) { - assert(mdr->more()->witnessed.empty()); + ceph_assert(mdr->more()->witnessed.empty()); mdr->aborted = true; dout(10) << "request_kill " << *mdr << " -- waiting for slave reply, delaying" << dendl; } else { dout(10) << "request_kill " << *mdr << " -- already started slave prep, no-op" << dendl; } - assert(mdr->used_prealloc_ino == 0); - assert(mdr->prealloc_inos.empty()); + ceph_assert(mdr->used_prealloc_ino == 0); + ceph_assert(mdr->prealloc_inos.empty()); mdr->session = NULL; mdr->item_session_request.remove_myself(); @@ -9473,7 +9473,7 @@ void MDCache::do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool vector split_realms; if (notify_clients) { - assert(in->snaprealm->have_past_parents_open()); + ceph_assert(in->snaprealm->have_past_parents_open()); if (snapop == CEPH_SNAP_OP_SPLIT) { // notify clients of update|split for (elist::iterator p = in->snaprealm->inodes_with_caps.begin(member_offset(CInode, item_caps)); @@ -9502,7 +9502,7 @@ void MDCache::do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool for (const auto& p : realm->client_caps) { const auto& client = p.first; const auto& caps = p.second; - assert(!caps->empty()); + ceph_assert(!caps->empty()); auto em = updates.emplace(std::piecewise_construct, std::forward_as_tuple(client), std::forward_as_tuple()); if (em.second) { @@ -9575,7 +9575,7 @@ void MDCache::do_realm_invalidate_and_update_notify(CInode *in, int snapop, bool void MDCache::send_snap_update(CInode *in, version_t stid, int snap_op) { dout(10) << __func__ << " " << *in << " stid " << stid << dendl; - assert(in->is_auth()); + ceph_assert(in->is_auth()); set mds_set; if (stid > 0) { @@ -9622,7 +9622,7 @@ void MDCache::handle_snap_update(const MMDSSnapUpdate::const_ref &m) CInode *in = get_inode(m->get_ino()); if (in) { - assert(!in->is_auth()); + ceph_assert(!in->is_auth()); if (mds->get_state() > 
MDSMap::STATE_REJOIN || (mds->is_rejoin() && !in->is_rejoining())) { auto p = m->snap_blob.cbegin(); @@ -9897,7 +9897,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) mds_rank_t whoami = mds->get_nodeid(); mds_rank_t from = mds_rank_t(dis->get_source().num()); - assert(from != whoami); + ceph_assert(from != whoami); if (mds->get_state() <= MDSMap::STATE_REJOIN) { if (mds->get_state() < MDSMap::STATE_REJOIN && @@ -9930,7 +9930,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) << dendl; cur = get_inode(dis->get_base_ino()); - assert(cur); + ceph_assert(cur); // add root reply->starts_with = MDiscoverReply::INODE; @@ -9966,7 +9966,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) } } - assert(reply); + ceph_assert(reply); // add content // do some fidgeting to include a dir if they asked for the base dir, or just root. @@ -9990,7 +9990,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) fg = cur->pick_dirfrag(dis->get_dentry(i)); } else { // requester explicity specified the frag - assert(dis->wants_base_dir() || MDS_INO_IS_BASE(dis->get_base_ino())); + ceph_assert(dis->wants_base_dir() || MDS_INO_IS_BASE(dis->get_base_ino())); fg = dis->get_base_dir_frag(); if (!cur->dirfragtree.is_leaf(fg)) fg = cur->dirfragtree[fg.value()]; @@ -10067,7 +10067,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) // original request was sent. reply->set_base_dir_frag(curdir->get_frag()); } else { - assert(!curdir->is_ambiguous_auth()); // would be frozen. + ceph_assert(!curdir->is_ambiguous_auth()); // would be frozen. if (!reply->trace.length()) reply->starts_with = MDiscoverReply::DIR; replicate_dir(curdir, from, reply->trace); @@ -10078,7 +10078,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) CDentry *dn = 0; if (curdir->get_version() == 0) { // fetch newly opened dir - assert(!curdir->has_bloom()); + ceph_assert(!curdir->has_bloom()); } else if (dis->get_want().depth() > 0) { // lookup dentry dn = curdir->lookup(dis->get_dentry(i), snapid); @@ -10119,7 +10119,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) else dn = curdir->add_null_dentry(dis->get_dentry(i), snapid, snapid); } - assert(dn); + ceph_assert(dn); // don't add replica to purging dentry/inode if (dn->state_test(CDentry::STATE_PURGING)) { @@ -10172,7 +10172,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) // add inode CInode *next = dnl->get_inode(); - assert(next->is_auth()); + ceph_assert(next->is_auth()); replicate_inode(next, from, reply->trace, mds->mdsmap->get_up_features()); dout(7) << "handle_discover added inode " << *next << dendl; @@ -10183,7 +10183,7 @@ void MDCache::handle_discover(const MDiscover::const_ref &dis) } // how did we do? - assert(!reply->is_empty()); + ceph_assert(!reply->is_empty()); dout(7) << "handle_discover sending result back to asker mds." << from << dendl; mds->send_message(reply, dis->get_connection()); } @@ -10226,7 +10226,7 @@ void MDCache::handle_discover_reply(const MDiscoverReply::const_ref &m) if (!p.end() && next == MDiscoverReply::INODE) { cur = add_replica_inode(p, NULL, finished); dout(7) << "discover_reply got base inode " << *cur << dendl; - assert(cur->is_base()); + ceph_assert(cur->is_base()); next = MDiscoverReply::DIR; @@ -10237,7 +10237,7 @@ void MDCache::handle_discover_reply(const MDiscoverReply::const_ref &m) waiting_for_base_ino[from].erase(cur->ino()); } } - assert(cur); + ceph_assert(cur); // loop over discover results. 
// indexes follow each ([[dir] dentry] inode) @@ -10249,7 +10249,7 @@ void MDCache::handle_discover_reply(const MDiscoverReply::const_ref &m) if (next == MDiscoverReply::DIR) { curdir = add_replica_dir(p, cur, mds_rank_t(m->get_source().num()), finished); if (cur->ino() == m->get_base_ino() && curdir->get_frag() != m->get_base_dir_frag()) { - assert(m->get_wanted_base_dir()); + ceph_assert(m->get_wanted_base_dir()); cur->take_dir_waiting(m->get_base_dir_frag(), finished); } } else { @@ -10372,7 +10372,7 @@ CDir *MDCache::add_replica_dir(bufferlist::const_iterator& p, CInode *diri, mds_ dirfrag_t df; decode(df, p); - assert(diri->ino() == df.ino); + ceph_assert(diri->ino() == df.ino); // add it (_replica_) CDir *dir = diri->get_dirfrag(df.frag); @@ -10449,7 +10449,7 @@ CInode *MDCache::add_replica_inode(bufferlist::const_iterator& p, CDentry *dn, M in->inode_auth.first = in->ino() - MDS_INO_MDSDIR_OFFSET; dout(10) << "add_replica_inode added " << *in << dendl; if (dn) { - assert(dn->get_linkage()->is_null()); + ceph_assert(dn->get_linkage()->is_null()); dn->dir->link_primary_inode(dn, in); } } else { @@ -10625,8 +10625,8 @@ void MDCache::handle_dentry_link(const MDentryLink::const_ref &m) dout(7) << "handle_dentry_link on " << *dn << dendl; CDentry::linkage_t *dnl = dn->get_linkage(); - assert(!dn->is_auth()); - assert(dnl->is_null()); + ceph_assert(!dn->is_auth()); + ceph_assert(dnl->is_null()); } } @@ -10712,12 +10712,12 @@ void MDCache::handle_dentry_unlink(const MDentryUnlink::const_ref &m) if (dnl->is_primary()) { CInode *in = dnl->get_inode(); dn->dir->unlink_inode(dn); - assert(straydn); + ceph_assert(straydn); straydn->dir->link_primary_inode(straydn, in); // in->first is lazily updated on replica; drag it forward so // that we always keep it in sync with the dnq - assert(straydn->first >= in->first); + ceph_assert(straydn->first >= in->first); in->first = straydn->first; // update subtree map? @@ -10727,8 +10727,8 @@ void MDCache::handle_dentry_unlink(const MDentryUnlink::const_ref &m) if (m->snapbl.length()) { bool hadrealm = (in->snaprealm ? true : false); in->decode_snap_blob(m->snapbl); - assert(in->snaprealm); - assert(in->snaprealm->have_past_parents_open()); + ceph_assert(in->snaprealm); + ceph_assert(in->snaprealm->have_past_parents_open()); if (!hadrealm) do_realm_invalidate_and_update_notify(in, CEPH_SNAP_OP_SPLIT, false); } @@ -10740,18 +10740,18 @@ void MDCache::handle_dentry_unlink(const MDentryUnlink::const_ref &m) straydn = NULL; } else { - assert(!straydn); - assert(dnl->is_remote()); + ceph_assert(!straydn); + ceph_assert(dnl->is_remote()); dn->dir->unlink_inode(dn); } - assert(dnl->is_null()); + ceph_assert(dnl->is_null()); } } // race with trim_dentry() if (straydn) { - assert(straydn->get_num_ref() == 0); - assert(straydn->get_linkage()->is_null()); + ceph_assert(straydn->get_num_ref() == 0); + ceph_assert(straydn->get_linkage()->is_null()); expiremap ex; trim_dentry(straydn, ex); send_expire_messages(ex); @@ -10873,7 +10873,7 @@ void MDCache::adjust_dir_fragments(CInode *diri, if (bits > 0) { // SPLIT - assert(srcfrags.size() == 1); + ceph_assert(srcfrags.size() == 1); CDir *dir = srcfrags.front(); dir->split(bits, resultfrags, waiters, replay); @@ -10888,12 +10888,12 @@ void MDCache::adjust_dir_fragments(CInode *diri, // was i a bound? 
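For context on the dir->split(bits, ...) call and the merge path in adjust_dir_fragments: a dirfrag names a binary-prefix slice of the directory's hash space, so splitting by `bits` yields 2^bits disjoint children and merging is the inverse. The sketch below uses a hypothetical prefix type and makes no claim about Ceph's actual frag_t bit layout:

// Hypothetical binary-prefix fragment: `bits` leading bits are fixed to
// `value`. Splitting by nbits fixes nbits more, producing 2^nbits children
// that together cover the parent exactly.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Frag {
  uint32_t value;  // the fixed prefix
  int bits;        // how many bits of prefix are fixed (0 == whole space)
};

std::vector<Frag> split(const Frag &f, int nbits) {
  std::vector<Frag> kids;
  for (uint32_t i = 0; i < (1u << nbits); ++i)
    kids.push_back({(f.value << nbits) | i, f.bits + nbits});
  return kids;
}

int main() {
  for (const Frag &k : split({0, 0}, 2))  // root split by 2 -> 4 children
    std::printf("frag %u/%d\n", k.value, k.bits);
}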
if (parent_subtree) { - assert(subtrees[parent_subtree].count(dir)); + ceph_assert(subtrees[parent_subtree].count(dir)); subtrees[parent_subtree].erase(dir); for (list::iterator p = resultfrags.begin(); p != resultfrags.end(); ++p) { - assert((*p)->is_subtree_root()); + ceph_assert((*p)->is_subtree_root()); subtrees[parent_subtree].insert(*p); } } @@ -10937,7 +10937,7 @@ void MDCache::adjust_dir_fragments(CInode *diri, } for (CDir *dir : srcfrags) { - assert(dir->is_subtree_root()); + ceph_assert(dir->is_subtree_root()); dout(10) << " taking srcfrag subtree bounds from " << *dir << dendl; map >::iterator q = subtrees.find(dir); set::iterator r = q->second.begin(); @@ -10958,7 +10958,7 @@ void MDCache::adjust_dir_fragments(CInode *diri, f->merge(srcfrags, waiters, replay); if (any_subtree) { - assert(f->is_subtree_root()); + ceph_assert(f->is_subtree_root()); subtrees[f].swap(new_bounds); if (parent_subtree) subtrees[parent_subtree].insert(f); @@ -11034,7 +11034,7 @@ bool MDCache::can_fragment(CInode *diri, list& dirs) void MDCache::split_dir(CDir *dir, int bits) { dout(7) << __func__ << " " << *dir << " bits " << bits << dendl; - assert(dir->is_auth()); + ceph_assert(dir->is_auth()); CInode *diri = dir->inode; list dirs; @@ -11053,7 +11053,7 @@ void MDCache::split_dir(CDir *dir, int bits) MDRequestRef mdr = request_start_internal(CEPH_MDS_OP_FRAGMENTDIR); mdr->more()->fragment_base = dir->dirfrag(); - assert(fragments.count(dir->dirfrag()) == 0); + ceph_assert(fragments.count(dir->dirfrag()) == 0); fragment_info_t& info = fragments[dir->dirfrag()]; info.mdr = mdr; info.dirs.push_back(dir); @@ -11091,7 +11091,7 @@ void MDCache::merge_dir(CInode *diri, frag_t frag) MDRequestRef mdr = request_start_internal(CEPH_MDS_OP_FRAGMENTDIR); mdr->more()->fragment_base = basedirfrag; - assert(fragments.count(basedirfrag) == 0); + ceph_assert(fragments.count(basedirfrag) == 0); fragment_info_t& info = fragments[basedirfrag]; info.mdr = mdr; info.dirs = dirs; @@ -11110,7 +11110,7 @@ void MDCache::fragment_freeze_dirs(list& dirs) dir->auth_pin(dir); // until we mark and complete them dir->state_set(CDir::STATE_FRAGMENTING); dir->freeze_dir(); - assert(dir->is_freezing_dir()); + ceph_assert(dir->is_freezing_dir()); } } @@ -11159,7 +11159,7 @@ void MDCache::fragment_mark_and_complete(MDRequestRef& mdr) ready = false; } else if (dir->is_new()) { dout(15) << " committing new " << *dir << dendl; - assert(dir->is_dirty()); + ceph_assert(dir->is_dirty()); dir->commit(0, gather.new_sub(), true); ready = false; } @@ -11172,7 +11172,7 @@ void MDCache::fragment_mark_and_complete(MDRequestRef& mdr) for (auto &p : dir->items) { CDentry *dn = p.second; dn->get(CDentry::PIN_FRAGMENTING); - assert(!dn->state_test(CDentry::STATE_FRAGMENTING)); + ceph_assert(!dn->state_test(CDentry::STATE_FRAGMENTING)); dn->state_set(CDentry::STATE_FRAGMENTING); } dir->state_set(CDir::STATE_DNPINNEDFRAG); @@ -11192,7 +11192,7 @@ void MDCache::fragment_mark_and_complete(MDRequestRef& mdr) ++p) { CDir *dir = *p; if (!dir->is_frozen_dir()) { - assert(dir->is_freezing_dir()); + ceph_assert(dir->is_freezing_dir()); dir->add_waiter(CDir::WAIT_FROZEN, gather.new_sub()); } } @@ -11214,7 +11214,7 @@ void MDCache::fragment_unmark_unfreeze_dirs(list& dirs) CDir *dir = *p; dout(10) << " frag " << *dir << dendl; - assert(dir->state_test(CDir::STATE_FRAGMENTING)); + ceph_assert(dir->state_test(CDir::STATE_FRAGMENTING)); dir->state_clear(CDir::STATE_FRAGMENTING); if (dir->state_test(CDir::STATE_DNPINNEDFRAG)) { @@ -11222,7 +11222,7 @@ void 
MDCache::fragment_unmark_unfreeze_dirs(list& dirs) for (auto &p : dir->items) { CDentry *dn = p.second; - assert(dn->state_test(CDentry::STATE_FRAGMENTING)); + ceph_assert(dn->state_test(CDentry::STATE_FRAGMENTING)); dn->state_clear(CDentry::STATE_FRAGMENTING); dn->put(CDentry::PIN_FRAGMENTING); } @@ -11236,7 +11236,7 @@ void MDCache::fragment_unmark_unfreeze_dirs(list& dirs) bool MDCache::fragment_are_all_frozen(CDir *dir) { - assert(dir->is_frozen_dir()); + ceph_assert(dir->is_frozen_dir()); map::iterator p; for (p = fragments.lower_bound(dirfrag_t(dir->ino(), 0)); p != fragments.end() && p->first.ino == dir->ino(); @@ -11350,7 +11350,7 @@ public: resultfrags.swap(l); } void finish(int r) override { - assert(r == 0 || r == -ENOENT); + ceph_assert(r == 0 || r == -ENOENT); mdcache->_fragment_finish(basedirfrag, resultfrags); } void print(ostream& out) const override { @@ -11368,7 +11368,7 @@ void MDCache::fragment_frozen(MDRequestRef& mdr, int r) return; } - assert(r == 0); + ceph_assert(r == 0); fragment_info_t& info = it->second; dout(10) << "fragment_frozen " << basedirfrag.frag << " by " << info.bits << " on " << info.dirs.front()->get_inode() << dendl; @@ -11436,7 +11436,7 @@ void MDCache::dispatch_fragment_dir(MDRequestRef& mdr) mds->queue_waiters(waiters); for (list::iterator p = le->orig_frags.begin(); p != le->orig_frags.end(); ++p) - assert(!diri->dirfragtree.is_leaf(*p)); + ceph_assert(!diri->dirfragtree.is_leaf(*p)); le->metablob.add_dir_context(*info.resultfrags.begin()); for (list::iterator p = info.resultfrags.begin(); @@ -11484,7 +11484,7 @@ void MDCache::_fragment_logged(MDRequestRef& mdr) { dirfrag_t basedirfrag = mdr->more()->fragment_base; map::iterator it = fragments.find(basedirfrag); - assert(it != fragments.end()); + ceph_assert(it != fragments.end()); fragment_info_t &info = it->second; CInode *diri = info.resultfrags.front()->get_inode(); @@ -11518,7 +11518,7 @@ void MDCache::_fragment_stored(MDRequestRef& mdr) { dirfrag_t basedirfrag = mdr->more()->fragment_base; map::iterator it = fragments.find(basedirfrag); - assert(it != fragments.end()); + ceph_assert(it != fragments.end()); fragment_info_t &info = it->second; CInode *diri = info.resultfrags.front()->get_inode(); @@ -11560,7 +11560,7 @@ void MDCache::_fragment_stored(MDRequestRef& mdr) for (auto &p : dir->items) { CDentry *dn = p.second; - assert(dn->state_test(CDentry::STATE_FRAGMENTING)); + ceph_assert(dn->state_test(CDentry::STATE_FRAGMENTING)); dn->state_clear(CDentry::STATE_FRAGMENTING); dn->put(CDentry::PIN_FRAGMENTING); } @@ -11577,7 +11577,7 @@ void MDCache::_fragment_committed(dirfrag_t basedirfrag, list& resultfrag { dout(10) << "fragment_committed " << basedirfrag << dendl; map::iterator it = uncommitted_fragments.find(basedirfrag); - assert(it != uncommitted_fragments.end()); + ceph_assert(it != uncommitted_fragments.end()); ufragment &uf = it->second; // remove old frags @@ -11608,7 +11608,7 @@ void MDCache::_fragment_committed(dirfrag_t basedirfrag, list& resultfrag 0, gather.new_sub()); } - assert(gather.has_subs()); + ceph_assert(gather.has_subs()); gather.activate(); } @@ -11617,7 +11617,7 @@ void MDCache::_fragment_finish(dirfrag_t basedirfrag, list& resultfrags) dout(10) << "fragment_finish " << basedirfrag << "resultfrags.size=" << resultfrags.size() << dendl; map::iterator it = uncommitted_fragments.find(basedirfrag); - assert(it != uncommitted_fragments.end()); + ceph_assert(it != uncommitted_fragments.end()); ufragment &uf = it->second; // unmark & auth_unpin @@ -11693,7 +11693,7 @@ 
void MDCache::add_uncommitted_fragment(dirfrag_t basedirfrag, int bits, listsecond; CInode *diri = get_inode(p->first.ino); - assert(diri); + ceph_assert(diri); if (uf.committed) { list frags; @@ -11825,7 +11825,7 @@ void MDCache::rollback_uncommitted_fragments() diri->verify_dirfrags(); for (list::iterator q = old_frags.begin(); q != old_frags.end(); ++q) - assert(!diri->dirfragtree.is_leaf(*q)); + ceph_assert(!diri->dirfragtree.is_leaf(*q)); for (list::iterator q = resultfrags.begin(); q != resultfrags.end(); ++q) { CDir *dir = *q; @@ -11917,7 +11917,7 @@ void MDCache::show_subtrees(int dbl) // sanity check //dout(25) << "saw depth " << d << " " << *dir << dendl; if (seen.count(dir)) dout(0) << "aah, already seen " << *dir << dendl; - assert(seen.count(dir) == 0); + ceph_assert(seen.count(dir) == 0); seen.insert(dir); // nested items? @@ -11971,11 +11971,11 @@ void MDCache::show_subtrees(int dbl) << " " << auth << *dir << dendl; if (dir->ino() == MDS_INO_ROOT) - assert(dir->inode == root); + ceph_assert(dir->inode == root); if (dir->ino() == MDS_INO_MDSDIR(mds->get_nodeid())) - assert(dir->inode == myin); + ceph_assert(dir->inode == myin); if (dir->inode->is_stray() && (MDS_INO_STRAY_OWNER(dir->ino()) == mds->get_nodeid())) - assert(strays[MDS_INO_STRAY_INDEX(dir->ino())] == dir->inode); + ceph_assert(strays[MDS_INO_STRAY_INDEX(dir->ino())] == dir->inode); // nested items? if (!subtrees[dir].empty()) { @@ -12001,7 +12001,7 @@ void MDCache::show_subtrees(int dbl) dout(10) << "*** stray/lost entry in subtree map: " << *p->first << dendl; lost++; } - assert(lost == 0); + ceph_assert(lost == 0); } void MDCache::show_cache() @@ -12050,7 +12050,7 @@ int MDCache::cache_status(Formatter *f) void MDCache::dump_tree(CInode *in, const int cur_depth, const int max_depth, Formatter *f) { - assert(in); + ceph_assert(in); if ((max_depth >= 0) && (cur_depth > max_depth)) { return; } @@ -12244,7 +12244,7 @@ void MDCache::enqueue_scrub_work(MDRequestRef& mdr) return; // TODO: Remove this restriction - assert(in->is_auth()); + ceph_assert(in->is_auth()); bool locked = mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks); if (!locked) @@ -12295,7 +12295,7 @@ void MDCache::enqueue_scrub_work(MDRequestRef& mdr) auto& expiring_segments = mds->mdlog->get_expiring_segments(); for (auto logseg : expiring_segments) logseg->wait_for_expiry(gather.new_sub()); - assert(gather.has_subs()); + ceph_assert(gather.has_subs()); gather.set_finisher(new MDSInternalContextWrapper(mds, fin)); gather.activate(); } @@ -12491,11 +12491,11 @@ void MDCache::repair_inode_stats_work(MDRequestRef& mdr) for (list::iterator p = frags.begin(); p != frags.end(); ++p) { CDir *dir = diri->get_dirfrag(*p); if (!dir) { - assert(mdr->is_auth_pinned(diri)); + ceph_assert(mdr->is_auth_pinned(diri)); dir = diri->get_or_open_dirfrag(this, *p); } if (dir->get_version() == 0) { - assert(dir->is_auth()); + ceph_assert(dir->is_auth()); dir->fetch(new C_MDS_RetryRequest(this, mdr)); return; } @@ -12530,8 +12530,8 @@ do_rdlocks: diri->dirfragtree.get_leaves(frags); for (list::iterator p = frags.begin(); p != frags.end(); ++p) { CDir *dir = diri->get_dirfrag(*p); - assert(dir); - assert(dir->get_version() > 0); + ceph_assert(dir); + ceph_assert(dir->get_version() > 0); dir_info.add(dir->fnode.accounted_fragstat); nest_info.add(dir->fnode.accounted_rstat); } @@ -12625,7 +12625,7 @@ void MDCache::flush_dentry_work(MDRequestRef& mdr) return; // TODO: Is this necessary? 
Fix it if so - assert(in->is_auth()); + ceph_assert(in->is_auth()); bool locked = mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks); if (!locked) return; @@ -12743,7 +12743,7 @@ void MDCache::maybe_eval_stray(CInode *in, bool delay) { void MDCache::clear_dirty_bits_for_stray(CInode* diri) { dout(10) << __func__ << " " << *diri << dendl; - assert(diri->get_projected_parent_dir()->inode->is_stray()); + ceph_assert(diri->get_projected_parent_dir()->inode->is_stray()); list<CDir*> ls; diri->get_dirfrags(ls); for (auto &p : ls) { diff --git a/src/mds/MDCache.h b/src/mds/MDCache.h index 8f6f466e54d23..d16ecc48abaca 100644 --- a/src/mds/MDCache.h +++ b/src/mds/MDCache.h @@ -193,7 +193,7 @@ public: * on to StrayManager (i.e. this is a stray you've just created) */ void notify_stray(CDentry *dn) { - assert(dn->get_dir()->get_inode()->is_stray()); + ceph_assert(dn->get_dir()->get_inode()->is_stray()); stray_manager.eval_stray(dn); } @@ -334,7 +334,7 @@ public: CDir *get_subtree_root(CDir *dir); CDir *get_projected_subtree_root(CDir *dir); bool is_leaf_subtree(CDir *dir) { - assert(subtrees.count(dir)); + ceph_assert(subtrees.count(dir)); return subtrees[dir].empty(); } void remove_subtree(CDir *dir); @@ -501,7 +501,7 @@ public: void remove_ambiguous_slave_update(metareqid_t reqid, mds_rank_t master) { auto p = ambiguous_slave_updates.find(master); auto q = p->second.find(reqid); - assert(q != p->second.end()); + ceph_assert(q != p->second.end()); p->second.erase(q); if (p->second.empty()) ambiguous_slave_updates.erase(p); @@ -519,7 +519,7 @@ public: return my_ambiguous_imports.count(base); } void get_ambiguous_import_bounds(dirfrag_t base, vector<dirfrag_t>& bounds) { - assert(my_ambiguous_imports.count(base)); + ceph_assert(my_ambiguous_imports.count(base)); bounds = my_ambiguous_imports[base]; } void cancel_ambiguous_import(CDir *); @@ -623,8 +623,8 @@ public: return NULL; } void remove_replay_cap_reconnect(inodeno_t ino, client_t client) { - assert(cap_imports[ino].size() == 1); - assert(cap_imports[ino][client].size() == 1); + ceph_assert(cap_imports[ino].size() == 1); + ceph_assert(cap_imports[ino][client].size() == 1); cap_imports.erase(ino); } void wait_replay_cap_reconnect(inodeno_t ino, MDSInternalContextBase *c) { @@ -1173,7 +1173,7 @@ private: void rollback_uncommitted_fragment(dirfrag_t basedirfrag, list<frag_t>& old_frags); public: void wait_for_uncommitted_fragment(dirfrag_t dirfrag, MDSInternalContextBase *c) { - assert(uncommitted_fragments.count(dirfrag)); + ceph_assert(uncommitted_fragments.count(dirfrag)); uncommitted_fragments[dirfrag].waiters.push_back(c); } void split_dir(CDir *dir, int byn); @@ -1218,7 +1218,7 @@ public: void show_subtrees(int dbl=10); CInode *hack_pick_random_inode() { - assert(!inode_map.empty()); + ceph_assert(!inode_map.empty()); int n = rand() % inode_map.size(); auto p = inode_map.begin(); while (n--) ++p; diff --git a/src/mds/MDLog.cc b/src/mds/MDLog.cc index ed2694095f1ce..94d933bfd98c8 100644 --- a/src/mds/MDLog.cc +++ b/src/mds/MDLog.cc @@ -159,11 +159,11 @@ void MDLog::create(MDSInternalContextBase *c) ino = MDS_INO_LOG_OFFSET + mds->get_nodeid(); // Instantiate Journaler and start async write to RADOS - assert(journaler == NULL); + ceph_assert(journaler == NULL); journaler = new Journaler("mdlog", ino, mds->mdsmap->get_metadata_pool(), CEPH_FS_ONDISK_MAGIC, mds->objecter, logger, l_mdl_jlat, mds->finisher); - assert(journaler->is_readonly()); + ceph_assert(journaler->is_readonly()); journaler->set_write_error_handler(new C_MDL_WriteError(this)); 
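A large share of the MDLog conversions that follow (_start_entry, _submit_entry, _prepare_new_segment, _trim_expired_segments) assert the same precondition: the caller already holds submit_mutex. A minimal sketch of that idiom using standard-library stand-ins, since std::mutex cannot answer is_locked_by_me():

// Underscore-prefixed helpers document "caller holds the lock" by asserting
// ownership on entry rather than locking again. This stand-in lock tracks
// the owning thread id itself; it is not Ceph's Mutex.
#include <cassert>
#include <mutex>
#include <thread>

class SubmitLock {
  std::mutex m;
  std::thread::id owner;
public:
  void lock()   { m.lock(); owner = std::this_thread::get_id(); }
  void unlock() { owner = std::thread::id(); m.unlock(); }
  bool is_locked_by_me() const { return owner == std::this_thread::get_id(); }
};

class Log {
  SubmitLock submit_mutex;
  void _start_entry() {
    assert(submit_mutex.is_locked_by_me());  // precondition, not a lock
    // ... register the event in the current segment ...
  }
public:
  void start_entry() {
    std::lock_guard<SubmitLock> l(submit_mutex);  // take the lock once, here
    _start_entry();
  }
};

int main() { Log().start_entry(); }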
journaler->set_writeable(); journaler->create(&mds->mdcache->default_log_layout, g_conf()->mds_journal_format); @@ -187,7 +187,7 @@ void MDLog::open(MDSInternalContextBase *c) { dout(5) << "open discovering log bounds" << dendl; - assert(!recovery_thread.is_started()); + ceph_assert(!recovery_thread.is_started()); recovery_thread.set_completion(c); recovery_thread.create("md_recov_open"); @@ -221,8 +221,8 @@ void MDLog::reopen(MDSInternalContextBase *c) // Because we will call append() at the completion of this, check that we have already // read the whole journal. - assert(journaler != NULL); - assert(journaler->get_read_pos() == journaler->get_write_pos()); + ceph_assert(journaler != NULL); + ceph_assert(journaler->get_read_pos() == journaler->get_write_pos()); delete journaler; journaler = NULL; @@ -253,9 +253,9 @@ void MDLog::append() void MDLog::_start_entry(LogEvent *e) { - assert(submit_mutex.is_locked_by_me()); + ceph_assert(submit_mutex.is_locked_by_me()); - assert(cur_event == NULL); + ceph_assert(cur_event == NULL); cur_event = e; event_seq++; @@ -269,22 +269,22 @@ void MDLog::_start_entry(LogEvent *e) void MDLog::cancel_entry(LogEvent *le) { - assert(le == cur_event); + ceph_assert(le == cur_event); cur_event = NULL; delete le; } void MDLog::_submit_entry(LogEvent *le, MDSLogContextBase *c) { - assert(submit_mutex.is_locked_by_me()); - assert(!mds->is_any_replay()); - assert(!capped); + ceph_assert(submit_mutex.is_locked_by_me()); + ceph_assert(!mds->is_any_replay()); + ceph_assert(!capped); - assert(le == cur_event); + ceph_assert(le == cur_event); cur_event = NULL; // let the event register itself in the segment - assert(!segments.empty()); + ceph_assert(!segments.empty()); LogSegment *ls = segments.rbegin()->second; ls->num_events++; @@ -401,7 +401,7 @@ void MDLog::_submit_thread() MDSLogContextBase *fin; if (data.fin) { fin = dynamic_cast<MDSLogContextBase*>(data.fin); - assert(fin); + ceph_assert(fin); fin->set_write_pos(new_write_pos); } else { fin = new C_MDL_Flushed(this, new_write_pos); @@ -420,7 +420,7 @@ void MDLog::_submit_thread() if (data.fin) { MDSInternalContextBase* fin = dynamic_cast<MDSInternalContextBase*>(data.fin); - assert(fin); + ceph_assert(fin); C_MDL_Flushed *fin2 = new C_MDL_Flushed(this, fin); fin2->set_write_pos(journaler->get_write_pos()); journaler->wait_for_flush(fin2); @@ -488,11 +488,11 @@ void MDLog::cap() void MDLog::shutdown() { - assert(mds->mds_lock.is_locked_by_me()); + ceph_assert(mds->mds_lock.is_locked_by_me()); dout(5) << "shutdown" << dendl; if (submit_thread.is_started()) { - assert(mds->is_daemon_stopping()); + ceph_assert(mds->is_daemon_stopping()); if (submit_thread.am_self()) { // Called suicide from the thread: trust it to do no work after @@ -544,7 +544,7 @@ void MDLog::_start_new_segment() void MDLog::_prepare_new_segment() { - assert(submit_mutex.is_locked_by_me()); + ceph_assert(submit_mutex.is_locked_by_me()); uint64_t seq = event_seq + 1; dout(7) << __func__ << " seq " << seq << dendl; @@ -562,7 +562,7 @@ void MDLog::_prepare_new_segment() void MDLog::_journal_segment_subtree_map(MDSInternalContextBase *onsync) { - assert(submit_mutex.is_locked_by_me()); + ceph_assert(submit_mutex.is_locked_by_me()); dout(7) << __func__ << dendl; ESubtreeMap *sle = mds->mdcache->create_subtree_map(); @@ -643,7 +643,7 @@ void MDLog::trim(int m) // look at first segment LogSegment *ls = p->second; - assert(ls); + ceph_assert(ls); ++p; if (pending_events.count(ls->seq) || @@ -660,7 +660,7 @@ void MDLog::trim(int m) dout(5) << "trim already expired segment " << ls->seq << "/" << 
ls->offset << ", " << ls->num_events << " events" << dendl; } else { - assert(expiring_segments.count(ls) == 0); + ceph_assert(expiring_segments.count(ls) == 0); new_expiring_segments++; expiring_segments.insert(ls); expiring_events += ls->num_events; @@ -749,7 +749,7 @@ int MDLog::trim_all() dout(5) << "trim already expired segment " << ls->seq << "/" << ls->offset << ", " << ls->num_events << " events" << dendl; } else { - assert(expiring_segments.count(ls) == 0); + ceph_assert(expiring_segments.count(ls) == 0); expiring_segments.insert(ls); expiring_events += ls->num_events; submit_mutex.Unlock(); @@ -780,7 +780,7 @@ void MDLog::try_expire(LogSegment *ls, int op_prio) } else { dout(10) << "try_expire expired segment " << ls->seq << "/" << ls->offset << dendl; submit_mutex.Lock(); - assert(expiring_segments.count(ls)); + ceph_assert(expiring_segments.count(ls)); expiring_segments.erase(ls); expiring_events -= ls->num_events; _expired(ls); @@ -805,7 +805,7 @@ void MDLog::_maybe_expired(LogSegment *ls, int op_prio) void MDLog::_trim_expired_segments() { - assert(submit_mutex.is_locked_by_me()); + ceph_assert(submit_mutex.is_locked_by_me()); uint64_t oft_committed_seq = mds->mdcache->open_file_table.get_committed_log_seq(); @@ -861,7 +861,7 @@ void MDLog::trim_expired_segments() void MDLog::_expired(LogSegment *ls) { - assert(submit_mutex.is_locked_by_me()); + ceph_assert(submit_mutex.is_locked_by_me()); dout(5) << "_expired segment " << ls->seq << "/" << ls->offset << ", " << ls->num_events << " events" << dendl; @@ -891,8 +891,8 @@ void MDLog::_expired(LogSegment *ls) void MDLog::replay(MDSInternalContextBase *c) { - assert(journaler->is_active()); - assert(journaler->is_readonly()); + ceph_assert(journaler->is_active()); + ceph_assert(journaler->is_readonly()); // empty? if (journaler->get_read_pos() == journaler->get_write_pos()) { @@ -912,7 +912,7 @@ void MDLog::replay(MDSInternalContextBase *c) dout(10) << "replay start, from " << journaler->get_read_pos() << " to " << journaler->get_write_pos() << dendl; - assert(num_events == 0 || already_replayed); + ceph_assert(num_events == 0 || already_replayed); if (already_replayed) { // Ensure previous instance of ReplayThread is joined before // we create another one @@ -938,7 +938,7 @@ void MDLog::replay(MDSInternalContextBase *c) */ void MDLog::_recovery_thread(MDSInternalContextBase *completion) { - assert(journaler == NULL); + ceph_assert(journaler == NULL); if (g_conf()->mds_journal_format > JOURNAL_FORMAT_MAX) { dout(0) << "Configuration value for mds_journal_format is out of bounds, max is " << JOURNAL_FORMAT_MAX << dendl; @@ -959,7 +959,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion) jp.front = default_log_ino; int write_result = jp.save(mds->objecter); // Nothing graceful we can do for this - assert(write_result >= 0); + ceph_assert(write_result >= 0); } else if (read_result == -EBLACKLISTED) { derr << "Blacklisted during JournalPointer read! Respawning..." << dendl; mds->respawn(); @@ -1005,7 +1005,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion) mds->clog->error() << "Error recovering journal " << jp.front << ": " << cpp_strerror(recovery_result); mds->damaged_unlocked(); - assert(recovery_result == 0); // Unreachable because damaged() calls respawn() + ceph_assert(recovery_result == 0); // Unreachable because damaged() calls respawn() } // We could read journal, so we can erase it. 
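The trim()/trim_all()/try_expire() asserts just above protect a set-plus-counter invariant: a segment enters expiring_segments at most once, leaves it exactly once, and the aggregate event count moves with it. A small stand-in showing the invariant:

// Enter the set exactly once, leave exactly once, keep the count in sync.
#include <cassert>
#include <set>

struct LogSegment { unsigned num_events = 0; };

class Trimmer {
  std::set<LogSegment *> expiring_segments;
  unsigned expiring_events = 0;
public:
  void queue_expire(LogSegment *ls) {
    assert(expiring_segments.count(ls) == 0);  // never queued twice
    expiring_segments.insert(ls);
    expiring_events += ls->num_events;
  }
  void expired(LogSegment *ls) {
    assert(expiring_segments.count(ls));       // must have been queued
    expiring_segments.erase(ls);
    expiring_events -= ls->num_events;
  }
};

int main() {
  LogSegment s{42};
  Trimmer t;
  t.queue_expire(&s);
  t.expired(&s);
}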
@@ -1021,7 +1021,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion) jp.back = 0; int write_result = jp.save(mds->objecter); // Nothing graceful we can do for this - assert(write_result >= 0); + ceph_assert(write_result >= 0); } } @@ -1051,7 +1051,7 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion) mds->clog->error() << "Error recovering journal " << jp.front << ": " << cpp_strerror(recovery_result); mds->damaged_unlocked(); - assert(recovery_result == 0); // Unreachable because damaged() calls respawn() + ceph_assert(recovery_result == 0); // Unreachable because damaged() calls respawn() } /* Check whether the front journal format is acceptable or needs re-write */ @@ -1099,9 +1099,9 @@ void MDLog::_recovery_thread(MDSInternalContextBase *completion) */ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journal, MDSInternalContextBase *completion) { - assert(!jp_in.is_null()); - assert(completion != NULL); - assert(old_journal != NULL); + ceph_assert(!jp_in.is_null()); + ceph_assert(completion != NULL); + ceph_assert(old_journal != NULL); JournalPointer jp = jp_in; @@ -1110,7 +1110,7 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa inodeno_t secondary_ino = MDS_INO_LOG_BACKUP_OFFSET + mds->get_nodeid(); jp.back = (jp.front == primary_ino ? secondary_ino : primary_ino); int write_result = jp.save(mds->objecter); - assert(write_result == 0); + ceph_assert(write_result == 0); /* Create the new Journaler file */ Journaler *new_journal = new Journaler("mdlog", jp.back, @@ -1163,13 +1163,13 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa break; // Read one serialized LogEvent - assert(old_journal->is_readable()); + ceph_assert(old_journal->is_readable()); bufferlist bl; uint64_t le_pos = old_journal->get_read_pos(); bool r = old_journal->try_read_entry(bl); if (!r && old_journal->get_error()) continue; - assert(r); + ceph_assert(r); // Update segment_pos_rewrite LogEvent *le = LogEvent::decode(bl); @@ -1203,7 +1203,7 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa if (le->get_type() == EVENT_SUBTREEMAP || le->get_type() == EVENT_SUBTREEMAP_TEST) { ESubtreeMap *sle = dynamic_cast(le); - assert(sle != NULL); + ceph_assert(sle != NULL); dout(20) << __func__ << " zeroing expire_pos in subtreemap event at " << le_pos << " seq=" << sle->event_seq << dendl; sle->expire_pos = 0; @@ -1236,28 +1236,28 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa // If failed to rewrite journal, leave the part written journal // as garbage to be cleaned up next startup. 
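The JournalPointer choreography around these _recovery_thread/_reformat_journal hunks is what makes the rewrite crash-safe: write the replacement journal under `back`, persist the pointer, flip `front`/`back`, persist again, erase the old journal, then clear `back`. At every step a restart finds a readable journal through `front`. A compressed stand-in of that sequence, where save() is a stub for jp.save(objecter) and the ino values are hypothetical:

// Two-slot pointer dance for crash-safe journal rewrite. Each save() is a
// durable write of the pointer; a crash between any two steps leaves either
// the old or the new journal reachable via `front`.
#include <cassert>
#include <cstdint>
#include <utility>

struct JournalPointer {
  uint64_t front = 0;       // ino of the live journal
  uint64_t back = 0;        // ino of the journal being written, if any
  int save() { return 0; }  // stub standing in for jp.save(objecter)
};

void rewrite(JournalPointer &jp, uint64_t new_ino) {
  jp.back = new_ino;             // 1. record where the rewrite will land
  int r = jp.save();
  assert(r == 0);
  // ... write and flush the new journal at jp.back ...
  std::swap(jp.front, jp.back);  // 2. flip: the new journal becomes live
  r = jp.save();
  assert(r == 0);
  // ... erase the old journal (now jp.back) ...
  jp.back = 0;                   // 3. back to clean single-journal state
  r = jp.save();
  assert(r == 0);
  (void)r;
}

int main() {
  JournalPointer jp;
  jp.front = 0x200;              // hypothetical primary journal ino
  rewrite(jp, 0x3000200);        // hypothetical secondary journal ino
}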
- assert(r == 0); + ceph_assert(r == 0); /* Now that the new journal is safe, we can flip the pointers */ inodeno_t const tmp = jp.front; jp.front = jp.back; jp.back = tmp; write_result = jp.save(mds->objecter); - assert(write_result == 0); + ceph_assert(write_result == 0); /* Delete the old journal to free space */ dout(1) << "New journal flushed, erasing old journal" << dendl; C_SaferCond erase_waiter; old_journal->erase(&erase_waiter); int erase_result = erase_waiter.wait(); - assert(erase_result == 0); + ceph_assert(erase_result == 0); { Mutex::Locker l(mds->mds_lock); if (mds->is_daemon_stopping()) { delete new_journal; return; } - assert(journaler == old_journal); + ceph_assert(journaler == old_journal); journaler = NULL; delete old_journal; } @@ -1265,7 +1265,7 @@ void MDLog::_reformat_journal(JournalPointer const &jp_in, Journaler *old_journa /* Update the pointer to reflect we're back in clean single journal state. */ jp.back = 0; write_result = jp.save(mds->objecter); - assert(write_result == 0); + ceph_assert(write_result == 0); /* Reset the Journaler object to its default state */ dout(1) << "Journal rewrite complete, continuing with normal startup" << dendl; @@ -1369,7 +1369,7 @@ void MDLog::_replay_thread() journaler->get_read_pos() == journaler->get_write_pos()) break; - assert(journaler->is_readable() || mds->is_daemon_stopping()); + ceph_assert(journaler->is_readable() || mds->is_daemon_stopping()); // read it uint64_t pos = journaler->get_read_pos(); @@ -1377,7 +1377,7 @@ void MDLog::_replay_thread() bool r = journaler->try_read_entry(bl); if (!r && journaler->get_error()) continue; - assert(r); + ceph_assert(r); // unpack event LogEvent *le = LogEvent::decode(bl); @@ -1444,7 +1444,7 @@ void MDLog::_replay_thread() // done! if (r == 0) { - assert(journaler->get_read_pos() == journaler->get_write_pos()); + ceph_assert(journaler->get_read_pos() == journaler->get_write_pos()); dout(10) << "_replay - complete, " << num_events << " events" << dendl; diff --git a/src/mds/MDLog.h b/src/mds/MDLog.h index cbc609a6fbe6a..5fa77a43e0701 100644 --- a/src/mds/MDLog.h +++ b/src/mds/MDLog.h @@ -143,7 +143,7 @@ protected: void set_safe_pos(uint64_t pos) { Mutex::Locker l(submit_mutex); - assert(pos >= safe_pos); + ceph_assert(pos >= safe_pos); safe_pos = pos; } friend class MDSLogContextBase; @@ -172,7 +172,7 @@ protected: friend class MDCache; uint64_t get_last_segment_seq() const { - assert(!segments.empty()); + ceph_assert(!segments.empty()); return segments.rbegin()->first; } LogSegment *get_oldest_segment() { @@ -238,7 +238,7 @@ public: } LogSegment *get_current_segment() { - assert(!segments.empty()); + ceph_assert(!segments.empty()); return segments.rbegin()->second; } diff --git a/src/mds/MDSCacheObject.h b/src/mds/MDSCacheObject.h index c9c4590e77be3..cbfc89da0a0eb 100644 --- a/src/mds/MDSCacheObject.h +++ b/src/mds/MDSCacheObject.h @@ -164,9 +164,9 @@ protected: virtual void last_put() {} virtual void bad_put(int by) { #ifdef MDS_REF_SET - assert(ref_map[by] > 0); + ceph_assert(ref_map[by] > 0); #endif - assert(ref > 0); + ceph_assert(ref > 0); } virtual void _put() {} void put(int by) { @@ -191,7 +191,7 @@ protected: virtual void first_get() {} virtual void bad_get(int by) { #ifdef MDS_REF_SET - assert(by < 0 || ref_map[by] == 0); + ceph_assert(by < 0 || ref_map[by] == 0); #endif ceph_abort(); } @@ -276,11 +276,11 @@ protected: get_replicas()[mds] = nonce; } unsigned get_replica_nonce(mds_rank_t mds) { - assert(get_replicas().count(mds)); + 
ceph_assert(get_replicas().count(mds)); return get_replicas()[mds]; } void remove_replica(mds_rank_t mds) { - assert(get_replicas().count(mds)); + ceph_assert(get_replicas().count(mds)); get_replicas().erase(mds); if (get_replicas().empty()) { put(PIN_REPLICATED); diff --git a/src/mds/MDSContext.cc b/src/mds/MDSContext.cc index ca62a2acb7987..b6eb2750bbde2 100644 --- a/src/mds/MDSContext.cc +++ b/src/mds/MDSContext.cc @@ -25,8 +25,8 @@ void MDSInternalContextBase::complete(int r) { MDSRank *mds = get_mds(); dout(10) << "MDSInternalContextBase::complete: " << typeid(*this).name() << dendl; - assert(mds != NULL); - assert(mds->mds_lock.is_locked_by_me()); + ceph_assert(mds != NULL); + ceph_assert(mds->mds_lock.is_locked_by_me()); MDSContext::complete(r); } @@ -100,7 +100,7 @@ void MDSIOContextBase::complete(int r) { MDSRank *mds = get_mds(); dout(10) << "MDSIOContextBase::complete: " << typeid(*this).name() << dendl; - assert(mds != NULL); + ceph_assert(mds != NULL); Mutex::Locker l(mds->mds_lock); if (mds->is_daemon_stopping()) { diff --git a/src/mds/MDSContext.h b/src/mds/MDSContext.h index 16457f61726a8..0ea6885334773 100644 --- a/src/mds/MDSContext.h +++ b/src/mds/MDSContext.h @@ -69,7 +69,7 @@ protected: public: explicit MDSInternalContext(MDSRank *mds_) : mds(mds_) { - assert(mds != NULL); + ceph_assert(mds != NULL); } }; @@ -142,7 +142,7 @@ protected: public: explicit MDSIOContext(MDSRank *mds_) : mds(mds_) { - assert(mds != NULL); + ceph_assert(mds != NULL); } }; @@ -192,7 +192,7 @@ protected: public: C_IO_Wrapper(MDSRank *mds_, MDSInternalContextBase *wrapped_) : MDSIOContext(mds_), async(true), wrapped(wrapped_) { - assert(wrapped != NULL); + ceph_assert(wrapped != NULL); } ~C_IO_Wrapper() override { diff --git a/src/mds/MDSDaemon.cc b/src/mds/MDSDaemon.cc index 78c4f578c8a40..bc247f02105bc 100644 --- a/src/mds/MDSDaemon.cc +++ b/src/mds/MDSDaemon.cc @@ -180,113 +180,113 @@ void MDSDaemon::set_up_admin_socket() { int r; AdminSocket *admin_socket = g_ceph_context->get_admin_socket(); - assert(asok_hook == nullptr); + ceph_assert(asok_hook == nullptr); asok_hook = new MDSSocketHook(this); r = admin_socket->register_command("status", "status", asok_hook, "high-level status of MDS"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_ops_in_flight", "dump_ops_in_flight", asok_hook, "show the ops currently in flight"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("ops", "ops", asok_hook, "show the ops currently in flight"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_blocked_ops", "dump_blocked_ops", asok_hook, "show the blocked ops currently in flight"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_historic_ops", "dump_historic_ops", asok_hook, "show recent ops"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump_historic_ops_by_duration", "dump_historic_ops_by_duration", asok_hook, "show recent ops, sorted by op duration"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("scrub_path", "scrub_path name=path,type=CephString " "name=scrubops,type=CephChoices," "strings=force|recursive|repair,n=N,req=false", asok_hook, "scrub an inode and output results"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("tag path", "tag path name=path,type=CephString" " name=tag,type=CephString", asok_hook, "Apply scrub tag recursively"); - assert(r == 0); + ceph_assert(r == 0); r = 
admin_socket->register_command("flush_path", "flush_path name=path,type=CephString", asok_hook, "flush an inode (and its dirfrags)"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("export dir", "export dir " "name=path,type=CephString " "name=rank,type=CephInt", asok_hook, "migrate a subtree to named MDS"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump cache", "dump cache name=path,type=CephString,req=false", asok_hook, "dump metadata cache (optionally to a file)"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("cache status", "cache status", asok_hook, "show cache status"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump tree", "dump tree " "name=root,type=CephString,req=true " "name=depth,type=CephInt,req=false ", asok_hook, "dump metadata cache for subtree"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump loads", "dump loads", asok_hook, "dump metadata loads"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump snaps", "dump snaps name=server,type=CephChoices,strings=--server,req=false", asok_hook, "dump snapshots"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("session evict", "session evict name=client_id,type=CephString", asok_hook, "Evict a CephFS client"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("osdmap barrier", "osdmap barrier name=target_epoch,type=CephInt", asok_hook, "Wait until the MDS has this OSD map epoch"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("session ls", "session ls", asok_hook, "Enumerate connected CephFS clients"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("flush journal", "flush journal", asok_hook, "Flush the journal to the backing store"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("force_readonly", "force_readonly", asok_hook, "Force MDS to read-only mode"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("get subtrees", "get subtrees", asok_hook, "Return the subtree map"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dirfrag split", "dirfrag split " "name=path,type=CephString,req=true " @@ -294,31 +294,31 @@ void MDSDaemon::set_up_admin_socket() "name=bits,type=CephInt,req=true ", asok_hook, "Fragment directory by path"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dirfrag merge", "dirfrag merge " "name=path,type=CephString,req=true " "name=frag,type=CephString,req=true", asok_hook, "De-fragment directory by path"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dirfrag ls", "dirfrag ls " "name=path,type=CephString,req=true", asok_hook, "List fragments in directory"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("openfiles ls", "openfiles ls", asok_hook, "List the opening files and their caps"); - assert(r == 0); + ceph_assert(r == 0); r = admin_socket->register_command("dump inode", "dump inode " "name=number,type=CephInt,req=true", asok_hook, "dump inode by inode number"); - assert(r == 0); + ceph_assert(r == 0); } void MDSDaemon::clean_up_admin_socket() @@ -542,7 +542,7 @@ void MDSDaemon::reset_tick() tick_event = timer.add_event_after( g_conf()->mds_tick_interval, new FunctionContext([this](int) { - assert(mds_lock.is_locked_by_me()); + 
ceph_assert(mds_lock.is_locked_by_me()); tick(); })); } @@ -564,7 +564,7 @@ void MDSDaemon::send_command_reply(const MCommand::const_ref &m, MDSRank *mds_ra { auto priv = m->get_connection()->get_priv(); auto session = static_cast(priv.get()); - assert(session != NULL); + ceph_assert(session != NULL); // If someone is using a closed session for sending commands (e.g. // the ceph CLI) then we should feel free to clean up this connection // as soon as we've sent them a response. @@ -576,7 +576,7 @@ void MDSDaemon::send_command_reply(const MCommand::const_ref &m, MDSRank *mds_ra if (!live_session) { // This session only existed to issue commands, so terminate it // as soon as we can. - assert(session->is_closed()); + ceph_assert(session->is_closed()); session->get_connection()->mark_disposable(); } priv.reset(); @@ -591,7 +591,7 @@ void MDSDaemon::handle_command(const MCommand::const_ref &m) { auto priv = m->get_connection()->get_priv(); auto session = static_cast(priv.get()); - assert(session != NULL); + ceph_assert(session != NULL); int r = 0; cmdmap_t cmdmap; @@ -698,8 +698,8 @@ int MDSDaemon::_handle_command( Context **run_later, bool *need_reply) { - assert(outbl != NULL); - assert(outs != NULL); + ceph_assert(outbl != NULL); + ceph_assert(outs != NULL); class SuicideLater : public Context { @@ -820,7 +820,7 @@ int MDSDaemon::_handle_command( // FIXME harmonize `session kill` with admin socket session evict int64_t session_id = 0; bool got = cmd_getval(cct, cmdmap, "session_id", session_id); - assert(got); + ceph_assert(got); bool killed = mds_rank->evict_client(session_id, false, g_conf()->mds_session_blacklist_on_evict, ss); @@ -1018,7 +1018,7 @@ void MDSDaemon::_handle_mds_map(const MDSMap &mdsmap) void MDSDaemon::handle_signal(int signum) { - assert(signum == SIGINT || signum == SIGTERM); + ceph_assert(signum == SIGINT || signum == SIGTERM); derr << "*** got signal " << sig_str(signum) << " ***" << dendl; { Mutex::Locker l(mds_lock); @@ -1031,10 +1031,10 @@ void MDSDaemon::handle_signal(int signum) void MDSDaemon::suicide() { - assert(mds_lock.is_locked()); + ceph_assert(mds_lock.is_locked()); // make sure we don't suicide twice - assert(stopping == false); + ceph_assert(stopping == false); stopping = true; dout(1) << "suicide! Wanted state " @@ -1106,7 +1106,7 @@ void MDSDaemon::respawn() /* Print CWD for the user's interest */ char buf[PATH_MAX]; char *cwd = getcwd(buf, sizeof(buf)); - assert(cwd); + ceph_assert(cwd); dout(1) << " cwd " << cwd << dendl; /* Fall back to a best-effort: just running in our CWD */ diff --git a/src/mds/MDSMap.cc b/src/mds/MDSMap.cc index 446b59cb61651..02caebaaf2511 100644 --- a/src/mds/MDSMap.cc +++ b/src/mds/MDSMap.cc @@ -411,7 +411,7 @@ void MDSMap::get_health(list >& summary, if (m == m_end) { std::cerr << "Up rank " << u.first << " GID " << u.second << " not found!" 
<< std::endl; } - assert(m != m_end); + ceph_assert(m != m_end); const mds_info_t &mds_info(m->second); if (mds_info.laggy()) { laggy.insert(mds_info.name); @@ -805,7 +805,7 @@ void MDSMap::decode(bufferlist::const_iterator& p) decode(inline_data_enabled, p); if (ev >= 8) { - assert(struct_v >= 5); + ceph_assert(struct_v >= 5); decode(enabled, p); decode(fs_name, p); } else { diff --git a/src/mds/MDSMap.h b/src/mds/MDSMap.h index 6ff298ac14ef5..37a26e60028b1 100644 --- a/src/mds/MDSMap.h +++ b/src/mds/MDSMap.h @@ -278,7 +278,7 @@ public: mds_rank_t get_old_max_mds() const { return old_max_mds; } mds_rank_t get_standby_count_wanted(mds_rank_t standby_daemon_count) const { - assert(standby_daemon_count >= 0); + ceph_assert(standby_daemon_count >= 0); std::set s; get_standby_replay_mds_set(s); mds_rank_t standbys_avail = (mds_rank_t)s.size()+standby_daemon_count; @@ -313,7 +313,7 @@ public: return mds_info.at(gid); } const mds_info_t& get_mds_info(mds_rank_t m) const { - assert(up.count(m) && mds_info.count(up.at(m))); + ceph_assert(up.count(m) && mds_info.count(up.at(m))); return mds_info.at(up.at(m)); } mds_gid_t find_mds_gid_by_name(std::string_view s) const { @@ -391,7 +391,7 @@ public: ++p) { std::map::const_iterator q = mds_info.find(p->second); - assert(q != mds_info.end()); + ceph_assert(q != mds_info.end()); if (first) { cached_up_features = q->second.mds_features; first = false; @@ -408,7 +408,7 @@ public: */ void get_down_mds_set(std::set *s) const { - assert(s != NULL); + ceph_assert(s != NULL); s->insert(failed.begin(), failed.end()); s->insert(damaged.begin(), damaged.end()); } diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc index 5ef0627719181..58180e8616edd 100644 --- a/src/mds/MDSRank.cc +++ b/src/mds/MDSRank.cc @@ -384,7 +384,7 @@ void MDSRankDispatcher::shutdown() // It should never be possible for shutdown to get called twice, because // anyone picking up mds_lock checks if stopping is true and drops // out if it is. - assert(stopping == false); + ceph_assert(stopping == false); stopping = true; dout(1) << __func__ << ": shutting down rank " << whoami << dendl; @@ -443,8 +443,8 @@ class C_MDS_VoidFn : public MDSInternalContext C_MDS_VoidFn(MDSRank *mds_, fn_ptr fn_) : MDSInternalContext(mds_), fn(fn_) { - assert(mds_); - assert(fn_); + ceph_assert(mds_); + ceph_assert(fn_); } void finish(int r) override @@ -494,8 +494,8 @@ void MDSRank::respawn() void MDSRank::damaged() { - assert(whoami != MDS_RANK_NONE); - assert(mds_lock.is_locked_by_me()); + ceph_assert(whoami != MDS_RANK_NONE); + ceph_assert(mds_lock.is_locked_by_me()); beacon.set_want_state(*mdsmap, MDSMap::STATE_DAMAGED); monc->flush_log(); // Flush any clog error from before we were called @@ -558,8 +558,8 @@ void *MDSRank::ProgressThread::entry() void MDSRank::ProgressThread::shutdown() { - assert(mds->mds_lock.is_locked_by_me()); - assert(mds->stopping); + ceph_assert(mds->mds_lock.is_locked_by_me()); + ceph_assert(mds->stopping); if (am_self()) { // Stopping is set, we will fall out of our main loop naturally @@ -809,7 +809,7 @@ bool MDSRank::handle_deferrable_message(const Message::const_ref &m) */ void MDSRank::_advance_queues() { - assert(mds_lock.is_locked_by_me()); + ceph_assert(mds_lock.is_locked_by_me()); if (!finished_queue.empty()) { dout(7) << "mds has " << finished_queue.size() << " queued contexts" << dendl; @@ -853,7 +853,7 @@ void MDSRank::heartbeat_reset() // after a call to suicide() completes, in which case MDSRank::hb // has been freed and we are a no-op. 
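One non-assert detail worth pausing on in the MDSMap.h hunk above: get_up_features() folds every up daemon's feature mask into one cached value, seeding the accumulator from the first daemon; the else-branch is elided here, but it presumably intersects with each remaining mask. A stand-in of that fold under that assumption:

// The cluster's usable feature set is the AND of every up daemon's mask:
// seed with the first mask, then bitwise-AND in the rest.
#include <cstdint>
#include <cstdio>
#include <map>

uint64_t up_features(const std::map<int, uint64_t> &mask_by_rank) {
  bool first = true;
  uint64_t out = 0;                 // empty cluster -> no features
  for (const auto &p : mask_by_rank) {
    if (first) {
      out = p.second;
      first = false;
    } else {
      out &= p.second;              // keep only commonly supported bits
    }
  }
  return out;
}

int main() {
  std::map<int, uint64_t> up{{0, 0b1110}, {1, 0b0111}};
  std::printf("common features: %#llx\n",
              (unsigned long long)up_features(up));  // prints 0x6
}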
if (!hb) { - assert(stopping); + ceph_assert(stopping); return; } @@ -905,7 +905,7 @@ Session *MDSRank::get_session(const Message::const_ref &m) << dendl; imported_session->info.auth_name = session->info.auth_name; //assert(session->info.auth_name == imported_session->info.auth_name); - assert(session->info.inst == imported_session->info.inst); + ceph_assert(session->info.inst == imported_session->info.inst); imported_session->set_connection(session->get_connection().get()); // send out any queued messages while (!session->preopen_out_queue.empty()) { @@ -913,7 +913,7 @@ Session *MDSRank::get_session(const Message::const_ref &m) session->preopen_out_queue.pop_front(); } imported_session->auth_caps = session->auth_caps; - assert(session->get_nref() == 1); + ceph_assert(session->get_nref() == 1); imported_session->get_connection()->set_priv(imported_session->get()); session = imported_session; } @@ -926,7 +926,7 @@ Session *MDSRank::get_session(const Message::const_ref &m) void MDSRank::send_message(const Message::ref& m, const ConnectionRef& c) { - assert(c); + ceph_assert(c); c->send_message2(m); } @@ -951,7 +951,7 @@ void MDSRank::send_message_mds(const Message::ref& m, mds_rank_t mds) void MDSRank::forward_message_mds(const MClientRequest::const_ref& m, mds_rank_t mds) { - assert(mds != whoami); + ceph_assert(mds != whoami); /* * don't actually forward if non-idempotent! @@ -1073,7 +1073,7 @@ void MDSRank::boot_start(BootStep step, int r) clog->error() << "Error loading MDS rank " << whoami << ": " << cpp_strerror(r); damaged(); - assert(r == 0); // Unreachable, damaged() calls respawn() + ceph_assert(r == 0); // Unreachable, damaged() calls respawn() } else { // Completely unexpected error, give up and die dout(0) << "boot_start encountered an error, failing" << dendl; @@ -1082,7 +1082,7 @@ void MDSRank::boot_start(BootStep step, int r) } } - assert(is_starting() || is_any_replay()); + ceph_assert(is_starting() || is_any_replay()); switch(step) { case MDS_BOOT_INITIAL: @@ -1166,7 +1166,7 @@ void MDSRank::boot_start(BootStep step, int r) } break; case MDS_BOOT_REPLAY_DONE: - assert(is_any_replay()); + ceph_assert(is_any_replay()); // Sessiontable and inotable should be in sync after replay, validate // that they are consistent. 
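Every hunk in this patch is the same mechanical substitution, so the rationale is worth stating once. Plain assert() is compiled out when NDEBUG is defined, so a release build silently stops checking these invariants; ceph_assert() always evaluates its condition and aborts with a logged failure. Below is a minimal sketch of that idea only; demo_ceph_assert is an illustrative name invented here, not Ceph's actual macro (the real one reports through Ceph's own assert-failure handler rather than a bare fprintf/abort):

#include <cstdio>
#include <cstdlib>

// Illustrative stand-in (hypothetical name): unlike assert(), this check is
// never disabled by -DNDEBUG, so it fires in release builds too.
#define demo_ceph_assert(expr)                                       \
  do {                                                               \
    if (!(expr)) {                                                   \
      std::fprintf(stderr, "assert failed: %s at %s:%d\n",           \
                   #expr, __FILE__, __LINE__);                       \
      std::abort();                                                  \
    }                                                                \
  } while (0)

int main() {
  bool handled = true;
  demo_ceph_assert(handled);  // checked in debug and release builds alike;
                              // a plain assert() would vanish under NDEBUG
  return 0;
}

The practical consequence for hunks like the ones above: boot and replay invariants such as is_starting() || is_any_replay() keep aborting loudly in production builds instead of letting execution continue into undefined behavior.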
@@ -1179,7 +1179,7 @@ void MDSRank::boot_start(BootStep step, int r) void MDSRank::validate_sessions() { - assert(mds_lock.is_locked_by_me()); + ceph_assert(mds_lock.is_locked_by_me()); bool valid = true; // Identify any sessions which have state inconsistent with other, @@ -1198,14 +1198,14 @@ void MDSRank::validate_sessions() if (!valid) { damaged(); - assert(valid); + ceph_assert(valid); } } void MDSRank::starting_done() { dout(3) << "starting_done" << dendl; - assert(is_starting()); + ceph_assert(is_starting()); request_state(MDSMap::STATE_ACTIVE); mdlog->start_new_segment(); @@ -1281,7 +1281,7 @@ class MDSRank::C_MDS_StandbyReplayRestart : public MDSInternalContext { public: explicit C_MDS_StandbyReplayRestart(MDSRank *m) : MDSInternalContext(m) {} void finish(int r) override { - assert(!r); + ceph_assert(!r); mds->standby_replay_restart(); } }; @@ -1325,7 +1325,7 @@ void MDSRank::replay_done() if (is_standby_replay()) { // The replay was done in standby state, and we are still in that state - assert(standby_replaying); + ceph_assert(standby_replaying); dout(10) << "setting replay timer" << dendl; timer.add_event_after(g_conf()->mds_replay_interval, new C_MDS_StandbyReplayRestart(this)); @@ -1338,8 +1338,8 @@ void MDSRank::replay_done() return; } else { // Replay is complete, journal read should be up to date - assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos()); - assert(!is_standby_replay()); + ceph_assert(mdlog->get_journaler()->get_read_pos() == mdlog->get_journaler()->get_write_pos()); + ceph_assert(!is_standby_replay()); // Reformat and come back here if (mdlog->get_journaler()->get_stream_format() < g_conf()->mds_journal_format) { @@ -1468,7 +1468,7 @@ void MDSRank::rejoin_done() // The root should always have a subtree! clog->error() << "No subtrees found for root MDS rank!"; damaged(); - assert(mdcache->is_subtrees()); + ceph_assert(mdcache->is_subtrees()); } else { dout(1) << " empty cache, no subtrees, leaving cluster" << dendl; request_state(MDSMap::STATE_STOPPED); @@ -1530,7 +1530,7 @@ void MDSRank::active_start() void MDSRank::recovery_done(int oldstate) { dout(1) << "recovery_done -- successful recovery!" 
<< dendl; - assert(is_clientreplay() || is_active()); + ceph_assert(is_clientreplay() || is_active()); if (oldstate == MDSMap::STATE_CREATING) return; @@ -1600,7 +1600,7 @@ void MDSRank::boot_create() snapserver->save(fin.new_sub()); } - assert(g_conf()->mds_kill_create_at != 1); + ceph_assert(g_conf()->mds_kill_create_at != 1); // ok now journal it mdlog->journal_segment_subtree_map(fin.new_sub()); @@ -1629,7 +1629,7 @@ void MDSRank::stopping_start() } dout(20) << __func__ << " matched " << victims.size() << " sessions" << dendl; - assert(!victims.empty()); + ceph_assert(!victims.empty()); C_GatherBuilder gather(g_ceph_context, new C_MDSInternalNoop); for (const auto &s : victims) { @@ -1656,7 +1656,7 @@ void MDSRankDispatcher::handle_mds_map( const MDSMap &oldmap) { // I am only to be passed MDSMaps in which I hold a rank - assert(whoami != MDS_RANK_NONE); + ceph_assert(whoami != MDS_RANK_NONE); MDSMap::DaemonState oldstate = state; mds_gid_t mds_gid = mds_gid_t(monc->get_global_id()); @@ -1741,7 +1741,7 @@ void MDSRankDispatcher::handle_mds_map( restart.insert(r); handle_mds_failure(r); } else { - assert(info.state == MDSMap::STATE_STARTING || + ceph_assert(info.state == MDSMap::STATE_STARTING || info.state == MDSMap::STATE_ACTIVE); // -> stopped (missing) -> starting -> active restart.insert(r); @@ -1757,7 +1757,7 @@ void MDSRankDispatcher::handle_mds_map( restart.insert(r); handle_mds_failure(r); } else { - assert(info.state == MDSMap::STATE_CREATING || + ceph_assert(info.state == MDSMap::STATE_CREATING || info.state == MDSMap::STATE_STARTING || info.state == MDSMap::STATE_ACTIVE); } @@ -1800,7 +1800,7 @@ void MDSRankDispatcher::handle_mds_map( } else if (is_starting()) { boot_start(); } else if (is_stopping()) { - assert(oldstate == MDSMap::STATE_ACTIVE); + ceph_assert(oldstate == MDSMap::STATE_ACTIVE); stopping_start(); } } @@ -2289,7 +2289,7 @@ void MDSRank::command_flush_path(Formatter *f, std::string_view path) */ void MDSRank::command_flush_journal(Formatter *f) { - assert(f != NULL); + ceph_assert(f != NULL); std::stringstream ss; const int r = _command_flush_journal(ss); @@ -2386,7 +2386,7 @@ int MDSRank::_command_flush_journal(std::ostream& ss) int r = cond.wait(); mds_lock.Lock(); - assert(r == 0); // MDLog is not allowed to raise errors via wait_for_expiry + ceph_assert(r == 0); // MDLog is not allowed to raise errors via wait_for_expiry } dout(5) << __func__ << ": expiry complete, expire_pos/trim_pos is now " << std::hex << @@ -2419,7 +2419,7 @@ int MDSRank::_command_flush_journal(std::ostream& ss) void MDSRank::command_get_subtrees(Formatter *f) { - assert(f != NULL); + ceph_assert(f != NULL); Mutex::Locker l(mds_lock); std::list subtrees; @@ -2872,10 +2872,10 @@ bool MDSRank::evict_client(int64_t session_id, bool wait, bool blacklist, std::ostream& err_ss, Context *on_killed) { - assert(mds_lock.is_locked_by_me()); + ceph_assert(mds_lock.is_locked_by_me()); // Mutually exclusive args - assert(!(wait && on_killed != nullptr)); + ceph_assert(!(wait && on_killed != nullptr)); if (is_any_replay()) { err_ss << "MDS is replaying log"; @@ -2899,7 +2899,7 @@ bool MDSRank::evict_client(int64_t session_id, std::vector cmd = {tmp}; auto kill_client_session = [this, session_id, wait, on_killed](){ - assert(mds_lock.is_locked_by_me()); + ceph_assert(mds_lock.is_locked_by_me()); Session *session = sessionmap.get_session( entity_name_t(CEPH_ENTITY_TYPE_CLIENT, session_id)); if (session) { @@ -2926,7 +2926,7 @@ bool MDSRank::evict_client(int64_t session_id, }; auto apply_blacklist = 
[this, cmd](std::function fn){ - assert(mds_lock.is_locked_by_me()); + ceph_assert(mds_lock.is_locked_by_me()); Context *on_blacklist_done = new FunctionContext([this, fn](int r) { objecter->wait_for_latest_osdmap( @@ -3014,9 +3014,9 @@ bool MDSRankDispatcher::handle_command( std::stringstream *ss, bool *need_reply) { - assert(r != nullptr); - assert(ds != nullptr); - assert(ss != nullptr); + ceph_assert(r != nullptr); + ceph_assert(ds != nullptr); + ceph_assert(ss != nullptr); *need_reply = true; diff --git a/src/mds/MDSRank.h b/src/mds/MDSRank.h index 1bbc0e7be50ae..69a1fb9269f1b 100644 --- a/src/mds/MDSRank.h +++ b/src/mds/MDSRank.h @@ -391,7 +391,7 @@ class MDSRank { waiting_for_active_peer[who].push_back(c); } void wait_for_cluster_recovered(MDSInternalContextBase *c) { - assert(cluster_degraded); + ceph_assert(cluster_degraded); waiting_for_active_peer[MDS_RANK_NONE].push_back(c); } diff --git a/src/mds/MDSTable.cc b/src/mds/MDSTable.cc index dd5b7ba2e6e5b..bc575033019a1 100644 --- a/src/mds/MDSTable.cc +++ b/src/mds/MDSTable.cc @@ -41,7 +41,7 @@ class MDSTableIOContext : public MDSIOContextBase MDSRank *get_mds() override {return ida->mds;} public: explicit MDSTableIOContext(MDSTable *ida_) : ida(ida_) { - assert(ida != NULL); + ceph_assert(ida != NULL); } }; @@ -69,7 +69,7 @@ void MDSTable::save(MDSInternalContextBase *onfinish, version_t v) } dout(10) << "save v " << version << dendl; - assert(is_active()); + ceph_assert(is_active()); bufferlist bl; encode(version, bl); @@ -154,7 +154,7 @@ void MDSTable::load(MDSInternalContextBase *onfinish) { dout(10) << "load" << dendl; - assert(is_undef()); + ceph_assert(is_undef()); state = STATE_OPENING; C_IO_MT_Load *c = new C_IO_MT_Load(this, onfinish); @@ -166,7 +166,7 @@ void MDSTable::load(MDSInternalContextBase *onfinish) void MDSTable::load_2(int r, bufferlist& bl, Context *onfinish) { - assert(is_opening()); + ceph_assert(is_opening()); state = STATE_ACTIVE; if (r == -EBLACKLISTED) { mds->respawn(); @@ -177,7 +177,7 @@ void MDSTable::load_2(int r, bufferlist& bl, Context *onfinish) mds->clog->error() << "error reading table object '" << get_object_name() << "' " << r << " (" << cpp_strerror(r) << ")"; mds->damaged(); - assert(r >= 0); // Should be unreachable because damaged() calls respawn() + ceph_assert(r >= 0); // Should be unreachable because damaged() calls respawn() } dout(10) << "load_2 got " << bl.length() << " bytes" << dendl; @@ -192,7 +192,7 @@ void MDSTable::load_2(int r, bufferlist& bl, Context *onfinish) mds->clog->error() << "error decoding table object '" << get_object_name() << "': " << e.what(); mds->damaged(); - assert(r >= 0); // Should be unreachable because damaged() calls respawn() + ceph_assert(r >= 0); // Should be unreachable because damaged() calls respawn() } if (onfinish) { diff --git a/src/mds/MDSTableClient.cc b/src/mds/MDSTableClient.cc index e633ec346c62a..af0fe5b384153 100644 --- a/src/mds/MDSTableClient.cc +++ b/src/mds/MDSTableClient.cc @@ -49,7 +49,7 @@ public: void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m) { dout(10) << "handle_request " << *m << dendl; - assert(m->table == table); + ceph_assert(m->table == table); if (mds->get_state() < MDSMap::STATE_RESOLVE) { if (mds->get_want_state() == CEPH_MDS_STATE_RESOLVE) { @@ -67,7 +67,7 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m) break; case TABLESERVER_OP_NOTIFY_PREP: - assert(g_conf()->mds_kill_mdstable_at != 9); + ceph_assert(g_conf()->mds_kill_mdstable_at != 9); handle_notify_prep(m); 
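      // Note on the mds_kill_mdstable_at checks in this switch: they are
      // failure-injection hooks rather than ordinary invariants. Tests set
      // the mds_kill_mdstable_at config option to a stage number N, and the
      // ceph_assert(g_conf()->mds_kill_mdstable_at != N) at stage N then
      // fails deliberately, killing the MDS at that exact point so recovery
      // of a half-completed table transaction can be exercised. Converting
      // them to ceph_assert keeps these hooks live in NDEBUG builds as well.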
break; @@ -75,7 +75,7 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m) if (pending_prepare.count(reqid)) { dout(10) << "got agree on " << reqid << " atid " << tid << dendl; - assert(g_conf()->mds_kill_mdstable_at != 3); + ceph_assert(g_conf()->mds_kill_mdstable_at != 3); MDSInternalContextBase *onfinish = pending_prepare[reqid].onfinish; *pending_prepare[reqid].ptid = tid; @@ -89,19 +89,19 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m) } else if (prepared_update.count(tid)) { dout(10) << "got duplicated agree on " << reqid << " atid " << tid << dendl; - assert(prepared_update[tid] == reqid); - assert(!server_ready); + ceph_assert(prepared_update[tid] == reqid); + ceph_assert(!server_ready); } else if (pending_commit.count(tid)) { dout(10) << "stray agree on " << reqid << " tid " << tid << ", already committing, will resend COMMIT" << dendl; - assert(!server_ready); + ceph_assert(!server_ready); // will re-send commit when receiving the server ready message } else { dout(10) << "stray agree on " << reqid << " tid " << tid << ", sending ROLLBACK" << dendl; - assert(!server_ready); + ceph_assert(!server_ready); auto req = MMDSTableRequest::create(table, TABLESERVER_OP_ROLLBACK, 0, tid); mds->send_message_mds(req, mds->get_mds_map()->get_tableserver()); } @@ -112,7 +112,7 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m) pending_commit[tid]->pending_commit_tids[table].count(tid)) { dout(10) << "got ack on tid " << tid << ", logging" << dendl; - assert(g_conf()->mds_kill_mdstable_at != 7); + ceph_assert(g_conf()->mds_kill_mdstable_at != 7); // remove from committing list pending_commit[tid]->pending_commit_tids[table].erase(tid); @@ -127,7 +127,7 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m) break; case TABLESERVER_OP_SERVER_READY: - assert(!server_ready); + ceph_assert(!server_ready); server_ready = true; if (last_reqid == ~0ULL) @@ -139,7 +139,7 @@ void MDSTableClient::handle_request(const MMDSTableRequest::const_ref &m) break; default: - assert(0 == "unrecognized mds_table_client request op"); + ceph_assert(0 == "unrecognized mds_table_client request op"); } } @@ -185,16 +185,16 @@ void MDSTableClient::commit(version_t tid, LogSegment *ls) { dout(10) << "commit " << tid << dendl; - assert(prepared_update.count(tid)); + ceph_assert(prepared_update.count(tid)); prepared_update.erase(tid); - assert(pending_commit.count(tid) == 0); + ceph_assert(pending_commit.count(tid) == 0); pending_commit[tid] = ls; ls->pending_commit_tids[table].insert(tid); notify_commit(tid); - assert(g_conf()->mds_kill_mdstable_at != 4); + ceph_assert(g_conf()->mds_kill_mdstable_at != 4); if (server_ready) { // send message diff --git a/src/mds/MDSTableServer.cc b/src/mds/MDSTableServer.cc index 684801cd9b535..138c0d669faf8 100644 --- a/src/mds/MDSTableServer.cc +++ b/src/mds/MDSTableServer.cc @@ -26,14 +26,14 @@ void MDSTableServer::handle_request(const MMDSTableRequest::const_ref &req) { - assert(req->op >= 0); + ceph_assert(req->op >= 0); switch (req->op) { case TABLESERVER_OP_QUERY: return handle_query(req); case TABLESERVER_OP_PREPARE: return handle_prepare(req); case TABLESERVER_OP_COMMIT: return handle_commit(req); case TABLESERVER_OP_ROLLBACK: return handle_rollback(req); case TABLESERVER_OP_NOTIFY_ACK: return handle_notify_ack(req); - default: assert(0 == "unrecognized mds_table_server request op"); + default: ceph_assert(0 == "unrecognized mds_table_server request op"); } } @@ -56,7 +56,7 @@ void 
MDSTableServer::handle_prepare(const MMDSTableRequest::const_ref &req) dout(7) << "handle_prepare " << *req << dendl; mds_rank_t from = mds_rank_t(req->get_source().num()); - assert(g_conf()->mds_kill_mdstable_at != 1); + ceph_assert(g_conf()->mds_kill_mdstable_at != 1); projected_version++; @@ -73,12 +73,12 @@ void MDSTableServer::_prepare_logged(const MMDSTableRequest::const_ref &req, ver dout(7) << "_create_logged " << *req << " tid " << tid << dendl; mds_rank_t from = mds_rank_t(req->get_source().num()); - assert(g_conf()->mds_kill_mdstable_at != 2); + ceph_assert(g_conf()->mds_kill_mdstable_at != 2); _note_prepare(from, req->reqid); bufferlist out; _prepare(req->bl, req->reqid, from, out); - assert(version == tid); + ceph_assert(version == tid); auto reply = MMDSTableRequest::create(table, TABLESERVER_OP_AGREE, req->reqid, tid); reply->bl = std::move(out); @@ -141,7 +141,7 @@ void MDSTableServer::handle_commit(const MMDSTableRequest::const_ref &req) return; } - assert(g_conf()->mds_kill_mdstable_at != 5); + ceph_assert(g_conf()->mds_kill_mdstable_at != 5); projected_version++; committing_tids.insert(tid); @@ -159,7 +159,7 @@ void MDSTableServer::handle_commit(const MMDSTableRequest::const_ref &req) else { // wtf. dout(0) << "got commit for tid " << tid << " > " << version << dendl; - assert(tid <= version); + ceph_assert(tid <= version); } } @@ -167,7 +167,7 @@ void MDSTableServer::_commit_logged(const MMDSTableRequest::const_ref &req) { dout(7) << "_commit_logged, sending ACK" << dendl; - assert(g_conf()->mds_kill_mdstable_at != 6); + ceph_assert(g_conf()->mds_kill_mdstable_at != 6); version_t tid = req->get_tid(); pending_for_mds.erase(tid); @@ -196,10 +196,10 @@ void MDSTableServer::handle_rollback(const MMDSTableRequest::const_ref &req) { dout(7) << "handle_rollback " << *req << dendl; - assert(g_conf()->mds_kill_mdstable_at != 8); + ceph_assert(g_conf()->mds_kill_mdstable_at != 8); version_t tid = req->get_tid(); - assert(pending_for_mds.count(tid)); - assert(!committing_tids.count(tid)); + ceph_assert(pending_for_mds.count(tid)); + ceph_assert(!committing_tids.count(tid)); projected_version++; committing_tids.insert(tid); @@ -326,7 +326,7 @@ void MDSTableServer::handle_mds_recovery(mds_rank_t who) for (auto p = pending_for_mds.begin(); p != pending_for_mds.end(); ++p) { if (p->second.mds != who) continue; - assert(!pending_notifies.count(p->second.tid)); + ceph_assert(!pending_notifies.count(p->second.tid)); if (p->second.reqid >= next_reqid) next_reqid = p->second.reqid + 1; diff --git a/src/mds/Mantle.cc b/src/mds/Mantle.cc index f98f8760f173f..15d325e86da7c 100644 --- a/src/mds/Mantle.cc +++ b/src/mds/Mantle.cc @@ -82,7 +82,7 @@ int Mantle::balance(std::string_view script, /* set the name of the global mds table */ lua_setglobal(L, "mds"); - assert(lua_gettop(L) == 1); + ceph_assert(lua_gettop(L) == 1); if (lua_pcall(L, 0, 1, 0) != LUA_OK) { mantle_dout(0) << "WARNING: mantle could not execute script: " << lua_tostring(L, -1) << mantle_dendl; diff --git a/src/mds/Migrator.cc b/src/mds/Migrator.cc index 8f4453ed6245f..b4dfe09040568 100644 --- a/src/mds/Migrator.cc +++ b/src/mds/Migrator.cc @@ -75,7 +75,7 @@ protected: } public: explicit MigratorContext(Migrator *mig_) : mig(mig_) { - assert(mig != NULL); + ceph_assert(mig != NULL); } }; @@ -87,7 +87,7 @@ protected: } public: explicit MigratorLogContext(Migrator *mig_) : mig(mig_) { - assert(mig != NULL); + ceph_assert(mig != NULL); } }; @@ -148,7 +148,7 @@ void Migrator::dispatch(const Message::const_ref &m) default: derr << 
"migrator unknown message " << m->get_type() << dendl; - assert(0 == "migrator unknown message"); + ceph_assert(0 == "migrator unknown message"); } } @@ -166,7 +166,7 @@ public: void Migrator::export_empty_import(CDir *dir) { dout(7) << "export_empty_import " << *dir << dendl; - assert(dir->is_subtree_root()); + ceph_assert(dir->is_subtree_root()); if (dir->inode->is_auth()) { dout(7) << " inode is auth" << dendl; @@ -256,7 +256,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer) dout(10) << "export_try_cancel " << *dir << dendl; map::iterator it = export_state.find(dir); - assert(it != export_state.end()); + ceph_assert(it != export_state.end()); int state = it->second.state; switch (state) { @@ -358,7 +358,7 @@ void Migrator::export_try_cancel(CDir *dir, bool notify_peer) // drop locks if (state == EXPORT_LOCKING || state == EXPORT_DISCOVERING) { MDRequestRef mdr = static_cast(mut.get()); - assert(mdr); + ceph_assert(mdr); mds->mdcache->request_kill(mdr); } else if (mut) { mds->locker->drop_locks(mut.get()); @@ -380,7 +380,7 @@ void Migrator::export_cancel_finish(export_state_iterator& it) total_exporting_size -= it->second.approx_size; export_state.erase(it); - assert(dir->state_test(CDir::STATE_EXPORTING)); + ceph_assert(dir->state_test(CDir::STATE_EXPORTING)); dir->clear_exporting(); if (unpin) { @@ -498,19 +498,19 @@ void Migrator::handle_mds_failure_or_stop(mds_rank_t who) break; case IMPORT_DISCOVERED: - assert(diri); + ceph_assert(diri); dout(10) << "import state=discovered : unpinning inode " << *diri << dendl; import_reverse_discovered(df, diri); break; case IMPORT_PREPPING: - assert(dir); + ceph_assert(dir); dout(10) << "import state=prepping : unpinning base+bounds " << *dir << dendl; import_reverse_prepping(dir, q->second); break; case IMPORT_PREPPED: - assert(dir); + ceph_assert(dir); dout(10) << "import state=prepped : unpinning base+bounds, unfreezing " << *dir << dendl; { set bounds; @@ -523,18 +523,18 @@ void Migrator::handle_mds_failure_or_stop(mds_rank_t who) // notify bystanders ; wait in aborting state q->second.state = IMPORT_ABORTING; import_notify_abort(dir, bounds); - assert(g_conf()->mds_kill_import_at != 10); + ceph_assert(g_conf()->mds_kill_import_at != 10); } break; case IMPORT_LOGGINGSTART: - assert(dir); + ceph_assert(dir); dout(10) << "import state=loggingstart : reversing import on " << *dir << dendl; import_reverse(dir); break; case IMPORT_ACKING: - assert(dir); + ceph_assert(dir); // hrm. make this an ambiguous import, and wait for exporter recovery to disambiguate dout(10) << "import state=acking : noting ambiguous import " << *dir << dendl; { @@ -545,13 +545,13 @@ void Migrator::handle_mds_failure_or_stop(mds_rank_t who) break; case IMPORT_FINISHING: - assert(dir); + ceph_assert(dir); dout(10) << "import state=finishing : finishing import on " << *dir << dendl; import_finish(dir, true); break; case IMPORT_ABORTING: - assert(dir); + ceph_assert(dir); dout(10) << "import state=aborting : ignoring repeat failure " << *dir << dendl; break; } @@ -560,7 +560,7 @@ void Migrator::handle_mds_failure_or_stop(mds_rank_t who) if (bystanders_entry != q->second.bystanders.end()) { q->second.bystanders.erase(bystanders_entry); if (q->second.state == IMPORT_ABORTING) { - assert(dir); + ceph_assert(dir); dout(10) << "faking export_notify_ack from mds." << who << " on aborting import " << *dir << " from mds." 
<< q->second.peer << dendl; @@ -630,20 +630,20 @@ void Migrator::audit() continue; if (p->second.state == IMPORT_DISCOVERED) { CInode *in = cache->get_inode(p->first.ino); - assert(in); + ceph_assert(in); continue; } CDir *dir = cache->get_dirfrag(p->first); - assert(dir); + ceph_assert(dir); if (p->second.state == IMPORT_PREPPING) continue; if (p->second.state == IMPORT_ABORTING) { - assert(!dir->is_ambiguous_dir_auth()); - assert(dir->get_dir_auth().first != mds->get_nodeid()); + ceph_assert(!dir->is_ambiguous_dir_auth()); + ceph_assert(dir->get_dir_auth().first != mds->get_nodeid()); continue; } - assert(dir->is_ambiguous_dir_auth()); - assert(dir->authority().first == mds->get_nodeid() || + ceph_assert(dir->is_ambiguous_dir_auth()); + ceph_assert(dir->authority().first == mds->get_nodeid() || dir->authority().second == mds->get_nodeid()); } @@ -658,8 +658,8 @@ void Migrator::audit() p->second.state == EXPORT_FREEZING || p->second.state == EXPORT_CANCELLING) continue; - assert(dir->is_ambiguous_dir_auth()); - assert(dir->authority().first == mds->get_nodeid() || + ceph_assert(dir->is_ambiguous_dir_auth()); + ceph_assert(dir->authority().first == mds->get_nodeid() || dir->authority().second == mds->get_nodeid()); } @@ -723,7 +723,7 @@ class C_MDC_ExportFreeze : public MigratorContext { public: C_MDC_ExportFreeze(Migrator *m, CDir *e, uint64_t t) : MigratorContext(m), ex(e), tid(t) { - assert(ex != NULL); + ceph_assert(ex != NULL); } void finish(int r) override { if (r >= 0) @@ -768,8 +768,8 @@ void Migrator::get_export_lock_set(CDir *dir, set& locks) void Migrator::export_dir(CDir *dir, mds_rank_t dest) { dout(7) << "export_dir " << *dir << " to " << dest << dendl; - assert(dir->is_auth()); - assert(dest != mds->get_nodeid()); + ceph_assert(dir->is_auth()); + ceph_assert(dest != mds->get_nodeid()); if (!(mds->is_active() || mds->is_stopping())) { dout(7) << "i'm not active, no exports for now" << dendl; @@ -834,7 +834,7 @@ void Migrator::export_dir(CDir *dir, mds_rank_t dest) while (n--) ++p; CDir *bd = *p; if (!(bd->is_frozen() || bd->is_freezing())) { - assert(bd->is_auth()); + ceph_assert(bd->is_auth()); dir->state_set(CDir::STATE_AUXSUBTREE); mds->mdcache->adjust_subtree_auth(dir, mds->get_nodeid()); dout(0) << "export_dir: create aux subtree " << *bd << " under " << *dir << dendl; @@ -850,7 +850,7 @@ void Migrator::export_dir(CDir *dir, mds_rank_t dest) MDRequestRef mdr = mds->mdcache->request_start_internal(CEPH_MDS_OP_EXPORTDIR); mdr->more()->export_dir = dir; - assert(export_state.count(dir) == 0); + ceph_assert(export_state.count(dir) == 0); export_state_t& stat = export_state[dir]; num_locking_exports++; stat.state = EXPORT_LOCKING; @@ -1018,14 +1018,14 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) if (it == export_state.end() || it->second.tid != mdr->reqid.tid) { // export must have aborted. 
dout(7) << "export must have aborted " << *mdr << dendl; - assert(mdr->killed || mdr->aborted); + ceph_assert(mdr->killed || mdr->aborted); if (mdr->aborted) { mdr->aborted = false; mds->mdcache->request_kill(mdr); } return; } - assert(it->second.state == EXPORT_LOCKING); + ceph_assert(it->second.state == EXPORT_LOCKING); mds_rank_t dest = it->second.peer; @@ -1081,7 +1081,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) return; } - assert(g_conf()->mds_kill_export_at != 1); + ceph_assert(g_conf()->mds_kill_export_at != 1); auto parent = it->second.parent; @@ -1097,7 +1097,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) auto discover = MExportDirDiscover::create(dir->dirfrag(), path, mds->get_nodeid(), it->second.tid); mds->send_message_mds(discover, dest); - assert(g_conf()->mds_kill_export_at != 2); + ceph_assert(g_conf()->mds_kill_export_at != 2); it->second.last_cum_auth_pins_change = ceph_clock_now(); it->second.approx_size = results.front().second; @@ -1106,7 +1106,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) // start the freeze, but hold it up with an auth_pin. dir->freeze_tree(); - assert(dir->is_freezing_tree()); + ceph_assert(dir->is_freezing_tree()); dir->add_waiter(CDir::WAIT_FROZEN, new C_MDC_ExportFreeze(this, dir, it->second.tid)); return; } @@ -1128,7 +1128,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) for (auto& p : results) { CDir *sub = p.first; - assert(sub != dir); + ceph_assert(sub != dir); dout(7) << " sub " << *sub << dendl; sub->auth_pin(this); @@ -1137,7 +1137,7 @@ void Migrator::dispatch_export_dir(MDRequestRef& mdr, int count) MDRequestRef _mdr = mds->mdcache->request_start_internal(CEPH_MDS_OP_EXPORTDIR); _mdr->more()->export_dir = sub; - assert(export_state.count(sub) == 0); + ceph_assert(export_state.count(sub) == 0); auto& stat = export_state[sub]; num_locking_exports++; stat.state = EXPORT_LOCKING; @@ -1221,8 +1221,8 @@ void Migrator::adjust_export_after_rename(CInode* diri, CDir *olddir) dout(7) << "adjust_export_after_rename " << *diri << dendl; auto &stat = export_state.at(freezing_dir); - assert(stat.state == EXPORT_DISCOVERING || - stat.state == EXPORT_FREEZING); + ceph_assert(stat.state == EXPORT_DISCOVERING || + stat.state == EXPORT_FREEZING); if (g_conf()->mds_thrash_exports) { if (rand() % 3 == 0) { @@ -1265,7 +1265,7 @@ void Migrator::handle_export_discover_ack(const MExportDirDiscoverAck::const_ref { CDir *dir = cache->get_dirfrag(m->get_dirfrag()); mds_rank_t dest(m->get_source().num()); - assert(dir); + ceph_assert(dir); dout(7) << "export_discover_ack from " << m->get_source() << " on " << *dir << dendl; @@ -1278,18 +1278,18 @@ void Migrator::handle_export_discover_ack(const MExportDirDiscoverAck::const_ref it->second.peer != dest) { dout(7) << "must have aborted" << dendl; } else { - assert(it->second.state == EXPORT_DISCOVERING); + ceph_assert(it->second.state == EXPORT_DISCOVERING); if (m->is_success()) { // release locks to avoid deadlock MDRequestRef mdr = static_cast(it->second.mut.get()); - assert(mdr); + ceph_assert(mdr); mds->mdcache->request_finish(mdr); it->second.mut.reset(); // freeze the subtree it->second.state = EXPORT_FREEZING; dir->auth_unpin(this); - assert(g_conf()->mds_kill_export_at != 3); + ceph_assert(g_conf()->mds_kill_export_at != 3); } else { dout(7) << "peer failed to discover (not active?), canceling" << dendl; @@ -1304,7 +1304,7 @@ class C_M_ExportSessionsFlushed : public MigratorContext { public: 
C_M_ExportSessionsFlushed(Migrator *m, CDir *d, uint64_t t) : MigratorContext(m), dir(d), tid(t) { - assert(dir != NULL); + ceph_assert(dir != NULL); } void finish(int r) override { mig->export_sessions_flushed(dir, tid); @@ -1324,8 +1324,8 @@ void Migrator::export_sessions_flushed(CDir *dir, uint64_t tid) return; } - assert(it->second.state == EXPORT_PREPPING || it->second.state == EXPORT_WARNING); - assert(it->second.warning_ack_waiting.count(MDS_RANK_NONE) > 0); + ceph_assert(it->second.state == EXPORT_PREPPING || it->second.state == EXPORT_WARNING); + ceph_assert(it->second.warning_ack_waiting.count(MDS_RANK_NONE) > 0); it->second.warning_ack_waiting.erase(MDS_RANK_NONE); if (it->second.state == EXPORT_WARNING && it->second.warning_ack_waiting.empty()) export_go(dir); // start export. @@ -1341,9 +1341,9 @@ void Migrator::export_frozen(CDir *dir, uint64_t tid) return; } - assert(it->second.state == EXPORT_FREEZING); - assert(dir->is_frozen_tree_root()); - assert(dir->get_cum_auth_pins() == 0); + ceph_assert(it->second.state == EXPORT_FREEZING); + ceph_assert(dir->is_frozen_tree_root()); + ceph_assert(dir->get_cum_auth_pins() == 0); CInode *diri = dir->get_inode(); @@ -1370,7 +1370,7 @@ void Migrator::export_frozen(CDir *dir, uint64_t tid) cache->show_subtrees(); // CDir::_freeze_tree() should have forced it into subtree. - assert(dir->get_dir_auth() == mds_authority_t(mds->get_nodeid(), mds->get_nodeid())); + ceph_assert(dir->get_dir_auth() == mds_authority_t(mds->get_nodeid(), mds->get_nodeid())); // note the bounds. set bounds; cache->get_subtree_bounds(dir, bounds); @@ -1426,7 +1426,7 @@ void Migrator::export_frozen(CDir *dir, uint64_t tid) inodes_added.insert(cur->inode->ino()); // prepend dentry + inode - assert(cur->inode->is_auth()); + ceph_assert(cur->inode->is_auth()); bufferlist bl; cache->replicate_dentry(cur->inode->parent, it->second.peer, bl); dout(7) << " added " << *cur->inode->parent << dendl; @@ -1468,7 +1468,7 @@ void Migrator::export_frozen(CDir *dir, uint64_t tid) assert (g_conf()->mds_kill_export_at != 4); // make sure any new instantiations of caps are flushed out - assert(it->second.warning_ack_waiting.empty()); + ceph_assert(it->second.warning_ack_waiting.empty()); set export_client_set; get_export_client_set(dir, export_client_set); @@ -1501,7 +1501,7 @@ void Migrator::get_export_client_set(CDir *dir, set& client_set) for (auto& q : ls) { if (!q->state_test(CDir::STATE_EXPORTBOUND)) { // include nested dirfrag - assert(q->get_dir_auth().first == CDIR_AUTH_PARENT); + ceph_assert(q->get_dir_auth().first == CDIR_AUTH_PARENT); dfs.push_back(q); // it's ours, recurse (later) } } @@ -1524,7 +1524,7 @@ void Migrator::handle_export_prep_ack(const MExportDirPrepAck::const_ref &m) { CDir *dir = cache->get_dirfrag(m->get_dirfrag()); mds_rank_t dest(m->get_source().num()); - assert(dir); + ceph_assert(dir); dout(7) << "export_prep_ack " << *dir << dendl; @@ -1538,7 +1538,7 @@ void Migrator::handle_export_prep_ack(const MExportDirPrepAck::const_ref &m) dout(7) << "export must have aborted" << dendl; return; } - assert(it->second.state == EXPORT_PREPPING); + ceph_assert(it->second.state == EXPORT_PREPPING); if (!m->is_success()) { dout(7) << "peer couldn't acquire all needed locks or wasn't active, canceling" << dendl; @@ -1551,10 +1551,10 @@ void Migrator::handle_export_prep_ack(const MExportDirPrepAck::const_ref &m) set bounds; cache->get_subtree_bounds(dir, bounds); - assert(it->second.warning_ack_waiting.empty() || + ceph_assert(it->second.warning_ack_waiting.empty() || 
(it->second.warning_ack_waiting.size() == 1 && it->second.warning_ack_waiting.count(MDS_RANK_NONE) > 0)); - assert(it->second.notify_ack_waiting.empty()); + ceph_assert(it->second.notify_ack_waiting.empty()); for (const auto &p : dir->get_replicas()) { if (p.first == it->second.peer) continue; @@ -1576,7 +1576,7 @@ void Migrator::handle_export_prep_ack(const MExportDirPrepAck::const_ref &m) it->second.state = EXPORT_WARNING; - assert(g_conf()->mds_kill_export_at != 6); + ceph_assert(g_conf()->mds_kill_export_at != 6); // nobody to warn? if (it->second.warning_ack_waiting.empty()) export_go(dir); // start export. @@ -1589,7 +1589,7 @@ class C_M_ExportGo : public MigratorContext { public: C_M_ExportGo(Migrator *m, CDir *d, uint64_t t) : MigratorContext(m), dir(d), tid(t) { - assert(dir != NULL); + ceph_assert(dir != NULL); } void finish(int r) override { mig->export_go_synced(dir, tid); @@ -1599,7 +1599,7 @@ public: void Migrator::export_go(CDir *dir) { auto it = export_state.find(dir); - assert(it != export_state.end()); + ceph_assert(it != export_state.end()); dout(7) << "export_go " << *dir << " to " << it->second.peer << dendl; // first sync log to flush out e.g. any cap imports @@ -1617,7 +1617,7 @@ void Migrator::export_go_synced(CDir *dir, uint64_t tid) dout(7) << "export must have aborted on " << dir << dendl; return; } - assert(it->second.state == EXPORT_WARNING); + ceph_assert(it->second.state == EXPORT_WARNING); mds_rank_t dest = it->second.peer; dout(7) << "export_go_synced " << *dir << " to " << dest << dendl; @@ -1625,10 +1625,10 @@ void Migrator::export_go_synced(CDir *dir, uint64_t tid) cache->show_subtrees(); it->second.state = EXPORT_EXPORTING; - assert(g_conf()->mds_kill_export_at != 7); + ceph_assert(g_conf()->mds_kill_export_at != 7); - assert(dir->is_frozen_tree_root()); - assert(dir->get_cum_auth_pins() == 0); + ceph_assert(dir->is_frozen_tree_root()); + ceph_assert(dir->get_cum_auth_pins() == 0); // set ambiguous auth cache->adjust_subtree_auth(dir, mds->get_nodeid(), dest); @@ -1657,7 +1657,7 @@ void Migrator::export_go_synced(CDir *dir, uint64_t tid) // send mds->send_message_mds(req, dest); - assert(g_conf()->mds_kill_export_at != 8); + ceph_assert(g_conf()->mds_kill_export_at != 8); mds->hit_export_target(dest, num_exported_inodes+1); @@ -1682,7 +1682,7 @@ void Migrator::encode_export_inode(CInode *in, bufferlist& enc_state, map& exported_client_metadata_map) { dout(7) << "encode_export_inode " << *in << dendl; - assert(!in->is_replica(mds->get_nodeid())); + ceph_assert(!in->is_replica(mds->get_nodeid())); // relax locks? if (!in->is_replicated()) { @@ -1742,7 +1742,7 @@ void Migrator::finish_export_inode_caps(CInode *in, mds_rank_t peer, cap->get_cap_id(), cap->get_mseq(), mds->get_osd_epoch_barrier()); map::iterator q = peer_imported.find(p.first); - assert(q != peer_imported.end()); + ceph_assert(q != peer_imported.end()); m->set_cap_peer(q->second.cap_id, q->second.issue_seq, q->second.mseq, (q->second.cap_id > 0 ? 
peer : -1), 0); mds->send_message_client_counted(m, p.first); @@ -1776,7 +1776,7 @@ void Migrator::finish_export_inode(CInode *in, mds_rank_t peer, in->policylock.export_twiddle(); // mark auth - assert(in->is_auth()); + ceph_assert(in->is_auth()); in->state_clear(CInode::STATE_AUTH); in->replica_nonce = CInode::EXPORT_NONCE; @@ -1807,7 +1807,7 @@ uint64_t Migrator::encode_export_dir(bufferlist& exportbl, dout(7) << "encode_export_dir " << *dir << " " << dir->get_num_head_items() << " head items" << dendl; - assert(dir->get_projected_version() == dir->get_version()); + ceph_assert(dir->get_projected_version() == dir->get_version()); #ifdef MDS_VERIFY_FRAGSTAT if (dir->is_complete()) @@ -1875,7 +1875,7 @@ uint64_t Migrator::encode_export_dir(bufferlist& exportbl, CDir *t = *p; if (!t->state_test(CDir::STATE_EXPORTBOUND)) { // include nested dirfrag - assert(t->get_dir_auth().first == CDIR_AUTH_PARENT); + ceph_assert(t->get_dir_auth().first == CDIR_AUTH_PARENT); subdirs.push_front(t); // it's ours, recurse (later) } } @@ -1898,7 +1898,7 @@ void Migrator::finish_export_dir(CDir *dir, mds_rank_t peer, dir->clear_replica_map(); // mark - assert(dir->is_auth()); + ceph_assert(dir->is_auth()); dir->state_clear(CDir::STATE_AUTH); dir->remove_bloom(); dir->replica_nonce = CDir::EXPORT_NONCE; @@ -1955,8 +1955,8 @@ void Migrator::handle_export_ack(const MExportDirAck::const_ref &m) { CDir *dir = cache->get_dirfrag(m->get_dirfrag()); mds_rank_t dest(m->get_source().num()); - assert(dir); - assert(dir->is_frozen_tree_root()); // i'm exporting! + ceph_assert(dir); + ceph_assert(dir->is_frozen_tree_root()); // i'm exporting! // yay! dout(7) << "handle_export_ack " << *dir << dendl; @@ -1964,9 +1964,9 @@ void Migrator::handle_export_ack(const MExportDirAck::const_ref &m) mds->hit_export_target(dest, -1); map::iterator it = export_state.find(dir); - assert(it != export_state.end()); - assert(it->second.state == EXPORT_EXPORTING); - assert(it->second.tid == m->get_tid()); + ceph_assert(it != export_state.end()); + ceph_assert(it->second.state == EXPORT_EXPORTING); + ceph_assert(it->second.tid == m->get_tid()); auto bp = m->imported_caps.cbegin(); decode(it->second.peer_imported, bp); @@ -2006,7 +2006,7 @@ void Migrator::export_notify_abort(CDir *dir, export_state_t& stat, set& { dout(7) << "export_notify_abort " << *dir << dendl; - assert(stat.state == EXPORT_CANCELLING); + ceph_assert(stat.state == EXPORT_CANCELLING); if (stat.notify_ack_waiting.empty()) { stat.state = EXPORT_CANCELLED; @@ -2160,7 +2160,7 @@ void Migrator::handle_export_notify_ack(const MExportDirNotifyAck::const_ref &m) { CDir *dir = cache->get_dirfrag(m->get_dirfrag()); mds_rank_t dest(m->get_source().num()); - assert(dir); + ceph_assert(dir); mds_rank_t from = mds_rank_t(m->get_source().num()); mds->hit_export_target(dest, -1); @@ -2200,7 +2200,7 @@ void Migrator::handle_export_notify_ack(const MExportDirNotifyAck::const_ref &m) // reversing import dout(7) << "handle_export_notify_ack from " << m->get_source() << ": aborting import on " << *dir << dendl; - assert(stat.bystanders.count(from)); + ceph_assert(stat.bystanders.count(from)); stat.bystanders.erase(from); if (stat.bystanders.empty()) import_reverse_unfreeze(dir); @@ -2227,7 +2227,7 @@ void Migrator::export_finish(CDir *dir) } else { dout(7) << "not sending MExportDirFinish last, dest has failed" << dendl; } - assert(g_conf()->mds_kill_export_at != 13); + ceph_assert(g_conf()->mds_kill_export_at != 13); // finish export (adjust local cache state) int num_dentries = 0; @@ -2235,7 
+2235,7 @@ void Migrator::export_finish(CDir *dir) finish_export_dir(dir, it->second.peer, it->second.peer_imported, finished, &num_dentries); - assert(!dir->is_auth()); + ceph_assert(!dir->is_auth()); cache->adjust_subtree_auth(dir, it->second.peer); // unpin bounds @@ -2279,7 +2279,7 @@ void Migrator::export_finish(CDir *dir) total_exporting_size -= it->second.approx_size; export_state.erase(it); - assert(dir->state_test(CDir::STATE_EXPORTING)); + ceph_assert(dir->state_test(CDir::STATE_EXPORTING)); dir->clear_exporting(); cache->show_subtrees(); @@ -2331,7 +2331,7 @@ private: void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bool started) { mds_rank_t from = m->get_source_mds(); - assert(from != mds->get_nodeid()); + ceph_assert(from != mds->get_nodeid()); dout(7) << "handle_export_discover on " << m->get_path() << dendl; @@ -2348,7 +2348,7 @@ void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bo import_state_t *p_state; map::iterator it = import_state.find(df); if (!started) { - assert(it == import_state.end()); + ceph_assert(it == import_state.end()); p_state = &import_state[df]; p_state->state = IMPORT_DISCOVERING; p_state->peer = from; @@ -2361,7 +2361,7 @@ void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bo dout(7) << " dropping obsolete message" << dendl; return; } - assert(it->second.state == IMPORT_DISCOVERING); + ceph_assert(it->second.state == IMPORT_DISCOVERING); p_state = &it->second; } @@ -2397,7 +2397,7 @@ void Migrator::handle_export_discover(const MExportDirDiscover::const_ref &m, bo p_state->state = IMPORT_DISCOVERED; // pin inode in the cache (for now) - assert(in->is_dir()); + ceph_assert(in->is_dir()); in->get(CInode::PIN_IMPORTING); // reply @@ -2432,20 +2432,20 @@ void Migrator::handle_export_cancel(const MExportDirCancel::const_ref &m) dirfrag_t df = m->get_dirfrag(); map::iterator it = import_state.find(df); if (it == import_state.end()) { - assert(0 == "got export_cancel in weird state"); + ceph_assert(0 == "got export_cancel in weird state"); } else if (it->second.state == IMPORT_DISCOVERING) { import_reverse_discovering(df); } else if (it->second.state == IMPORT_DISCOVERED) { CInode *in = cache->get_inode(df.ino); - assert(in); + ceph_assert(in); import_reverse_discovered(df, in); } else if (it->second.state == IMPORT_PREPPING) { CDir *dir = mds->mdcache->get_dirfrag(df); - assert(dir); + ceph_assert(dir); import_reverse_prepping(dir, it->second); } else if (it->second.state == IMPORT_PREPPED) { CDir *dir = mds->mdcache->get_dirfrag(df); - assert(dir); + ceph_assert(dir); set bounds; cache->get_subtree_bounds(dir, bounds); import_remove_pins(dir, bounds); @@ -2453,7 +2453,7 @@ void Migrator::handle_export_cancel(const MExportDirCancel::const_ref &m) cache->adjust_subtree_auth(dir, it->second.peer); import_reverse_unfreeze(dir); } else { - assert(0 == "got export_cancel in weird state"); + ceph_assert(0 == "got export_cancel in weird state"); } } @@ -2481,7 +2481,7 @@ private: void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_assim) { mds_rank_t oldauth = mds_rank_t(m->get_source().num()); - assert(oldauth != mds->get_nodeid()); + ceph_assert(oldauth != mds->get_nodeid()); CDir *dir; CInode *diri; @@ -2490,11 +2490,11 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a // assimilate root dir. 
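  // Control-flow note, inferred from the asserts and douts below:
  // handle_export_prep can be re-entered while bound dirfrags are still
  // being opened. On the first pass (did_assim == false) the importer must
  // be in IMPORT_DISCOVERED, builds its replica of the base dirfrag, and
  // moves to IMPORT_PREPPING; a subsequent pass (did_assim == true) expects
  // IMPORT_PREPPING and the same peer, otherwise the message is obsolete
  // and dropped.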
map::iterator it = import_state.find(m->get_dirfrag()); if (!did_assim) { - assert(it != import_state.end()); - assert(it->second.state == IMPORT_DISCOVERED); - assert(it->second.peer == oldauth); + ceph_assert(it != import_state.end()); + ceph_assert(it->second.state == IMPORT_DISCOVERED); + ceph_assert(it->second.peer == oldauth); diri = cache->get_inode(m->get_dirfrag().ino); - assert(diri); + ceph_assert(diri); auto p = m->basedir.cbegin(); dir = cache->add_replica_dir(p, diri, oldauth, finished); dout(7) << "handle_export_prep on " << *dir << " (first pass)" << dendl; @@ -2505,15 +2505,15 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a dout(7) << "handle_export_prep obsolete message, dropping" << dendl; return; } - assert(it->second.state == IMPORT_PREPPING); - assert(it->second.peer == oldauth); + ceph_assert(it->second.state == IMPORT_PREPPING); + ceph_assert(it->second.peer == oldauth); dir = cache->get_dirfrag(m->get_dirfrag()); - assert(dir); + ceph_assert(dir); dout(7) << "handle_export_prep on " << *dir << " (subsequent pass)" << dendl; diri = dir->get_inode(); } - assert(dir->is_auth() == false); + ceph_assert(dir->is_auth() == false); cache->show_subtrees(); @@ -2532,7 +2532,7 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a it->second.state = IMPORT_PREPPING; it->second.bound_ls = m->get_bounds(); it->second.bystanders = m->get_bystanders(); - assert(g_conf()->mds_kill_import_at != 3); + ceph_assert(g_conf()->mds_kill_import_at != 3); // bystander list dout(7) << "bystanders are " << it->second.bystanders << dendl; @@ -2555,18 +2555,18 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a CDir *cur = 0; if (start == 'd') { cur = cache->get_dirfrag(df); - assert(cur); + ceph_assert(cur); dout(10) << " had " << *cur << dendl; } else if (start == 'f') { CInode *in = cache->get_inode(df.ino); - assert(in); + ceph_assert(in); dout(10) << " had " << *in << dendl; cur = cache->add_replica_dir(q, in, oldauth, finished); dout(10) << " added " << *cur << dendl; } else if (start == '-') { // nothing } else - assert(0 == "unrecognized start char"); + ceph_assert(0 == "unrecognized start char"); while (!q.end()) { CDentry *dn = cache->add_replica_dentry(q, cur, finished); @@ -2585,7 +2585,7 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a p != import_bound_fragset.end(); ++p) { CInode *in = cache->get_inode(p->first); - assert(in); + ceph_assert(in); in->get_stickydirs(); dout(7) << " set stickydirs on bound inode " << *in << dendl; } @@ -2608,7 +2608,7 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a p != import_bound_fragset.end(); ++p) { CInode *in = cache->get_inode(p->first); - assert(in); + ceph_assert(in); // map fragset into a frag_t list, based on the inode fragtree list fglist; @@ -2672,7 +2672,7 @@ void Migrator::handle_export_prep(const MExportDirPrep::const_ref &m, bool did_a dout(7) << " sending export_prep_ack on " << *dir << dendl; mds->send_message(MExportDirPrepAck::create(dir->dirfrag(), success, m->get_tid()), m->get_connection()); - assert(g_conf()->mds_kill_import_at != 4); + ceph_assert(g_conf()->mds_kill_import_at != 4); } @@ -2697,18 +2697,18 @@ void Migrator::handle_export_dir(const MExportDir::const_ref &m) { assert (g_conf()->mds_kill_import_at != 5); CDir *dir = cache->get_dirfrag(m->dirfrag); - assert(dir); + ceph_assert(dir); mds_rank_t oldauth = mds_rank_t(m->get_source().num()); dout(7) 
<< "handle_export_dir importing " << *dir << " from " << oldauth << dendl; - assert(!dir->is_auth()); + ceph_assert(!dir->is_auth()); map::iterator it = import_state.find(m->dirfrag); - assert(it != import_state.end()); - assert(it->second.state == IMPORT_PREPPED); - assert(it->second.tid == m->get_tid()); - assert(it->second.peer == oldauth); + ceph_assert(it != import_state.end()); + ceph_assert(it->second.state == IMPORT_PREPPED); + ceph_assert(it->second.tid == m->get_tid()); + ceph_assert(it->second.peer == oldauth); if (!dir->get_inode()->dirfragtree.is_leaf(dir->get_frag())) dir->get_inode()->dirfragtree.force_to_leaf(g_ceph_context, dir->get_frag()); @@ -2733,7 +2733,7 @@ void Migrator::handle_export_dir(const MExportDir::const_ref &m) map client_metadata_map; decode(client_map, cmp); decode(client_metadata_map, cmp); - assert(cmp.end()); + ceph_assert(cmp.end()); le->cmapv = mds->server->prepare_force_open_sessions(client_map, client_metadata_map, onlogged->imported_session_map); encode(client_map, le->client_map, mds->mdsmap->get_up_features()); @@ -2757,7 +2757,7 @@ void Migrator::handle_export_dir(const MExportDir::const_ref &m) set import_bounds; for (const auto &bound : m->bounds) { CDir *bd = cache->get_dirfrag(bound); - assert(bd); + ceph_assert(bd); le->metablob.add_dir(bd, false); // note that parent metadata is already in the event import_bounds.insert(bd); } @@ -2804,7 +2804,7 @@ void Migrator::import_remove_pins(CDir *dir, set& bounds) continue; did.insert(p->ino); CInode *in = cache->get_inode(p->ino); - assert(in); + ceph_assert(in); in->put_stickydirs(); } @@ -2818,7 +2818,7 @@ void Migrator::import_remove_pins(CDir *dir, set& bounds) } else if (stat.state >= IMPORT_PREPPED) { // bounding dirfrags for (auto bd : bounds) { - assert(bd->state_test(CDir::STATE_IMPORTBOUND)); + ceph_assert(bd->state_test(CDir::STATE_IMPORTBOUND)); bd->put(CDir::PIN_IMPORTBOUND); bd->state_clear(CDir::STATE_IMPORTBOUND); } @@ -2855,7 +2855,7 @@ void Migrator::import_reverse(CDir *dir) import_remove_pins(dir, bounds); // update auth, with possible subtree merge. - assert(dir->is_subtree_root()); + ceph_assert(dir->is_subtree_root()); if (mds->is_resolve()) cache->trim_non_auth_subtree(dir); @@ -2939,7 +2939,7 @@ void Migrator::import_reverse(CDir *dir) ++q) { Capability *cap = in->get_client_cap(q->first); if (!cap) { - assert(!stat.session_map.count(q->first)); + ceph_assert(!stat.session_map.count(q->first)); continue; } if (cap->is_importing()) @@ -3011,7 +3011,7 @@ void Migrator::import_notify_abort(CDir *dir, set& bounds) void Migrator::import_reverse_unfreeze(CDir *dir) { dout(7) << "import_reverse_unfreeze " << *dir << dendl; - assert(!dir->is_auth()); + ceph_assert(!dir->is_auth()); cache->discard_delayed_expire(dir); dir->unfreeze_tree(); if (dir->is_subtree_root()) @@ -3025,7 +3025,7 @@ void Migrator::import_reverse_final(CDir *dir) // clean up map::iterator it = import_state.find(dir->dirfrag()); - assert(it != import_state.end()); + ceph_assert(it != import_state.end()); MutationRef mut = it->second.mut; import_state.erase(it); @@ -3095,12 +3095,12 @@ void Migrator::import_logged_start(dirfrag_t df, CDir *dir, mds_rank_t from, void Migrator::handle_export_finish(const MExportDirFinish::const_ref &m) { CDir *dir = cache->get_dirfrag(m->get_dirfrag()); - assert(dir); + ceph_assert(dir); dout(7) << "handle_export_finish on " << *dir << (m->is_last() ? 
" last" : "") << dendl; map::iterator it = import_state.find(m->get_dirfrag()); - assert(it != import_state.end()); - assert(it->second.tid == m->get_tid()); + ceph_assert(it != import_state.end()); + ceph_assert(it->second.tid == m->get_tid()); import_finish(dir, false, m->is_last()); } @@ -3110,23 +3110,23 @@ void Migrator::import_finish(CDir *dir, bool notify, bool last) dout(7) << "import_finish on " << *dir << dendl; map::iterator it = import_state.find(dir->dirfrag()); - assert(it != import_state.end()); - assert(it->second.state == IMPORT_ACKING || it->second.state == IMPORT_FINISHING); + ceph_assert(it != import_state.end()); + ceph_assert(it->second.state == IMPORT_ACKING || it->second.state == IMPORT_FINISHING); if (it->second.state == IMPORT_ACKING) { - assert(dir->is_auth()); + ceph_assert(dir->is_auth()); cache->adjust_subtree_auth(dir, mds->get_nodeid(), mds->get_nodeid()); } // log finish - assert(g_conf()->mds_kill_import_at != 9); + ceph_assert(g_conf()->mds_kill_import_at != 9); if (it->second.state == IMPORT_ACKING) { for (map >::iterator p = it->second.peer_exports.begin(); p != it->second.peer_exports.end(); ++p) { CInode *in = p->first; - assert(in->is_auth()); + ceph_assert(in->is_auth()); for (map::iterator q = p->second.begin(); q != p->second.end(); ++q) { @@ -3136,7 +3136,7 @@ void Migrator::import_finish(CDir *dir, bool notify, bool last) Session *session = r->second.first; Capability *cap = in->get_client_cap(q->first); - assert(cap); + ceph_assert(cap); cap->merge(q->second, true); cap->clear_importing(); mds->mdcache->do_cap_import(session, in, cap, q->second.cap_id, q->second.seq, @@ -3152,7 +3152,7 @@ void Migrator::import_finish(CDir *dir, bool notify, bool last) } if (!last) { - assert(it->second.state == IMPORT_ACKING); + ceph_assert(it->second.state == IMPORT_ACKING); it->second.state = IMPORT_FINISHING; return; } @@ -3242,7 +3242,7 @@ void Migrator::decode_import_inode(CDentry *dn, bufferlist::const_iterator& blp, // link before state -- or not! -sage if (dn->get_linkage()->get_inode() != in) { - assert(!dn->get_linkage()->get_inode()); + ceph_assert(!dn->get_linkage()->get_inode()); dn->dir->link_primary_inode(dn, in); } @@ -3364,9 +3364,9 @@ int Migrator::decode_import_dir(bufferlist::const_iterator& blp, decode(df, blp); CInode *diri = cache->get_inode(df.ino); - assert(diri); + ceph_assert(diri); CDir *dir = diri->get_or_open_dirfrag(mds->mdcache, df.frag); - assert(dir); + ceph_assert(dir); dout(7) << "decode_import_dir " << *dir << dendl; @@ -3433,7 +3433,7 @@ int Migrator::decode_import_dir(bufferlist::const_iterator& blp, if (icode == 'N') { // null dentry - assert(dn->get_linkage()->is_null()); + ceph_assert(dn->get_linkage()->is_null()); // fall thru } @@ -3444,14 +3444,14 @@ int Migrator::decode_import_dir(bufferlist::const_iterator& blp, decode(ino, blp); decode(d_type, blp); if (dn->get_linkage()->is_remote()) { - assert(dn->get_linkage()->get_remote_ino() == ino); + ceph_assert(dn->get_linkage()->get_remote_ino() == ino); } else { dir->link_remote_inode(dn, ino, d_type); } } else if (icode == 'I') { // inode - assert(le); + ceph_assert(le); decode_import_inode(dn, blp, oldauth, ls, peer_exports, updated_scatterlocks); } @@ -3524,10 +3524,10 @@ void Migrator::export_caps(CInode *in) mds_rank_t dest = in->authority().first; dout(7) << "export_caps to mds." 
<< dest << " " << *in << dendl; - assert(in->is_any_caps()); - assert(!in->is_auth()); - assert(!in->is_ambiguous_auth()); - assert(!in->state_test(CInode::STATE_EXPORTINGCAPS)); + ceph_assert(in->is_any_caps()); + ceph_assert(!in->is_auth()); + ceph_assert(!in->is_ambiguous_auth()); + ceph_assert(!in->state_test(CInode::STATE_EXPORTINGCAPS)); auto ex = MExportCaps::create(); ex->ino = in->ino(); @@ -3542,7 +3542,7 @@ void Migrator::handle_export_caps_ack(const MExportCapsAck::const_ref &ack) mds_rank_t from = ack->get_source().num(); CInode *in = cache->get_inode(ack->ino); if (in) { - assert(!in->is_auth()); + ceph_assert(!in->is_auth()); dout(10) << "handle_export_caps_ack " << *ack << " from " << ack->get_source() << " on " << *in << dendl; @@ -3608,8 +3608,8 @@ void Migrator::handle_export_caps(const MExportCaps::const_ref &ex) dout(10) << "handle_export_caps " << *ex << " from " << ex->get_source() << dendl; CInode *in = cache->get_inode(ex->ino); - assert(in); - assert(in->is_auth()); + ceph_assert(in); + ceph_assert(in->is_auth()); // FIXME if (!in->can_auth_pin()) { @@ -3629,7 +3629,7 @@ void Migrator::handle_export_caps(const MExportCaps::const_ref &ex) // decode new caps auto blp = ex->cap_bl.cbegin(); decode_import_inode_caps(in, false, blp, finish->peer_exports); - assert(!finish->peer_exports.empty()); // thus, inode is pinned. + ceph_assert(!finish->peer_exports.empty()); // thus, inode is pinned. // journal open client sessions ESessions *le = new ESessions(pv, std::move(client_map), @@ -3646,13 +3646,13 @@ void Migrator::logged_import_caps(CInode *in, { dout(10) << "logged_import_caps on " << *in << dendl; // see export_go() vs export_go_synced() - assert(in->is_auth()); + ceph_assert(in->is_auth()); // force open client sessions and finish cap import mds->server->finish_force_open_sessions(imported_session_map); auto it = peer_exports.find(in); - assert(it != peer_exports.end()); + ceph_assert(it != peer_exports.end()); // clients will release caps from the exporter when they receive the cap import message. map imported_caps; diff --git a/src/mds/Migrator.h b/src/mds/Migrator.h index 2065b9bd27df0..0dfb78b17a7ed 100644 --- a/src/mds/Migrator.h +++ b/src/mds/Migrator.h @@ -260,18 +260,18 @@ public: int get_import_state(dirfrag_t df) const { map::const_iterator it = import_state.find(df); - assert(it != import_state.end()); + ceph_assert(it != import_state.end()); return it->second.state; } int get_import_peer(dirfrag_t df) const { map::const_iterator it = import_state.find(df); - assert(it != import_state.end()); + ceph_assert(it != import_state.end()); return it->second.peer; } int get_export_state(CDir *dir) const { map::const_iterator it = export_state.find(dir); - assert(it != export_state.end()); + ceph_assert(it != export_state.end()); return it->second.state; } // this returns true if we are export @dir, @@ -280,21 +280,21 @@ public: // only returns meaningful results during EXPORT_WARNING state. 
bool export_has_warned(CDir *dir, mds_rank_t who) { map::iterator it = export_state.find(dir); - assert(it != export_state.end()); - assert(it->second.state == EXPORT_WARNING); + ceph_assert(it != export_state.end()); + ceph_assert(it->second.state == EXPORT_WARNING); return (it->second.warning_ack_waiting.count(who) == 0); } bool export_has_notified(CDir *dir, mds_rank_t who) const { map::const_iterator it = export_state.find(dir); - assert(it != export_state.end()); - assert(it->second.state == EXPORT_NOTIFYING); + ceph_assert(it != export_state.end()); + ceph_assert(it->second.state == EXPORT_NOTIFYING); return (it->second.notify_ack_waiting.count(who) == 0); } void export_freeze_inc_num_waiters(CDir *dir) { map::iterator it = export_state.find(dir); - assert(it != export_state.end()); + ceph_assert(it != export_state.end()); it->second.num_remote_waiters++; } void find_stale_export_freeze(); diff --git a/src/mds/Mutation.cc b/src/mds/Mutation.cc index 9cc8311379025..6c4660ec7fa81 100644 --- a/src/mds/Mutation.cc +++ b/src/mds/Mutation.cc @@ -28,7 +28,7 @@ void MutationImpl::pin(MDSCacheObject *o) void MutationImpl::unpin(MDSCacheObject *o) { - assert(pins.count(o)); + ceph_assert(pins.count(o)); o->put(MDSCacheObject::PIN_REQUEST); pins.erase(o); } @@ -52,7 +52,7 @@ void MutationImpl::drop_pins() void MutationImpl::start_locking(SimpleLock *lock, int target) { - assert(locking == NULL); + ceph_assert(locking == NULL); pin(lock->get_parent()); locking = lock; locking_target_mds = target; @@ -60,7 +60,7 @@ void MutationImpl::start_locking(SimpleLock *lock, int target) void MutationImpl::finish_locking(SimpleLock *lock) { - assert(locking == lock); + ceph_assert(locking == lock); locking = NULL; locking_target_mds = -1; } @@ -82,7 +82,7 @@ void MutationImpl::auth_pin(MDSCacheObject *object) void MutationImpl::auth_unpin(MDSCacheObject *object) { - assert(auth_pins.count(object)); + ceph_assert(auth_pins.count(object)); object->auth_unpin(this); auth_pins.erase(object); } @@ -92,7 +92,7 @@ void MutationImpl::drop_local_auth_pins() for (set::iterator it = auth_pins.begin(); it != auth_pins.end(); ++it) { - assert((*it)->is_auth()); + ceph_assert((*it)->is_auth()); (*it)->auth_unpin(this); } auth_pins.clear(); @@ -215,7 +215,7 @@ bool MDRequestImpl::did_ino_allocation() const bool MDRequestImpl::freeze_auth_pin(CInode *inode) { - assert(!more()->rename_inode || more()->rename_inode == inode); + ceph_assert(!more()->rename_inode || more()->rename_inode == inode); more()->rename_inode = inode; more()->is_freeze_authpin = true; auth_pin(inode); @@ -229,7 +229,7 @@ bool MDRequestImpl::freeze_auth_pin(CInode *inode) void MDRequestImpl::unfreeze_auth_pin(bool clear_inode) { - assert(more()->is_freeze_authpin); + ceph_assert(more()->is_freeze_authpin); CInode *inode = more()->rename_inode; if (inode->is_frozen_auth_pin()) inode->unfreeze_auth_pin(); @@ -248,8 +248,8 @@ void MDRequestImpl::set_remote_frozen_auth_pin(CInode *inode) void MDRequestImpl::set_ambiguous_auth(CInode *inode) { - assert(!more()->rename_inode || more()->rename_inode == inode); - assert(!more()->is_ambiguous_auth); + ceph_assert(!more()->rename_inode || more()->rename_inode == inode); + ceph_assert(!more()->is_ambiguous_auth); inode->set_ambiguous_auth(); more()->rename_inode = inode; @@ -259,7 +259,7 @@ void MDRequestImpl::set_ambiguous_auth(CInode *inode) void MDRequestImpl::clear_ambiguous_auth() { CInode *inode = more()->rename_inode; - assert(inode && more()->is_ambiguous_auth); + ceph_assert(inode && 
+  ceph_assert(inode && more()->is_ambiguous_auth);
   inode->clear_ambiguous_auth();
   more()->is_ambiguous_auth = false;
 }
@@ -295,13 +295,13 @@ const filepath& MDRequestImpl::get_filepath2()
 void MDRequestImpl::set_filepath(const filepath& fp)
 {
-  assert(!client_request);
+  ceph_assert(!client_request);
   more()->filepath1 = fp;
 }
 void MDRequestImpl::set_filepath2(const filepath& fp)
 {
-  assert(!client_request);
+  ceph_assert(!client_request);
   more()->filepath2 = fp;
 }
diff --git a/src/mds/Mutation.h b/src/mds/Mutation.h
index fdd4a016da7a7..0446456233576 100644
--- a/src/mds/Mutation.h
+++ b/src/mds/Mutation.h
@@ -95,13 +95,13 @@ public:
     reqid(ri), attempt(att), slave_to_mds(slave_to) { }
   ~MutationImpl() override {
-    assert(locking == NULL);
-    assert(pins.empty());
-    assert(auth_pins.empty());
-    assert(xlocks.empty());
-    assert(rdlocks.empty());
-    assert(wrlocks.empty());
-    assert(remote_wrlocks.empty());
+    ceph_assert(locking == NULL);
+    ceph_assert(pins.empty());
+    ceph_assert(auth_pins.empty());
+    ceph_assert(xlocks.empty());
+    ceph_assert(rdlocks.empty());
+    ceph_assert(wrlocks.empty());
+    ceph_assert(remote_wrlocks.empty());
   }
   bool is_master() const { return slave_to_mds == MDS_RANK_NONE; }
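A note on the substitution itself: assert(3) compiles away when NDEBUG is defined, so a release build would silently lose every invariant check above, while ceph_assert is evaluated unconditionally and aborts with a logged backtrace. A minimal sketch of the idea follows; this is not Ceph's actual macro, which lives in src/include/ceph_assert.h and routes through __ceph_assert_fail:

    // Sketch only: an always-on assert in the spirit of ceph_assert().
    #include <cstdio>
    #include <cstdlib>

    [[noreturn]] inline void my_assert_fail(const char *expr,
                                            const char *file, int line)
    {
      std::fprintf(stderr, "%s:%d: failed check: %s\n", file, line, expr);
      std::abort();  // the real macro also dumps a backtrace to the log
    }

    #define MY_CEPH_ASSERT(expr) \
      ((expr) ? (void)0 : my_assert_fail(#expr, __FILE__, __LINE__))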
diff --git a/src/mds/OpenFileTable.cc b/src/mds/OpenFileTable.cc
index 86d5579274ffd..a8795d2ade8f2 100644
--- a/src/mds/OpenFileTable.cc
+++ b/src/mds/OpenFileTable.cc
@@ -36,8 +36,8 @@ void OpenFileTable::get_ref(CInode *in)
   do {
     auto p = anchor_map.find(in->ino());
     if (p != anchor_map.end()) {
-      assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
-      assert(p->second.nref > 0);
+      ceph_assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
+      ceph_assert(p->second.nref > 0);
       p->second.nref++;
       break;
     }
@@ -48,13 +48,13 @@ void OpenFileTable::get_ref(CInode *in)
     auto ret = anchor_map.emplace(std::piecewise_construct,
                                   std::forward_as_tuple(in->ino()),
                                   std::forward_as_tuple(in->ino(), (pin ? pin->ino() : inodeno_t(0)),
                                                         (dn ? dn->get_name() : string()), in->d_type(), 1));
-    assert(ret.second == true);
+    ceph_assert(ret.second == true);
     in->state_set(CInode::STATE_TRACKEDBYOFT);
     auto ret1 = dirty_items.emplace(in->ino(), (int)DIRTY_NEW);
     if (!ret1.second) {
       int omap_idx = ret1.first->second;
-      assert(omap_idx >= 0);
+      ceph_assert(omap_idx >= 0);
       ret.first->second.omap_idx = omap_idx;
     }
@@ -65,10 +65,10 @@ void OpenFileTable::get_ref(CInode *in)
 void OpenFileTable::put_ref(CInode *in)
 {
   do {
-    assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
+    ceph_assert(in->state_test(CInode::STATE_TRACKEDBYOFT));
     auto p = anchor_map.find(in->ino());
-    assert(p != anchor_map.end());
-    assert(p->second.nref > 0);
+    ceph_assert(p != anchor_map.end());
+    ceph_assert(p->second.nref > 0);
     if (p->second.nref > 1) {
       p->second.nref--;
@@ -78,11 +78,11 @@ void OpenFileTable::put_ref(CInode *in)
     CDentry *dn = in->get_parent_dn();
     CInode *pin = dn ? dn->get_dir()->get_inode() : nullptr;
     if (dn) {
-      assert(p->second.dirino == pin->ino());
-      assert(p->second.d_name == dn->get_name());
+      ceph_assert(p->second.dirino == pin->ino());
+      ceph_assert(p->second.d_name == dn->get_name());
     } else {
-      assert(p->second.dirino == inodeno_t(0));
-      assert(p->second.d_name == "");
+      ceph_assert(p->second.dirino == inodeno_t(0));
+      ceph_assert(p->second.d_name == "");
     }
     int omap_idx = p->second.omap_idx;
@@ -92,10 +92,10 @@ void OpenFileTable::put_ref(CInode *in)
     auto ret = dirty_items.emplace(in->ino(), omap_idx);
     if (!ret.second) {
       if (ret.first->second == DIRTY_NEW) {
-        assert(omap_idx < 0);
+        ceph_assert(omap_idx < 0);
         dirty_items.erase(ret.first);
       } else {
-        assert(omap_idx >= 0);
+        ceph_assert(omap_idx >= 0);
         ret.first->second = omap_idx;
       }
     }
@@ -109,7 +109,7 @@ void OpenFileTable::add_inode(CInode *in)
   dout(10) << __func__ << " " << *in << dendl;
   if (!in->is_dir()) {
     auto p = anchor_map.find(in->ino());
-    assert(p == anchor_map.end());
+    ceph_assert(p == anchor_map.end());
   }
   get_ref(in);
 }
@@ -119,8 +119,8 @@ void OpenFileTable::remove_inode(CInode *in)
   dout(10) << __func__ << " " << *in << dendl;
   if (!in->is_dir()) {
     auto p = anchor_map.find(in->ino());
-    assert(p != anchor_map.end());
-    assert(p->second.nref == 1);
+    ceph_assert(p != anchor_map.end());
+    ceph_assert(p->second.nref == 1);
   }
   put_ref(in);
 }
@@ -128,10 +128,10 @@ void OpenFileTable::remove_inode(CInode *in)
 void OpenFileTable::add_dirfrag(CDir *dir)
 {
   dout(10) << __func__ << " " << *dir << dendl;
-  assert(!dir->state_test(CDir::STATE_TRACKEDBYOFT));
+  ceph_assert(!dir->state_test(CDir::STATE_TRACKEDBYOFT));
   dir->state_set(CDir::STATE_TRACKEDBYOFT);
   auto ret = dirfrags.insert(dir->dirfrag());
-  assert(ret.second);
+  ceph_assert(ret.second);
   get_ref(dir->get_inode());
   dirty_items.emplace(dir->ino(), (int)DIRTY_UNDEF);
 }
@@ -139,10 +139,10 @@ void OpenFileTable::add_dirfrag(CDir *dir)
 void OpenFileTable::remove_dirfrag(CDir *dir)
 {
   dout(10) << __func__ << " " << *dir << dendl;
-  assert(dir->state_test(CDir::STATE_TRACKEDBYOFT));
+  ceph_assert(dir->state_test(CDir::STATE_TRACKEDBYOFT));
   dir->state_clear(CDir::STATE_TRACKEDBYOFT);
   auto p = dirfrags.find(dir->dirfrag());
-  assert(p != dirfrags.end());
+  ceph_assert(p != dirfrags.end());
   dirfrags.erase(p);
   dirty_items.emplace(dir->ino(), (int)DIRTY_UNDEF);
   put_ref(dir->get_inode());
@@ -152,10 +152,10 @@ void OpenFileTable::notify_link(CInode *in)
 {
   dout(10) << __func__ << " " << *in << dendl;
   auto p = anchor_map.find(in->ino());
-  assert(p != anchor_map.end());
-  assert(p->second.nref > 0);
-  assert(p->second.dirino == inodeno_t(0));
-  assert(p->second.d_name == "");
+  ceph_assert(p != anchor_map.end());
+  ceph_assert(p->second.nref > 0);
+  ceph_assert(p->second.dirino == inodeno_t(0));
+  ceph_assert(p->second.d_name == "");
   CDentry *dn = in->get_parent_dn();
   CInode *pin = dn->get_dir()->get_inode();
@@ -171,13 +171,13 @@ void OpenFileTable::notify_unlink(CInode *in)
 {
   dout(10) << __func__ << " " << *in << dendl;
   auto p = anchor_map.find(in->ino());
-  assert(p != anchor_map.end());
-  assert(p->second.nref > 0);
+  ceph_assert(p != anchor_map.end());
+  ceph_assert(p->second.nref > 0);
   CDentry *dn = in->get_parent_dn();
   CInode *pin = dn->get_dir()->get_inode();
-  assert(p->second.dirino == pin->ino());
-  assert(p->second.d_name == dn->get_name());
+  ceph_assert(p->second.dirino == pin->ino());
+  ceph_assert(p->second.d_name == dn->get_name());
   p->second.dirino = inodeno_t(0);
   p->second.d_name = "";
@@ -229,8 +229,8 @@ void OpenFileTable::_commit_finish(int r, uint64_t log_seq, MDSInternalContextBa
     return;
   }
-  assert(log_seq <= committing_log_seq);
-  assert(log_seq >= committed_log_seq);
+  ceph_assert(log_seq <= committing_log_seq);
+  ceph_assert(log_seq >= committed_log_seq);
   committed_log_seq = log_seq;
   num_pending_commit--;
@@ -290,9 +290,9 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
 {
   dout(10) << __func__ << " log_seq " << log_seq << dendl;
-  assert(num_pending_commit == 0);
+  ceph_assert(num_pending_commit == 0);
   num_pending_commit++;
-  assert(log_seq >= committing_log_seq);
+  ceph_assert(log_seq >= committing_log_seq);
   committing_log_seq = log_seq;
   omap_version++;
@@ -330,7 +330,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
   if (journal_state == JOURNAL_NONE)
     journal_state = JOURNAL_START;
   else
-    assert(journal_state == JOURNAL_START);
+    ceph_assert(journal_state == JOURNAL_START);
   bufferlist header;
   _encode_header(header, journal_state);
@@ -434,7 +434,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
     if (first_commit) {
       auto q = loaded_anchor_map.find(it.first);
       if (q != loaded_anchor_map.end()) {
-        assert(p != anchor_map.end());
+        ceph_assert(p != anchor_map.end());
         p->second.omap_idx = q->second.omap_idx;
         bool same = p->second == q->second;
         if (same) {
@@ -462,7 +462,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
     if (p != anchor_map.end()) {
       omap_idx = p->second.omap_idx;
       if (omap_idx < 0) {
-        assert(it.second == DIRTY_NEW);
+        ceph_assert(it.second == DIRTY_NEW);
         // find omap object to store the key
         for (unsigned i = first_free_idx; i < omap_num_objs; i++) {
           if (omap_num_items[i] < MAX_ITEMS_PER_OBJ)
@@ -470,7 +470,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
         }
         if (omap_idx < 0) {
           ++omap_num_objs;
-          assert(omap_num_objs <= MAX_OBJECTS);
+          ceph_assert(omap_num_objs <= MAX_OBJECTS);
           omap_num_items.resize(omap_num_objs);
           omap_updates.resize(omap_num_objs);
           omap_updates.back().clear = true;
@@ -484,7 +484,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
     } else {
       omap_idx = it.second;
       unsigned& count = omap_num_items.at(omap_idx);
-      assert(count > 0);
+      ceph_assert(count > 0);
       --count;
       if ((unsigned)omap_idx < first_free_idx && count < MAX_ITEMS_PER_OBJ)
         first_free_idx = omap_idx;
@@ -518,7 +518,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
     int omap_idx = it.second.omap_idx;
     unsigned& count = omap_num_items.at(omap_idx);
-    assert(count > 0);
+    ceph_assert(count > 0);
     --count;
     auto& ctl = omap_updates.at(omap_idx);
@@ -549,7 +549,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
     if (omap_num_items[i] > 0)
       used_objs = i + 1;
   }
-  assert(total_items == anchor_map.size());
+  ceph_assert(total_items == anchor_map.size());
   // adjust omap object count
   if (used_objs < omap_num_objs) {
     omap_num_objs = used_objs;
@@ -559,8 +559,8 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
   // does not change.
   if (!journaled && old_num_objs == omap_num_objs &&
       objs_to_write.size() <= 1) {
-    assert(journal_state == JOURNAL_NONE);
-    assert(!gather.has_subs());
+    ceph_assert(journal_state == JOURNAL_NONE);
+    ceph_assert(!gather.has_subs());
     unsigned omap_idx = objs_to_write.empty() ? 0 : objs_to_write.front();
     create_op_func(omap_idx, true);
@@ -578,19 +578,19 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
   }
   if (journal_state == JOURNAL_START) {
-    assert(gather.has_subs());
+    ceph_assert(gather.has_subs());
     journal_state = JOURNAL_FINISH;
   } else {
     // only object count changes
-    assert(journal_state == JOURNAL_NONE);
-    assert(!gather.has_subs());
+    ceph_assert(journal_state == JOURNAL_NONE);
+    ceph_assert(!gather.has_subs());
   }
   for (unsigned omap_idx = 0; omap_idx < omap_updates.size(); omap_idx++) {
     auto& ctl = omap_updates[omap_idx];
-    assert(ctl.to_update.empty() && ctl.to_remove.empty());
+    ceph_assert(ctl.to_update.empty() && ctl.to_remove.empty());
     if (ctl.journal_idx == 0)
-      assert(ctl.journaled_update.empty() && ctl.journaled_remove.empty());
+      ceph_assert(ctl.journaled_update.empty() && ctl.journaled_remove.empty());
     bool first = true;
     for (auto& it : ctl.journaled_update) {
@@ -626,7 +626,7 @@ void OpenFileTable::commit(MDSInternalContextBase *c, uint64_t log_seq, int op_p
     create_op_func(omap_idx, first);
   }
-  assert(!ops_map.empty());
+  ceph_assert(!ops_map.empty());
   if (journal_state == JOURNAL_FINISH) {
     gather.set_finisher(new C_OnFinisher(new C_IO_OFT_Journal(this, log_seq, c, ops_map),
                                          mds->finisher));
@@ -707,7 +707,7 @@ void OpenFileTable::_load_finish(int op_r, int header_r, int values_r,
                                                  std::make_tuple());
       RecoveredAnchor& anchor = it->second;
       decode(anchor, p);
-      assert(ino == anchor.ino);
+      ceph_assert(ino == anchor.ino);
       anchor.omap_idx = idx;
       anchor.auth = MDS_RANK_NONE;
@@ -772,7 +772,7 @@ void OpenFileTable::_load_finish(int op_r, int header_r, int values_r,
         omap_num_items.resize(omap_num_objs);
         journal_state = jstate;
       } else if (version == omap_version) {
-        assert(omap_num_objs == num_objs);
+        ceph_assert(omap_num_objs == num_objs);
         if (jstate > journal_state)
           journal_state = jstate;
       }
@@ -858,10 +858,10 @@ void OpenFileTable::_load_finish(int op_r, int header_r, int values_r,
       for (auto& q : to_remove) {
         inodeno_t ino;
         sscanf(q.c_str(), "%llx",(unsigned long long*)&ino.val);
-        assert(ino.val > 0);
+        ceph_assert(ino.val > 0);
         if (loaded_anchor_map.erase(ino)) {
           unsigned& count = omap_num_items[omap_idx];
-          assert(count > 0);
+          ceph_assert(count > 0);
           --count;
         }
         auto r = loaded_dirfrags.lower_bound(dirfrag_t(ino, 0));
@@ -927,7 +927,7 @@ out:
 void OpenFileTable::load(MDSInternalContextBase *onload)
 {
   dout(10) << __func__ << dendl;
-  assert(!load_done);
+  ceph_assert(!load_done);
   if (onload)
     waiting_for_load.push_back(onload);
@@ -991,7 +991,7 @@ void OpenFileTable::_open_ino_finish(inodeno_t ino, int r)
 {
   if (prefetch_state == DIR_INODES && r >= 0 && ino != inodeno_t(0)) {
     auto p = loaded_anchor_map.find(ino);
-    assert(p != loaded_anchor_map.end());
+    ceph_assert(p != loaded_anchor_map.end());
     p->second.auth = mds_rank_t(r);
   }
@@ -1010,7 +1010,7 @@ void OpenFileTable::_open_ino_finish(inodeno_t ino, int r)
       finish_contexts(g_ceph_context, waiting_for_prefetch);
       waiting_for_prefetch.clear();
     } else {
-      assert(0);
+      ceph_assert(0);
     }
   }
 }
@@ -1018,7 +1018,7 @@ void OpenFileTable::_open_ino_finish(inodeno_t ino, int r)
 void OpenFileTable::_prefetch_dirfrags()
 {
   dout(10) << __func__ << dendl;
-  assert(prefetch_state == DIRFRAGS);
+  ceph_assert(prefetch_state == DIRFRAGS);
   MDCache *mdcache = mds->mdcache;
   list<CDir*> fetch_queue;
@@ -1060,7 +1060,7 @@ void OpenFileTable::_prefetch_dirfrags()
   int num_opening_dirfrags = 0;
   for (auto dir : fetch_queue) {
     if (dir->state_test(CDir::STATE_REJOINUNDEF))
-      assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()));
+      ceph_assert(dir->get_inode()->dirfragtree.is_leaf(dir->get_frag()));
     dir->fetch(gather.new_sub());
     if (!(++num_opening_dirfrags % 1000))
@@ -1084,7 +1084,7 @@ void OpenFileTable::_prefetch_dirfrags()
 void OpenFileTable::_prefetch_inodes()
 {
   dout(10) << __func__ << " state " << prefetch_state << dendl;
-  assert(!num_opening_inodes);
+  ceph_assert(!num_opening_inodes);
   num_opening_inodes = 1;
   int64_t pool;
@@ -1093,7 +1093,7 @@ void OpenFileTable::_prefetch_inodes()
   else if (prefetch_state == FILE_INODES)
     pool = mds->mdsmap->get_first_data_pool();
   else
-    assert(0);
+    ceph_assert(0);
   MDCache *mdcache = mds->mdcache;
@@ -1138,7 +1138,7 @@ void OpenFileTable::_prefetch_inodes()
 bool OpenFileTable::prefetch_inodes()
 {
   dout(10) << __func__ << dendl;
-  assert(!prefetch_state);
+  ceph_assert(!prefetch_state);
   prefetch_state = DIR_INODES;
   if (!load_done) {
diff --git a/src/mds/OpenFileTable.h b/src/mds/OpenFileTable.h
index 96e94de8119fb..4ffb397475366 100644
--- a/src/mds/OpenFileTable.h
+++ b/src/mds/OpenFileTable.h
@@ -45,7 +45,7 @@ public:
   void load(MDSInternalContextBase *c);
   bool is_loaded() const { return load_done; }
   void wait_for_load(MDSInternalContextBase *c) {
-    assert(!load_done);
+    ceph_assert(!load_done);
     waiting_for_load.push_back(c);
   }
@@ -55,7 +55,7 @@ public:
   bool prefetch_inodes();
   bool is_prefetched() const { return prefetch_state == DONE; }
   void wait_for_prefetch(MDSInternalContextBase *c) {
-    assert(!is_prefetched());
+    ceph_assert(!is_prefetched());
     waiting_for_prefetch.push_back(c);
   }
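The OpenFileTable hunks above all guard the same reference-counting scheme: one anchor per tracked inode, created by the first get_ref() and erased when the last put_ref() drops nref to zero. A stripped-down sketch of that bookkeeping, with hypothetical types standing in for the Ceph ones:

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <string>

    struct Anchor {
      uint64_t dirino;     // parent directory, 0 when unlinked
      std::string d_name;  // dentry name, "" when unlinked
      int nref;            // trackers referencing this inode
    };

    class AnchorTable {
      std::map<uint64_t, Anchor> anchor_map;  // keyed by inode number
    public:
      void get_ref(uint64_t ino, uint64_t dirino, const std::string& name) {
        auto p = anchor_map.find(ino);
        if (p != anchor_map.end()) {
          assert(p->second.nref > 0);  // existing anchors are always live
          p->second.nref++;
          return;
        }
        anchor_map.emplace(ino, Anchor{dirino, name, 1});
      }
      void put_ref(uint64_t ino) {
        auto p = anchor_map.find(ino);
        assert(p != anchor_map.end()); // put without a matching get
        assert(p->second.nref > 0);
        if (--p->second.nref == 0)
          anchor_map.erase(p);         // last reference drops the anchor
      }
    };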
diff --git a/src/mds/PurgeQueue.cc b/src/mds/PurgeQueue.cc
index 79839d9593316..5e2df73c489d0 100644
--- a/src/mds/PurgeQueue.cc
+++ b/src/mds/PurgeQueue.cc
@@ -97,9 +97,9 @@ PurgeQueue::PurgeQueue(
     delayed_flush(nullptr),
     recovered(false)
 {
-  assert(cct != nullptr);
-  assert(on_error != nullptr);
-  assert(objecter != nullptr);
+  ceph_assert(cct != nullptr);
+  ceph_assert(on_error != nullptr);
+  ceph_assert(objecter != nullptr);
   journaler.set_write_error_handler(on_error);
 }
@@ -129,7 +129,7 @@ void PurgeQueue::init()
 {
   Mutex::Locker l(lock);
-  assert(logger != nullptr);
+  ceph_assert(logger != nullptr);
   finisher.start();
   timer.init();
@@ -208,7 +208,7 @@ void PurgeQueue::wait_for_recovery(Context* c)
 void PurgeQueue::_recover()
 {
-  assert(lock.is_locked_by_me());
+  ceph_assert(lock.is_locked_by_me());
   // Journaler::is_readable() adjusts write_pos if partial entry is encountered
   while (1) {
@@ -241,7 +241,7 @@ void PurgeQueue::_recover()
       bufferlist bl;
       bool readable = journaler.try_read_entry(bl);
-      assert(readable);  // we checked earlier
+      ceph_assert(readable);  // we checked earlier
     }
   }
@@ -273,7 +273,7 @@ void PurgeQueue::push(const PurgeItem &pi, Context *completion)
   Mutex::Locker l(lock);
   // Callers should have waited for open() before using us
-  assert(!journaler.is_readonly());
+  ceph_assert(!journaler.is_readonly());
   bufferlist bl;
@@ -363,7 +363,7 @@ bool PurgeQueue::can_consume()
 bool PurgeQueue::_consume()
 {
-  assert(lock.is_locked_by_me());
+  ceph_assert(lock.is_locked_by_me());
   bool could_consume = false;
   while(can_consume()) {
@@ -404,7 +404,7 @@ bool PurgeQueue::_consume()
     // The journaler is readable: consume an entry
     bufferlist bl;
     bool readable = journaler.try_read_entry(bl);
-    assert(readable);  // we checked earlier
+    ceph_assert(readable);  // we checked earlier
     dout(20) << " decoding entry" << dendl;
     PurgeItem item;
@@ -429,7 +429,7 @@ void PurgeQueue::_execute_item(
     const PurgeItem &item,
     uint64_t expire_to)
 {
-  assert(lock.is_locked_by_me());
+  ceph_assert(lock.is_locked_by_me());
   in_flight[expire_to] = item;
   logger->set(l_pq_executing, in_flight.size());
@@ -504,7 +504,7 @@ void PurgeQueue::_execute_item(
     logger->set(l_pq_executing, in_flight.size());
     return;
   }
-  assert(gather.has_subs());
+  ceph_assert(gather.has_subs());
   gather.set_finisher(new C_OnFinisher(
                       new FunctionContext([this, expire_to](int r){
@@ -531,12 +531,12 @@ void PurgeQueue::_execute_item(
 void PurgeQueue::_execute_item_complete(
     uint64_t expire_to)
 {
-  assert(lock.is_locked_by_me());
+  ceph_assert(lock.is_locked_by_me());
   dout(10) << "complete at 0x" << std::hex << expire_to << std::dec << dendl;
-  assert(in_flight.count(expire_to) == 1);
+  ceph_assert(in_flight.count(expire_to) == 1);
   auto iter = in_flight.find(expire_to);
-  assert(iter != in_flight.end());
+  ceph_assert(iter != in_flight.end());
   if (iter == in_flight.begin()) {
     uint64_t pos = expire_to;
     if (!pending_expire.empty()) {
@@ -635,9 +635,9 @@ bool PurgeQueue::drain(
   size_t *in_flight_count
   )
 {
-  assert(progress != nullptr);
-  assert(progress_total != nullptr);
-  assert(in_flight_count != nullptr);
+  ceph_assert(progress != nullptr);
+  ceph_assert(progress_total != nullptr);
+  ceph_assert(in_flight_count != nullptr);
   const bool done = in_flight.empty() && (
       journaler.get_read_pos() == journaler.get_write_pos());
diff --git a/src/mds/RecoveryQueue.cc b/src/mds/RecoveryQueue.cc
index 8a0abe6fd1348..e02de3671bae8 100644
--- a/src/mds/RecoveryQueue.cc
+++ b/src/mds/RecoveryQueue.cc
@@ -43,7 +43,7 @@ public:
   C_MDC_Recover(RecoveryQueue *rq_, CInode *i) :
     MDSIOContextBase(false), rq(rq_), in(i), size(0) {
-    assert(rq != NULL);
+    ceph_assert(rq != NULL);
   }
   void print(ostream& out) const override {
     out << "file_recover(" << in->ino() << ")";
@@ -134,7 +134,7 @@ void RecoveryQueue::prioritize(CInode *in)
   if (!in->item_recover_queue_front.is_on_list()) {
     dout(20) << *in << dendl;
-    assert(in->item_recover_queue.is_on_list());
+    ceph_assert(in->item_recover_queue.is_on_list());
     in->item_recover_queue.remove_myself();
     file_recover_queue_size--;
@@ -161,8 +161,8 @@ static bool _is_in_any_recover_queue(CInode *in)
 void RecoveryQueue::enqueue(CInode *in)
 {
   dout(15) << "RecoveryQueue::enqueue " << *in << dendl;
-  assert(logger);  // Caller should have done set_logger before using me
-  assert(in->is_auth());
+  ceph_assert(logger);  // Caller should have done set_logger before using me
+  ceph_assert(in->is_auth());
   in->state_clear(CInode::STATE_NEEDSRECOVER);
   if (!in->state_test(CInode::STATE_RECOVERING)) {
@@ -205,7 +205,7 @@ void RecoveryQueue::_recovered(CInode *in, int r, uint64_t size, utime_t mtime)
   }
   auto p = file_recovering.find(in);
-  assert(p != file_recovering.end());
+  ceph_assert(p != file_recovering.end());
   bool restart = p->second;
   file_recovering.erase(p);
diff --git a/src/mds/ScatterLock.h b/src/mds/ScatterLock.h
index 2b93f73d7a991..646e100c77cd8 100644
--- a/src/mds/ScatterLock.h
+++ b/src/mds/ScatterLock.h
@@ -51,7 +51,7 @@ public:
   ScatterLock(MDSCacheObject *o, LockType *lt) :
     SimpleLock(o, lt) {}
   ~ScatterLock() override {
-    assert(!_more);
+    ceph_assert(!_more);
   }
   bool is_scatterlock() const override {
@@ -81,8 +81,8 @@ public:
   void set_xlock_snap_sync(MDSInternalContextBase *c)
   {
-    assert(get_type() == CEPH_LOCK_IFILE);
-    assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE);
+    ceph_assert(get_type() == CEPH_LOCK_IFILE);
+    ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE);
     state = LOCK_XLOCKSNAP;
     add_waiter(WAIT_STABLE, c);
   }
diff --git a/src/mds/ScrubHeader.h b/src/mds/ScrubHeader.h
index 0c1cd6ae08a69..02acc84fb5207 100644
--- a/src/mds/ScrubHeader.h
+++ b/src/mds/ScrubHeader.h
@@ -31,7 +31,7 @@ public:
       : tag(tag_), force(force_), recursive(recursive_), repair(repair_),
         formatter(f_), origin(nullptr)
   {
-    assert(formatter != nullptr);
+    ceph_assert(formatter != nullptr);
   }
   // Set after construction because it won't be known until we've
diff --git a/src/mds/ScrubStack.cc b/src/mds/ScrubStack.cc
index 0f34069c586e0..ec3a5b7901fae 100644
--- a/src/mds/ScrubStack.cc
+++ b/src/mds/ScrubStack.cc
@@ -52,7 +52,7 @@ void ScrubStack::pop_inode(CInode *in)
 {
   dout(20) << "popping " << *in
            << " off of ScrubStack" << dendl;
-  assert(in->item_scrub.is_on_list());
+  ceph_assert(in->item_scrub.is_on_list());
   in->put(CInode::PIN_SCRUBQUEUE);
   in->item_scrub.remove_myself();
   stack_size--;
@@ -64,7 +64,7 @@ void ScrubStack::_enqueue_inode(CInode *in, CDentry *parent,
 {
   dout(10) << __func__ << " with {" << *in << "}"
            << ", on_finish=" << on_finish << ", top=" << top << dendl;
-  assert(mdcache->mds->mds_lock.is_locked_by_me());
+  ceph_assert(mdcache->mds->mds_lock.is_locked_by_me());
   in->scrub_initialize(parent, header, on_finish);
   if (top)
     push_inode(in);
@@ -135,7 +135,7 @@ void ScrubStack::scrub_dir_inode(CInode *in,
   bool all_frags_done = true;
   ScrubHeaderRef header = in->get_scrub_header();
-  assert(header != nullptr);
+  ceph_assert(header != nullptr);
   if (header->get_recursive()) {
     list<frag_t> scrubbing_frags;
@@ -240,7 +240,7 @@ bool ScrubStack::get_next_cdir(CInode *in, CDir **new_dir)
     dout(25) << "returning dir " << *new_dir << dendl;
     return true;
   }
-  assert(r == ENOENT);
+  ceph_assert(r == ENOENT);
   // there are no dirfrags left
   *new_dir = NULL;
   return true;
@@ -294,7 +294,7 @@ void ScrubStack::scrub_dirfrag(CDir *dir,
                                bool *added_children, bool *is_terminal,
                                bool *done)
 {
-  assert(dir != NULL);
+  ceph_assert(dir != NULL);
   dout(20) << __func__ << " on " << *dir << dendl;
   *added_children = false;
@@ -349,7 +349,7 @@ void ScrubStack::scrub_dirfrag(CDir *dir,
     // scrub_dentry_next defined to only give EAGAIN, ENOENT, 0 -- we should
     // never get random IO errors here.
-    assert(r == 0);
+    ceph_assert(r == 0);
     _enqueue_inode(dn->get_projected_inode(), dn, header, NULL, true);
diff --git a/src/mds/ScrubStack.h b/src/mds/ScrubStack.h
index 59d84be9d2f99..c727dc97b0f5a 100644
--- a/src/mds/ScrubStack.h
+++ b/src/mds/ScrubStack.h
@@ -62,8 +62,8 @@ public:
     scrub_kick(mdc, this),
     mdcache(mdc) {}
   ~ScrubStack() {
-    assert(inode_stack.empty());
-    assert(!scrubs_in_progress);
+    ceph_assert(inode_stack.empty());
+    ceph_assert(!scrubs_in_progress);
   }
   /**
    * Put a inode on the top of the scrub stack, so it is the highest priority.
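PurgeQueue and ScrubStack both assert lock ownership on entry (lock.is_locked_by_me(), mds_lock.is_locked_by_me()). A plain std::mutex cannot answer that question; a mutex usable with such checks has to record its owner, roughly as in this sketch (Ceph's Mutex does its own equivalent tracking):

    #include <cassert>
    #include <mutex>
    #include <thread>

    class OwnedMutex {
      std::mutex m;
      std::thread::id owner;  // meaningful only while held
    public:
      void lock() {
        m.lock();
        owner = std::this_thread::get_id();
      }
      void unlock() {
        owner = std::thread::id();  // clear before releasing
        m.unlock();
      }
      bool is_locked_by_me() const {
        return owner == std::this_thread::get_id();
      }
    };

    void must_hold(OwnedMutex& lock) {
      assert(lock.is_locked_by_me());  // same shape as the checks above
    }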
diff --git a/src/mds/Server.cc b/src/mds/Server.cc
index 789442ae3a5a4..5728f11f1c146 100644
--- a/src/mds/Server.cc
+++ b/src/mds/Server.cc
@@ -70,7 +70,7 @@ class ServerContext : public MDSInternalContextBase {
 public:
   explicit ServerContext(Server *s) : server(s) {
-    assert(server != NULL);
+    ceph_assert(server != NULL);
   }
 };
@@ -89,10 +89,10 @@ protected:
   }
 public:
   explicit ServerLogContext(Server *s) : server(s) {
-    assert(server != NULL);
+    ceph_assert(server != NULL);
   }
   explicit ServerLogContext(Server *s, MDRequestRef& r) : server(s), mdr(r) {
-    assert(server != NULL);
+    ceph_assert(server != NULL);
   }
 };
@@ -264,7 +264,7 @@ void Server::dispatch(const Message::const_ref &m)
     return;
   default:
     derr << "server unknown message " << m->get_type() << dendl;
-    assert(0 == "server unknown message");
+    ceph_assert(0 == "server unknown message");
   }
 }
@@ -287,7 +287,7 @@ public:
   C_MDS_session_finish(Server *srv, Session *se, uint64_t sseq, bool s, version_t mv,
                        interval_set<inodeno_t>& i, version_t iv, Context *fin_ = NULL) :
     ServerLogContext(srv), session(se), state_seq(sseq), open(s), cmapv(mv),
    inos(i), inotablev(iv), fin(fin_) { }
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     server->_session_logged(session, state_seq, open, cmapv, inos, inotablev);
     if (fin) {
       fin->complete(r);
@@ -301,7 +301,7 @@ void Server::handle_client_session(const MClientSession::const_ref &m)
   Session *session = mds->get_session(m);
   dout(3) << "handle_client_session " << *m << " from " << m->get_source() << dendl;
-  assert(m->get_source().is_client()); // should _not_ come from an mds!
+  ceph_assert(m->get_source().is_client()); // should _not_ come from an mds!
   if (!session) {
     dout(0) << " ignoring sessionless msg " << *m << dendl;
@@ -337,7 +337,7 @@ void Server::handle_client_session(const MClientSession::const_ref &m)
       dout(10) << "currently open|opening|stale|killing, dropping this req" << dendl;
       return;
     }
-    assert(session->is_closed() ||
+    ceph_assert(session->is_closed() ||
            session->is_closing());
     if (mds->is_stopping()) {
@@ -457,7 +457,7 @@ void Server::handle_client_session(const MClientSession::const_ref &m)
       dout(10) << "ignoring close req on importing session" << dendl;
       return;
     }
-    assert(session->is_open() ||
+    ceph_assert(session->is_open() ||
            session->is_stale() ||
            session->is_opening());
     if (m->get_seq() < session->get_push_seq()) {
@@ -497,7 +497,7 @@ void Server::flush_client_sessions(set<client_t>& client_set, MDSGatherBuilder&
 {
   for (set<client_t>::iterator p = client_set.begin(); p != client_set.end(); ++p) {
     Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->v));
-    assert(session);
+    ceph_assert(session);
     if (!session->is_open() ||
         !session->get_connection() ||
         !session->get_connection()->has_feature(CEPH_FEATURE_EXPORT_PEER))
@@ -521,11 +521,11 @@ void Server::_session_logged(Session *session, uint64_t state_seq, bool open, ve
           << " " << pv << dendl;
   if (piv) {
-    assert(session->is_closing() || session->is_killing() ||
+    ceph_assert(session->is_closing() || session->is_killing() ||
            session->is_opening()); // re-open closing session
     session->info.prealloc_inos.subtract(inos);
     mds->inotable->apply_release_ids(inos);
-    assert(mds->inotable->get_version() == piv);
+    ceph_assert(mds->inotable->get_version() == piv);
   }
   mds->sessionmap.mark_dirty(session);
@@ -536,10 +536,10 @@ void Server::_session_logged(Session *session, uint64_t state_seq, bool open, ve
             << ", noop" << dendl;
     // close must have been canceled (by an import?), or any number of other things..
   } else if (open) {
-    assert(session->is_opening());
+    ceph_assert(session->is_opening());
     mds->sessionmap.set_state(session, Session::STATE_OPEN);
     mds->sessionmap.touch_session(session);
-    assert(session->get_connection());
+    ceph_assert(session->get_connection());
     auto reply = MClientSession::create(CEPH_SESSION_OPEN);
     if (session->info.has_feature(CEPHFS_FEATURE_MIMIC))
       reply->supported_features = supported_features;
@@ -649,7 +649,7 @@ version_t Server::prepare_force_open_sessions(map<client_t,entity_inst_t>& cm,
       if (q != cmm.end())
         session->info.client_metadata.merge(q->second);
     } else {
-      assert(session->is_open() ||
+      ceph_assert(session->is_open() ||
              session->is_opening() ||
              session->is_stale());
       sseq = 0;
@@ -692,7 +692,7 @@ void Server::finish_force_open_sessions(const map<client_t,pair<Session*,uint64_t> >& smap,
               << session->info.inst << dendl;
-      assert(session->is_open() || session->is_stale());
+      ceph_assert(session->is_open() || session->is_stale());
     }
     if (dec_import) {
@@ -787,7 +787,7 @@ void Server::find_idle_sessions()
       return;
     }
     const auto &stale_sessions = sessions_p->second;
-    assert(stale_sessions != nullptr);
+    ceph_assert(stale_sessions != nullptr);
     for (const auto &session: *stale_sessions) {
       auto last_cap_renew_span = std::chrono::duration<double>(now-session->last_cap_renew).count();
@@ -795,7 +795,7 @@ void Server::find_idle_sessions()
         dout(10) << "stopping at importing session " << session->info.inst << dendl;
         break;
       }
-      assert(session->is_stale());
+      ceph_assert(session->is_stale());
       if (last_cap_renew_span < cutoff) {
         dout(20) << "oldest stale session is " << session->info.inst
                  << " and recently renewed caps " << last_cap_renew_span << "s ago" << dendl;
         break;
@@ -859,7 +859,7 @@ void Server::handle_conf_change(const ConfigProxy& conf,
  */
 void Server::kill_session(Session *session, Context *on_safe)
 {
-  assert(mds->mds_lock.is_locked_by_me());
+  ceph_assert(mds->mds_lock.is_locked_by_me());
   if ((session->is_opening() ||
        session->is_open() ||
@@ -869,7 +869,7 @@ void Server::kill_session(Session *session, Context *on_safe)
     journal_close_session(session, Session::STATE_KILLING, on_safe);
   } else {
     dout(10) << "kill_session importing or already closing/killing " << session << dendl;
-    assert(session->is_closing() ||
+    ceph_assert(session->is_closing() ||
            session->is_closed() ||
            session->is_killing() ||
            session->is_importing());
@@ -968,7 +968,7 @@ void Server::handle_client_reconnect(const MClientReconnect::const_ref &m)
   dout(7) << "handle_client_reconnect " << m->get_source() << dendl;
   client_t from = m->get_source().num();
   Session *session = mds->get_session(m);
-  assert(session);
+  ceph_assert(session);
   if (!mds->is_reconnect() && mds->get_want_state() == CEPH_MDS_STATE_RECONNECT) {
     dout(10) << " we're almost in reconnect state (mdsmap delivery race?); waiting" << dendl;
@@ -1172,7 +1172,7 @@ void Server::update_required_client_features()
 void Server::reconnect_gather_finish()
 {
   dout(7) << "reconnect_gather_finish.  failed on " << failed_reconnects << " clients" << dendl;
-  assert(reconnect_done);
+  ceph_assert(reconnect_done);
   if (!mds->snapclient->is_synced()) {
     // make sure snaptable cache is populated. snaprealms will be
@@ -1206,7 +1206,7 @@ void Server::reconnect_tick()
        p != client_reconnect_gather.end();
        ++p) {
     Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->v));
-    assert(session);
+    ceph_assert(session);
     dout(1) << "reconnect gave up on " << session->info.inst << dendl;
     mds->clog->warn() << "evicting unresponsive client " << *session
@@ -1330,7 +1330,7 @@ void Server::force_clients_readonly()
 void Server::journal_and_reply(MDRequestRef& mdr, CInode *in, CDentry *dn, LogEvent *le, MDSLogContextBase *fin)
 {
   dout(10) << "journal_and_reply tracei " << in << " tracedn " << dn << dendl;
-  assert(!mdr->has_completed);
+  ceph_assert(!mdr->has_completed);
   // note trace items for eventual reply.
   mdr->tracei = in;
@@ -1380,7 +1380,7 @@ void Server::respond_to_request(MDRequestRef& mdr, int r)
   } else if (mdr->internal_op > -1) {
     dout(10) << "respond_to_request on internal request " << mdr << dendl;
     if (!mdr->internal_op_finish)
-      assert(0 == "trying to respond to internal op without finisher");
+      ceph_assert(0 == "trying to respond to internal op without finisher");
     mdr->internal_op_finish->complete(r);
     mdcache->request_finish(mdr);
   }
@@ -1559,7 +1559,7 @@ void Server::early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn)
  */
 void Server::reply_client_request(MDRequestRef& mdr, const MClientReply::ref &reply)
 {
-  assert(mdr.get());
+  ceph_assert(mdr.get());
   const MClientRequest::const_ref &req = mdr->client_request;
   dout(7) << "reply_client_request " << reply->get_result()
@@ -1792,7 +1792,7 @@ void Server::handle_client_request(const MClientRequest::const_ref &req)
   // completed request?
   bool has_completed = false;
   if (req->is_replay() || req->get_retry_attempt()) {
-    assert(session);
+    ceph_assert(session);
     inodeno_t created;
     if (session->have_completed_request(req->get_reqid().tid, &created)) {
       has_completed = true;
@@ -1829,7 +1829,7 @@ void Server::handle_client_request(const MClientRequest::const_ref &req)
   // trim completed_request list
   if (req->get_oldest_client_tid() > 0) {
     dout(15) << " oldest_client_tid=" << req->get_oldest_client_tid() << dendl;
-    assert(session);
+    ceph_assert(session);
    if (session->trim_completed_requests(req->get_oldest_client_tid())) {
       // Sessions 'completed_requests' was dirtied, mark it to be
       // potentially flushed at segment expiry.
@@ -1896,7 +1896,7 @@ void Server::handle_osd_map()
 void Server::dispatch_client_request(MDRequestRef& mdr)
 {
   // we shouldn't be waiting on anyone.
-  assert(!mdr->has_more() || mdr->more()->waiting_on_slave.empty());
+  ceph_assert(!mdr->has_more() || mdr->more()->waiting_on_slave.empty());
   if (mdr->killed) {
     dout(10) << "request " << *mdr << " was killed" << dendl;
@@ -2080,7 +2080,7 @@ void Server::handle_slave_request(const MMDSSlaveRequest::const_ref &m)
   CDentry *straydn = NULL;
   if (m->straybl.length() > 0) {
     straydn = mdcache->add_replica_stray(m->straybl, from);
-    assert(straydn);
+    ceph_assert(straydn);
     m->straybl.clear();
   }
@@ -2113,7 +2113,7 @@ void Server::handle_slave_request(const MMDSSlaveRequest::const_ref &m)
       mdr->aborted = true;
       if (mdr->slave_request) {
         // only abort on-going xlock, wrlock and auth pin
-        assert(!mdr->slave_did_prepare());
+        ceph_assert(!mdr->slave_did_prepare());
       } else {
         mdcache->request_finish(mdr);
       }
@@ -2130,7 +2130,7 @@ void Server::handle_slave_request(const MMDSSlaveRequest::const_ref &m)
     mdr = mdcache->request_start_slave(m->get_reqid(), m->get_attempt(), m);
     mdr->set_op_stamp(m->op_stamp);
   }
-  assert(mdr->slave_request == 0);     // only one at a time, please!
+  ceph_assert(mdr->slave_request == 0);     // only one at a time, please!
   if (straydn) {
     mdr->pin(straydn);
@@ -2196,9 +2196,9 @@ void Server::handle_slave_request_reply(const MMDSSlaveRequest::const_ref &m)
       mdr->finish_locking(lock);
       lock->get_xlock(mdr, mdr->get_client());
-      assert(mdr->more()->waiting_on_slave.count(from));
+      ceph_assert(mdr->more()->waiting_on_slave.count(from));
       mdr->more()->waiting_on_slave.erase(from);
-      assert(mdr->more()->waiting_on_slave.empty());
+      ceph_assert(mdr->more()->waiting_on_slave.empty());
       mdcache->dispatch_request(mdr);
     }
     break;
@@ -2214,9 +2214,9 @@ void Server::handle_slave_request_reply(const MMDSSlaveRequest::const_ref &m)
       mdr->locks.insert(lock);
       mdr->finish_locking(lock);
-      assert(mdr->more()->waiting_on_slave.count(from));
+      ceph_assert(mdr->more()->waiting_on_slave.count(from));
       mdr->more()->waiting_on_slave.erase(from);
-      assert(mdr->more()->waiting_on_slave.empty());
+      ceph_assert(mdr->more()->waiting_on_slave.empty());
       mdcache->dispatch_request(mdr);
     }
     break;
@@ -2315,7 +2315,7 @@ void Server::dispatch_slave_request(MDRequestRef& mdr)
     {
       SimpleLock *lock = mds->locker->get_lock(mdr->slave_request->get_lock_type(),
                                                mdr->slave_request->get_object_info());
-      assert(lock);
+      ceph_assert(lock);
       bool need_issue = false;
       switch (op) {
       case MMDSSlaveRequest::OP_UNXLOCK:
@@ -2497,7 +2497,7 @@ void Server::handle_slave_auth_pin_ack(MDRequestRef& mdr, const MMDSSlaveRequest
   set<MDSCacheObject*> pinned;
   for (const auto &oi : ack->get_authpins()) {
     MDSCacheObject *object = mdcache->get_object(oi);
-    assert(object);  // we pinned it
+    ceph_assert(object);  // we pinned it
     dout(10) << " remote has pinned " << *object << dendl;
     if (!mdr->is_auth_pinned(object))
       mdr->remote_auth_pins[object] = from;
@@ -2510,7 +2510,7 @@ void Server::handle_slave_auth_pin_ack(MDRequestRef& mdr, const MMDSSlaveRequest
   if (mdr->more()->is_remote_frozen_authpin &&
       ack->get_authpin_freeze() == MDSCacheObjectInfo()) {
     auto p = mdr->remote_auth_pins.find(mdr->more()->rename_inode);
-    assert(p != mdr->remote_auth_pins.end());
+    ceph_assert(p != mdr->remote_auth_pins.end());
     if (p->second == from) {
       mdr->more()->is_remote_frozen_authpin = false;
     }
@@ -2540,7 +2540,7 @@ void Server::handle_slave_auth_pin_ack(MDRequestRef& mdr, const MMDSSlaveRequest
   mdr->more()->slaves.insert(from);
   // clear from waiting list
-  assert(mdr->more()->waiting_on_slave.count(from));
+  ceph_assert(mdr->more()->waiting_on_slave.count(from));
   mdr->more()->waiting_on_slave.erase(from);
   // go again?
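The slave-request hunks keep asserting one bookkeeping discipline: a peer rank enters waiting_on_slave exactly once when a prepare is sent, and every ack must erase a rank that was actually recorded. A toy sketch of that invariant (hypothetical names, not the MDRequest API):

    #include <cassert>
    #include <set>

    struct SlaveWaiters {
      std::set<int> waiting_on_slave;  // ranks with an outstanding prepare

      void sent_prepare(int who) {
        assert(waiting_on_slave.count(who) == 0);  // never double-send
        waiting_on_slave.insert(who);
      }
      // Returns true when the last expected ack has arrived.
      bool got_ack(int who) {
        assert(waiting_on_slave.count(who));       // ack must match a send
        waiting_on_slave.erase(who);
        return waiting_on_slave.empty();           // caller re-dispatches
      }
    };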
@@ -2635,7 +2635,7 @@ CDir *Server::validate_dentry_dir(MDRequestRef& mdr, CInode *diri, std::string_v
 CDentry* Server::prepare_null_dentry(MDRequestRef& mdr, CDir *dir, std::string_view dname, bool okexist)
 {
   dout(10) << "prepare_null_dentry " << dname << " in " << *dir << dendl;
-  assert(dir->is_auth());
+  ceph_assert(dir->is_auth());
   client_t client = mdr->get_client();
@@ -2686,7 +2686,7 @@ CDentry* Server::prepare_stray_dentry(MDRequestRef& mdr, CInode *in)
     if (straydn->get_name() == straydname)
       return straydn;
-    assert(!mdr->done_locking);
+    ceph_assert(!mdr->done_locking);
     mdr->unpin(straydn);
   }
@@ -2746,7 +2746,7 @@ CInode* Server::prepare_new_inode(MDRequestRef& mdr, CDir *dir, inodeno_t useino
       mdr->session->get_num_projected_prealloc_inos() < g_conf()->mds_client_prealloc_inos / 2) {
     int need = g_conf()->mds_client_prealloc_inos - mdr->session->get_num_projected_prealloc_inos();
     mds->inotable->project_alloc_ids(mdr->prealloc_inos, need);
-    assert(mdr->prealloc_inos.size());  // or else fix projected increment semantics
+    ceph_assert(mdr->prealloc_inos.size());  // or else fix projected increment semantics
     mdr->session->pending_prealloc_inos.insert(mdr->prealloc_inos);
     mds->sessionmap.mark_projected(mdr->session);
     dout(10) << "prepare_new_inode prealloc " << mdr->prealloc_inos << dendl;
@@ -2838,14 +2838,14 @@ void Server::apply_allocated_inos(MDRequestRef& mdr, Session *session)
     mds->inotable->apply_alloc_id(mdr->alloc_ino);
   }
   if (mdr->prealloc_inos.size()) {
-    assert(session);
+    ceph_assert(session);
     session->pending_prealloc_inos.subtract(mdr->prealloc_inos);
     session->info.prealloc_inos.insert(mdr->prealloc_inos);
     mds->sessionmap.mark_dirty(session);
     mds->inotable->apply_alloc_ids(mdr->prealloc_inos);
   }
   if (mdr->used_prealloc_ino) {
-    assert(session);
+    ceph_assert(session);
     session->info.used_inos.erase(mdr->used_prealloc_ino);
     mds->sessionmap.mark_dirty(session);
   }
@@ -3125,7 +3125,7 @@ CDir* Server::try_open_auth_dirfrag(CInode *diri, frag_t fg, MDRequestRef& mdr)
   // not open and inode frozen?
   if (!dir && diri->is_frozen()) {
     dout(10) << "try_open_auth_dirfrag: dir inode is frozen, waiting " << *diri << dendl;
-    assert(diri->get_parent_dir());
+    ceph_assert(diri->get_parent_dir());
     diri->add_waiter(CInode::WAIT_UNFREEZE, new C_MDS_RetryRequest(mdcache, mdr));
     return 0;
   }
@@ -3463,7 +3463,7 @@ void Server::handle_client_open(MDRequestRef& mdr)
       return;
     if (cur->is_frozen() || cur->state_test(CInode::STATE_EXPORTINGCAPS)) {
-      assert(!need_auth);
+      ceph_assert(!need_auth);
       mdr->done_locking = false;
       CInode *cur = rdlock_path_pin_ref(mdr, 0, rdlocks, true);
       if (!cur)
@@ -3534,7 +3534,7 @@ void Server::handle_client_open(MDRequestRef& mdr)
   // O_TRUNC
   if ((flags & CEPH_O_TRUNC) && !mdr->has_completed) {
-    assert(cur->is_auth());
+    ceph_assert(cur->is_auth());
     xlocks.insert(&cur->filelock);
     if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
@@ -3617,7 +3617,7 @@ void Server::handle_client_open(MDRequestRef& mdr)
   CDentry *dn = 0;
   if (req->get_dentry_wanted()) {
-    assert(mdr->dn[0].size());
+    ceph_assert(mdr->dn[0].size());
     dn = mdr->dn[0].back();
   }
@@ -3633,7 +3633,7 @@ public:
   C_MDS_openc_finish(Server *s, MDRequestRef& r, CDentry *d, CInode *ni) :
     ServerLogContext(s, r), dn(d), newi(ni) {}
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     dn->pop_projected_linkage();
@@ -3653,7 +3653,7 @@ public:
     server->respond_to_request(mdr, 0);
-    assert(g_conf()->mds_kill_openc_at != 1);
+    ceph_assert(g_conf()->mds_kill_openc_at != 1);
   }
 };
@@ -3770,7 +3770,7 @@ void Server::handle_client_openc(MDRequestRef& mdr)
   if (!dnl->is_null()) {
     // it existed.
-    assert(req->head.args.open.flags & CEPH_O_EXCL);
+    ceph_assert(req->head.args.open.flags & CEPH_O_EXCL);
     dout(10) << "O_EXCL, target exists, failing with -EEXIST" << dendl;
     mdr->tracei = dnl->get_inode();
     mdr->tracedn = dn;
@@ -3781,7 +3781,7 @@ void Server::handle_client_openc(MDRequestRef& mdr)
   // create inode.
   CInode *in = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino),
                                  req->head.args.open.mode | S_IFREG, &layout);
-  assert(in);
+  ceph_assert(in);
   // it's a file.
   dn->push_projected_linkage(in);
@@ -3793,7 +3793,7 @@ void Server::handle_client_openc(MDRequestRef& mdr)
   SnapRealm *realm = diri->find_snaprealm();
   snapid_t follows = mdcache->get_global_snaprealm()->get_newest_seq();
-  assert(follows >= realm->get_newest_seq());
+  ceph_assert(follows >= realm->get_newest_seq());
   if (cmode & CEPH_FILE_MODE_WR) {
     in->inode.client_ranges[client].range.first = 0;
@@ -3802,7 +3802,7 @@ void Server::handle_client_openc(MDRequestRef& mdr)
   }
   in->inode.rstat.rfiles = 1;
-  assert(dn->first == follows+1);
+  ceph_assert(dn->first == follows+1);
   in->first = dn->first;
   // prepare finisher
@@ -3902,7 +3902,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr)
   // ok!
   dout(10) << "handle_client_readdir on " << *dir << dendl;
-  assert(dir->is_auth());
+  ceph_assert(dir->is_auth());
   if (!dir->is_complete()) {
     if (dir->is_frozen()) {
@@ -4021,7 +4021,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr)
         return;
       }
     }
-    assert(in);
+    ceph_assert(in);
     if ((int)(dnbl.length() + dn->get_name().length() + sizeof(__u32) + sizeof(LeaseStat)) > bytes_left) {
       dout(10) << " ran out of room, stopping at " << dnbl.length() << " < " << bytes_left << dendl;
@@ -4046,7 +4046,7 @@ void Server::handle_client_readdir(MDRequestRef& mdr)
       dnbl.swap(keep);
       break;
     }
-    assert(r >= 0);
+    ceph_assert(r >= 0);
     numfiles++;
     // touch dn
@@ -4103,7 +4103,7 @@ public:
     ServerLogContext(s, r), in(i),
     truncating_smaller(sm), changed_ranges(cr), new_realm(nr) { }
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     // apply
     in->pop_and_dirty_projected_inode(mdr->ls);
@@ -4221,7 +4221,7 @@ void Server::handle_client_file_setlock(MDRequestRef& mdr)
       respond_to_request(mdr, -EWOULDBLOCK);
     } else {
       dout(10) << " added to waiting list" << dendl;
-      assert(lock_state->is_waiting(set_lock));
+      ceph_assert(lock_state->is_waiting(set_lock));
       mdr->more()->flock_was_waiting = true;
       mds->locker->drop_locks(mdr.get());
       mdr->drop_local_auth_pins();
@@ -4426,7 +4426,7 @@ void Server::do_open_truncate(MDRequestRef& mdr, int cmode)
 {
   CInode *in = mdr->in[0];
   client_t client = mdr->get_client();
-  assert(in);
+  ceph_assert(in);
   dout(10) << "do_open_truncate " << *in << dendl;
@@ -4469,7 +4469,7 @@ void Server::do_open_truncate(MDRequestRef& mdr, int cmode)
   CDentry *dn = 0;
   if (mdr->client_request->get_dentry_wanted()) {
-    assert(mdr->dn[0].size());
+    ceph_assert(mdr->dn[0].size());
     dn = mdr->dn[0].back();
   }
@@ -4828,7 +4828,7 @@ int Server::check_layout_vxattr(MDRequestRef& mdr,
           epoch = osdmap.get_epoch();
         });
-      assert(epoch >= req_epoch);  // otherwise wait_for_map() told a lie
+      ceph_assert(epoch >= req_epoch);  // otherwise wait_for_map() told a lie
     } else if (req_epoch == 0 && !mdr->waited_for_osdmap) {
@@ -5081,7 +5081,7 @@ public:
   C_MDS_inode_xattr_update_finish(Server *s, MDRequestRef& r, CInode *i) :
     ServerLogContext(s, r), in(i) { }
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     // apply
     in->pop_and_dirty_projected_inode(mdr->ls);
@@ -5264,7 +5264,7 @@ public:
   C_MDS_mknod_finish(Server *s, MDRequestRef& r, CDentry *d, CInode *ni) :
     ServerLogContext(s, r), dn(d), newi(ni) {}
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     // link the inode
     dn->pop_projected_linkage();
@@ -5279,7 +5279,7 @@ public:
     // mkdir?
     if (newi->inode.is_dir()) {
       CDir *dir = newi->get_dirfrag(frag_t());
-      assert(dir);
+      ceph_assert(dir);
       dir->fnode.version--;
       dir->mark_dirty(dir->fnode.version + 1, mdr->ls);
       dir->mark_new(mdr->ls);
@@ -5338,7 +5338,7 @@ void Server::handle_client_mknod(MDRequestRef& mdr)
     layout = mdcache->default_file_layout;
   CInode *newi = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino), mode, &layout);
-  assert(newi);
+  ceph_assert(newi);
   dn->push_projected_linkage(newi);
@@ -5351,7 +5351,7 @@ void Server::handle_client_mknod(MDRequestRef& mdr)
   snapid_t follows = mdcache->get_global_snaprealm()->get_newest_seq();
   SnapRealm *realm = dn->get_dir()->inode->find_snaprealm();
-  assert(follows >= realm->get_newest_seq());
+  ceph_assert(follows >= realm->get_newest_seq());
   // if the client created a _regular_ file via MKNOD, it's highly likely they'll
   // want to write to it (e.g., if they are reexporting NFS)
@@ -5374,7 +5374,7 @@ void Server::handle_client_mknod(MDRequestRef& mdr)
     }
   }
-  assert(dn->first == follows + 1);
+  ceph_assert(dn->first == follows + 1);
   newi->first = dn->first;
   dout(10) << "mknod mode " << newi->inode.mode << " rdev " << newi->inode.rdev << dendl;
@@ -5430,7 +5430,7 @@ void Server::handle_client_mkdir(MDRequestRef& mdr)
   mode &= ~S_IFMT;
   mode |= S_IFDIR;
   CInode *newi = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino), mode);
-  assert(newi);
+  ceph_assert(newi);
   // it's a directory.
   dn->push_projected_linkage(newi);
@@ -5441,10 +5441,10 @@ void Server::handle_client_mkdir(MDRequestRef& mdr)
   snapid_t follows = mdcache->get_global_snaprealm()->get_newest_seq();
   SnapRealm *realm = dn->get_dir()->inode->find_snaprealm();
-  assert(follows >= realm->get_newest_seq());
+  ceph_assert(follows >= realm->get_newest_seq());
   dout(12) << " follows " << follows << dendl;
-  assert(dn->first == follows + 1);
+  ceph_assert(dn->first == follows + 1);
   newi->first = dn->first;
   // ...and that new dir is empty.
@@ -5508,7 +5508,7 @@ void Server::handle_client_symlink(MDRequestRef& mdr)
   unsigned mode = S_IFLNK | 0777;
   CInode *newi = prepare_new_inode(mdr, dn->get_dir(), inodeno_t(req->head.ino), mode);
-  assert(newi);
+  ceph_assert(newi);
   // it's a symlink
   dn->push_projected_linkage(newi);
@@ -5593,7 +5593,7 @@ void Server::handle_client_link(MDRequestRef& mdr)
   }
   // go!
-  assert(g_conf()->mds_kill_link_at != 1);
+  ceph_assert(g_conf()->mds_kill_link_at != 1);
   // local or remote?
   if (targeti->is_auth())
@@ -5615,7 +5615,7 @@ public:
     ServerLogContext(s, r), dn(d), targeti(ti),
     dnpv(dnpv_), tipv(tipv_), adjust_realm(ar) { }
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     server->_link_local_finish(mdr, dn, targeti, dnpv, tipv, adjust_realm);
   }
 };
@@ -5708,7 +5708,7 @@ public:
     ServerLogContext(s, r), inc(i), dn(d), targeti(ti),
     dpv(d->get_projected_version()) {}
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     server->_link_remote_finish(mdr, inc, dn, targeti, dpv);
   }
 };
@@ -5743,13 +5743,13 @@ void Server::_link_remote(MDRequestRef& mdr, bool inc, CDentry *dn, CInode *targ
       encode(*desti_srnode, req->desti_snapbl);
     mds->send_message_mds(req, linkauth);
-    assert(mdr->more()->waiting_on_slave.count(linkauth) == 0);
+    ceph_assert(mdr->more()->waiting_on_slave.count(linkauth) == 0);
     mdr->more()->waiting_on_slave.insert(linkauth);
     return;
   }
   dout(10) << " targeti auth has prepared nlink++/--" << dendl;
-  assert(g_conf()->mds_kill_link_at != 2);
+  ceph_assert(g_conf()->mds_kill_link_at != 2);
   if (auto& desti_srnode = mdr->more()->desti_srnode) {
     delete desti_srnode;
@@ -5794,7 +5794,7 @@ void Server::_link_remote_finish(MDRequestRef& mdr, bool inc,
           << (inc ? "link ":"unlink ")
           << *dn << " to " << *targeti << dendl;
-  assert(g_conf()->mds_kill_link_at != 3);
+  ceph_assert(g_conf()->mds_kill_link_at != 3);
   if (!mdr->more()->witnessed.empty())
     mdcache->logged_master_update(mdr->reqid);
@@ -5842,7 +5842,7 @@ public:
   C_MDS_SlaveLinkPrep(Server *s, MDRequestRef& r, CInode *t, bool ar) :
     ServerLogContext(s, r), targeti(t), adjust_realm(ar) { }
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     server->_logged_slave_link(mdr, targeti, adjust_realm);
   }
 };
@@ -5865,21 +5865,21 @@ void Server::handle_slave_link_prep(MDRequestRef& mdr)
           << " on " << mdr->slave_request->get_object_info()
           << dendl;
-  assert(g_conf()->mds_kill_link_at != 4);
+  ceph_assert(g_conf()->mds_kill_link_at != 4);
   CInode *targeti = mdcache->get_inode(mdr->slave_request->get_object_info().ino);
-  assert(targeti);
+  ceph_assert(targeti);
   dout(10) << "targeti " << *targeti << dendl;
   CDentry *dn = targeti->get_parent_dn();
   CDentry::linkage_t *dnl = dn->get_linkage();
-  assert(dnl->is_primary());
+  ceph_assert(dnl->is_primary());
   mdr->set_op_stamp(mdr->slave_request->op_stamp);
   mdr->auth_pin(targeti);
   //ceph_abort();  // test hack: make sure master can handle a slave that fails to prepare...
-  assert(g_conf()->mds_kill_link_at != 5);
+  ceph_assert(g_conf()->mds_kill_link_at != 5);
   // journal it
   mdr->ls = mdlog->get_current_segment();
@@ -5907,18 +5907,18 @@ void Server::handle_slave_link_prep(MDRequestRef& mdr)
     inc = false;
     pi.inode.nlink--;
     if (targeti->is_projected_snaprealm_global()) {
-      assert(mdr->slave_request->desti_snapbl.length());
+      ceph_assert(mdr->slave_request->desti_snapbl.length());
       auto p = mdr->slave_request->desti_snapbl.cbegin();
       sr_t *newsnap = targeti->project_snaprealm();
       decode(*newsnap, p);
       if (pi.inode.nlink == 0)
-        assert(!newsnap->is_parent_global());
+        ceph_assert(!newsnap->is_parent_global());
       realm_projected = true;
     } else {
-      assert(mdr->slave_request->desti_snapbl.length() == 0);
+      ceph_assert(mdr->slave_request->desti_snapbl.length() == 0);
     }
   }
@@ -5964,7 +5964,7 @@ void Server::_logged_slave_link(MDRequestRef& mdr, CInode *targeti, bool adjust_
   dout(10) << "_logged_slave_link " << *mdr
           << " " << *targeti << dendl;
-  assert(g_conf()->mds_kill_link_at != 6);
+  ceph_assert(g_conf()->mds_kill_link_at != 6);
   // update the target
   targeti->pop_and_dirty_projected_inode(mdr->ls);
@@ -6006,7 +6006,7 @@ void Server::_commit_slave_link(MDRequestRef& mdr, int r, CInode *targeti)
           << " r=" << r
           << " " << *targeti << dendl;
-  assert(g_conf()->mds_kill_link_at != 7);
+  ceph_assert(g_conf()->mds_kill_link_at != 7);
   if (r == 0) {
     // drop our pins, etc.
@@ -6027,7 +6027,7 @@ void Server::_committed_slave(MDRequestRef& mdr)
 {
   dout(10) << "_committed_slave " << *mdr << dendl;
-  assert(g_conf()->mds_kill_link_at != 8);
+  ceph_assert(g_conf()->mds_kill_link_at != 8);
   auto req = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_COMMITTED);
   mds->send_message_mds(req, mdr->slave_to_mds);
@@ -6057,18 +6057,18 @@ void Server::do_link_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef&
           << " ino " << rollback.ino
           << dendl;
-  assert(g_conf()->mds_kill_link_at != 9);
+  ceph_assert(g_conf()->mds_kill_link_at != 9);
   mdcache->add_rollback(rollback.reqid, master); // need to finish this update before resolve finishes
-  assert(mdr || mds->is_resolve());
+  ceph_assert(mdr || mds->is_resolve());
   MutationRef mut(new MutationImpl(nullptr, utime_t(), rollback.reqid));
   mut->ls = mds->mdlog->get_current_segment();
   CInode *in = mdcache->get_inode(rollback.ino);
-  assert(in);
+  ceph_assert(in);
   dout(10) << " target is " << *in << dendl;
-  assert(!in->is_projected());  // live slave request hold versionlock xlock.
+  ceph_assert(!in->is_projected());  // live slave request hold versionlock xlock.
   auto &pi = in->project_inode();
   pi.inode.version = in->pre_dirty();
@@ -6133,7 +6133,7 @@ void Server::_link_rollback_finish(MutationRef& mut, MDRequestRef& mdr,
 {
   dout(10) << "_link_rollback_finish" << dendl;
-  assert(g_conf()->mds_kill_link_at != 10);
+  ceph_assert(g_conf()->mds_kill_link_at != 10);
   mut->apply();
@@ -6155,22 +6155,22 @@ void Server::handle_slave_link_prep_ack(MDRequestRef& mdr, const MMDSSlaveReques
           << " " << *m << dendl;
   mds_rank_t from = mds_rank_t(m->get_source().num());
-  assert(g_conf()->mds_kill_link_at != 11);
+  ceph_assert(g_conf()->mds_kill_link_at != 11);
   // note slave
   mdr->more()->slaves.insert(from);
   // witnessed!
-  assert(mdr->more()->witnessed.count(from) == 0);
+  ceph_assert(mdr->more()->witnessed.count(from) == 0);
   mdr->more()->witnessed.insert(from);
-  assert(!m->is_not_journaled());
+  ceph_assert(!m->is_not_journaled());
   mdr->more()->has_journaled_slaves = true;
   // remove from waiting list
-  assert(mdr->more()->waiting_on_slave.count(from));
+  ceph_assert(mdr->more()->waiting_on_slave.count(from));
   mdr->more()->waiting_on_slave.erase(from);
-  assert(mdr->more()->waiting_on_slave.empty());
+  ceph_assert(mdr->more()->waiting_on_slave.empty());
   dispatch_client_request(mdr);  // go again!
 }
@@ -6221,7 +6221,7 @@ void Server::handle_client_unlink(MDRequestRef& mdr)
   }
   CDentry *dn = trace.back();
-  assert(dn);
+  ceph_assert(dn);
   if (!dn->is_auth()) {
     mdcache->request_forward(mdr, dn->authority().first);
     return;
@@ -6230,7 +6230,7 @@ void Server::handle_client_unlink(MDRequestRef& mdr)
   CInode *diri = dn->get_dir()->get_inode();
   CDentry::linkage_t *dnl = dn->get_linkage(client, mdr);
-  assert(!dnl->is_null());
+  ceph_assert(!dnl->is_null());
   if (rmdir) {
     dout(7) << "handle_client_rmdir on " << *dn << dendl;
@@ -6373,7 +6373,7 @@ public:
     ServerLogContext(s, r), dn(d), straydn(sd),
     dnpv(d->get_projected_version()) {}
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     server->_unlink_local_finish(mdr, dn, straydn, dnpv);
   }
 };
@@ -6401,7 +6401,7 @@ void Server::_unlink_local(MDRequestRef& mdr, CDentry *dn, CDentry *straydn)
   }
   if (straydn) {
-    assert(dnl->is_primary());
+    ceph_assert(dnl->is_primary());
     straydn->push_projected_linkage(in);
   }
@@ -6455,12 +6455,12 @@ void Server::_unlink_local(MDRequestRef& mdr, CDentry *dn, CDentry *straydn)
   dn->push_projected_linkage();
   if (straydn) {
-    assert(in->first <= straydn->first);
+    ceph_assert(in->first <= straydn->first);
     in->first = straydn->first;
   }
   if (in->is_dir()) {
-    assert(straydn);
+    ceph_assert(straydn);
     mdcache->project_subtree_rename(in, dn->get_dir(), straydn->get_dir());
     in->maybe_export_pin(true);
@@ -6556,7 +6556,7 @@ bool Server::_rmdir_prepare_witness(MDRequestRef& mdr, mds_rank_t who, vector<CDentry*>& trace, CDentry *straydn)
   req->op_stamp = mdr->get_op_stamp();
   mds->send_message_mds(req, who);
-  assert(mdr->more()->waiting_on_slave.count(who) == 0);
+  ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0);
   mdr->more()->waiting_on_slave.insert(who);
   return true;
 }
@@ -6599,12 +6599,12 @@ void Server::handle_slave_rmdir_prep(MDRequestRef& mdr)
                              mdr->slave_to_mds);
     return;
   }
-  assert(r == 0);
+  ceph_assert(r == 0);
   CDentry *dn = trace.back();
   dout(10) << " dn " << *dn << dendl;
   mdr->pin(dn);
-  assert(mdr->straydn);
+  ceph_assert(mdr->straydn);
   CDentry *straydn = mdr->straydn;
   dout(10) << " straydn " << *straydn << dendl;
@@ -6634,7 +6634,7 @@ void Server::handle_slave_rmdir_prep(MDRequestRef& mdr)
   straydn->push_projected_linkage(in);
   dn->push_projected_linkage();
-  assert(straydn->first >= in->first);
+  ceph_assert(straydn->first >= in->first);
   in->first = straydn->first;
   if (!in->has_subtree_root_dirfrag(mds->get_nodeid())) {
@@ -6672,8 +6672,8 @@ void Server::_logged_slave_rmdir(MDRequestRef& mdr, CDentry *dn, CDentry *strayd
   if (mdr->slave_request->desti_snapbl.length()) {
     new_realm = !in->snaprealm;
     in->decode_snap_blob(mdr->slave_request->desti_snapbl);
-    assert(in->snaprealm);
-    assert(in->snaprealm->have_past_parents_open());
+    ceph_assert(in->snaprealm);
+    ceph_assert(in->snaprealm->have_past_parents_open());
   } else {
     new_realm = false;
   }
@@ -6717,7 +6717,7 @@ void Server::handle_slave_rmdir_prep_ack(MDRequestRef& mdr, const MMDSSlaveReque
     mdr->more()->has_journaled_slaves = true;
   // remove from waiting list
-  assert(mdr->more()->waiting_on_slave.count(from));
+  ceph_assert(mdr->more()->waiting_on_slave.count(from));
   mdr->more()->waiting_on_slave.erase(from);
   if (mdr->more()->waiting_on_slave.empty())
@@ -6780,19 +6780,19 @@ void Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef&
   dout(10) << "do_rmdir_rollback on " << rollback.reqid << dendl;
   mdcache->add_rollback(rollback.reqid, master); // need to finish this update before resolve finishes
-  assert(mdr || mds->is_resolve());
+  ceph_assert(mdr || mds->is_resolve());
   CDir *dir = mdcache->get_dirfrag(rollback.src_dir);
   if (!dir)
     dir = mdcache->get_dirfrag(rollback.src_dir.ino, rollback.src_dname);
-  assert(dir);
+  ceph_assert(dir);
   CDentry *dn = dir->lookup(rollback.src_dname);
-  assert(dn);
+  ceph_assert(dn);
   dout(10) << " dn " << *dn << dendl;
   CDir *straydir = mdcache->get_dirfrag(rollback.dest_dir);
-  assert(straydir);
+  ceph_assert(straydir);
   CDentry *straydn = straydir->lookup(rollback.dest_dname);
-  assert(straydn);
+  ceph_assert(straydn);
   dout(10) << " straydn " << *straydn << dendl;
   CInode *in = straydn->get_linkage()->get_inode();
@@ -6811,7 +6811,7 @@ void Server::do_rmdir_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef&
   }
   if (mdr && !mdr->more()->slave_update_journaled) {
-    assert(!in->has_subtree_root_dirfrag(mds->get_nodeid()));
+    ceph_assert(!in->has_subtree_root_dirfrag(mds->get_nodeid()));
     _rmdir_rollback_finish(mdr, rollback.reqid, dn, straydn);
     return;
@@ -6872,7 +6872,7 @@ void Server::_rmdir_rollback_finish(MDRequestRef& mdr, metareqid_t reqid, CDentr
 bool Server::_dir_is_nonempty_unlocked(MDRequestRef& mdr, CInode *in)
 {
   dout(10) << "dir_is_nonempty_unlocked " << *in << dendl;
-  assert(in->is_auth());
+  ceph_assert(in->is_auth());
   if (in->snaprealm && in->snaprealm->srnode.snaps.size())
     return true; // in a snapshot!
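The mds_kill_link_at / mds_kill_rename_at checks running through the link, rmdir and rename paths are fault-injection points rather than invariants: in production the knob is 0 and every ceph_assert(g_conf()->mds_kill_*_at != N) passes, while a test sets it to N to crash the daemon at exactly that step and exercise recovery. A minimal sketch of the pattern, with getenv standing in for g_conf():

    #include <cassert>
    #include <cstdlib>

    static int kill_at(const char *knob) {
      const char *v = std::getenv(knob);  // stand-in for a config option
      return v ? std::atoi(v) : 0;        // 0 = never kill (default)
    }

    void two_phase_update() {
      assert(kill_at("MDS_KILL_LINK_AT") != 1);  // step 1: journaled locally
      // ... send prepare to the peer ...
      assert(kill_at("MDS_KILL_LINK_AT") != 2);  // step 2: peer acked
      // a test sets MDS_KILL_LINK_AT=1 or =2 to verify crash recovery
    }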
@@ -6897,8 +6897,8 @@ bool Server::_dir_is_nonempty_unlocked(MDRequestRef& mdr, CInode *in)
 bool Server::_dir_is_nonempty(MDRequestRef& mdr, CInode *in)
 {
   dout(10) << "dir_is_nonempty " << *in << dendl;
-  assert(in->is_auth());
-  assert(in->filelock.can_read(mdr->get_client()));
+  ceph_assert(in->is_auth());
+  ceph_assert(in->filelock.can_read(mdr->get_client()));
   frag_info_t dirstat;
   version_t dirstat_version = in->get_projected_inode()->dirstat.version;
@@ -6937,7 +6937,7 @@ public:
     ServerLogContext(s, r),
     srcdn(sdn), destdn(ddn), straydn(stdn) { }
   void finish(int r) override {
-    assert(r == 0);
+    ceph_assert(r == 0);
     server->_rename_finish(mdr, srcdn, destdn, straydn);
   }
 };
@@ -6989,7 +6989,7 @@ void Server::handle_client_rename(MDRequestRef& mdr)
   }
   CDentry::linkage_t *destdnl = destdn->get_projected_linkage();
   CDir *destdir = destdn->get_dir();
-  assert(destdir->is_auth());
+  ceph_assert(destdir->is_auth());
   CF_MDS_MDRContextFactory cf(mdcache, mdr);
   int r = mdcache->path_traverse(mdr, cf, srcpath, &srctrace, NULL, MDS_TRAVERSE_DISCOVER);
@@ -7006,7 +7006,7 @@ void Server::handle_client_rename(MDRequestRef& mdr)
     return;
   }
-  assert(!srctrace.empty());
+  ceph_assert(!srctrace.empty());
   CDentry *srcdn = srctrace.back();
   dout(10) << " srcdn " << *srcdn << dendl;
   if (srcdn->last != CEPH_NOSNAP) {
@@ -7215,7 +7215,7 @@ void Server::handle_client_rename(MDRequestRef& mdr)
     return;
   if (linkmerge)
-    assert(srcdir->inode->is_stray() && srcdnl->is_primary() && destdnl->is_remote());
+    ceph_assert(srcdir->inode->is_stray() && srcdnl->is_primary() && destdnl->is_remote());
   if ((!mdr->has_more() || mdr->more()->witnessed.empty())) {
     if (!check_access(mdr, srcdir->get_inode(), MAY_WRITE))
@@ -7255,7 +7255,7 @@ void Server::handle_client_rename(MDRequestRef& mdr)
   }
   */
-  assert(g_conf()->mds_kill_rename_at != 1);
+  ceph_assert(g_conf()->mds_kill_rename_at != 1);
   // -- open all srcdn inode frags, if any --
   // we need these open so that auth can properly delegate from inode to dirfrags
@@ -7345,8 +7345,8 @@ void Server::handle_client_rename(MDRequestRef& mdr)
   // are involved in the rename operation.
   if (srcdnl->is_primary() && !mdr->more()->is_ambiguous_auth) {
     dout(10) << " preparing ambiguous auth for srci" << dendl;
-    assert(mdr->more()->is_remote_frozen_authpin);
-    assert(mdr->more()->rename_inode == srci);
+    ceph_assert(mdr->more()->is_remote_frozen_authpin);
+    ceph_assert(mdr->more()->rename_inode == srci);
     _rename_prepare_witness(mdr, last, witnesses, srctrace, desttrace, straydn);
     return;
   }
@@ -7370,16 +7370,16 @@ void Server::handle_client_rename(MDRequestRef& mdr)
   if (last != MDS_RANK_NONE && mdr->more()->witnessed.count(last) == 0) {
     dout(10) << " preparing last witness (srcdn auth)" << dendl;
-    assert(mdr->more()->waiting_on_slave.count(last) == 0);
+    ceph_assert(mdr->more()->waiting_on_slave.count(last) == 0);
     _rename_prepare_witness(mdr, last, witnesses, srctrace, desttrace, straydn);
     return;
   }
   // test hack: bail after slave does prepare, so we can verify it's _live_ rollback.
if (!mdr->more()->slaves.empty() && !srci->is_dir()) - assert(g_conf()->mds_kill_rename_at != 3); + ceph_assert(g_conf()->mds_kill_rename_at != 3); if (!mdr->more()->slaves.empty() && srci->is_dir()) - assert(g_conf()->mds_kill_rename_at != 4); + ceph_assert(g_conf()->mds_kill_rename_at != 4); // -- declare now -- mdr->set_mds_stamp(ceph_clock_now()); @@ -7429,9 +7429,9 @@ void Server::_rename_finish(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, // test hack: test slave commit if (!mdr->more()->slaves.empty() && !in->is_dir()) - assert(g_conf()->mds_kill_rename_at != 5); + ceph_assert(g_conf()->mds_kill_rename_at != 5); if (!mdr->more()->slaves.empty() && in->is_dir()) - assert(g_conf()->mds_kill_rename_at != 6); + ceph_assert(g_conf()->mds_kill_rename_at != 6); // bump popularity mds->balancer->hit_dir(srcdn->get_dir(), META_POP_IWR); @@ -7440,7 +7440,7 @@ void Server::_rename_finish(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, // did we import srci? if so, explicitly ack that import that, before we unlock and reply. - assert(g_conf()->mds_kill_rename_at != 7); + ceph_assert(g_conf()->mds_kill_rename_at != 7); // reply respond_to_request(mdr, 0); @@ -7495,7 +7495,7 @@ bool Server::_rename_prepare_witness(MDRequestRef& mdr, mds_rank_t who, setop_stamp = mdr->get_op_stamp(); mds->send_message_mds(req, who); - assert(mdr->more()->waiting_on_slave.count(who) == 0); + ceph_assert(mdr->more()->waiting_on_slave.count(who) == 0); mdr->more()->waiting_on_slave.insert(who); return true; } @@ -7587,7 +7587,7 @@ void Server::_rename_prepare(MDRequestRef& mdr, // primary+remote link merge? bool linkmerge = (srci == oldin); if (linkmerge) - assert(srcdnl->is_primary() && destdnl->is_remote()); + ceph_assert(srcdnl->is_primary() && destdnl->is_remote()); bool silent = srcdn->get_dir()->inode->is_stray(); bool force_journal_dest = false; @@ -7629,7 +7629,7 @@ void Server::_rename_prepare(MDRequestRef& mdr, // target inode if (!linkmerge) { if (destdnl->is_primary()) { - assert(straydn); // moving to straydn. + ceph_assert(straydn); // moving to straydn. // link--, and move. if (destdn->is_auth()) { auto &pi= oldin->project_inode(); //project_snaprealm @@ -7731,7 +7731,7 @@ void Server::_rename_prepare(MDRequestRef& mdr, // guarantee stray dir is processed first during journal replay. unlink the old inode, // then link the source inode to destdn if (destdnl->is_primary()) { - assert(straydn); + ceph_assert(straydn); if (straydn->is_auth()) { metablob->add_dir_context(straydn->get_dir()); metablob->add_dir(straydn->get_dir(), true); @@ -7743,7 +7743,7 @@ void Server::_rename_prepare(MDRequestRef& mdr, mdcache->predirty_journal_parents(mdr, metablob, oldin, destdn->get_dir(), (destdnl->is_primary() ? 
PREDIRTY_PRIMARY:0)|predirty_dir, -1); if (destdnl->is_primary()) { - assert(straydn); + ceph_assert(straydn); mdcache->predirty_journal_parents(mdr, metablob, oldin, straydn->get_dir(), PREDIRTY_PRIMARY|PREDIRTY_DIR, 1); } @@ -7761,13 +7761,13 @@ void Server::_rename_prepare(MDRequestRef& mdr, // target inode if (!linkmerge) { if (destdnl->is_primary()) { - assert(straydn); + ceph_assert(straydn); if (destdn->is_auth()) { // project snaprealm, too if (auto& desti_srnode = mdr->more()->desti_srnode) { oldin->project_snaprealm(desti_srnode); if (tpi->nlink == 0) - assert(!desti_srnode->is_parent_global()); + ceph_assert(!desti_srnode->is_parent_global()); desti_srnode = NULL; } straydn->first = mdcache->get_global_snaprealm()->get_newest_seq() + 1; @@ -7793,7 +7793,7 @@ void Server::_rename_prepare(MDRequestRef& mdr, if (new_srnode) { oldin->project_snaprealm(new_srnode); if (tpi->nlink == 0) - assert(!new_srnode->is_parent_global()); + ceph_assert(!new_srnode->is_parent_global()); } // auth for targeti metablob->add_dir_context(oldin->get_projected_parent_dir()); @@ -7806,7 +7806,7 @@ void Server::_rename_prepare(MDRequestRef& mdr, // dest if (srcdnl->is_remote()) { - assert(!linkmerge); + ceph_assert(!linkmerge); if (destdn->is_auth() && !destdnl->is_null()) mdcache->journal_cow_dentry(mdr.get(), metablob, destdn, CEPH_NOSNAP, 0, destdnl); else @@ -7881,17 +7881,17 @@ void Server::_rename_prepare(MDRequestRef& mdr, // make renamed inode first track the dn if (srcdnl->is_primary() && destdn->is_auth()) { - assert(srci->first <= destdn->first); + ceph_assert(srci->first <= destdn->first); srci->first = destdn->first; } // make stray inode first track the straydn if (straydn && straydn->is_auth()) { - assert(oldin->first <= straydn->first); + ceph_assert(oldin->first <= straydn->first); oldin->first = straydn->first; } if (oldin && oldin->is_dir()) { - assert(straydn); + ceph_assert(straydn); mdcache->project_subtree_rename(oldin, destdn->get_dir(), straydn->get_dir()); } if (srci->is_dir()) @@ -7913,7 +7913,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C // primary+remote link merge? 
bool linkmerge = (srcdnl->get_inode() == oldin); if (linkmerge) - assert(srcdnl->is_primary() || destdnl->is_remote()); + ceph_assert(srcdnl->is_primary() || destdnl->is_remote()); bool new_in_snaprealm = false; bool new_oldin_snaprealm = false; @@ -7921,7 +7921,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C // target inode if (!linkmerge) { if (destdnl->is_primary()) { - assert(straydn); + ceph_assert(straydn); dout(10) << "straydn is " << *straydn << dendl; // if there is newly created snaprealm, need to split old snaprealm's @@ -7931,12 +7931,12 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C oldin->early_pop_projected_snaprealm(); new_oldin_snaprealm = (oldin->snaprealm && !hadrealm); } else { - assert(mdr->slave_request); + ceph_assert(mdr->slave_request); if (mdr->slave_request->desti_snapbl.length()) { new_oldin_snaprealm = !oldin->snaprealm; oldin->decode_snap_blob(mdr->slave_request->desti_snapbl); - assert(oldin->snaprealm); - assert(oldin->snaprealm->have_past_parents_open()); + ceph_assert(oldin->snaprealm); + ceph_assert(oldin->snaprealm->have_past_parents_open()); } } @@ -7944,7 +7944,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C straydn->pop_projected_linkage(); if (mdr->is_slave() && !mdr->more()->slave_update_journaled) - assert(!straydn->is_projected()); // no other projected + ceph_assert(!straydn->is_projected()); // no other projected // nlink-- targeti if (destdn->is_auth()) @@ -7957,7 +7957,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C oldin->pop_and_dirty_projected_inode(mdr->ls); } else if (mdr->slave_request) { if (mdr->slave_request->desti_snapbl.length() > 0) { - assert(oldin->snaprealm); + ceph_assert(oldin->snaprealm); oldin->decode_snap_blob(mdr->slave_request->desti_snapbl); } } else if (auto& desti_srnode = mdr->more()->desti_srnode) { @@ -7969,7 +7969,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C // unlink src before we relink it at dest CInode *in = srcdnl->get_inode(); - assert(in); + ceph_assert(in); bool srcdn_was_remote = srcdnl->is_remote(); if (!srcdn_was_remote) { @@ -7980,12 +7980,12 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C in->early_pop_projected_snaprealm(); new_in_snaprealm = (in->snaprealm && !hadrealm); } else { - assert(mdr->slave_request); + ceph_assert(mdr->slave_request); if (mdr->slave_request->srci_snapbl.length()) { new_in_snaprealm = !in->snaprealm; in->decode_snap_blob(mdr->slave_request->srci_snapbl); - assert(in->snaprealm); - assert(in->snaprealm->have_past_parents_open()); + ceph_assert(in->snaprealm); + ceph_assert(in->snaprealm->have_past_parents_open()); } } } @@ -7998,7 +7998,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C // destdn destdnl = destdn->pop_projected_linkage(); if (mdr->is_slave() && !mdr->more()->slave_update_journaled) - assert(!destdn->is_projected()); // no other projected + ceph_assert(!destdn->is_projected()); // no other projected destdn->link_remote(destdnl, in); if (destdn->is_auth()) @@ -8008,7 +8008,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C in->pop_and_dirty_projected_inode(mdr->ls); } else if (mdr->slave_request) { if (mdr->slave_request->srci_snapbl.length() > 0) { - assert(in->snaprealm); + ceph_assert(in->snaprealm); in->decode_snap_blob(mdr->slave_request->srci_snapbl); } } else if 
(auto& srci_srnode = mdr->more()->srci_srnode) {
@@ -8026,11 +8026,11 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
     }
     destdnl = destdn->pop_projected_linkage();
     if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
-      assert(!destdn->is_projected()); // no other projected
+      ceph_assert(!destdn->is_projected()); // no other projected
 
     // srcdn inode import?
     if (!srcdn->is_auth() && destdn->is_auth()) {
-      assert(mdr->more()->inode_import.length() > 0);
+      ceph_assert(mdr->more()->inode_import.length() > 0);
 
       map<client_t,Capability::Export> imported_caps;
 
@@ -8072,7 +8072,7 @@ void Server::_rename_apply(MDRequestRef& mdr, CDentry *srcdn, CDentry *destdn, C
       srcdn->mark_dirty(mdr->more()->pvmap[srcdn], mdr->ls);
     srcdn->pop_projected_linkage();
     if (mdr->is_slave() && !mdr->more()->slave_update_journaled)
-      assert(!srcdn->is_projected()); // no other projected
+      ceph_assert(!srcdn->is_projected()); // no other projected
 
   // apply remaining projected inodes (nested)
   mdr->apply();
@@ -8163,7 +8163,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
                                mdr->slave_to_mds);
     return;
   }
-  assert(r == 0);  // we shouldn't get an error here!
+  ceph_assert(r == 0);  // we shouldn't get an error here!
 
   CDentry *destdn = trace.back();
   CDentry::linkage_t *destdnl = destdn->get_projected_linkage();
@@ -8176,10 +8176,10 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
   CInode *srci = nullptr;
   r = mdcache->path_traverse(mdr, cf, srcpath, &trace, &srci, MDS_TRAVERSE_DISCOVERXLOCK);
   if (r > 0) return;
-  assert(r == 0);
+  ceph_assert(r == 0);
 
   // srcpath must not point to a null dentry
-  assert(srci != nullptr);
+  ceph_assert(srci != nullptr);
   CDentry *srcdn = trace.back();
   CDentry::linkage_t *srcdnl = srcdn->get_projected_linkage();
@@ -8190,10 +8190,10 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
   // stray?
   bool linkmerge = srcdnl->get_inode() == destdnl->get_inode();
   if (linkmerge)
-    assert(srcdnl->is_primary() && destdnl->is_remote());
+    ceph_assert(srcdnl->is_primary() && destdnl->is_remote());
   CDentry *straydn = mdr->straydn;
   if (destdnl->is_primary() && !linkmerge)
-    assert(straydn);
+    ceph_assert(straydn);
 
   mdr->set_op_stamp(mdr->slave_request->op_stamp);
   mdr->more()->srcdn_auth_mds = srcdn->authority().first;
@@ -8276,7 +8276,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
     }
 
     if (reply_witness) {
-      assert(!srcdnrep.empty());
+      ceph_assert(!srcdnrep.empty());
       auto reply = MMDSSlaveRequest::create(mdr->reqid, mdr->attempt, MMDSSlaveRequest::OP_RENAMEPREPACK);
       reply->witnesses.swap(srcdnrep);
       mds->send_message_mds(reply, mdr->slave_to_mds);
@@ -8306,7 +8306,7 @@ void Server::handle_slave_rename_prep(MDRequestRef& mdr)
     if (srcdnl->is_primary())
       rollback.orig_src.ino = srcdnl->get_inode()->ino();
     else {
-      assert(srcdnl->is_remote());
+      ceph_assert(srcdnl->is_remote());
       rollback.orig_src.remote_ino = srcdnl->get_remote_ino();
       rollback.orig_src.remote_d_type = srcdnl->get_remote_d_type();
     }
@@ -8442,7 +8442,7 @@ void Server::_logged_slave_rename(MDRequestRef& mdr,
   if (reply) {
     mds->send_message_mds(reply, mdr->slave_to_mds);
   } else {
-    assert(mdr->aborted);
+    ceph_assert(mdr->aborted);
     dout(10) << " abort flag set, finishing" << dendl;
     mdcache->request_finish(mdr);
   }
@@ -8485,7 +8485,7 @@ void Server::_commit_slave_rename(MDRequestRef& mdr, int r,
   mds->queue_waiters(finished);  // this includes SINGLEAUTH waiters.
// unfreeze - assert(in->is_frozen_inode()); + ceph_assert(in->is_frozen_inode()); in->unfreeze_inode(finished); } @@ -8624,7 +8624,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef srcdn = srcdir->lookup(rollback.orig_src.dname); if (srcdn) { dout(10) << " srcdn " << *srcdn << dendl; - assert(srcdn->get_linkage()->is_null()); + ceph_assert(srcdn->get_linkage()->is_null()); } else dout(10) << " srcdn not found" << dendl; } else @@ -8648,7 +8648,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef if (rollback.orig_src.ino) { in = mdcache->get_inode(rollback.orig_src.ino); if (in && in->is_dir()) - assert(srcdn && destdn); + ceph_assert(srcdn && destdn); } else in = mdcache->get_inode(rollback.orig_src.remote_ino); @@ -8661,7 +8661,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef straydn = straydir->lookup(rollback.stray.dname); if (straydn) { dout(10) << " straydn " << *straydn << dendl; - assert(straydn->get_linkage()->is_primary()); + ceph_assert(straydn->get_linkage()->is_primary()); } else dout(10) << " straydn not found" << dendl; } else @@ -8672,15 +8672,15 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef if (rollback.orig_dest.ino) { target = mdcache->get_inode(rollback.orig_dest.ino); if (target) - assert(destdn && straydn); + ceph_assert(destdn && straydn); } else if (rollback.orig_dest.remote_ino) target = mdcache->get_inode(rollback.orig_dest.remote_ino); // can't use is_auth() in the resolve stage mds_rank_t whoami = mds->get_nodeid(); // slave - assert(!destdn || destdn->authority().first != whoami); - assert(!straydn || straydn->authority().first != whoami); + ceph_assert(!destdn || destdn->authority().first != whoami); + ceph_assert(!straydn || straydn->authority().first != whoami); bool force_journal_src = false; bool force_journal_dest = false; @@ -8695,7 +8695,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef if (srcdn->authority().first == whoami) srcdnpv = srcdn->pre_dirty(); if (rollback.orig_src.ino) { - assert(in); + ceph_assert(in); srcdn->push_projected_linkage(in); } else srcdn->push_projected_linkage(rollback.orig_src.remote_ino, @@ -8734,7 +8734,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef } else { SnapRealm *realm; if (rollback.orig_src.ino) { - assert(srcdir); + ceph_assert(srcdir); realm = srcdir->get_inode()->find_snaprealm(); } else { realm = in->snaprealm->parent; @@ -8765,7 +8765,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef } else { // the dentry will be trimmed soon, it's ok to have wrong linkage if (rollback.orig_dest.ino) - assert(mds->is_resolve()); + ceph_assert(mds->is_resolve()); destdn->push_projected_linkage(); } } @@ -8790,9 +8790,9 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef ti->ctime = ti->rstat.rctime = rollback.orig_dest.old_ctime; if (MDS_INO_IS_STRAY(rollback.orig_src.dirfrag.ino)) { if (MDS_INO_IS_STRAY(rollback.orig_dest.dirfrag.ino)) - assert(!rollback.orig_dest.ino && !rollback.orig_dest.remote_ino); + ceph_assert(!rollback.orig_dest.ino && !rollback.orig_dest.remote_ino); else - assert(rollback.orig_dest.remote_ino && + ceph_assert(rollback.orig_dest.remote_ino && rollback.orig_dest.remote_ino == rollback.orig_src.ino); } else ti->nlink++; @@ -8811,7 +8811,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef 
} else { SnapRealm *realm; if (rollback.orig_dest.ino) { - assert(destdir); + ceph_assert(destdir); realm = destdir->get_inode()->find_snaprealm(); } else { realm = target->snaprealm->parent; @@ -8855,7 +8855,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef } if (force_journal_dest) { - assert(rollback.orig_dest.ino); + ceph_assert(rollback.orig_dest.ino); le->commit.add_dir_context(destdir); le->commit.add_primary_dentry(destdn, 0, true); } @@ -8863,7 +8863,7 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef // slave: no need to journal straydn if (target && target != in && target->authority().first == whoami) { - assert(rollback.orig_dest.remote_ino); + ceph_assert(rollback.orig_dest.remote_ino); le->commit.add_dir_context(target->get_projected_parent_dir()); le->commit.add_primary_dentry(target->get_projected_parent_dn(), target, true); } @@ -8887,22 +8887,22 @@ void Server::do_rename_rollback(bufferlist &rbl, mds_rank_t master, MDRequestRef } if (target && target->is_dir()) { - assert(destdn); + ceph_assert(destdn); mdcache->project_subtree_rename(target, straydir, destdir); } if (in && in->is_dir()) { - assert(srcdn); + ceph_assert(srcdn); mdcache->project_subtree_rename(in, destdir, srcdir); } if (mdr && !mdr->more()->slave_update_journaled) { - assert(le->commit.empty()); + ceph_assert(le->commit.empty()); mdlog->cancel_entry(le); mut->ls = NULL; _rename_rollback_finish(mut, mdr, srcdn, srcdnpv, destdn, straydn, splits, finish_mdr); } else { - assert(!le->commit.empty()); + ceph_assert(!le->commit.empty()); if (mdr) mdr->more()->slave_update_journaled = false; MDSLogContextBase *fin = new C_MDS_LoggedRenameRollback(this, mut, mdr, @@ -8941,7 +8941,7 @@ void Server::_rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentr if (srcdn && srcdn->get_linkage()->is_primary()) { CInode *in = srcdn->get_linkage()->get_inode(); if (in && in->is_dir()) { - assert(destdn); + ceph_assert(destdn); mdcache->adjust_subtree_after_rename(in, destdn->get_dir(), true); } } @@ -8950,7 +8950,7 @@ void Server::_rename_rollback_finish(MutationRef& mut, MDRequestRef& mdr, CDentr CInode *oldin = destdn->get_linkage()->get_inode(); // update subtree map? if (oldin && oldin->is_dir()) { - assert(straydn); + ceph_assert(straydn); mdcache->adjust_subtree_after_rename(oldin, straydn->get_dir(), true); } } @@ -9005,7 +9005,7 @@ void Server::handle_slave_rename_prep_ack(MDRequestRef& mdr, const MMDSSlaveRequ } // witnessed? or add extra witnesses? - assert(mdr->more()->witnessed.count(from) == 0); + ceph_assert(mdr->more()->witnessed.count(from) == 0); if (ack->is_interrupted()) { dout(10) << " slave request interrupted, noop" << dendl; } else if (ack->witnesses.empty()) { @@ -9026,7 +9026,7 @@ void Server::handle_slave_rename_prep_ack(MDRequestRef& mdr, const MMDSSlaveRequ } // remove from waiting list - assert(mdr->more()->waiting_on_slave.count(from)); + ceph_assert(mdr->more()->waiting_on_slave.count(from)); mdr->more()->waiting_on_slave.erase(from); if (mdr->more()->waiting_on_slave.empty()) @@ -9039,7 +9039,7 @@ void Server::handle_slave_rename_notify_ack(MDRequestRef& mdr, const MMDSSlaveRe { dout(10) << "handle_slave_rename_notify_ack " << *mdr << " from mds." 
<< ack->get_source() << dendl; - assert(mdr->is_slave()); + ceph_assert(mdr->is_slave()); mds_rank_t from = mds_rank_t(ack->get_source().num()); if (mdr->more()->waiting_on_slave.count(from)) { @@ -9274,7 +9274,7 @@ void Server::handle_client_mksnap(MDRequestRef& mdr) decode(snapid, p); dout(10) << " stid " << stid << " snapid " << snapid << dendl; - assert(mds->snapclient->get_cached_version() >= stid); + ceph_assert(mds->snapclient->get_cached_version() >= stid); // journal SnapInfo info; @@ -9417,7 +9417,7 @@ void Server::handle_client_rmsnap(MDRequestRef& mdr) decode(seq, p); dout(10) << " stid is " << stid << ", seq is " << seq << dendl; - assert(mds->snapclient->get_cached_version() >= stid); + ceph_assert(mds->snapclient->get_cached_version() >= stid); // journal auto &pi = diri->project_inode(false, true); @@ -9563,7 +9563,7 @@ void Server::handle_client_renamesnap(MDRequestRef& mdr) version_t stid = mdr->more()->stid; dout(10) << " stid is " << stid << dendl; - assert(mds->snapclient->get_cached_version() >= stid); + ceph_assert(mds->snapclient->get_cached_version() >= stid); // journal auto &pi = diri->project_inode(false, true); @@ -9573,7 +9573,7 @@ void Server::handle_client_renamesnap(MDRequestRef& mdr) // project the snaprealm auto &newsnap = *pi.snapnode; auto it = newsnap.snaps.find(snapid); - assert(it != newsnap.snaps.end()); + ceph_assert(it != newsnap.snaps.end()); it->second.name = dstname; // journal the inode changes diff --git a/src/mds/SessionMap.cc b/src/mds/SessionMap.cc index 65bcc7c05c34b..afe37fe7a6bb5 100644 --- a/src/mds/SessionMap.cc +++ b/src/mds/SessionMap.cc @@ -38,7 +38,7 @@ class SessionMapIOContext : public MDSIOContextBase MDSRank *get_mds() override {return sessionmap->mds;} public: explicit SessionMapIOContext(SessionMap *sessionmap_) : sessionmap(sessionmap_) { - assert(sessionmap != NULL); + ceph_assert(sessionmap != NULL); } }; }; @@ -324,7 +324,7 @@ void SessionMap::_load_legacy_finish(int r, bufferlist &bl) auto blp = bl.cbegin(); if (r < 0) { derr << "_load_finish got " << cpp_strerror(r) << dendl; - assert(0 == "failed to load sessionmap"); + ceph_assert(0 == "failed to load sessionmap"); } dump(); decode_legacy(blp); // note: this sets last_cap_renew = now() @@ -375,7 +375,7 @@ void SessionMap::save(MDSInternalContextBase *onsave, version_t needv) dout(10) << __func__ << ": needv " << needv << ", v " << version << dendl; if (needv && committing >= needv) { - assert(committing > committed); + ceph_assert(committing > committed); commit_waiters[committing].push_back(onsave); return; } @@ -518,7 +518,7 @@ void SessionMapStore::decode_legacy(bufferlist::const_iterator& p) decode(pre, p); if (pre == (uint64_t)-1) { DECODE_START_LEGACY_COMPAT_LEN(3, 3, 3, p); - assert(struct_v >= 2); + ceph_assert(struct_v >= 2); decode(version, p); @@ -619,7 +619,7 @@ void SessionMap::add_session(Session *s) { dout(10) << __func__ << " s=" << s << " name=" << s->info.inst.name << dendl; - assert(session_map.count(s->info.inst.name) == 0); + ceph_assert(session_map.count(s->info.inst.name) == 0); session_map[s->info.inst.name] = s; auto by_state_entry = by_state.find(s->state); if (by_state_entry == by_state.end()) @@ -652,7 +652,7 @@ void SessionMap::touch_session(Session *session) // Move to the back of the session list for this state (should // already be on a list courtesy of add_session and set_state) - assert(session->item_session_list.is_on_list()); + ceph_assert(session->item_session_list.is_on_list()); auto by_state_entry = 
by_state.find(session->state);
   if (by_state_entry == by_state.end())
     by_state_entry = by_state.emplace(session->state,
@@ -736,7 +736,7 @@ public:
 void SessionMap::save_if_dirty(const std::set<entity_name_t> &tgt_sessions,
                                MDSGatherBuilder *gather_bld)
 {
-  assert(gather_bld != NULL);
+  ceph_assert(gather_bld != NULL);
 
   std::vector<entity_name_t> write_sessions;
 
@@ -968,7 +968,7 @@ int Session::check_access(CInode *in, unsigned mask,
 void SessionMap::hit_session(Session *session) {
   uint64_t sessions = get_session_count_in_state(Session::STATE_OPEN) +
                       get_session_count_in_state(Session::STATE_STALE);
-  assert(sessions != 0);
+  ceph_assert(sessions != 0);
 
   double total_load = total_load_avg.hit();
   double avg_load = total_load / sessions;
@@ -1006,7 +1006,7 @@ int SessionFilter::parse(
     const std::vector<std::string> &args,
     std::stringstream *ss)
 {
-  assert(ss != NULL);
+  ceph_assert(ss != NULL);
 
   for (const auto &s : args) {
     dout(20) << __func__ << " parsing filter '" << s << "'" << dendl;
@@ -1052,7 +1052,7 @@ int SessionFilter::parse(
      */
     auto is_true = [](std::string_view bstr, bool *out) -> bool
     {
-      assert(out != nullptr);
+      ceph_assert(out != nullptr);
 
       if (bstr == "true" || bstr == "1") {
         *out = true;
diff --git a/src/mds/SessionMap.h b/src/mds/SessionMap.h
index 4813e66a3f89f..2cbf4836d7e7e 100644
--- a/src/mds/SessionMap.h
+++ b/src/mds/SessionMap.h
@@ -116,14 +116,14 @@ public:
 
   void push_pv(version_t pv)
   {
-    assert(projected.empty() || projected.back() != pv);
+    ceph_assert(projected.empty() || projected.back() != pv);
     projected.push_back(pv);
   }
 
   void pop_pv(version_t v)
   {
-    assert(!projected.empty());
-    assert(projected.front() == v);
+    ceph_assert(!projected.empty());
+    ceph_assert(projected.front() == v);
     projected.pop_front();
   }
 
@@ -172,7 +172,7 @@ public:
     return info.prealloc_inos.range_start();
   }
   inodeno_t take_ino(inodeno_t ino = 0) {
-    assert(!info.prealloc_inos.empty());
+    ceph_assert(!info.prealloc_inos.empty());
 
     if (ino) {
       if (info.prealloc_inos.contains(ino))
@@ -208,13 +208,13 @@ public:
     ++importing_count;
   }
   void dec_importing() {
-    assert(importing_count > 0);
+    ceph_assert(importing_count > 0);
     --importing_count;
   }
   bool is_importing() const { return importing_count > 0; }
 
   void set_load_avg_decay_rate(double rate) {
-    assert(is_open() || is_stale());
+    ceph_assert(is_open() || is_stale());
     load_avg = DecayCounter(rate);
   }
   uint64_t get_load_avg() const {
@@ -365,7 +365,7 @@ public:
     if (state == STATE_CLOSED) {
       item_session_list.remove_myself();
     } else {
-      assert(!item_session_list.is_on_list());
+      ceph_assert(!item_session_list.is_on_list());
     }
     preopen_out_queue.clear();
   }
@@ -620,7 +620,7 @@ public:
 
   // helpers
   entity_inst_t& get_inst(entity_name_t w) {
-    assert(session_map.count(w));
+    ceph_assert(session_map.count(w));
     return session_map[w]->info.inst;
   }
   version_t inc_push_seq(client_t client) {
@@ -635,7 +635,7 @@ public:
   }
   void trim_completed_requests(entity_name_t c, ceph_tid_t tid) {
     Session *session = get_session(c);
-    assert(session);
+    ceph_assert(session);
     session->trim_completed_requests(tid);
   }
 
diff --git a/src/mds/SimpleLock.cc b/src/mds/SimpleLock.cc
index 8167ad1d16e33..c314c3068f57b 100644
--- a/src/mds/SimpleLock.cc
+++ b/src/mds/SimpleLock.cc
@@ -17,7 +17,7 @@
 #include "Mutation.h"
 
 void SimpleLock::dump(Formatter *f) const {
-  assert(f != NULL);
+  ceph_assert(f != NULL);
   if (is_sync_and_unlocked()) {
     return;
   }
diff --git a/src/mds/SimpleLock.h b/src/mds/SimpleLock.h
index c3d8f0a6df2c9..e5ecf1342d654 100644
--- a/src/mds/SimpleLock.h
+++ b/src/mds/SimpleLock.h
@@ -325,7 +325,7 @@ public:
     return s;
   }
void set_state_rejoin(int s, MDSInternalContextBase::vec& waiters, bool survivor) { - assert(!get_parent()->is_auth()); + ceph_assert(!get_parent()->is_auth()); // If lock in the replica object was not in SYNC state when auth mds of the object failed. // Auth mds of the object may take xlock on the lock and change the object when replaying @@ -454,7 +454,7 @@ public: return ++num_rdlock; } int put_rdlock() { - assert(num_rdlock>0); + ceph_assert(num_rdlock>0); --num_rdlock; if (num_rdlock == 0) parent->put(MDSCacheObject::PIN_LOCK); @@ -487,8 +487,8 @@ public: // xlock void get_xlock(MutationRef who, client_t client) { - assert(get_xlock_by() == MutationRef()); - assert(state == LOCK_XLOCK || is_locallock() || + ceph_assert(get_xlock_by() == MutationRef()); + ceph_assert(state == LOCK_XLOCK || is_locallock() || state == LOCK_LOCK /* if we are a slave */); parent->get(MDSCacheObject::PIN_LOCK); more()->num_xlock++; @@ -496,15 +496,15 @@ public: more()->xlock_by_client = client; } void set_xlock_done() { - assert(more()->xlock_by); - assert(state == LOCK_XLOCK || is_locallock() || + ceph_assert(more()->xlock_by); + ceph_assert(state == LOCK_XLOCK || is_locallock() || state == LOCK_LOCK /* if we are a slave */); if (!is_locallock()) state = LOCK_XLOCKDONE; more()->xlock_by.reset(); } void put_xlock() { - assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE || + ceph_assert(state == LOCK_XLOCK || state == LOCK_XLOCKDONE || state == LOCK_XLOCKSNAP || is_locallock() || state == LOCK_LOCK /* if we are a master of a slave */); --more()->num_xlock; @@ -536,11 +536,11 @@ public: return state_flags & LEASED; } void get_client_lease() { - assert(!is_leased()); + ceph_assert(!is_leased()); state_flags |= LEASED; } void put_client_lease() { - assert(is_leased()); + ceph_assert(is_leased()); state_flags &= ~LEASED; } @@ -642,8 +642,8 @@ public: * called on first replica creation. 
   */
  void replicate_relax() {
-    assert(parent->is_auth());
-    assert(!parent->is_replicated());
+    ceph_assert(parent->is_auth());
+    ceph_assert(!parent->is_replicated());
    if (state == LOCK_LOCK && !is_used())
      state = LOCK_SYNC;
  }
diff --git a/src/mds/SnapClient.cc b/src/mds/SnapClient.cc
index d226ba3937d9a..e4fcbbba172d6 100644
--- a/src/mds/SnapClient.cc
+++ b/src/mds/SnapClient.cc
@@ -51,7 +51,7 @@ void SnapClient::handle_query_result(const MMDSTableRequest::const_ref &m)
 
   switch (type) {
   case 'U': // uptodate
-    assert(cached_version == m->get_tid());
+    ceph_assert(cached_version == m->get_tid());
     break;
   case 'F': // full
     {
@@ -123,7 +123,7 @@ void SnapClient::notify_commit(version_t tid)
 {
   dout(10) << __func__ << " tid " << tid << dendl;
-  assert(cached_version == 0 || cached_version >= tid);
+  ceph_assert(cached_version == 0 || cached_version >= tid);
   if (cached_version == 0) {
     committing_tids.insert(tid);
   } else if (cached_pending_update.count(tid)) {
@@ -145,7 +145,7 @@ void SnapClient::refresh(version_t want, MDSInternalContextBase *onfinish)
 {
   dout(10) << __func__ << " want " << want << dendl;
-  assert(want >= cached_version);
+  ceph_assert(want >= cached_version);
   if (onfinish)
     waiting_for_version[want].push_back(onfinish);
 
@@ -175,7 +175,7 @@ void SnapClient::sync(MDSInternalContextBase *onfinish)
 
 void SnapClient::get_snaps(set<snapid_t>& result) const
 {
-  assert(cached_version > 0);
+  ceph_assert(cached_version > 0);
 
   for (auto& p : cached_snaps)
     result.insert(p.first);
@@ -192,7 +192,7 @@ void SnapClient::get_snaps(set<snapid_t>& result) const
 
 set<snapid_t> SnapClient::filter(const set<snapid_t>& snaps) const
 {
-  assert(cached_version > 0);
+  ceph_assert(cached_version > 0);
 
   if (snaps.empty())
     return snaps;
@@ -221,7 +221,7 @@ set<snapid_t> SnapClient::filter(const set<snapid_t>& snaps) const
 
 const SnapInfo* SnapClient::get_snap_info(snapid_t snapid) const
 {
-  assert(cached_version > 0);
+  ceph_assert(cached_version > 0);
 
   const SnapInfo* result = NULL;
   auto it = cached_snaps.find(snapid);
@@ -249,7 +249,7 @@ const SnapInfo* SnapClient::get_snap_info(snapid_t snapid) const
 void SnapClient::get_snap_infos(map<snapid_t, const SnapInfo*>& infomap,
                                 const set<snapid_t>& snaps) const
 {
-  assert(cached_version > 0);
+  ceph_assert(cached_version > 0);
 
   if (snaps.empty())
     return;
diff --git a/src/mds/SnapClient.h b/src/mds/SnapClient.h
index f587983eec2c4..1a12d671eed29 100644
--- a/src/mds/SnapClient.h
+++ b/src/mds/SnapClient.h
@@ -96,7 +96,7 @@ public:
   bool is_synced() const { return synced; }
   void wait_for_sync(MDSInternalContextBase *c) {
-    assert(!synced);
+    ceph_assert(!synced);
     waiting_for_version[std::max<version_t>(cached_version, 1)].push_back(c);
   }
 
diff --git a/src/mds/SnapRealm.cc b/src/mds/SnapRealm.cc
index 7b0302665b56b..507d206f5819d 100644
--- a/src/mds/SnapRealm.cc
+++ b/src/mds/SnapRealm.cc
@@ -75,7 +75,7 @@ void SnapRealm::add_open_past_parent(SnapRealm *parent, snapid_t last)
 {
   auto p = open_past_parents.find(parent->inode->ino());
   if (p != open_past_parents.end()) {
-    assert(p->second.second.count(last) == 0);
+    ceph_assert(p->second.second.count(last) == 0);
     p->second.second.insert(last);
   } else {
     open_past_parents[parent->inode->ino()].first = parent;
@@ -89,9 +89,9 @@ void SnapRealm::add_open_past_parent(SnapRealm *parent, snapid_t last)
 void SnapRealm::remove_open_past_parent(inodeno_t ino, snapid_t last)
 {
   auto p = open_past_parents.find(ino);
-  assert(p != open_past_parents.end());
+  ceph_assert(p != open_past_parents.end());
   auto q = p->second.second.find(last);
-  assert(q != p->second.second.end());
+  ceph_assert(q != p->second.second.end());
   p->second.second.erase(q);
   --num_open_past_parents;
   if (p->second.second.empty()) {
@@ -153,7 +153,7 @@ bool SnapRealm::_open_parents(MDSInternalContextBase *finish, snapid_t first, sn
   }
 
   if (!srnode.past_parent_snaps.empty())
-    assert(mdcache->mds->snapclient->get_cached_version() > 0);
+    ceph_assert(mdcache->mds->snapclient->get_cached_version() > 0);
 
   if (!srnode.past_parents.empty() &&
       mdcache->mds->allows_multimds_snaps()) {
@@ -163,7 +163,7 @@ bool SnapRealm::_open_parents(MDSInternalContextBase *finish, snapid_t first, sn
   }
 
   // and my past parents too!
-  assert(srnode.past_parents.size() >= num_open_past_parents);
+  ceph_assert(srnode.past_parents.size() >= num_open_past_parents);
   if (srnode.past_parents.size() > num_open_past_parents) {
     for (map<snapid_t, snaplink_t>::iterator p = srnode.past_parents.begin();
         p != srnode.past_parents.end(); ) {
@@ -182,7 +182,7 @@ bool SnapRealm::_open_parents(MDSInternalContextBase *finish, snapid_t first, sn
        past_parents_dirty = true;
        continue;
       }
-      assert(parent->snaprealm);  // hmm!
+      ceph_assert(parent->snaprealm);  // hmm!
       if (!parent->snaprealm->_open_parents(finish, p->second.first, p->first))
        return false;
       auto q = open_past_parents.find(p->second.ino);
@@ -212,7 +212,7 @@ bool SnapRealm::have_past_parents_open(snapid_t first, snapid_t last) const
     return true;
 
   if (!srnode.past_parent_snaps.empty())
-    assert(mdcache->mds->snapclient->get_cached_version() > 0);
+    ceph_assert(mdcache->mds->snapclient->get_cached_version() > 0);
 
   if (!srnode.past_parents.empty() &&
       mdcache->mds->allows_multimds_snaps()) {
@@ -285,8 +285,8 @@ void SnapRealm::build_snap_set() const
   // include snaps for parents
   for (const auto& p : srnode.past_parents) {
     const CInode *oldparent = mdcache->get_inode(p.second.ino);
-    assert(oldparent); // call open_parents first!
-    assert(oldparent->snaprealm);
+    ceph_assert(oldparent); // call open_parents first!
+    ceph_assert(oldparent->snaprealm);
 
     const set<snapid_t>& snaps = oldparent->snaprealm->get_snaps();
     snapid_t last = 0;
@@ -313,7 +313,7 @@ void SnapRealm::build_snap_set() const
 
 void SnapRealm::check_cache() const
 {
-  assert(have_past_parents_open());
+  ceph_assert(have_past_parents_open());
   snapid_t seq;
   snapid_t last_created;
   snapid_t last_destroyed = mdcache->mds->snapclient->get_last_destroyed();
@@ -402,8 +402,8 @@ void SnapRealm::get_snap_info(map<snapid_t, const SnapInfo*>& infomap, snapid_t
        p != srnode.past_parents.end() && p->first >= first && p->second.first <= last;
        ++p) {
     CInode *oldparent = mdcache->get_inode(p->second.ino);
-    assert(oldparent); // call open_parents first!
-    assert(oldparent->snaprealm);
+    ceph_assert(oldparent); // call open_parents first!
+    ceph_assert(oldparent->snaprealm);
     oldparent->snaprealm->get_snap_info(infomap,
                                        std::max(first, p->second.first),
                                        std::min(last, p->first));
@@ -438,14 +438,14 @@ std::string_view SnapRealm::get_snapname(snapid_t snapid, inodeno_t atino)
     map<snapid_t,snaplink_t>::iterator p = srnode.past_parents.lower_bound(snapid);
     if (p != srnode.past_parents.end() && p->second.first <= snapid) {
       CInode *oldparent = mdcache->get_inode(p->second.ino);
-      assert(oldparent); // call open_parents first!
-      assert(oldparent->snaprealm);
+      ceph_assert(oldparent); // call open_parents first!
+ ceph_assert(oldparent->snaprealm); return oldparent->snaprealm->get_snapname(snapid, atino); } } - assert(srnode.current_parent_since <= snapid); - assert(parent); + ceph_assert(srnode.current_parent_since <= snapid); + ceph_assert(parent); return parent->get_snapname(snapid, atino); } @@ -505,8 +505,8 @@ snapid_t SnapRealm::resolve_snapname(std::string_view n, inodeno_t atino, snapid p != srnode.past_parents.end() && p->first >= first && p->second.first <= last; ++p) { CInode *oldparent = mdcache->get_inode(p->second.ino); - assert(oldparent); // call open_parents first! - assert(oldparent->snaprealm); + ceph_assert(oldparent); // call open_parents first! + ceph_assert(oldparent->snaprealm); snapid_t r = oldparent->snaprealm->resolve_snapname(n, atino, std::max(first, p->second.first), std::min(last, p->first)); @@ -556,7 +556,7 @@ void SnapRealm::split_at(SnapRealm *child) } else { // no caps, nothing to move/split. dout(20) << " split no-op, no caps to move on file " << *child->inode << dendl; - assert(!child->inode->is_any_caps()); + ceph_assert(!child->inode->is_any_caps()); } return; } @@ -601,7 +601,7 @@ void SnapRealm::merge_to(SnapRealm *newparent) newparent = parent; dout(10) << "merge to " << *newparent << " on " << *newparent->inode << dendl; - assert(open_past_children.empty()); + ceph_assert(open_past_children.empty()); dout(10) << " open_children are " << open_children << dendl; for (auto realm : open_children) { @@ -617,7 +617,7 @@ void SnapRealm::merge_to(SnapRealm *newparent) ++p; in->move_to_realm(newparent); } - assert(inodes_with_caps.empty()); + ceph_assert(inodes_with_caps.empty()); // delete this inode->close_snaprealm(); diff --git a/src/mds/SnapServer.cc b/src/mds/SnapServer.cc index 7da7fbe0d7b52..16b42e9ae4ef9 100644 --- a/src/mds/SnapServer.cc +++ b/src/mds/SnapServer.cc @@ -259,7 +259,7 @@ bool SnapServer::_notify_prep(version_t tid) encode(pending_destroy, bl); encode(last_created, bl); encode(last_destroyed, bl); - assert(version == tid); + ceph_assert(version == tid); for (auto &p : active_clients) { auto m = MMDSTableRequest::create(table, TABLESERVER_OP_NOTIFY_PREP, 0, version); @@ -283,7 +283,7 @@ void SnapServer::handle_query(const MMDSTableRequest::const_ref &req) case 'F': // full version_t have_version; decode(have_version, p); - assert(have_version <= version); + ceph_assert(have_version <= version); if (have_version == version) { char type = 'U'; encode(type, reply->bl); diff --git a/src/mds/SnapServer.h b/src/mds/SnapServer.h index 1860dd4cafd25..0a21a756e56a2 100644 --- a/src/mds/SnapServer.h +++ b/src/mds/SnapServer.h @@ -100,7 +100,7 @@ public: bool upgrade_format() { // upgraded from old filesystem - assert(last_snap > 0); + ceph_assert(last_snap > 0); bool upgraded = false; if (get_version() == 0) { // version 0 confuses snapclient code @@ -123,7 +123,7 @@ public: else if (ino == MDS_INO_MDSDIR(rank)) mdsdir_scrubbed = true; else - assert(0); + ceph_assert(0); } bool can_allow_multimds_snaps() const { return (root_scrubbed && mdsdir_scrubbed) || diff --git a/src/mds/StrayManager.cc b/src/mds/StrayManager.cc index 7918c06df3ed3..0ace8b8c08991 100644 --- a/src/mds/StrayManager.cc +++ b/src/mds/StrayManager.cc @@ -77,7 +77,7 @@ public: C_IO_PurgeStrayPurged(StrayManager *sm_, CDentry *d, bool oh) : StrayManagerIOContext(sm_), dn(d), only_head(oh) { } void finish(int r) override { - assert(r == 0 || r == -ENOENT); + ceph_assert(r == 0 || r == -ENOENT); sm->_purge_stray_purged(dn, only_head); } void print(ostream& out) const override { @@ -92,7 
+92,7 @@ void StrayManager::purge(CDentry *dn) CDentry::linkage_t *dnl = dn->get_projected_linkage(); CInode *in = dnl->get_inode(); dout(10) << __func__ << " " << *dn << " " << *in << dendl; - assert(!dn->is_replicated()); + ceph_assert(!dn->is_replicated()); // CHEAT. there's no real need to journal our intent to purge, since // that is implicit in the dentry's presence and non-use in the stray @@ -117,7 +117,7 @@ void StrayManager::purge(CDentry *dn) } else { dout(10) << " NO realm, using null context" << dendl; snapc = &nullsnapc; - assert(in->last == CEPH_NOSNAP); + ceph_assert(in->last == CEPH_NOSNAP); } uint64_t to = 0; @@ -203,7 +203,7 @@ void StrayManager::_purge_stray_purged( // is being purged (aside from it were derr << "Rogue reference after purge to " << *dn << dendl; - assert(0 == "rogue reference to purging inode"); + ceph_assert(0 == "rogue reference to purging inode"); } // kill dentry. @@ -240,12 +240,12 @@ void StrayManager::_purge_stray_logged(CDentry *dn, version_t pdv, LogSegment *l CInode *in = dn->get_linkage()->get_inode(); dout(10) << "_purge_stray_logged " << *dn << " " << *in << dendl; - assert(!in->state_test(CInode::STATE_RECOVERING)); + ceph_assert(!in->state_test(CInode::STATE_RECOVERING)); bool new_dn = dn->is_new(); // unlink - assert(dn->get_projected_linkage()->is_null()); + ceph_assert(dn->get_projected_linkage()->is_null()); dn->dir->unlink_inode(dn, !new_dn); dn->pop_projected_linkage(); dn->mark_dirty(pdv, ls); @@ -276,9 +276,9 @@ void StrayManager::_purge_stray_logged(CDentry *dn, version_t pdv, LogSegment *l void StrayManager::enqueue(CDentry *dn, bool trunc) { CDentry::linkage_t *dnl = dn->get_projected_linkage(); - assert(dnl); + ceph_assert(dnl); CInode *in = dnl->get_inode(); - assert(in); + ceph_assert(in); /* We consider a stray to be purging as soon as it is enqueued, to avoid * enqueing it twice */ @@ -323,7 +323,7 @@ class C_OpenSnapParents : public StrayManagerContext { void StrayManager::_enqueue(CDentry *dn, bool trunc) { - assert(started); + ceph_assert(started); CInode *in = dn->get_linkage()->get_inode(); if (in->snaprealm && @@ -376,7 +376,7 @@ void StrayManager::advance_delayed() void StrayManager::set_num_strays(uint64_t num) { - assert(!started); + ceph_assert(!started); num_strays = num; logger->set(l_mdc_num_strays, num_strays); } @@ -414,20 +414,20 @@ bool StrayManager::_eval_stray(CDentry *dn, bool delay) { dout(10) << "eval_stray " << *dn << dendl; CDentry::linkage_t *dnl = dn->get_projected_linkage(); - assert(dnl->is_primary()); + ceph_assert(dnl->is_primary()); dout(10) << " inode is " << *dnl->get_inode() << dendl; CInode *in = dnl->get_inode(); - assert(in); - assert(!in->state_test(CInode::STATE_REJOINUNDEF)); + ceph_assert(in); + ceph_assert(!in->state_test(CInode::STATE_REJOINUNDEF)); // The only dentries elegible for purging are those // in the stray directories - assert(dn->get_dir()->get_inode()->is_stray()); + ceph_assert(dn->get_dir()->get_inode()->is_stray()); // Inode may not pass through this function if it // was already identified for purging (i.e. 
cannot // call eval_stray() after purge() - assert(!dn->state_test(CDentry::STATE_PURGING)); + ceph_assert(!dn->state_test(CDentry::STATE_PURGING)); if (!dn->is_auth()) { return false; @@ -477,7 +477,7 @@ bool StrayManager::_eval_stray(CDentry *dn, bool delay) for (auto it = in->remote_parents.begin(); it != in->remote_parents.end(); ) { CDentry *remote_dn = *it; ++it; - assert(remote_dn->last != CEPH_NOSNAP); + ceph_assert(remote_dn->last != CEPH_NOSNAP); remote_dn->unlink_remote(remote_dn->get_linkage()); } } @@ -514,7 +514,7 @@ bool StrayManager::_eval_stray(CDentry *dn, bool delay) !in->old_inodes.empty()) { // A file with snapshots: we will truncate the HEAD revision // but leave the metadata intact. - assert(!in->is_dir()); + ceph_assert(!in->is_dir()); dout(20) << " file has past parents " << in->snaprealm << dendl; if (in->is_file() && in->get_projected_inode()->size > 0) { @@ -565,7 +565,7 @@ void StrayManager::eval_remote(CDentry *remote_dn) dout(10) << __func__ << " " << *remote_dn << dendl; CDentry::linkage_t *dnl = remote_dn->get_projected_linkage(); - assert(dnl->is_remote()); + ceph_assert(dnl->is_remote()); CInode *in = dnl->get_inode(); if (!in) { @@ -580,7 +580,7 @@ void StrayManager::eval_remote(CDentry *remote_dn) // refers to stray? CDentry *primary_dn = in->get_projected_parent_dn(); - assert(primary_dn != NULL); + ceph_assert(primary_dn != NULL); if (primary_dn->get_dir()->get_inode()->is_stray()) { _eval_stray_remote(primary_dn, remote_dn); } else { @@ -605,13 +605,13 @@ class C_RetryEvalRemote : public StrayManagerContext { void StrayManager::_eval_stray_remote(CDentry *stray_dn, CDentry *remote_dn) { dout(20) << __func__ << " " << *stray_dn << dendl; - assert(stray_dn != NULL); - assert(stray_dn->get_dir()->get_inode()->is_stray()); + ceph_assert(stray_dn != NULL); + ceph_assert(stray_dn->get_dir()->get_inode()->is_stray()); CDentry::linkage_t *stray_dnl = stray_dn->get_projected_linkage(); - assert(stray_dnl->is_primary()); + ceph_assert(stray_dnl->is_primary()); CInode *stray_in = stray_dnl->get_inode(); - assert(stray_in->inode.nlink >= 1); - assert(stray_in->last == CEPH_NOSNAP); + ceph_assert(stray_in->inode.nlink >= 1); + ceph_assert(stray_in->last == CEPH_NOSNAP); /* If no remote_dn hinted, pick one arbitrarily */ if (remote_dn == NULL) { @@ -633,7 +633,7 @@ void StrayManager::_eval_stray_remote(CDentry *stray_dn, CDentry *remote_dn) return; } } - assert(remote_dn->last == CEPH_NOSNAP); + ceph_assert(remote_dn->last == CEPH_NOSNAP); // NOTE: we repeat this check in _rename(), since our submission path is racey. if (!remote_dn->is_projected()) { if (remote_dn->is_auth()) { @@ -679,9 +679,9 @@ void StrayManager::reintegrate_stray(CDentry *straydn, CDentry *rdn) void StrayManager::migrate_stray(CDentry *dn, mds_rank_t to) { CInode *in = dn->get_projected_linkage()->get_inode(); - assert(in); + ceph_assert(in); CInode *diri = dn->dir->get_inode(); - assert(diri->is_stray()); + ceph_assert(diri->is_stray()); dout(10) << "migrate_stray from mds." << MDS_INO_STRAY_OWNER(diri->inode.ino) << " to mds." << to << " " << *dn << " " << *in << dendl; @@ -691,7 +691,7 @@ void StrayManager::migrate_stray(CDentry *dn, mds_rank_t to) // rename it to another mds. 
 filepath src;
   dn->make_path(src);
-  assert(src.depth() == 2);
+  ceph_assert(src.depth() == 2);
 
   filepath dst(MDS_INO_MDSDIR(to));
   dst.push_dentry(src[0]);
@@ -711,19 +711,19 @@ StrayManager::StrayManager(MDSRank *mds, PurgeQueue &purge_queue_)
     num_strays_delayed(0), num_strays_enqueuing(0),
     purge_queue(purge_queue_)
 {
-  assert(mds != NULL);
+  ceph_assert(mds != NULL);
 }
 
 void StrayManager::truncate(CDentry *dn)
 {
   const CDentry::linkage_t *dnl = dn->get_projected_linkage();
   const CInode *in = dnl->get_inode();
-  assert(in);
+  ceph_assert(in);
   dout(10) << __func__ << ": " << *dn << " " << *in << dendl;
-  assert(!dn->is_replicated());
+  ceph_assert(!dn->is_replicated());
 
   const SnapRealm *realm = in->find_snaprealm();
-  assert(realm);
+  ceph_assert(realm);
   dout(10) << " realm " << *realm << dendl;
   const SnapContext *snapc = &realm->get_snap_context();
@@ -734,7 +734,7 @@ void StrayManager::truncate(CDentry *dn)
   // the file has ever been.
   to = std::max(in->inode.max_size_ever, to);
-  assert(to > 0);
+  ceph_assert(to > 0);
 
   PurgeItem item;
   item.action = PurgeItem::TRUNCATE_FILE;
diff --git a/src/mds/events/EMetaBlob.h b/src/mds/events/EMetaBlob.h
index 1686bab37d3cd..cf6701f0a6b0d 100644
--- a/src/mds/events/EMetaBlob.h
+++ b/src/mds/events/EMetaBlob.h
@@ -364,7 +364,7 @@ private:
   }
 
   void add_opened_ino(inodeno_t ino) {
-    assert(!opened_ino);
+    ceph_assert(!opened_ino);
     opened_ino = ino;
   }
 
@@ -489,7 +489,7 @@ private:
       add_null_dentry(dn, dirty);
       return;
     }
-    assert(dn->get_projected_linkage()->is_primary());
+    ceph_assert(dn->get_projected_linkage()->is_primary());
     add_primary_dentry(dn, 0, dirty, dirty_parent, dirty_pool);
   }
 
diff --git a/src/mds/journal.cc b/src/mds/journal.cc
index 5d0baf335a2ed..ed256e55e5e98 100644
--- a/src/mds/journal.cc
+++ b/src/mds/journal.cc
@@ -67,27 +67,27 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
 
   dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire" << dendl;
 
-  assert(g_conf()->mds_kill_journal_expire_at != 1);
+  ceph_assert(g_conf()->mds_kill_journal_expire_at != 1);
 
   // commit dirs
   for (elist<CDir*>::iterator p = new_dirfrags.begin(); !p.end(); ++p) {
     dout(20) << " new_dirfrag " << **p << dendl;
-    assert((*p)->is_auth());
+    ceph_assert((*p)->is_auth());
     commit.insert(*p);
   }
   for (elist<CDir*>::iterator p = dirty_dirfrags.begin(); !p.end(); ++p) {
     dout(20) << " dirty_dirfrag " << **p << dendl;
-    assert((*p)->is_auth());
+    ceph_assert((*p)->is_auth());
     commit.insert(*p);
   }
   for (elist<CDentry*>::iterator p = dirty_dentries.begin(); !p.end(); ++p) {
     dout(20) << " dirty_dentry " << **p << dendl;
-    assert((*p)->is_auth());
+    ceph_assert((*p)->is_auth());
     commit.insert((*p)->get_dir());
   }
   for (elist<CInode*>::iterator p = dirty_inodes.begin(); !p.end(); ++p) {
     dout(20) << " dirty_inode " << **p << dendl;
-    assert((*p)->is_auth());
+    ceph_assert((*p)->is_auth());
     if ((*p)->is_base()) {
       (*p)->store(gather_bld.new_sub());
     } else
@@ -99,7 +99,7 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
         p != commit.end();
         ++p) {
       CDir *dir = *p;
-      assert(dir->is_auth());
+      ceph_assert(dir->is_auth());
       if (dir->can_auth_pin()) {
        dout(15) << "try_to_expire committing " << *dir << dendl;
        dir->commit(0, gather_bld.new_sub(), false, op_prio);
@@ -143,14 +143,14 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
       mds->locker->scatter_nudge(&in->nestlock, gather_bld.new_sub());
   }
 
-  assert(g_conf()->mds_kill_journal_expire_at != 2);
+  ceph_assert(g_conf()->mds_kill_journal_expire_at != 2);
 
   // open files and snap inodes
   if (!open_files.empty()) {
-    assert(!mds->mdlog->is_capped()); // hmm FIXME
+    ceph_assert(!mds->mdlog->is_capped()); // hmm FIXME
     EOpen *le = 0;
     LogSegment *ls = mds->mdlog->get_current_segment();
-    assert(ls != this);
+    ceph_assert(ls != this);
     elist<CInode*>::iterator p = open_files.begin(member_offset(CInode, item_open_file));
     while (!p.end()) {
       CInode *in = *p;
@@ -176,12 +176,12 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
     }
   }
 
-  assert(g_conf()->mds_kill_journal_expire_at != 3);
+  ceph_assert(g_conf()->mds_kill_journal_expire_at != 3);
 
   // backtraces to be stored/updated
   for (elist<CInode*>::iterator p = dirty_parent_inodes.begin(); !p.end(); ++p) {
     CInode *in = *p;
-    assert(in->is_auth());
+    ceph_assert(in->is_auth());
     if (in->can_auth_pin()) {
       dout(15) << "try_to_expire waiting for storing backtrace on " << *in << dendl;
       in->store_backtrace(gather_bld.new_sub(), op_prio);
@@ -191,7 +191,7 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
     }
   }
 
-  assert(g_conf()->mds_kill_journal_expire_at != 4);
+  ceph_assert(g_conf()->mds_kill_journal_expire_at != 4);
 
   // slave updates
   for (elist<MDSlaveUpdate*>::iterator p = slave_updates.begin(member_offset(MDSlaveUpdate,
                                                                              item));
       !p.end(); ++p) {
     MDSlaveUpdate *su = *p;
     dout(10) << "try_to_expire waiting on slave update " << su << dendl;
-    assert(su->waiter == 0);
+    ceph_assert(su->waiter == 0);
     su->waiter = gather_bld.new_sub();
   }
 
@@ -230,13 +230,13 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
       p != pending_commit_tids.end();
       ++p) {
    MDSTableClient *client = mds->get_table_client(p->first);
-    assert(client);
+    ceph_assert(client);
    for (ceph::unordered_set<version_t>::iterator q = p->second.begin();
         q != p->second.end();
         ++q) {
      dout(10) << "try_to_expire " << get_mdstable_name(p->first) << " transaction " << *q
               << " pending commit (not yet acked), waiting" << dendl;
-      assert(!client->has_committed(*q));
+      ceph_assert(!client->has_committed(*q));
      client->wait_for_ack(*q, gather_bld.new_sub());
    }
  }
@@ -246,7 +246,7 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
       p != tablev.end();
       ++p) {
    MDSTableServer *server = mds->get_table_server(p->first);
-    assert(server);
+    ceph_assert(server);
    if (p->second > server->get_committed_version()) {
      dout(10) << "try_to_expire waiting for " << get_mdstable_name(p->first)
               << " to save, need " << p->second << dendl;
@@ -266,7 +266,7 @@ void LogSegment::try_to_expire(MDSRank *mds, MDSGatherBuilder &gather_bld, int o
    dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire waiting" << dendl;
    mds->mdlog->flush();
  } else {
-    assert(g_conf()->mds_kill_journal_expire_at != 5);
+    ceph_assert(g_conf()->mds_kill_journal_expire_at != 5);
    dout(6) << "LogSegment(" << seq << "/" << offset << ").try_to_expire success" << dendl;
  }
 }
@@ -311,7 +311,7 @@ void EMetaBlob::add_dir_context(CDir *dir, int mode)
          !dir->state_test(CDir::STATE_AUXSUBTREE) &&
          !diri->state_test(CInode::STATE_AMBIGUOUSAUTH)) {
        dout(0) << "EMetaBlob::add_dir_context unexpected subtree " << *dir << dendl;
-       assert(0);
+       ceph_assert(0);
       }
       dout(20) << "EMetaBlob::add_dir_context(" << dir << ") ambiguous or transient subtree " << dendl;
     } else {
@@ -363,7 +363,7 @@ void EMetaBlob::add_dir_context(CDir *dir, int mode)
   dout(20) << "EMetaBlob::add_dir_context final: " << parents << dendl;
   for (list<CDentry*>::iterator p = parents.begin(); p != parents.end(); ++p) {
-    assert((*p)->get_projected_linkage()->is_primary());
+    ceph_assert((*p)->get_projected_linkage()->is_primary());
     add_dentry(*p, false);
   }
 }
@@ -1127,9 +1127,9 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
 {
   dout(10) << "EMetaBlob.replay " << lump_map.size() << " dirlumps by " << client_name << dendl;
 
-  assert(logseg);
+  ceph_assert(logseg);
 
-  assert(g_conf()->mds_kill_journal_replay_at != 1);
+  ceph_assert(g_conf()->mds_kill_journal_replay_at != 1);
 
   for (list<ceph::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
     CInode *in = mds->mdcache->get_inode((*p)->inode.ino);
@@ -1161,7 +1161,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        nnull += lump.nnull;
      }
    }
-    assert(nnull <= 1);
+    ceph_assert(nnull <= 1);
  }
 
   // keep track of any inodes we unlink and don't relink elsewhere
@@ -1182,7 +1182,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
     CInode *diri = mds->mdcache->get_inode((*lp).ino);
     if (!diri) {
       if (MDS_INO_IS_MDSDIR(lp->ino)) {
-       assert(MDS_INO_MDSDIR(mds->get_nodeid()) != lp->ino);
+       ceph_assert(MDS_INO_MDSDIR(mds->get_nodeid()) != lp->ino);
        diri = mds->mdcache->create_system_inode(lp->ino, S_IFDIR|0755);
        diri->state_clear(CInode::STATE_AUTH);
        dout(10) << "EMetaBlob.replay created base " << *diri << dendl;
@@ -1259,7 +1259,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
          if (p->is_dirty()) dn->_mark_dirty(logseg);
          dout(10) << "EMetaBlob.replay for [" << p->dnfirst << "," << p->dnlast << "] had " << *dn << dendl;
          dn->first = p->dnfirst;
-         assert(dn->last == p->dnlast);
+         ceph_assert(dn->last == p->dnlast);
        }
        if (lump.is_importing())
          dn->state_set(CDentry::STATE_AUTH);
@@ -1311,7 +1311,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        } else {
          dout(10) << "EMetaBlob.replay for [" << p->dnfirst << "," << p->dnlast << "] had " << *in << dendl;
        }
-       assert(in->first == p->dnfirst ||
+       ceph_assert(in->first == p->dnfirst ||
               (in->is_multiversion() && in->first > p->dnfirst));
       }
       if (p->is_dirty())
@@ -1324,7 +1324,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
          in->state_set(CInode::STATE_AUTH);
        else
          in->state_clear(CInode::STATE_AUTH);
-       assert(g_conf()->mds_kill_journal_replay_at != 2);
+       ceph_assert(g_conf()->mds_kill_journal_replay_at != 2);
       }
 
     // remote dentries
@@ -1354,7 +1354,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
          if (p->dirty) dn->_mark_dirty(logseg);
          dout(10) << "EMetaBlob.replay for [" << p->dnfirst << "," << p->dnlast << "] had " << *dn << dendl;
          dn->first = p->dnfirst;
-         assert(dn->last == p->dnlast);
+         ceph_assert(dn->last == p->dnlast);
        }
        if (lump.is_importing())
          dn->state_set(CDentry::STATE_AUTH);
@@ -1387,7 +1387,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        dn->set_version(p->dnv);
        if (p->dirty) dn->_mark_dirty(logseg);
        dout(10) << "EMetaBlob.replay had " << *dn << dendl;
-       assert(dn->last == p->dnlast);
+       ceph_assert(dn->last == p->dnlast);
       }
       olddir = dir;
       if (lump.is_importing())
@@ -1398,28 +1398,28 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
     }
   }
 
-  assert(g_conf()->mds_kill_journal_replay_at != 3);
+  ceph_assert(g_conf()->mds_kill_journal_replay_at != 3);
 
   if (renamed_dirino) {
     if (renamed_diri) {
-      assert(unlinked.count(renamed_diri));
-      assert(linked.count(renamed_diri));
+      ceph_assert(unlinked.count(renamed_diri));
+      ceph_assert(linked.count(renamed_diri));
       olddir = unlinked[renamed_diri];
     } else {
       // we imported a diri we haven't seen before
      renamed_diri = mds->mdcache->get_inode(renamed_dirino);
-      assert(renamed_diri);  // it was in the metablob
+      ceph_assert(renamed_diri);  // it was in the metablob
     }
 
     if (olddir) {
       if (olddir->authority() != CDIR_AUTH_UNDEF &&
          renamed_diri->authority() == CDIR_AUTH_UNDEF) {
-       assert(slaveup); // auth to non-auth, must be slave prepare
+       ceph_assert(slaveup); // auth to non-auth, must be slave prepare
        list<frag_t> leaves;
        renamed_diri->dirfragtree.get_leaves(leaves);
        for (list<frag_t>::iterator p = leaves.begin(); p != leaves.end(); ++p) {
          CDir *dir = renamed_diri->get_dirfrag(*p);
-         assert(dir);
+         ceph_assert(dir);
          if (dir->get_dir_auth() == CDIR_AUTH_UNDEF)
            // preserve subtree bound until slave commit
            slaveup->olddirs.insert(dir->inode);
@@ -1447,7 +1447,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        if (dir) {
          // we already had the inode before, and we already adjusted this subtree accordingly.
          dout(10) << " already had+adjusted rename import bound " << *dir << dendl;
-         assert(olddir);
+         ceph_assert(olddir);
          continue;
        }
        dir = renamed_diri->get_or_open_dirfrag(mds->mdcache, *p);
@@ -1462,7 +1462,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
     for (map<CInode*, CDir*>::iterator p = unlinked.begin(); p != unlinked.end(); ++p) {
       if (!linked.count(p->first))
        continue;
-      assert(p->first->is_dir());
+      ceph_assert(p->first->is_dir());
       mds->mdcache->adjust_subtree_after_rename(p->first, p->second, false);
     }
   }
@@ -1496,7 +1496,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
   // opened ino?
   if (opened_ino) {
     CInode *in = mds->mdcache->get_inode(opened_ino);
-    assert(in);
+    ceph_assert(in);
     dout(10) << "EMetaBlob.replay noting opened inode " << *in << dendl;
     logseg->open_files.push_back(&in->item_open_file);
   }
@@ -1524,7 +1524,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        mds->inotable->force_replay_version(inotablev);
      }
 
-      assert(inotablev == mds->inotable->get_version());
+      ceph_assert(inotablev == mds->inotable->get_version());
    }
  }
  if (sessionmapv) {
@@ -1547,7 +1547,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
          if (next != i)
            mds->clog->warn() << " replayed op " << client_reqs << " used ino " << i
                             << " but session next is " << next;
-         assert(i == used_preallocated_ino);
+         ceph_assert(i == used_preallocated_ino);
          session->info.used_inos.clear();
        }
        mds->sessionmap.replay_dirty_session(session);
@@ -1565,11 +1565,11 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        if (!preallocated_inos.empty())
          mds->sessionmap.replay_advance_version();
      }
-      assert(sessionmapv == mds->sessionmap.get_version());
+      ceph_assert(sessionmapv == mds->sessionmap.get_version());
    } else {
      mds->clog->error() << "journal replay sessionmap v " << sessionmapv
                        << " -(1|2) > table " << mds->sessionmap.get_version();
-      assert(g_conf()->mds_wipe_sessions);
+      ceph_assert(g_conf()->mds_wipe_sessions);
      mds->sessionmap.wipe();
      mds->sessionmap.set_version(sessionmapv);
    }
@@ -1580,7 +1580,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
        p != truncate_start.end();
        ++p) {
      CInode *in = mds->mdcache->get_inode(*p);
-      assert(in);
+      ceph_assert(in);
      mds->mdcache->add_recovered_truncate(in, logseg);
    }
    for (map<inodeno_t,uint64_t>::iterator p = truncate_finish.begin();
@@ -1589,7 +1589,7 @@ void
     LogSegment *ls = mds->mdlog->get_segment(p->second);
     if (ls) {
       CInode *in = mds->mdcache->get_inode(p->first);
-      assert(in);
+      ceph_assert(in);
       mds->mdcache->remove_recovered_truncate(in, ls);
     }
   }
@@ -1606,7 +1606,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
       mds->mdcache->remove_inode(in);
       if (parent) {
         dout(10) << "EMetaBlob.replay unlinked from dentry " << *parent << dendl;
-        assert(parent->get_linkage()->is_null());
+        ceph_assert(parent->get_linkage()->is_null());
       }
     } else {
       dout(10) << "EMetaBlob.replay destroyed " << *p << ", not in cache" << dendl;
@@ -1623,7 +1623,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
     dout(10) << "EMetaBlob.replay request " << p->first << " trim_to " << p->second << dendl;
     inodeno_t created = allocated_ino ? allocated_ino : used_preallocated_ino;
     // if we allocated an inode, there should be exactly one client request id.
-    assert(created == inodeno_t() || client_reqs.size() == 1);
+    ceph_assert(created == inodeno_t() || client_reqs.size() == 1);
 
     Session *session = mds->sessionmap.get_session(p->first.name);
     if (session) {
@@ -1652,7 +1652,7 @@ void EMetaBlob::replay(MDSRank *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
 
   // update segment
   update_segment(logseg);
 
-  assert(g_conf()->mds_kill_journal_replay_at != 4);
+  ceph_assert(g_conf()->mds_kill_journal_replay_at != 4);
 }
 
 // -----------------------
@@ -1700,7 +1700,7 @@ void ESession::replay(MDSRank *mds)
     } else {
       mds->sessionmap.replay_advance_version();
     }
-    assert(mds->sessionmap.get_version() == cmapv);
+    ceph_assert(mds->sessionmap.get_version() == cmapv);
   }
 
   if (inos.size() && inotablev) {
@@ -1710,9 +1710,9 @@ void ESession::replay(MDSRank *mds)
     } else {
       dout(10) << "ESession.replay inotable " << mds->inotable->get_version()
                << " < " << inotablev << " " << (open ? "add":"remove") << dendl;
-      assert(!open);  // for now
+      ceph_assert(!open);  // for now
       mds->inotable->replay_release_ids(inos);
-      assert(mds->inotable->get_version() == inotablev);
+      ceph_assert(mds->inotable->get_version() == inotablev);
     }
   }
@@ -1834,7 +1834,7 @@ void ESessions::replay(MDSRank *mds)
     dout(10) << "ESessions.replay sessionmap " << mds->sessionmap.get_version()
              << " < " << cmapv << dendl;
     mds->sessionmap.replay_open_sessions(client_map, client_metadata_map);
-    assert(mds->sessionmap.get_version() == cmapv);
+    ceph_assert(mds->sessionmap.get_version() == cmapv);
   }
   update_segment();
 }
@@ -1910,7 +1910,7 @@ void ETableServer::replay(MDSRank *mds)
   dout(10) << " ETableServer.replay " << get_mdstable_name(table)
            << " " << get_mdstableserver_opname(op)
            << " event " << version << " - 1 == table " << server->get_version() << dendl;
-  assert(version-1 == server->get_version());
+  ceph_assert(version-1 == server->get_version());
 
   switch (op) {
   case TABLESERVER_OP_PREPARE: {
@@ -1938,7 +1938,7 @@ void ETableServer::replay(MDSRank *mds)
     ceph_abort();  // Should be unreachable because damaged() calls respawn()
   }
 
-  assert(version == server->get_version());
+  ceph_assert(version == server->get_version());
   update_segment();
 }
@@ -1989,7 +1989,7 @@ void ETableClient::replay(MDSRank *mds)
   if (!client)
     return;
 
-  assert(op == TABLESERVER_OP_ACK);
+  ceph_assert(op == TABLESERVER_OP_ACK);
   client->got_journaled_ack(tid);
 }
@@ -2012,17 +2012,17 @@ void ESnap::replay(MDSRank *mds)
   dout(10) << " ESnap.replay event " << version
            << " - 1 == table " << mds->snaptable->get_version() << dendl;
-  assert(version-1 == mds->snaptable->get_version());
+  ceph_assert(version-1 == mds->snaptable->get_version());
 
   if (create) {
     version_t v;
     snapid_t s = mds->snaptable->create(snap.dirino, snap.name, snap.stamp, &v);
-    assert(s == snap.snapid);
+    ceph_assert(s == snap.snapid);
   } else {
     mds->snaptable->remove(snap.snapid);
   }
 
-  assert(version == mds->snaptable->get_version());
+  ceph_assert(version == mds->snaptable->get_version());
 }
 */
@@ -2117,7 +2117,7 @@ void EUpdate::replay(MDSRank *mds)
       decode(cmm, blp);
       mds->sessionmap.replay_open_sessions(cm, cmm);
 
-      assert(mds->sessionmap.get_version() == cmapv);
+      ceph_assert(mds->sessionmap.get_version() == cmapv);
     }
   }
   update_segment();
@@ -2182,7 +2182,7 @@ void EOpen::replay(MDSRank *mds)
     CInode *in = mds->mdcache->get_inode(ino);
     if (!in) {
       dout(0) << "EOpen.replay ino " << ino << " not in metablob" << dendl;
-      assert(in);
+      ceph_assert(in);
     }
     _segment->open_files.push_back(&in->item_open_file);
   }
@@ -2190,7 +2190,7 @@ void EOpen::replay(MDSRank *mds)
     CInode *in = mds->mdcache->get_inode(vino);
     if (!in) {
       dout(0) << "EOpen.replay ino " << vino << " not in metablob" << dendl;
-      assert(in);
+      ceph_assert(in);
     }
     _segment->open_files.push_back(&in->item_open_file);
   }
@@ -2682,7 +2682,7 @@ void ESubtreeMap::replay(MDSRank *mds)
       dout(0) << "journal subtrees: " << subtrees << dendl;
       dout(0) << "journal ambig_subtrees: " << ambiguous_subtrees << dendl;
       mds->mdcache->show_subtrees();
-      assert(!g_conf()->mds_debug_subtrees || errors == 0);
+      ceph_assert(!g_conf()->mds_debug_subtrees || errors == 0);
     }
     return;
   }
@@ -2698,7 +2698,7 @@ void ESubtreeMap::replay(MDSRank *mds)
        p != subtrees.end();
        ++p) {
     CDir *dir = mds->mdcache->get_dirfrag(p->first);
-    assert(dir);
+    ceph_assert(dir);
     if (ambiguous_subtrees.count(p->first)) {
       // ambiguous!
      mds->mdcache->add_ambiguous_import(p->first, p->second);
@@ -2846,14 +2846,14 @@ void EExport::replay(MDSRank *mds)
   metablob.replay(mds, _segment);
 
   CDir *dir = mds->mdcache->get_dirfrag(base);
-  assert(dir);
+  ceph_assert(dir);
 
   set<CDir*> realbounds;
   for (set<dirfrag_t>::iterator p = bounds.begin();
        p != bounds.end();
        ++p) {
     CDir *bd = mds->mdcache->get_dirfrag(*p);
-    assert(bd);
+    ceph_assert(bd);
     realbounds.insert(bd);
   }
@@ -2928,14 +2928,14 @@ void EImportStart::replay(MDSRank *mds)
 
   // set auth partially to us so we don't trim it
   CDir *dir = mds->mdcache->get_dirfrag(base);
-  assert(dir);
+  ceph_assert(dir);
 
   set<CDir*> realbounds;
   for (vector<dirfrag_t>::iterator p = bounds.begin();
        p != bounds.end();
        ++p) {
     CDir *bd = mds->mdcache->get_dirfrag(*p);
-    assert(bd);
+    ceph_assert(bd);
     if (!bd->is_subtree_root())
       bd->state_clear(CDir::STATE_AUTH);
     realbounds.insert(bd);
   }
@@ -3024,7 +3024,7 @@ void EImportFinish::replay(MDSRank *mds)
     mds->mdcache->finish_ambiguous_import(base);
   } else {
     CDir *dir = mds->mdcache->get_dirfrag(base);
-    assert(dir);
+    ceph_assert(dir);
     vector<dirfrag_t> bounds;
     mds->mdcache->get_ambiguous_import_bounds(base, bounds);
     mds->mdcache->adjust_bounded_subtree_auth(dir, bounds, CDIR_AUTH_UNDEF);
diff --git a/src/mds/mdstypes.cc b/src/mds/mdstypes.cc
index 846345d649d6f..e98ca15eeb75c 100644
--- a/src/mds/mdstypes.cc
+++ b/src/mds/mdstypes.cc
@@ -390,7 +390,7 @@ feature_bitset_t::feature_bitset_t(const vector<size_t>& array)
       if (bit > last)
        last = bit;
       else
-       assert(bit == last);
+       ceph_assert(bit == last);
       _vec[bit / bits_per_block] |= (block_type)1 << (bit % bits_per_block);
     }
   }
diff --git a/src/mds/mdstypes.h b/src/mds/mdstypes.h
index a544597284012..6d72bd4e19e9d 100644
--- a/src/mds/mdstypes.h
+++ b/src/mds/mdstypes.h
@@ -543,7 +543,7 @@ struct inode_t {
   bool is_truncating() const { return (truncate_pending > 0); }
   void truncate(uint64_t old_size, uint64_t new_size) {
-    assert(new_size < old_size);
+    ceph_assert(new_size < old_size);
     if (old_size > max_size_ever)
       max_size_ever = old_size;
     truncate_from = old_size;
@@ -861,7 +861,7 @@ void inode_t<Allocator>::generate_test_instances(list<inode_t*>& ls)
 template<template<typename> class Allocator>
 int inode_t<Allocator>::compare(const inode_t<Allocator> &other, bool *divergent) const
 {
-  assert(ino == other.ino);
+  ceph_assert(ino == other.ino);
   *divergent = false;
   if (version == other.version) {
     if (rdev != other.rdev ||
@@ -899,7 +899,7 @@ int inode_t<Allocator>::compare(const inode_t<Allocator> &other, bool *divergent
     *divergent = !older_is_consistent(other);
     return 1;
   } else {
-    assert(version < other.version);
+    ceph_assert(version < other.version);
     *divergent = !other.older_is_consistent(*this);
     return -1;
   }
@@ -1225,7 +1225,7 @@ struct dentry_key_t {
   }
   static void decode_helper(std::string_view key, string& nm, snapid_t& sn) {
     size_t i = key.find_last_of('_');
-    assert(i != string::npos);
+    ceph_assert(i != string::npos);
     if (key.compare(i+1, std::string_view::npos, "head") == 0) {
       // name_head
       sn = CEPH_NOSNAP;
-- 
2.39.5