else if (command == "status")
m_client->dump_status(f.get());
else
- assert(0 == "bad command registered");
+ ceph_assert(0 == "bad command registered");
m_client->client_lock.Unlock();
f->close_section();
f->flush(out);
last_used_faked_ino = 0;
it = free_faked_inos.lower_bound(last_used_faked_ino + 1);
}
- assert(it != free_faked_inos.end());
+ ceph_assert(it != free_faked_inos.end());
if (last_used_faked_ino < it.get_start()) {
- assert(it.get_len() > 0);
+ ceph_assert(it.get_len() > 0);
last_used_faked_ino = it.get_start();
} else {
++last_used_faked_ino;
- assert(it.get_start() + it.get_len() > last_used_faked_ino);
+ ceph_assert(it.get_start() + it.get_len() > last_used_faked_ino);
}
in->faked_ino = last_used_faked_ino;
free_faked_inos.erase(in->faked_ino);
Client::~Client()
{
- assert(!client_lock.is_locked());
+ ceph_assert(!client_lock.is_locked());
// It is necessary to hold client_lock, because any inode destruction
// may call into ObjectCacher, which asserts that its lock (which is client_lock) is held.
// empty lru
trim_cache();
- assert(lru.lru_get_size() == 0);
+ ceph_assert(lru.lru_get_size() == 0);
// close root ino
- assert(inode_map.size() <= 1 + root_parents.size());
+ ceph_assert(inode_map.size() <= 1 + root_parents.size());
if (root && inode_map.size() == 1 + root_parents.size()) {
delete root;
root = 0;
_reset_faked_inos();
}
- assert(inode_map.empty());
+ ceph_assert(inode_map.empty());
}
inodeno_t Client::get_root_ino()
void Client::dump_status(Formatter *f)
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(client_lock.is_locked_by_me());
ldout(cct, 1) << __func__ << dendl;
objectcacher->start();
client_lock.Lock();
- assert(!initialized);
+ ceph_assert(!initialized);
messenger->add_dispatcher_tail(this);
client_lock.Unlock();
objectcacher->stop(); // outside of client_lock! this does a join.
client_lock.Lock();
- assert(initialized);
+ ceph_assert(initialized);
initialized = false;
timer.shutdown();
client_lock.Unlock();
utime_t dttl = from;
dttl += (float)dlease->duration_ms / 1000.0;
- assert(dn);
+ ceph_assert(dn);
if (dlease->mask & CEPH_LOCK_DN) {
if (dttl > dn->lease_ttl) {
}
dir_result_t *dirp = request->dirp;
- assert(dirp);
+ ceph_assert(dirp);
// the extra buffer list is only set for readdir and lssnap replies
auto p = reply->get_extra_bl().cbegin();
if (!p.end()) {
// snapdir?
if (request->head.op == CEPH_MDS_OP_LSSNAP) {
- assert(diri);
+ ceph_assert(diri);
diri = open_snapdir(diri);
}
// only open dir if we're actually adding stuff to it!
Dir *dir = diri->open_dir();
- assert(dir);
+ ceph_assert(dir);
// dirstat
DirStat dst(p, features);
frag_t fg = (unsigned)request->head.args.readdir.frag;
unsigned readdir_offset = dirp->next_offset;
string readdir_start = dirp->last_name;
- assert(!readdir_start.empty() || readdir_offset == 2);
+ ceph_assert(!readdir_start.empty() || readdir_offset == 2);
unsigned last_hash = 0;
if (hash_order) {
// replace incorrect dentry
unlink(olddn, true, true); // keep dir, dentry
dn = link(dir, dname, in, olddn);
- assert(dn == olddn);
+ ceph_assert(dn == olddn);
} else {
// keep existing dn
dn = olddn;
dirp->start_shared_gen == diri->shared_gen) {
if (dirp->cache_index == dir->readdir_cache.size()) {
if (i == 0) {
- assert(!dirp->inode->is_complete_and_ordered());
+ ceph_assert(!dirp->inode->is_complete_and_ordered());
dir->readdir_cache.reserve(dirp->cache_index + numdn);
}
dir->readdir_cache.push_back(dn);
} else if (dirp->cache_index < dir->readdir_cache.size()) {
if (dirp->inode->is_complete_and_ordered())
- assert(dir->readdir_cache[dirp->cache_index] == dn);
+ ceph_assert(dir->readdir_cache[dirp->cache_index] == dn);
else
dir->readdir_cache[dirp->cache_index] = dn;
} else {
- assert(0 == "unexpected readdir buffer idx");
+ ceph_assert(0 == "unexpected readdir buffer idx");
}
dirp->cache_index++;
}
auto p = reply->get_trace_bl().cbegin();
if (request->got_unsafe) {
ldout(cct, 10) << "insert_trace -- already got unsafe; ignoring" << dendl;
- assert(p.end());
+ ceph_assert(p.end());
return NULL;
}
// rename
Dentry *od = request->old_dentry();
ldout(cct, 10) << " unlinking rename src dn " << od << " for traceless reply" << dendl;
- assert(od);
+ ceph_assert(od);
unlink(od, true, true); // keep dir, dentry
} else if (op == CEPH_MDS_OP_RMDIR ||
op == CEPH_MDS_OP_UNLINK) {
if ((wanted & CEPH_CAP_XATTR_SHARED) &&
!(ist.xattr_version > 0 && ist.xattrbl.length() > 0))
- assert(0 == "MDS reply does not contain xattrs");
+ ceph_assert(0 == "MDS reply does not contain xattrs");
}
in = add_update_inode(&ist, request->sent_stamp, session,
// fake it for snap lookup
vinodeno_t vino = ist.vino;
vino.snapid = CEPH_SNAPDIR;
- assert(inode_map.count(vino));
+ ceph_assert(inode_map.count(vino));
diri = inode_map[vino];
string dname = request->path.last_dentry();
void Client::connect_mds_targets(mds_rank_t mds)
{
ldout(cct, 10) << __func__ << " for mds." << mds << dendl;
- assert(mds_sessions.count(mds));
+ ceph_assert(mds_sessions.count(mds));
const MDSMap::mds_info_t& info = mdsmap->get_mds_info(mds);
for (set<mds_rank_t>::const_iterator q = info.export_targets.begin();
q != info.export_targets.end();
&target, perms);
} else {
// if the dentry is not linked, just do our best. see #5021.
- assert(0 == "how did this happen? i want logs!");
+ ceph_assert(0 == "how did this happen? i want logs!");
}
} else {
Inode *in = request->inode();
}
if (!request->reply) {
- assert(request->aborted());
- assert(!request->got_unsafe);
+ ceph_assert(request->aborted());
+ ceph_assert(!request->got_unsafe);
r = request->get_abort_code();
request->item.remove_myself();
unregister_request(request);
request->success = true;
// kick dispatcher (we've got it!)
- assert(request->dispatch_cond);
+ ceph_assert(request->dispatch_cond);
request->dispatch_cond->Signal();
ldout(cct, 20) << "sendrecv kickback on tid " << tid << " " << request->dispatch_cond << dendl;
request->dispatch_cond = 0;
void Client::update_metadata(std::string const &k, std::string const &v)
{
Mutex::Locker l(client_lock);
- assert(initialized);
+ ceph_assert(initialized);
auto it = metadata.find(k);
if (it != metadata.end()) {
auto em = mds_sessions.emplace(std::piecewise_construct,
std::forward_as_tuple(mds),
std::forward_as_tuple(mds, messenger->connect_to_mds(addrs), addrs));
- assert(em.second); /* not already present */
+ ceph_assert(em.second); /* not already present */
MetaSession *session = &em.first->second;
// Maybe skip sending a request to open if this MDS daemon
bool Client::_any_stale_sessions() const
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(client_lock.is_locked_by_me());
for (const auto &it : mds_sessions) {
if (it.second.state == MetaSession::STATE_STALE) {
}
MetaRequest *request = mds_requests[tid];
- assert(request);
+ ceph_assert(request);
// reset retry counter
request->retry_attempt = 0;
}
}
- assert(request->reply == NULL);
+ ceph_assert(request->reply == NULL);
request->reply = reply;
insert_trace(request, session);
session->unsafe_requests.push_back(&request->unsafe_item);
if (is_dir_operation(request)) {
Inode *dir = request->inode();
- assert(dir);
+ ceph_assert(dir);
dir->unsafe_ops.push_back(&request->unsafe_dir_item);
}
if (request->target) {
}
}
}
- assert(session->requests.empty());
- assert(session->unsafe_requests.empty());
+ ceph_assert(session->requests.empty());
+ ceph_assert(session->unsafe_requests.empty());
}
{
ldout(cct, 10) << __func__ << " " << *m << dendl;
- assert(m->get_action() == CEPH_MDS_LEASE_REVOKE);
+ ceph_assert(m->get_action() == CEPH_MDS_LEASE_REVOKE);
mds_rank_t mds = mds_rank_t(m->get_source().num());
MetaSession *session = _get_mds_session(mds, m->get_connection().get());
ldout(cct, 10) << __func__ << " deleting " << *in << dendl;
bool unclean = objectcacher->release_set(&in->oset);
- assert(!unclean);
+ ceph_assert(!unclean);
inode_map.erase(in->vino());
if (use_faked_inos())
_release_faked_ino(in);
{
Inode *in = dir->parent_inode;
ldout(cct, 15) << __func__ << " dir " << dir << " on " << in << dendl;
- assert(dir->is_empty());
- assert(in->dir == dir);
- assert(in->dentries.size() < 2); // dirs can't be hard-linked
+ ceph_assert(dir->is_empty());
+ ceph_assert(in->dir == dir);
+ ceph_assert(in->dentries.size() < 2); // dirs can't be hard-linked
if (!in->dentries.empty())
in->get_first_parent()->put(); // unpin dentry
ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in
<< " dn " << dn << " (new dn)" << dendl;
} else {
- assert(!dn->inode);
+ ceph_assert(!dn->inode);
ldout(cct, 15) << "link dir " << dir->parent_inode << " '" << name << "' to inode " << in
<< " dn " << dn << " (old dn)" << dendl;
}
if (in->is_dir() && !in->dentries.empty()) {
tmp_ref = in; // prevent unlink below from freeing the inode.
Dentry *olddn = in->get_first_parent();
- assert(olddn->dir != dir || olddn->name != name);
+ ceph_assert(olddn->dir != dir || olddn->name != name);
Inode *old_diri = olddn->dir->parent_inode;
old_diri->dir_release_count++;
clear_dir_complete_and_ordered(old_diri, true);
public:
C_Client_FlushComplete(Client *c, Inode *in) : client(c), inode(in) { }
void finish(int r) override {
- assert(client->client_lock.is_locked_by_me());
+ ceph_assert(client->client_lock.is_locked_by_me());
if (r != 0) {
client_t const whoami = client->whoami; // For the benefit of ldout prefix
ldout(client->cct, 1) << "I/O error from flush on inode " << inode
(used & CEPH_CAP_FILE_WR) ||
(dirty & CEPH_CAP_ANY_WR)) {
const auto &capsnapem = in->cap_snaps.emplace(std::piecewise_construct, std::make_tuple(old_snapc.seq), std::make_tuple(in));
- assert(capsnapem.second); /* element inserted */
+ ceph_assert(capsnapem.second); /* element inserted */
CapSnap &capsnap = capsnapem.first->second;
capsnap.context = old_snapc;
capsnap.issued = in->caps_issued();
void Client::flush_snaps(Inode *in, bool all_again)
{
ldout(cct, 10) << "flush_snaps on " << *in << " all_again " << all_again << dendl;
- assert(in->cap_snaps.size());
+ ceph_assert(in->cap_snaps.size());
// pick auth mds
- assert(in->auth_cap);
+ ceph_assert(in->auth_cap);
MetaSession *session = in->auth_cap->session;
int mseq = in->auth_cap->mseq;
m->inline_data = in->inline_data;
}
- assert(!session->flushing_caps_tids.empty());
+ ceph_assert(!session->flushing_caps_tids.empty());
m->set_oldest_flush_tid(*session->flushing_caps_tids.begin());
session->con->send_message2(m);
}
void finish(int r) override {
// _async_invalidate takes the lock when it needs to, so call this back from outside of the lock.
- assert(!client->client_lock.is_locked_by_me());
+ ceph_assert(!client->client_lock.is_locked_by_me());
client->_async_invalidate(ino, offset, length);
}
};
void Client::_flush_range(Inode *in, int64_t offset, uint64_t size)
{
- assert(client_lock.is_locked());
+ ceph_assert(client_lock.is_locked());
if (!in->oset.dirty_or_tx) {
ldout(cct, 10) << " nothing to flush" << dendl;
return;
void Client::flush_set_callback(ObjectCacher::ObjectSet *oset)
{
// Mutex::Locker l(client_lock);
- assert(client_lock.is_locked()); // will be called via dispatch() -> objecter -> ...
+ ceph_assert(client_lock.is_locked()); // will be called via dispatch() -> objecter -> ...
Inode *in = static_cast<Inode *>(oset->parent);
- assert(in);
+ ceph_assert(in);
_flushed(in);
}
int flags, const UserPerm& cap_perms)
{
if (!in->is_any_caps()) {
- assert(in->snaprealm == 0);
+ ceph_assert(in->snaprealm == 0);
in->snaprealm = get_snap_realm(realm);
in->snaprealm->inodes_with_caps.push_back(&in->snaprealm_item);
ldout(cct, 15) << __func__ << " first one, opened snaprealm " << in->snaprealm << dendl;
} else {
- assert(in->snaprealm);
+ ceph_assert(in->snaprealm);
if ((flags & CEPH_CAP_FLAG_AUTH) &&
realm != inodeno_t(-1) && in->snaprealm->ino != realm) {
in->snaprealm_item.remove_myself();
* don't remove caps.
*/
if (ceph_seq_cmp(seq, cap.seq) <= 0) {
- assert(&cap == in->auth_cap);
- assert(cap.cap_id == cap_id);
+ ceph_assert(&cap == in->auth_cap);
+ ceph_assert(cap.cap_id == cap_id);
seq = cap.seq;
mseq = cap.mseq;
issued |= cap.issued;
in.auth_cap = NULL;
}
size_t n = in.caps.erase(mds);
- assert(n == 1);
+ ceph_assert(n == 1);
cap = nullptr;
if (!in.is_any_caps()) {
public:
explicit C_Client_Remount(Client *c) : client(c) {}
void finish(int r) override {
- assert(r == 0);
+ ceph_assert(r == 0);
client->_do_remount();
}
};
for (auto p = dir->dentries.begin(); p != dir->dentries.end(); ) {
Dentry *dn = p->second;
++p;
- assert(!dn->inode);
+ ceph_assert(!dn->inode);
if (dn->lru_is_expireable())
unlink(dn, true, false); // keep dir, drop dentry
}
MetaSession *session = in->auth_cap->session;
int flushing = in->dirty_caps;
- assert(flushing);
+ ceph_assert(flushing);
ceph_tid_t flush_tid = ++last_flush_tid;
in->flushing_cap_tids[flush_tid] = flushing;
{
ldout(cct, 10) << __func__ << " " << in << " mds." << session->mds_num << dendl;
Cap *cap = in->auth_cap;
- assert(cap->session == session);
+ ceph_assert(cap->session == session);
for (map<ceph_tid_t,int>::iterator p = in->flushing_cap_tids.begin();
p != in->flushing_cap_tids.end();
{
while (in->flushing_caps) {
map<ceph_tid_t, int>::iterator it = in->flushing_cap_tids.begin();
- assert(it != in->flushing_cap_tids.end());
+ ceph_assert(it != in->flushing_cap_tids.end());
if (it->first > want)
break;
ldout(cct, 10) << __func__ << " on " << *in << " flushing "
for (xlist<Inode*>::iterator p = session->flushing_caps.begin(); !p.end(); ++p) {
Inode *in = *p;
- assert(in->auth_cap);
+ ceph_assert(in->auth_cap);
// if flushing caps were revoked, we re-send the cap flush in client reconnect
// stage. This guarantees that MDS processes the cap flush message before issuing
SnapRealm *realm = 0;
if (m->head.op == CEPH_SNAP_OP_SPLIT) {
- assert(m->head.split);
+ ceph_assert(m->head.split);
SnapRealmInfo info;
auto p = m->bl.cbegin();
decode(info, p);
- assert(info.ino() == m->head.split);
+ ceph_assert(info.ino() == m->head.split);
// flush, then move, ino's.
realm = get_snap_realm(info.ino());
void Client::handle_cap_trunc(MetaSession *session, Inode *in, MClientCaps *m)
{
mds_rank_t mds = session->mds_num;
- assert(in->caps.count(mds));
+ ceph_assert(in->caps.count(mds));
ldout(cct, 10) << __func__ << " on ino " << *in
<< " size " << in->size << " -> " << m->get_size()
void Client::handle_cap_flushsnap_ack(MetaSession *session, Inode *in, MClientCaps *m)
{
mds_rank_t mds = session->mds_num;
- assert(in->caps.count(mds));
+ ceph_assert(in->caps.count(mds));
snapid_t follows = m->get_snap_follows();
if (in->cap_snaps.count(follows)) {
}
void finish(int r) override {
// _async_dentry_invalidate is responsible for its own locking
- assert(!client->client_lock.is_locked_by_me());
+ ceph_assert(!client->client_lock.is_locked_by_me());
client->_async_dentry_invalidate(dirino, ino, name);
}
};
const std::string &mds_spec,
std::vector<mds_gid_t> *targets)
{
- assert(fsmap);
- assert(targets != nullptr);
+ ceph_assert(fsmap);
+ ceph_assert(targets != nullptr);
mds_role_t role;
std::stringstream ss;
*/
int Client::authenticate()
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(client_lock.is_locked_by_me());
if (monclient->is_authenticated()) {
return 0;
monclient->renew_subs();
wait_on_list(waiting_for_fsmap);
}
- assert(fsmap_user);
- assert(fsmap_user->get_epoch() >= fsmap_latest);
+ ceph_assert(fsmap_user);
+ ceph_assert(fsmap_user->get_epoch() >= fsmap_latest);
} else {
if (!fsmap || fsmap->get_epoch() < fsmap_latest) {
monclient->sub_want("fsmap", fsmap_latest, CEPH_SUBSCRIBE_ONETIME);
monclient->renew_subs();
wait_on_list(waiting_for_fsmap);
}
- assert(fsmap);
- assert(fsmap->get_epoch() >= fsmap_latest);
+ ceph_assert(fsmap);
+ ceph_assert(fsmap->get_epoch() >= fsmap_latest);
}
ldout(cct, 10) << __func__ << " finished waiting for FSMap version "
<< fsmap_latest << dendl;
break;
}
- assert(root);
+ ceph_assert(root);
_ll_get(root);
mounted = true;
Inode *in = p.second;
if (!in) {
ldout(cct, 0) << "null inode_map entry ino " << p.first << dendl;
- assert(in);
+ ceph_assert(in);
}
// prevent inode from getting freed
dump_cache(NULL);
}
}
- assert(lru.lru_get_size() == 0);
- assert(inode_map.empty());
+ ceph_assert(lru.lru_get_size() == 0);
+ ceph_assert(inode_map.empty());
// stop tracing
if (!cct->_conf->client_trace.empty()) {
{
if (cct->_conf->client_debug_inject_tick_delay > 0) {
sleep(cct->_conf->client_debug_inject_tick_delay);
- assert(0 == cct->_conf.set_val("client_debug_inject_tick_delay", "0"));
+ ceph_assert(0 == cct->_conf.set_val("client_debug_inject_tick_delay", "0"));
cct->_conf.apply_changes(nullptr);
}
cct->_conf->client_tick_interval,
new FunctionContext([this](int) {
// Called back via Timer, which takes client_lock for us
- assert(client_lock.is_locked_by_me());
+ ceph_assert(client_lock.is_locked_by_me());
tick();
}));
utime_t now = ceph_clock_now();
cur = root;
else
cur = cwd;
- assert(cur);
+ ceph_assert(cur);
ldout(cct, 10) << __func__ << " " << path << dendl;
void Client::_readdir_rechoose_frag(dir_result_t *dirp)
{
- assert(dirp->inode);
+ ceph_assert(dirp->inode);
if (dirp->hash_order())
return;
int Client::_readdir_get_frag(dir_result_t *dirp)
{
- assert(dirp);
- assert(dirp->inode);
+ ceph_assert(dirp);
+ ceph_assert(dirp->inode);
// get the current frag.
frag_t fg;
int Client::_readdir_cache_cb(dir_result_t *dirp, add_dirent_cb_t cb, void *p,
int caps, bool getref)
{
- assert(client_lock.is_locked());
+ ceph_assert(client_lock.is_locked());
ldout(cct, 10) << __func__ << " " << dirp << " on " << dirp->inode->ino
<< " last_name " << dirp->last_name << " offset " << hex << dirp->offset << dec
<< dendl;
if (dirp->offset == 0) {
ldout(cct, 15) << " including ." << dendl;
- assert(diri->dentries.size() < 2); // can't have multiple hard-links to a dir
+ ceph_assert(diri->dentries.size() < 2); // can't have multiple hard-links to a dir
uint64_t next_off = 1;
int r;
if (diri->dir_ordered_count == dirp->ordered_count) {
ldout(cct, 10) << " marking (I_COMPLETE|I_DIR_ORDERED) on " << *diri << dendl;
if (diri->dir) {
- assert(diri->dir->readdir_cache.size() >= dirp->cache_index);
+ ceph_assert(diri->dir->readdir_cache.size() >= dirp->cache_index);
diri->dir->readdir_cache.resize(dirp->cache_index);
}
diri->flags |= I_COMPLETE | I_DIR_ORDERED;
r = _open(in.get(), flags, mode, &fh, perms);
if (r >= 0) {
// allocate an integer file descriptor
- assert(fh);
+ ceph_assert(fh);
r = get_fd();
- assert(fd_map.count(r) == 0);
+ ceph_assert(fd_map.count(r) == 0);
fd_map[r] = fh;
}
if (r == 0 && inode != NULL) {
vinodeno_t vino(ino, CEPH_NOSNAP);
unordered_map<vinodeno_t,Inode*>::iterator p = inode_map.find(vino);
- assert(p != inode_map.end());
+ ceph_assert(p != inode_map.end());
*inode = p->second;
_ll_get(*inode);
}
*/
int Client::_lookup_name(Inode *ino, Inode *parent, const UserPerm& perms)
{
- assert(parent->is_dir());
+ ceph_assert(parent->is_dir());
ldout(cct, 3) << __func__ << " enter(" << ino->ino << ")" << dendl;
if (unmounting)
Fh *Client::_create_fh(Inode *in, int flags, int cmode, const UserPerm& perms)
{
- assert(in);
+ ceph_assert(in);
Fh *f = new Fh(in, flags, cmode, perms);
ldout(cct, 10) << __func__ << " " << in->ino << " mode " << cmode << dendl;
check_caps(in, 0);
}
} else {
- assert(in->snap_cap_refs > 0);
+ ceph_assert(in->snap_cap_refs > 0);
in->snap_cap_refs--;
}
while (f->pos_locked || f->pos_waiters.front() != &cond)
cond.Wait(client_lock);
ldout(cct, 10) << __func__ << " UNBLOCKING on " << f << dendl;
- assert(f->pos_waiters.front() == &cond);
+ ceph_assert(f->pos_waiters.front() == &cond);
f->pos_waiters.pop_front();
}
if (r < 0) {
goto done;
}
- assert(in->inline_version > 0);
+ ceph_assert(in->inline_version > 0);
}
retry:
}
success:
- assert(r >= 0);
+ ceph_assert(r >= 0);
if (movepos) {
// adjust fd pos
f->pos = start_pos + r;
*/
void Client::_sync_write_commit(Inode *in)
{
- assert(unsafe_sync_write > 0);
+ ceph_assert(unsafe_sync_write > 0);
unsafe_sync_write--;
put_cap_ref(in, CEPH_CAP_FILE_BUFFER);
return -ENOSPC;
}
- assert(in->snapid == CEPH_NOSNAP);
+ ceph_assert(in->snapid == CEPH_NOSNAP);
// was Fh opened as writeable?
if ((f->mode & CEPH_FILE_MODE_WR) == 0)
int r = _getattr(in, CEPH_STAT_CAP_INLINE_DATA, f->actor_perms, true);
if (r < 0)
return r;
- assert(in->inline_version > 0);
+ ceph_assert(in->inline_version > 0);
}
// copy into fresh buffer (since our write may be resub, async)
Inode *in = cwd.get();
while (in != root) {
- assert(in->dentries.size() < 2); // dirs can't be hard-linked
+ ceph_assert(in->dentries.size() < 2); // dirs can't be hard-linked
// A cwd or ancestor is unlinked
if (in->dentries.empty()) {
// Usually quota_root will == root_ancestor, but if the mount root has no
// quota but we can see a parent of it that does have a quota, we'll
// respect that one instead.
- assert(root != nullptr);
+ ceph_assert(root != nullptr);
Inode *quota_root = root->quota.is_enable() ? root : get_quota_root(root, perms);
// get_quota_root should always give us something
// because client quotas are always enabled
- assert(quota_root != nullptr);
+ ceph_assert(quota_root != nullptr);
if (quota_root && cct->_conf->client_quota_df && quota_root->quota.max_bytes) {
lock_state->remove_lock(filelock, activated_locks);
} else {
bool r = lock_state->add_lock(filelock, false, false, NULL);
- assert(r);
+ ceph_assert(r);
}
}
can_invalidate_dentries = can_invalidate;
if (can_invalidate_dentries) {
- assert(dentry_invalidate_cb);
+ ceph_assert(dentry_invalidate_cb);
ldout(cct, 1) << "using dentry_invalidate_cb" << dendl;
r = 0;
} else if (remount_cb) {
goto out;
}
- assert(in);
+ ceph_assert(in);
fill_stat(in, attr);
_ll_get(in.get());
if (r) {
return r;
}
- assert(inode != NULL);
- assert(*inode != NULL);
+ ceph_assert(inode != NULL);
+ ceph_assert(*inode != NULL);
// Num2: Request the parent inode, so that we can look up the name
Inode *parent;
}
// FIXME: I don't think this works; lookup_parent() returns 0 if the parent
// is already in cache
- assert(parent != NULL);
+ ceph_assert(parent != NULL);
// Num3: Finally, get the name (dentry) of the requested inode
r = _lookup_name(*inode, parent, perms);
stx->stx_ino = 0;
stx->stx_mask = 0;
} else {
- assert(in);
+ ceph_assert(in);
fill_statx(in, mask, stx);
_ll_get(in.get());
}
*out = NULL;
return rc;
} else {
- assert(in);
+ ceph_assert(in);
fill_statx(in, mask, stx);
_ll_get(in.get());
*out = in.get();
if (in->ll_ref == 0) {
in->get();
if (in->is_dir() && !in->dentries.empty()) {
- assert(in->dentries.size() == 1); // dirs can't be hard-linked
+ ceph_assert(in->dentries.size() == 1); // dirs can't be hard-linked
in->get_first_parent()->get(); // pin dentry
}
if (in->snapid != CEPH_NOSNAP)
ldout(cct, 20) << __func__ << " " << in << " " << in->ino << " " << num << " -> " << in->ll_ref << dendl;
if (in->ll_ref == 0) {
if (in->is_dir() && !in->dentries.empty()) {
- assert(in->dentries.size() == 1); // dirs can't be hard-linked
+ ceph_assert(in->dentries.size() == 1); // dirs can't be hard-linked
in->get_first_parent()->put(); // unpin dentry
}
if (in->snapid != CEPH_NOSNAP) {
auto p = ll_snap_ref.find(in->snapid);
- assert(p != ll_snap_ref.end());
- assert(p->second > 0);
+ ceph_assert(p != ll_snap_ref.end());
+ ceph_assert(p->second > 0);
if (--p->second == 0)
ll_snap_ref.erase(p);
}
InodeRef target(in);
int res = _ll_setattrx(in, stx, mask, perms, &target);
if (res == 0) {
- assert(in == target.get());
+ ceph_assert(in == target.get());
fill_statx(in, in->caps_issued(), stx);
}
InodeRef target(in);
int res = _ll_setattrx(in, &stx, mask, perms, &target);
if (res == 0) {
- assert(in == target.get());
+ ceph_assert(in == target.get());
fill_stat(in, attr);
}
int Client::ll_open(Inode *in, int flags, Fh **fhp, const UserPerm& perms)
{
- assert(!(flags & O_CREAT));
+ ceph_assert(!(flags & O_CREAT));
Mutex::Locker lock(client_lock);
if (r < 0)
goto out;
- assert(*in);
+ ceph_assert(*in);
ldout(cct, 20) << "_ll_create created = " << created << dendl;
if (!created) {
if (r < 0) {
if (*fhp) {
int release_r = _release_fh(*fhp);
- assert(release_r == 0); // during create, no async data ops should have happened
+ ceph_assert(release_r == 0); // during create, no async data ops should have happened
}
goto out;
}
int r = _ll_create(parent, name, mode, flags, &in, CEPH_STAT_CAP_INODE_ALL,
fhp, perms);
if (r >= 0) {
- assert(in);
+ ceph_assert(in);
// passing an Inode in outp requires an additional ref
if (outp) {
int r = _ll_create(parent, name, mode, oflags, &in, caps, fhp, perms);
if (r >= 0) {
- assert(in);
+ ceph_assert(in);
// passing an Inode in outp requires an additional ref
if (outp) {
}
void finish(int r) override {
Mutex::Locker l(client->client_lock);
- assert(req->head.op == CEPH_MDS_OP_SETFILELOCK);
+ ceph_assert(req->head.op == CEPH_MDS_OP_SETFILELOCK);
client->_interrupt_filelock(req);
client->put_request(req);
}
vector<ObjectExtent> extents;
Striper::file_to_extents(cct, in->ino, &in->layout, off, 1, in->truncate_size, extents);
- assert(extents.size() == 1);
+ ceph_assert(extents.size() == 1);
objecter->with_osdmap([&](const OSDMap& o) {
pg_t pg = o.object_locator_to_pg(extents[0].oid, extents[0].oloc);
vector<ObjectExtent> extents;
Striper::file_to_extents(cct, in->ino, &in->layout, offset, 1,
in->truncate_size, extents);
- assert(extents.size() == 1);
+ ceph_assert(extents.size() == 1);
// now we have the object and its 'layout'
return objecter->with_osdmap([&](const OSDMap& o) {
std::function<bool (const Inode &in)> test)
{
while (true) {
- assert(in != NULL);
+ ceph_assert(in != NULL);
if (test(*in)) {
return true;
}
return true;
}
- assert(in.size >= in.reported_size);
+ ceph_assert(in.size >= in.reported_size);
const uint64_t space = in.quota.max_bytes - in.rstat.rbytes;
const uint64_t size = in.size - in.reported_size;
return (space >> 4) < size;
wait_on_list(waiting_for_pool_perm);
} else {
have = it->second;
- assert(have & POOL_CHECKED);
+ ceph_assert(have & POOL_CHECKED);
break;
}
}
void Client::set_filer_flags(int flags)
{
Mutex::Locker l(client_lock);
- assert(flags == 0 ||
+ ceph_assert(flags == 0 ||
flags == CEPH_OSD_FLAG_LOCALIZE_READS);
objecter->add_global_op_flags(flags);
}
void Client::clear_filer_flags(int flags)
{
Mutex::Locker l(client_lock);
- assert(flags == CEPH_OSD_FLAG_LOCALIZE_READS);
+ ceph_assert(flags == CEPH_OSD_FLAG_LOCALIZE_READS);
objecter->clear_global_op_flag(flags);
}
mds_rank_t Client::_get_random_up_mds() const
{
- assert(client_lock.is_locked_by_me());
+ ceph_assert(client_lock.is_locked_by_me());
std::set<mds_rank_t> up;
mdsmap->get_up_mds_set(up);
objecter->init();
client_lock.Lock();
- assert(!is_initialized());
+ ceph_assert(!is_initialized());
messenger->add_dispatcher_tail(objecter);
messenger->add_dispatcher_tail(this);