Sadly, you can't implicitly convert non-const references to shared pointers, so pass raw pointers instead, avoiding the atomic ops necessary when copying a shared_ptr.
Signed-off-by: Greg Farnum <greg@inktank.com>
if (!object->is_auth()) {
if (!mdr->locks.empty())
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
if (object->is_ambiguous_auth()) {
// wait
dout(10) << " ambiguous auth, waiting to authpin " << *object << dendl;
}
if (!object->can_auth_pin()) {
// wait
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
mdr->drop_local_auth_pins();
if (auth_pin_nonblock) {
dout(10) << " can't auth_pin (freezing?) " << *object << ", nonblocking" << dendl;
mdr->remote_wrlocks[have] != (*remote_wrlocks)[have]) {
dout(10) << " unlocking remote_wrlock on wrong mds." << mdr->remote_wrlocks[have]
<< " " << *have << " " << *have->get_parent() << dendl;
- remote_wrlock_finish(have, mdr->remote_wrlocks[have], mdr);
+ remote_wrlock_finish(have, mdr->remote_wrlocks[have], mdr.get());
}
}
if (need_wrlock || need_remote_wrlock) {
else if (need_remote_wrlock) // acquire remote_wrlock first
dout(10) << " unlocking out-of-order " << *lock << " " << *lock->get_parent() << dendl;
bool need_issue = false;
- wrlock_finish(lock, mdr, &need_issue);
+ wrlock_finish(lock, mdr.get(), &need_issue);
if (need_issue)
issue_set.insert(static_cast<CInode*>(lock->get_parent()));
}
dout(10) << " unlocking out-of-order " << *stray << " " << *stray->get_parent() << dendl;
bool need_issue = false;
if (mdr->xlocks.count(stray)) {
- xlock_finish(stray, mdr, &need_issue);
+ xlock_finish(stray, mdr.get(), &need_issue);
} else if (mdr->rdlocks.count(stray)) {
- rdlock_finish(stray, mdr, &need_issue);
+ rdlock_finish(stray, mdr.get(), &need_issue);
} else {
// may have acquired both wrlock and remote wrlock
if (mdr->wrlocks.count(stray))
- wrlock_finish(stray, mdr, &need_issue);
+ wrlock_finish(stray, mdr.get(), &need_issue);
if (mdr->remote_wrlocks.count(stray))
- remote_wrlock_finish(stray, mdr->remote_wrlocks[stray], mdr);
+ remote_wrlock_finish(stray, mdr->remote_wrlocks[stray], mdr.get());
}
if (need_issue)
issue_set.insert(static_cast<CInode*>(stray->get_parent()));
// lock
if (mdr->locking && *p != mdr->locking) {
- cancel_locking(mdr, &issue_set);
+ cancel_locking(mdr.get(), &issue_set);
}
if (xlocks.count(*p)) {
if (!xlock_start(*p, mdr))
if (need_remote_wrlock && !(*p)->can_wrlock(mdr->get_client())) {
// can't take the wrlock because the scatter lock is gathering. need to
// release the remote wrlock, so that the gathering process can finish.
- remote_wrlock_finish(*p, mdr->remote_wrlocks[*p], mdr);
+ remote_wrlock_finish(*p, mdr->remote_wrlocks[*p], mdr.get());
remote_wrlock_start(*p, (*remote_wrlocks)[*p], mdr);
goto out;
}
dout(10) << " unlocking extra " << *stray << " " << *stray->get_parent() << dendl;
bool need_issue = false;
if (mdr->xlocks.count(stray)) {
- xlock_finish(stray, mdr, &need_issue);
+ xlock_finish(stray, mdr.get(), &need_issue);
} else if (mdr->rdlocks.count(stray)) {
- rdlock_finish(stray, mdr, &need_issue);
+ rdlock_finish(stray, mdr.get(), &need_issue);
} else {
// may have acquired both wrlock and remote wrlock
if (mdr->wrlocks.count(stray))
- wrlock_finish(stray, mdr, &need_issue);
+ wrlock_finish(stray, mdr.get(), &need_issue);
if (mdr->remote_wrlocks.count(stray))
- remote_wrlock_finish(stray, mdr->remote_wrlocks[stray], mdr);
+ remote_wrlock_finish(stray, mdr->remote_wrlocks[stray], mdr.get());
}
if (need_issue)
issue_set.insert(static_cast<CInode*>(stray->get_parent()));
}
-void Locker::set_xlocks_done(MutationRef& mut, bool skip_dentry)
+void Locker::set_xlocks_done(MutationImpl *mut, bool skip_dentry)
{
for (set<SimpleLock*>::iterator p = mut->xlocks.begin();
p != mut->xlocks.end();
}
}
-void Locker::_drop_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue)
+void Locker::_drop_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue)
{
while (!mut->rdlocks.empty()) {
bool ni = false;
}
}
-void Locker::_drop_non_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue)
+void Locker::_drop_non_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue)
{
set<int> slaves;
}
}
-void Locker::cancel_locking(MutationRef& mut, set<CInode*> *pneed_issue)
+void Locker::cancel_locking(MutationImpl *mut, set<CInode*> *pneed_issue)
{
SimpleLock *lock = mut->locking;
assert(lock);
mut->finish_locking(lock);
}
-void Locker::drop_locks(MutationRef& mut, set<CInode*> *pneed_issue)
+void Locker::drop_locks(MutationImpl *mut, set<CInode*> *pneed_issue)
{
// leftover locks
set<CInode*> my_need_issue;
mut->done_locking = false;
}
-void Locker::drop_non_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue)
+void Locker::drop_non_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue)
{
set<CInode*> my_need_issue;
if (!pneed_issue)
issue_caps_set(*pneed_issue);
}
-void Locker::drop_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue)
+void Locker::drop_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue)
{
set<CInode*> my_need_issue;
if (!pneed_issue)
mds->mdlog->flush();
}
-void Locker::rdlock_finish(SimpleLock *lock, MutationRef& mut, bool *pneed_issue)
+void Locker::rdlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue)
{
// drop ref
lock->put_rdlock();
return false;
}
-void Locker::wrlock_finish(SimpleLock *lock, MutationRef& mut, bool *pneed_issue)
+void Locker::wrlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue)
{
if (lock->get_type() == CEPH_LOCK_IVERSION ||
lock->get_type() == CEPH_LOCK_DVERSION)
}
void Locker::remote_wrlock_finish(SimpleLock *lock, int target,
- MutationRef& mut)
+ MutationImpl *mut)
{
// drop ref
mut->remote_wrlocks.erase(lock);
eval_gather(lock, true, pneed_issue);
}
-void Locker::xlock_finish(SimpleLock *lock, MutationRef& mut, bool *pneed_issue)
+void Locker::xlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue)
{
if (lock->get_type() == CEPH_LOCK_IVERSION ||
lock->get_type() == CEPH_LOCK_DVERSION)
}
}
-void Locker::xlock_export(SimpleLock *lock, MutationRef& mut)
+void Locker::xlock_export(SimpleLock *lock, MutationImpl *mut)
{
dout(10) << "xlock_export on " << *lock << " " << *lock->get_parent() << dendl;
mds->send_message_client_counted(ack, client);
set<CInode*> need_issue;
- drop_locks(mut, &need_issue);
+ drop_locks(mut.get(), &need_issue);
if (!in->is_head() && !in->client_snap_caps.empty()) {
dout(10) << " client_snap_caps " << in->client_snap_caps << dendl;
}
mut->apply();
- drop_locks(mut);
+ drop_locks(mut.get());
mut->cleanup();
if (lock->is_stable())
}
}
-void Locker::local_wrlock_finish(LocalLock *lock, MutationRef& mut)
+void Locker::local_wrlock_finish(LocalLock *lock, MutationImpl *mut)
{
dout(7) << "local_wrlock_finish on " << *lock
<< " on " << *lock->get_parent() << dendl;
return true;
}
-void Locker::local_xlock_finish(LocalLock *lock, MutationRef& mut)
+void Locker::local_xlock_finish(LocalLock *lock, MutationImpl *mut)
{
dout(7) << "local_xlock_finish on " << *lock
<< " on " << *lock->get_parent() << dendl;
void send_lock_message(SimpleLock *lock, int msg, const bufferlist &data);
// -- locks --
- void _drop_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue);
- void _drop_non_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue);
+ void _drop_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue);
+ void _drop_non_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue);
public:
void include_snap_rdlocks(set<SimpleLock*>& rdlocks, CInode *in);
void include_snap_rdlocks_wlayout(set<SimpleLock*>& rdlocks, CInode *in,
CInode *auth_pin_freeze=NULL,
bool auth_pin_nonblock=false);
- void cancel_locking(MutationRef& mut, set<CInode*> *pneed_issue);
- void drop_locks(MutationRef& mut, set<CInode*> *pneed_issue=0);
- void set_xlocks_done(MutationRef& mut, bool skip_dentry=false);
- void drop_non_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue=0);
- void drop_rdlocks(MutationRef& mut, set<CInode*> *pneed_issue=0);
+ void cancel_locking(MutationImpl *mut, set<CInode*> *pneed_issue);
+ void drop_locks(MutationImpl *mut, set<CInode*> *pneed_issue=0);
+ void set_xlocks_done(MutationImpl *mut, bool skip_dentry=false);
+ void drop_non_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue=0);
+ void drop_rdlocks(MutationImpl *mut, set<CInode*> *pneed_issue=0);
void eval_gather(SimpleLock *lock, bool first=false, bool *need_issue=0, list<Context*> *pfinishers=0);
void eval(SimpleLock *lock, bool *need_issue);
bool _rdlock_kick(SimpleLock *lock, bool as_anon);
bool rdlock_try(SimpleLock *lock, client_t client, Context *c);
bool rdlock_start(SimpleLock *lock, MDRequestRef& mut, bool as_anon=false);
- void rdlock_finish(SimpleLock *lock, MutationRef& mut, bool *pneed_issue);
+ void rdlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue);
bool can_rdlock_set(set<SimpleLock*>& locks);
bool rdlock_try_set(set<SimpleLock*>& locks);
void rdlock_take_set(set<SimpleLock*>& locks, MutationRef& mut);
void wrlock_force(SimpleLock *lock, MutationRef& mut);
bool wrlock_start(SimpleLock *lock, MDRequestRef& mut, bool nowait=false);
- void wrlock_finish(SimpleLock *lock, MutationRef& mut, bool *pneed_issue);
+ void wrlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue);
void remote_wrlock_start(SimpleLock *lock, int target, MDRequestRef& mut);
- void remote_wrlock_finish(SimpleLock *lock, int target, MutationRef& mut);
+ void remote_wrlock_finish(SimpleLock *lock, int target, MutationImpl *mut);
bool xlock_start(SimpleLock *lock, MDRequestRef& mut);
void _finish_xlock(SimpleLock *lock, client_t xlocker, bool *pneed_issue);
- void xlock_finish(SimpleLock *lock, MutationRef& mut, bool *pneed_issue);
+ void xlock_finish(SimpleLock *lock, MutationImpl *mut, bool *pneed_issue);
- void xlock_export(SimpleLock *lock, MutationRef& mut);
+ void xlock_export(SimpleLock *lock, MutationImpl *mut);
void xlock_import(SimpleLock *lock);
void local_wrlock_grab(LocalLock *lock, MutationRef& mut);
protected:
bool local_wrlock_start(LocalLock *lock, MDRequestRef& mut);
- void local_wrlock_finish(LocalLock *lock, MutationRef& mut);
+ void local_wrlock_finish(LocalLock *lock, MutationImpl *mut);
bool local_xlock_start(LocalLock *lock, MDRequestRef& mut);
- void local_xlock_finish(LocalLock *lock, MutationRef& mut);
+ void local_xlock_finish(LocalLock *lock, MutationImpl *mut);
// file
}
mut->apply();
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
fin->complete(0);
in->pop_and_dirty_projected_inode(mut->ls);
mut->apply();
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
in->auth_unpin(this);
{
in->pop_and_dirty_projected_inode(mut->ls);
mut->apply();
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
}
{
dout(10) << "truncate_inode_logged " << *in << dendl;
mut->apply();
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
in->put(CInode::PIN_TRUNCATING);
void MDCache::request_drop_non_rdlocks(MDRequestRef& mdr)
{
request_drop_foreign_locks(mdr);
- mds->locker->drop_non_rdlocks(mdr);
+ mds->locker->drop_non_rdlocks(mdr.get());
}
void MDCache::request_drop_locks(MDRequestRef& mdr)
{
request_drop_foreign_locks(mdr);
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
}
void MDCache::request_cleanup(MDRequestRef& mdr)
mds->anchorclient->commit(atid, mut->ls);
// drop locks and finish
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
// trigger waiters
// apply
in->pop_and_dirty_projected_inode(mut->ls);
mut->apply();
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
// tell table we've committed
mds->mdlog->start_submit_entry(le, new C_MDC_FragmentCommit(this, basedirfrag,
info.resultfrags));
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
// unfreeze resulting frags
for (list<CDir*>::iterator p = info.resultfrags.begin();
mds->mdcache->request_finish(mdr);
} else if (it->second.mut) {
MutationRef& mut = it->second.mut;
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
}
// unpin path
MutationRef& mut = it->second.mut;
if (mut) {
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
}
// clean up
map<dirfrag_t, import_state_t>::iterator it = import_state.find(dir->dirfrag());
if (it->second.mut) {
- mds->locker->drop_locks(it->second.mut);
+ mds->locker->drop_locks(it->second.mut.get());
it->second.mut->cleanup();
}
import_state.erase(it);
//audit(); // this fails, bc we munge up the subtree map during handle_import_map (resolve phase)
if (mut) {
- mds->locker->drop_locks(mut);
+ mds->locker->drop_locks(mut.get());
mut->cleanup();
}
mdlog->flush();
}
} else if (mdr->did_early_reply)
- mds->locker->drop_rdlocks(mdr);
+ mds->locker->drop_rdlocks(mdr.get());
else
mdlog->flush();
}
//_rename_finish() does not send dentry link/unlink message to replicas.
// so do not set xlocks on dentries "done", the xlocks prevent dentries
// that have projected linkages from getting new replica.
- mds->locker->set_xlocks_done(mdr, mdr->client_request->get_op() == CEPH_MDS_OP_RENAME);
+ mds->locker->set_xlocks_done(mdr.get(), mdr->client_request->get_op() == CEPH_MDS_OP_RENAME);
char buf[80];
dout(10) << "early_reply " << reply->get_result()
bool need_issue = false;
switch (op) {
case MMDSSlaveRequest::OP_UNXLOCK:
- mds->locker->xlock_finish(lock, mdr, &need_issue);
+ mds->locker->xlock_finish(lock, mdr.get(), &need_issue);
break;
case MMDSSlaveRequest::OP_UNWRLOCK:
- mds->locker->wrlock_finish(lock, mdr, &need_issue);
+ mds->locker->wrlock_finish(lock, mdr.get(), &need_issue);
break;
}
if (need_issue)
break;
case MMDSSlaveRequest::OP_DROPLOCKS:
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
mdr->slave_request->put();
mdr->slave_request = 0;
break;
* a single MDS request; otherwise we'd be in
* rdlock_path_xlock_dentry.
*/
- mds->locker->drop_locks(mdr, NULL);
+ mds->locker->drop_locks(mdr.get(), NULL);
mdr->drop_local_auth_pins();
return 0;
}
if (!dir->is_complete()) {
if (dir->is_frozen()) {
dout(7) << "dir is frozen " << *dir << dendl;
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
mdr->drop_local_auth_pins();
dir->add_waiter(CDir::WAIT_UNFREEZE, new C_MDS_RetryRequest(mdcache, mdr));
return;
break;
}
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
mdr->drop_local_auth_pins();
mdcache->open_remote_dentry(dn, dnp, new C_MDS_RetryRequest(mdcache, mdr));
return;
dout(10) << " added to waiting list" << dendl;
assert(lock_state->is_waiting(set_lock));
mdr->more()->flock_was_waiting = true;
- mds->locker->drop_locks(mdr);
+ mds->locker->drop_locks(mdr.get());
mdr->drop_local_auth_pins();
cur->add_waiter(CInode::WAIT_FLOCK, new C_MDS_RetryRequest(mdcache, mdr));
}
// we only care about xlocks on the exported inode
if (lock->get_parent() == in &&
!lock->is_locallock())
- mds->locker->xlock_export(lock, mdr);
+ mds->locker->xlock_export(lock, mdr.get());
}
map<client_t,Capability::Import> peer_imported;