Client::Client(Messenger *m, MonClient *mc, Objecter *objecter_)
: Dispatcher(m->cct),
- timer(m->cct, client_lock),
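+ // safe_callbacks=false: the timer drops timer_lock around callbacks,
+ // so the tick callback can retake timer_lock to re-arm itself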
+ timer(m->cct, timer_lock, false),
messenger(m),
monclient(mc),
objecter(objecter_),
std::lock_guard l{client_lock};
ceph_assert(initialized);
initialized = false;
+ }
+
+ {
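+ // SafeTimer::shutdown() must be called with its lock held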
+ std::scoped_lock l(timer_lock);
timer.shutdown();
}
objecter_finisher.wait_for_empty();
int Client::mount(const std::string &mount_root, const UserPerm& perms,
bool require_mds, const std::string &fs_name)
{
- std::lock_guard lock(client_lock);
+ std::unique_lock lock(client_lock);
if (mounted) {
ldout(cct, 5) << "already mounted" << dendl;
return r;
}
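+ // tick() now takes client_lock itself, so drop it across the call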
+ lock.unlock();
tick(); // start tick
+ lock.lock();
if (require_mds) {
while (1) {
}
return mds_requests.empty();
});
- if (tick_event)
- timer.cancel_event(tick_event);
- tick_event = 0;
+
+ {
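+ // cancel any pending tick; tick_event is protected by timer_lock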
+ std::scoped_lock l(timer_lock);
+ if (tick_event)
+ timer.cancel_event(tick_event);
+ tick_event = nullptr;
+ }
cwd.reset();
void Client::tick()
{
+ ldout(cct, 20) << "tick" << dendl;
+
+ {
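+ // schedule the next tick under timer_lock before doing this tick's work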
+ std::scoped_lock l(timer_lock);
+ tick_event = timer.add_event_after(
+ cct->_conf->client_tick_interval,
+ new LambdaContext([this](int) {
+ tick();
+ }));
+ }
+
if (cct->_conf->client_debug_inject_tick_delay > 0) {
sleep(cct->_conf->client_debug_inject_tick_delay);
ceph_assert(0 == cct->_conf.set_val("client_debug_inject_tick_delay", "0"));
cct->_conf.apply_changes(nullptr);
}
- ldout(cct, 21) << "tick" << dendl;
- tick_event = timer.add_event_after(
- cct->_conf->client_tick_interval,
- new LambdaContext([this](int) {
- // Called back via Timer, which takes client_lock for us
- ceph_assert(ceph_mutex_is_locked_by_me(client_lock));
- tick();
- }));
utime_t now = ceph_clock_now();
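+ // the timer callback no longer holds client_lock for us; take it for the rest of tick()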
+ std::lock_guard lock(client_lock);
+
if (!mounted && !mds_requests.empty()) {
MetaRequest *req = mds_requests.begin()->second;
if (req->op_stamp + cct->_conf->client_mount_timeout < now) {
int r = monclient->init();
if (r < 0) {
// need to do cleanup because we're in an intermediate init state
- timer.shutdown();
+ {
+ std::scoped_lock l(timer_lock);
+ timer.shutdown();
+ }
+
client_lock.unlock();
objecter->shutdown();
objectcacher->stop();
xlist<Inode*> &get_dirty_list() { return dirty_list; }
+ /* timer_lock protects 'timer' and 'tick_event' */
+ ceph::mutex timer_lock = ceph::make_mutex("Client::timer_lock");
+ Context *tick_event = nullptr;
SafeTimer timer;
std::unique_ptr<PerfCounters> logger;
Finisher async_ino_releasor;
Finisher objecter_finisher;
- Context *tick_event = nullptr;
utime_t last_cap_renew;
CommandHook m_command_hook;
Inode *in = deleg->get_fh()->inode.get();
Client *client = in->client;
- // Called back via Timer, which takes client_lock for us
- ceph_assert(ceph_mutex_is_locked_by_me(client->client_lock));
-
lsubdout(client->cct, client, 0) << __func__ <<
": delegation return timeout for inode 0x" <<
std::hex << in->ino << ". Forcibly unmounting client. "<<
client << std::dec << dendl;
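+ // timer callbacks no longer run under client_lock; take it before forcing the unmount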
+ std::scoped_lock l(client->client_lock);
client->_unmount(false);
}
};
{
Client *client = fh->inode.get()->client;
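+ // timeout_event and the client's timer are guarded by timer_lock now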
+ std::scoped_lock l(client->timer_lock);
if (timeout_event)
return;
{
Client *client = fh->inode.get()->client;
+ std::scoped_lock l(client->timer_lock);
if (!timeout_event)
return;