<< cpp_strerror(ret) << dendl;
}
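+ // serialize all SafeTimer access with timer_lock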
+ timer_lock.Lock();
timer.init();
+ timer_lock.Unlock();
initialized.set(1);
}
}
if (tick_event) {
- timer.cancel_event(tick_event);
- tick_event = NULL;
+ Mutex::Locker l(timer_lock);
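+ // only clear tick_event if the cancel succeeded; if cancel_event() returns
+ // false the tick callback is already running and clears tick_event itself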
+ if (timer.cancel_event(tick_event))
+ tick_event = NULL;
}
if (m_request_state_hook) {
logger = NULL;
}
- timer.shutdown();
+ {
+ Mutex::Locker l(timer_lock);
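+ // stop the timer under timer_lock as well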
+ timer.shutdown();
+ }
}
void Objecter::schedule_tick()
{
+ Mutex::Locker l(timer_lock);
assert(tick_event == NULL);
tick_event = new C_Tick(this);
timer.add_event_after(cct->_conf->objecter_tick_interval, tick_event);
void Objecter::tick()
{
- if (!initialized.read()) {
- schedule_tick();
- return;
- }
-
- assert(rwlock.is_locked());
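+ // take rwlock ourselves; the timer callback no longer runs with it held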
+ RWLock::RLocker rl(rwlock);
ldout(cct, 10) << "tick" << dendl;
- assert(initialized.read());
// we are only called by C_Tick
assert(tick_event);
tick_event = NULL;
+ if (!initialized.read()) {
+ // we raced with shutdown
+ return;
+ }
+
set<OSDSession*> toping;
int r = 0;
class C_CancelOp : public Context
{
- Objecter::Op *op;
+ ceph_tid_t tid;
Objecter *objecter;
public:
- C_CancelOp(Objecter::Op *op, Objecter *objecter) : op(op),
- objecter(objecter) {}
+ C_CancelOp(ceph_tid_t t, Objecter *objecter) : tid(t), objecter(objecter) {}
void finish(int r) {
- objecter->op_cancel(op->session, op->tid, -ETIMEDOUT);
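+ // cancel by tid so the callback does not need to keep an Op pointer alive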
+ objecter->op_cancel(tid, -ETIMEDOUT);
}
};
assert(op->ops.size() == op->out_rval.size());
assert(op->ops.size() == op->out_handler.size());
- if (osd_timeout > 0) {
- op->ontimeout = new C_CancelOp(op, this);
- timer.add_event_after(osd_timeout, op->ontimeout);
- }
-
// throttle. before we look at any state, because
// take_op_budget() may drop our lock while it blocks.
_take_op_budget(op);
- return _op_submit(op, lc);
+ ceph_tid_t tid = _op_submit(op, lc);
+
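+ // arm the timeout after submit, once the tid is known, under timer_lock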
+ if (osd_timeout > 0) {
+ Mutex::Locker l(timer_lock);
+ op->ontimeout = new C_CancelOp(tid, this);
+ timer.add_event_after(osd_timeout, op->ontimeout);
+ }
+
+ return tid;
}
ceph_tid_t Objecter::_op_submit(Op *op, RWLock::Context& lc)
put_op_budget(op);
if (op->ontimeout) {
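+ // all SafeTimer add/cancel calls below now take timer_lock first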
+ Mutex::Locker l(timer_lock);
timer.cancel_event(op->ontimeout);
}
{
assert(rwlock.is_locked());
if (mon_timeout > 0) {
+ Mutex::Locker l(timer_lock);
op->ontimeout = new C_CancelPoolOp(op->tid, this);
timer.add_event_after(mon_timeout, op->ontimeout);
}
logger->set(l_osdc_poolop_active, pool_ops.size());
if (op->ontimeout) {
+ Mutex::Locker l(timer_lock);
timer.cancel_event(op->ontimeout);
}
op->onfinish = onfinish;
op->ontimeout = NULL;
if (mon_timeout > 0) {
+ Mutex::Locker l(timer_lock);
op->ontimeout = new C_CancelPoolStatOp(op->tid, this);
timer.add_event_after(mon_timeout, op->ontimeout);
}
logger->set(l_osdc_poolstat_active, poolstat_ops.size());
if (op->ontimeout) {
+ Mutex::Locker l(timer_lock);
timer.cancel_event(op->ontimeout);
}
op->onfinish = onfinish;
op->ontimeout = NULL;
if (mon_timeout > 0) {
+ Mutex::Locker l(timer_lock);
op->ontimeout = new C_CancelStatfsOp(op->tid, this);
timer.add_event_after(mon_timeout, op->ontimeout);
}
logger->set(l_osdc_statfs_active, statfs_ops.size());
if (op->ontimeout) {
+ Mutex::Locker l(timer_lock);
timer.cancel_event(op->ontimeout);
}
(void)_calc_command_target(c);
_assign_command_session(c);
if (osd_timeout > 0) {
+ Mutex::Locker l(timer_lock);
c->ontimeout = new C_CancelCommandOp(c->session, tid, this);
timer.add_event_after(osd_timeout, c->ontimeout);
}
c->onfinish->complete(r);
if (c->ontimeout) {
+ Mutex::Locker l(timer_lock);
timer.cancel_event(c->ontimeout);
}