Break kick() into wake() and _wake() methods, depending on whether the
lock is already held. (The rename ensures that we audit/fix all
callers.)

Signed-off-by: Sage Weil <sage@inktank.com>
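
The convention applied here is the underscore one: a method prefixed with `_` assumes the caller already holds the relevant lock, while the plain-named variant takes the lock itself before doing the same work. A minimal standalone sketch of that split, with std::mutex/std::condition_variable standing in for Ceph's Mutex/Cond and a hypothetical ToyPool type (these names are illustrative, not from the patch):

    #include <mutex>
    #include <condition_variable>

    struct ToyPool {
      std::mutex _lock;
      std::condition_variable _cond;

      // _wake(): caller already holds _lock (underscore = "lock held" convention)
      void _wake() {
        _cond.notify_one();
      }

      // wake(): caller does not hold _lock, so take it before signalling
      void wake() {
        std::lock_guard<std::mutex> l(_lock);
        _cond.notify_one();
      }
    };
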
     void unlock() {
       pool->unlock();
     }
-    void kick() {
-      pool->kick();
+    void wake() {
+      pool->wake();
+    }
+    void _wake() {
+      pool->_wake();
     }
     void drain() {
       pool->drain(this);
     void unlock() {
       pool->unlock();
     }
-    void kick() {
-      pool->kick();
+    /// wake up the thread pool (without lock held)
+    void wake() {
+      pool->wake();
+    }
+    /// wake up the thread pool (with lock already held)
+    void _wake() {
+      pool->_wake();
     }
     void drain() {
       pool->drain(this);
   void wait(Cond &c) {
     c.Wait(_lock);
   }
-  /// wake up a waiter
-  void kick() {
+
+  /// wake up a waiter (with lock already held)
+  void _wake() {
+    _cond.Signal();
+  }
+  /// wake up a waiter (without lock held)
+  void wake() {
+    Mutex::Locker l(_lock);
     _cond.Signal();
   }
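
The reason wake() takes Mutex::Locker before signalling is the usual one: the worker side waits on the same condition variable under the pool lock (as the wait(Cond &c) helper above shows), so a signal sent while holding that lock cannot slip in between a worker's empty-check and its wait. A rough sketch of that worker-side loop, again using standard-library primitives in place of Mutex/Cond and purely illustrative names:

    #include <mutex>
    #include <condition_variable>
    #include <deque>

    struct ToyWorkerPool {
      std::mutex _lock;
      std::condition_variable _cond;
      std::deque<int> q;
      bool stopping = false;

      // worker thread body: check and wait under _lock, process outside it
      void worker() {
        std::unique_lock<std::mutex> l(_lock);
        while (!stopping) {
          if (q.empty()) {
            _cond.wait(l);        // analogous to c.Wait(_lock) above
            continue;
          }
          int item = q.front();
          q.pop_front();
          l.unlock();
          // ... process item without holding the pool lock ...
          (void)item;
          l.lock();
        }
      }
    };
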
   logger->set(l_osd_buf, buffer::get_total_alloc());
   // periodically kick recovery work queue
-  recovery_tp.kick();
+  recovery_tp.wake();
   if (service.scrub_should_schedule()) {
     sched_scrub();
<< "to " << g_conf->osd_recovery_delay_start;
defer_recovery_until = ceph_clock_now(g_ceph_context);
defer_recovery_until += g_conf->osd_recovery_delay_start;
- recovery_wq.kick();
+ recovery_wq.wake();
}
}
     recovery_wq._queue_front(pg);
   }
-  recovery_wq.kick();
+  recovery_wq._wake();
   recovery_wq.unlock();
 }
   dout(10) << "defer_recovery " << *pg << dendl;
   // move pg to the end of the queue...
-  recovery_wq.lock();
-  recovery_wq._enqueue(pg);
-  recovery_wq.kick();
-  recovery_wq.unlock();
+  recovery_wq.queue(pg);
 }
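
The last two hunks are the caller-side half of the split: inside an explicit recovery_wq.lock()/unlock() section the lock is already held, so _wake() is the right call, while the open-coded lock/_enqueue/kick/unlock sequence collapses into queue(pg), which presumably takes the lock and wakes the pool itself. A small sketch of what such a wrapper looks like under that assumption (WorkQueueLike and its int payload are illustrative stand-ins, not taken from WorkQueue.h):

    #include <mutex>
    #include <condition_variable>
    #include <deque>

    // Illustrative stand-in for a work-queue wrapper such as recovery_wq.
    struct WorkQueueLike {
      std::mutex _lock;
      std::condition_variable _cond;
      std::deque<int> items;

      void lock()   { _lock.lock(); }
      void unlock() { _lock.unlock(); }

      void _enqueue(int v) { items.push_back(v); }   // assumes lock held
      void _wake() { _cond.notify_one(); }           // assumes lock held
      void wake() {                                  // takes the lock itself
        std::lock_guard<std::mutex> l(_lock);
        _cond.notify_one();
      }

      // queue(): convenience wrapper that locks, enqueues and wakes in one call
      void queue(int v) {
        std::lock_guard<std::mutex> l(_lock);
        _enqueue(v);
        _wake();
      }
    };

    int main() {
      WorkQueueLike wq;

      // already inside an explicit lock()/unlock() section: use _wake()
      wq.lock();
      wq._enqueue(1);
      wq._wake();
      wq.unlock();

      // not holding the lock: use wake(), or let queue() do lock+enqueue+wake
      wq.wake();
      wq.queue(2);
      return 0;
    }
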