      cct->_conf->osd_recovery_max_single_start);
    _queue_for_recovery(awaiting_throttle.front(), to_start);
    awaiting_throttle.pop_front();
+    dout(10) << __func__ << " starting " << to_start
+             << ", recovery_ops_reserved " << recovery_ops_reserved
+             << " -> " << (recovery_ops_reserved + to_start) << dendl;
    recovery_ops_reserved += to_start;
  }
}

bool OSDService::is_recovery_active()
{
  return local_reserver.has_reservation() || remote_reserver.has_reservation();
}
+void OSDService::release_reserved_pushes(uint64_t pushes)
+{
+  Mutex::Locker l(recovery_lock);
+  dout(10) << __func__ << "(" << pushes << "), recovery_ops_reserved "
+           << recovery_ops_reserved << " -> " << (recovery_ops_reserved - pushes)
+           << dendl;
+  assert(recovery_ops_reserved >= pushes);
+  recovery_ops_reserved -= pushes;
+  _maybe_queue_recovery();
+}
+
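
Context for reviewers: the throttle being instrumented here is a mutex-guarded counter of reserved recovery pushes. Reservations are taken as items are dequeued in _maybe_queue_recovery() and handed back through release_reserved_pushes(), which immediately retries the queue so deferred items can start. Below is a minimal standalone sketch of that pattern; the names (RecoveryThrottle, max_reserved) are hypothetical stand-ins, not the actual Ceph types.

// Hypothetical sketch of the reserve/release pattern above -- not Ceph code.
#include <cassert>
#include <cstdint>
#include <deque>
#include <iostream>
#include <mutex>

class RecoveryThrottle {
  std::mutex lock;              // plays the role of recovery_lock
  uint64_t reserved = 0;        // plays the role of recovery_ops_reserved
  const uint64_t max_reserved;  // assumed cap, e.g. a max-active config value
  std::deque<int> awaiting;     // work items awaiting capacity

  // Must be called with `lock` held; starts work while capacity remains.
  void maybe_queue() {
    while (!awaiting.empty() && reserved < max_reserved) {
      uint64_t to_start = 1;    // per-item reservation, kept trivial here
      std::cout << "starting item " << awaiting.front()
                << ", reserved " << reserved << " -> "
                << (reserved + to_start) << "\n";
      awaiting.pop_front();
      reserved += to_start;
    }
  }

public:
  explicit RecoveryThrottle(uint64_t max) : max_reserved(max) {}

  void queue(int item) {
    std::lock_guard<std::mutex> l(lock);
    awaiting.push_back(item);
    maybe_queue();
  }

  // Mirrors release_reserved_pushes(): return capacity, then retry the queue.
  void release(uint64_t pushes) {
    std::lock_guard<std::mutex> l(lock);
    assert(reserved >= pushes);
    reserved -= pushes;
    maybe_queue();
  }
};

int main() {
  RecoveryThrottle t(2);
  t.queue(1); t.queue(2); t.queue(3);  // third item waits for capacity
  t.release(1);                        // frees a slot; item 3 starts
}
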
  // =========================================================
  // OPS
  void start_recovery_op(PG *pg, const hobject_t& soid);
  void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
  bool is_recovery_active();
-  void release_reserved_pushes(uint64_t pushes) {
-    Mutex::Locker l(recovery_lock);
-    assert(recovery_ops_reserved >= pushes);
-    recovery_ops_reserved -= pushes;
-    _maybe_queue_recovery();
-  }
+  void release_reserved_pushes(uint64_t pushes);
  void defer_recovery(float defer_for) {
    defer_recovery_until = ceph_clock_now();
    defer_recovery_until += defer_for;
  }
  friend ostream& operator<<(ostream& out, const OpQueueItem& item) {
-    return out << "OpQueueItem("
-               << item.get_ordering_token() << " " << *item.qitem
-               << " prio " << item.get_priority()
-               << " cost " << item.get_cost()
-               << " e" << item.get_map_epoch() << ")";
+    out << "OpQueueItem("
+        << item.get_ordering_token() << " " << *item.qitem
+        << " prio " << item.get_priority()
+        << " cost " << item.get_cost()
+        << " e" << item.get_map_epoch();
+    if (item.get_reserved_pushes()) {
+      out << " reserved_pushes " << item.get_reserved_pushes();
+    }
+    return out << ")";
  }
}; // class OpQueueItem
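
The operator<< change above switches from a single return expression to an accumulate-then-close form so that optional fields can be appended before the closing paren, and reserved_pushes is only printed when nonzero. A minimal sketch of the same pattern, using a hypothetical Item type rather than the real OpQueueItem:

// Hypothetical sketch of the "optional field before closing paren" pattern.
#include <cstdint>
#include <iostream>

struct Item {
  int prio = 5;
  uint64_t reserved_pushes = 0;  // 0 means none reserved; omitted from output

  friend std::ostream& operator<<(std::ostream& out, const Item& i) {
    out << "Item(prio " << i.prio;
    if (i.reserved_pushes) {
      out << " reserved_pushes " << i.reserved_pushes;
    }
    return out << ")";  // close once, after all optional fields
  }
};

int main() {
  Item a, b;
  b.reserved_pushes = 3;
  std::cout << a << "\n";  // prints: Item(prio 5)
  std::cout << b << "\n";  // prints: Item(prio 5 reserved_pushes 3)
}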