Objecter *objecter;
Finisher objecter_finisher;
-
// -- Watch --
Mutex watch_lock;
SafeTimer watch_timer;
entity_inst_t())));
}
+private:
// -- pg recovery and associated throttling --
Mutex recovery_lock;
list<pair<epoch_t, PGRef> > awaiting_throttle;
#ifdef DEBUG_RECOVERY_OIDS
map<spg_t, set<hobject_t, hobject_t::BitwiseComparator> > recovery_oids;
#endif
- void start_recovery_op(PG *pg, const hobject_t& soid);
- void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
bool _recover_now(uint64_t *available_pushes);
void _maybe_queue_recovery();
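+ /// Wrap the given PG in a PGQueueable carrying a PGRecovery item and
+ /// push it onto op_wq; the caller must already hold recovery_lock.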
+ void _queue_for_recovery(
+   pair<epoch_t, PGRef> p, uint64_t reserved_pushes) {
+   assert(recovery_lock.is_locked_by_me());
+   pair<PGRef, PGQueueable> to_queue = make_pair(
+     p.second,
+     PGQueueable(
+       PGRecovery(p.first, reserved_pushes),
+       cct->_conf->osd_recovery_cost,
+       cct->_conf->osd_recovery_priority,
+       ceph_clock_now(cct),
+       entity_inst_t()));
+   op_wq.queue(to_queue);
+ }
+public:
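+ /// Mark the start/completion of a recovery op on soid; used to throttle
+ /// the number of concurrent recovery pushes.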
+ void start_recovery_op(PG *pg, const hobject_t& soid);
+ void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
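// Return reserved recovery push slots and requeue any PGs waiting on them.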
void release_reserved_pushes(uint64_t pushes) {
  Mutex::Locker l(recovery_lock);
  assert(recovery_ops_reserved >= pushes);
  recovery_ops_reserved -= pushes;
  _maybe_queue_recovery();
}
- void _queue_for_recovery(
-   pair<epoch_t, PGRef> p, uint64_t reserved_pushes) {
-   assert(recovery_lock.is_locked_by_me());
-   pair<PGRef, PGQueueable> to_queue = make_pair(
-     p.second,
-     PGQueueable(
-       PGRecovery(p.first, reserved_pushes),
-       cct->_conf->osd_recovery_cost,
-       cct->_conf->osd_recovery_priority,
-       ceph_clock_now(cct),
-       entity_inst_t()));
-   op_wq.queue(to_queue);
- }
// osd map cache (past osd maps)
Mutex map_cache_lock;