client:
rbd qos iops limit: 50
rbd qos iops burst: 100
+ rbd qos schedule tick min: 100
return 0;
}
+// Set the minimum token-refill tick (milliseconds) at runtime.
+// Takes m_lock, so it is safe against concurrent schedule/limit updates.
+// A value of 0 is ignored, preserving the current minimum.
+void TokenBucketThrottle::set_schedule_tick_min(uint64_t tick) {
+ std::lock_guard<Mutex> lock(m_lock);
+ if (tick != 0) {
+ m_tick_min = tick;
+ }
+}
+
uint64_t TokenBucketThrottle::tokens_filled(double tick) {
  // Tokens accumulated over `tick` ticks at the configured average
  // rate m_avg; an unconfigured throttle (m_avg == 0) fills nothing.
  if (m_avg == 0) {
    return 0;
  }
  return tick / m_ticks_per_second * m_avg;
}
Mutex m_lock;
// Minimum of the token-filling period, in milliseconds; a plain member
// (not a compile-time constant) so set_schedule_tick_min() can tune it.
- static const uint64_t m_tick_min = 50;
+ uint64_t m_tick_min = 50;
// tokens filling period, its unit is millisecond.
uint64_t m_tick = 0;
/**
}
int set_limit(uint64_t average, uint64_t burst);
+ void set_schedule_tick_min(uint64_t tick);
private:
uint64_t tokens_filled(double tick);
.set_default(0)
.set_description("the desired burst limit of write bytes"),
+ Option("rbd_qos_schedule_tick_min", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+ .set_default(50)
+ .set_min(1)
+ .set_description("minimum schedule tick (in milliseconds) for QoS"),
+
Option("rbd_discard_on_zeroed_write_same", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_default(true)
.set_description("discard data on zeroed write same instead of writing zero"),
sparse_read_threshold_bytes = get_object_size();
}
+ io_work_queue->apply_qos_schedule_tick_min(
+ config.get_val<uint64_t>("rbd_qos_schedule_tick_min"));
+
io_work_queue->apply_qos_limit(
RBD_QOS_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_iops_limit"),
}
}
+template <typename I>
+void ImageRequestWQ<I>::apply_qos_schedule_tick_min(uint64_t tick) {
+ // Propagate the configured minimum schedule tick to every registered
+ // throttle (pair.second is the TokenBucketThrottle being updated).
+ // Iterate by reference to avoid copying each container element
+ // (clang-tidy: performance-for-range-copy); also adds the missing
+ // space before the opening brace for consistency with the file.
+ for (auto &pair : m_throttles) {
+ pair.second->set_schedule_tick_min(tick);
+ }
+}
+
template <typename I>
void ImageRequestWQ<I>::apply_qos_limit(const uint64_t flag,
uint64_t limit,
void set_require_lock(Direction direction, bool enabled);
+ void apply_qos_schedule_tick_min(uint64_t tick);
+
void apply_qos_limit(const uint64_t flag, uint64_t limit, uint64_t burst);
protected:
void *_void_dequeue() override;