conf:
client:
rbd qos iops limit: 50
+ rbd qos iops burst: 100
}
}
-int TokenBucketThrottle::set_burst(uint64_t burst){
- std::lock_guard<Mutex> lock(m_lock);
- if (0 < burst && burst < m_avg) {
- // the burst should never less than the average.
- return -EINVAL;
- } else {
- m_burst = burst;
- }
- // for the default configuration of burst.
- m_throttle.set_max(0 == m_burst ? m_avg : m_burst);
- return 0;
-}
-
-int TokenBucketThrottle::set_average(uint64_t avg) {
+int TokenBucketThrottle::set_limit(uint64_t average, uint64_t burst) {
{
std::lock_guard<Mutex> lock(m_lock);
- m_avg = avg;
- if (0 < m_burst && m_burst < avg) {
+ if (0 < burst && burst < average) {
-    // the burst should never less than the average.
+    // the burst should never be less than the average.
return -EINVAL;
- } else if (0 == avg) {
+ }
+
+ m_avg = average;
+ m_burst = burst;
+
+ if (0 == average) {
// The limit is not set, and no tokens will be put into the bucket.
// So, we can schedule the timer slowly, or even cancel it.
m_tick = 1000;
} else {
-    // calculate the tick(ms), don't less than the minimum.
+    // calculate the tick(ms), not less than the minimum.
- m_tick = 1000 / avg;
+ m_tick = 1000 / average;
if (m_tick < m_tick_min) {
m_tick = m_tick_min;
}
m_current_tick = 0;
// for the default configuration of burst.
- if (0 == m_burst) {
- m_throttle.set_max(m_avg);
- }
+ m_throttle.set_max(0 == burst ? average : burst);
}
// turn millisecond to second
m_schedule_tick = m_tick / 1000.0;
return wait;
}
- int set_burst(uint64_t burst);
- int set_average(uint64_t avg);
+ int set_limit(uint64_t average, uint64_t burst);
private:
uint64_t tokens_filled(double tick);
.set_default(0)
.set_description("the desired limit of write bytes per second"),
+ Option("rbd_qos_iops_burst", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+ .set_default(0)
+ .set_description("the desired burst limit of IO operations"),
+
+ Option("rbd_qos_bps_burst", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+ .set_default(0)
+ .set_description("the desired burst limit of IO bytes"),
+
+ Option("rbd_qos_read_iops_burst", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+ .set_default(0)
+ .set_description("the desired burst limit of read operations"),
+
+ Option("rbd_qos_write_iops_burst", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+ .set_default(0)
+ .set_description("the desired burst limit of write operations"),
+
+ Option("rbd_qos_read_bps_burst", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+ .set_default(0)
+ .set_description("the desired burst limit of read bytes"),
+
+ Option("rbd_qos_write_bps_burst", Option::TYPE_UINT, Option::LEVEL_ADVANCED)
+ .set_default(0)
+ .set_description("the desired burst limit of write bytes"),
+
Option("rbd_discard_on_zeroed_write_same", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_default(true)
.set_description("discard data on zeroed write same instead of writing zero"),
}
io_work_queue->apply_qos_limit(
+ RBD_QOS_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_iops_limit"),
- RBD_QOS_IOPS_THROTTLE);
+ config.get_val<uint64_t>("rbd_qos_iops_burst"));
io_work_queue->apply_qos_limit(
+ RBD_QOS_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_bps_limit"),
- RBD_QOS_BPS_THROTTLE);
+ config.get_val<uint64_t>("rbd_qos_bps_burst"));
io_work_queue->apply_qos_limit(
+ RBD_QOS_READ_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_read_iops_limit"),
- RBD_QOS_READ_IOPS_THROTTLE);
+ config.get_val<uint64_t>("rbd_qos_read_iops_burst"));
io_work_queue->apply_qos_limit(
+ RBD_QOS_WRITE_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_write_iops_limit"),
- RBD_QOS_WRITE_IOPS_THROTTLE);
+ config.get_val<uint64_t>("rbd_qos_write_iops_burst"));
io_work_queue->apply_qos_limit(
+ RBD_QOS_READ_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_read_bps_limit"),
- RBD_QOS_READ_BPS_THROTTLE);
+ config.get_val<uint64_t>("rbd_qos_read_bps_burst"));
io_work_queue->apply_qos_limit(
+ RBD_QOS_WRITE_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_write_bps_limit"),
- RBD_QOS_WRITE_BPS_THROTTLE);
+ config.get_val<uint64_t>("rbd_qos_write_bps_burst"));
}
ExclusiveLock<ImageCtx> *ImageCtx::create_exclusive_lock() {
}
template <typename I>
-void ImageRequestWQ<I>::apply_qos_limit(uint64_t limit, const uint64_t flag) {
+void ImageRequestWQ<I>::apply_qos_limit(const uint64_t flag,
+ uint64_t limit,
+ uint64_t burst) {
+ CephContext *cct = m_image_ctx.cct;
TokenBucketThrottle *throttle = nullptr;
for (auto pair : m_throttles) {
if (flag == pair.first) {
}
}
ceph_assert(throttle != nullptr);
- throttle->set_burst(limit);
- throttle->set_average(limit);
+
+ int r = throttle->set_limit(limit, burst);
+ if (r < 0) {
+ lderr(cct) << "invalid qos parameter: "
+ << "burst(" << burst << ") is less than "
+ << "limit(" << limit << ")" << dendl;
+ // if apply failed, we should at least make sure the limit works.
+ throttle->set_limit(limit, 0);
+ }
+
if (limit)
m_qos_enabled_flag |= flag;
else
void set_require_lock(Direction direction, bool enabled);
- void apply_qos_limit(uint64_t limit, const uint64_t flag);
-
+ void apply_qos_limit(const uint64_t flag, uint64_t limit, uint64_t burst);
protected:
void *_void_dequeue() override;
void process(ImageDispatchSpec<ImageCtxT> *req) override;
InSequence seq;
MockImageRequestWQ mock_image_request_wq(&mock_image_ctx, "io", 60, nullptr);
- mock_image_request_wq.apply_qos_limit(0, RBD_QOS_BPS_THROTTLE);
+ mock_image_request_wq.apply_qos_limit(RBD_QOS_BPS_THROTTLE, 0, 0);
expect_front(mock_image_request_wq, &mock_queued_image_request);
expect_is_refresh_request(mock_image_ctx, false);
InSequence seq;
MockImageRequestWQ mock_image_request_wq(&mock_image_ctx, "io", 60, nullptr);
- mock_image_request_wq.apply_qos_limit(1, RBD_QOS_BPS_THROTTLE);
+ mock_image_request_wq.apply_qos_limit(RBD_QOS_BPS_THROTTLE, 1, 0);
expect_front(mock_image_request_wq, &mock_queued_image_request);
expect_tokens_requested(mock_queued_image_request, 2);