OPTION(bluestore_throttle_cost_per_io, OPT_U64, 0)
OPTION(bluestore_deferred_max_ops, OPT_U64, 512)
OPTION(bluestore_deferred_max_bytes, OPT_U64, 128*1024*1024)
-OPTION(bluestore_deferred_batch_ops, OPT_U64, 32)
+OPTION(bluestore_deferred_batch_ops, OPT_U64, 0)  // 0 = auto-select the hdd/ssd variant below based on bdev->is_rotational()
+OPTION(bluestore_deferred_batch_ops_hdd, OPT_U64, 64)  // used when the main device is rotational
+OPTION(bluestore_deferred_batch_ops_ssd, OPT_U64, 16)  // used when the main device is non-rotational
OPTION(bluestore_nid_prealloc, OPT_INT, 1024)
OPTION(bluestore_blobid_prealloc, OPT_U64, 10240)
OPTION(bluestore_clone_cow, OPT_BOOL, true) // do copy-on-write for clones
"bluestore_compression_max_blob_size",
"bluestore_max_alloc_size",
"bluestore_prefer_deferred_size",
+ "bleustore_deferred_batch_ops",
+ "bleustore_deferred_batch_ops_hdd",
+ "bleustore_deferred_batch_ops_ssd",
"bluestore_max_ops",
"bluestore_max_bytes",
"bluestore_deferred_max_ops",
_set_compression();
}
if (changed.count("bluestore_prefer_deferred_size") ||
- changed.count("bluestore_max_alloc_size")) {
+ changed.count("bluestore_max_alloc_size") ||
+ changed.count("bluestore_deferred_batch_ops") ||
+ changed.count("bluestore_deferred_batch_ops_hdd") ||
+ changed.count("bluestore_deferred_batch_ops_ssd")) {
if (bdev) {
// only after startup
_set_alloc_sizes();
}
}
+ if (cct->_conf->bluestore_deferred_batch_ops) {
+ deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops;
+ } else {
+ assert(bdev);
+ if (bdev->is_rotational()) {
+ deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_hdd;
+ } else {
+ deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_ssd;
+ }
+ }
+
dout(10) << __func__ << " min_alloc_size 0x" << std::hex << min_alloc_size
<< std::dec << " order " << min_alloc_size_order
<< " max_alloc_size 0x" << std::hex << max_alloc_size
- << std::dec << dendl;
+ << " prefer_deferred_size 0x" << prefer_deferred_size
+ << std::dec
+ << " deferred_batch_ops " << deferred_batch_ops
+ << dendl;
}
int BlueStore::_open_bdev(bool create)
if (!deferred_aggressive) {
std::lock_guard<std::mutex> l(deferred_lock);
- if (deferred_queue_size >= (int)g_conf->bluestore_deferred_batch_ops ||
+ if (deferred_queue_size >= deferred_batch_ops ||
throttle_deferred_bytes.past_midpoint()) {
_deferred_try_submit();
}