From be90d3afba58dcd6b1e0aaed9520052ab0e05666 Mon Sep 17 00:00:00 2001
From: xie xingguo
Date: Thu, 22 Jun 2017 10:30:55 +0800
Subject: [PATCH] os/bluestore: non-atomic min_alloc_size_order

This is not necessary because we now write min_alloc_size into disk
during mkfs. There are two accompanying changes too:

(1) don't call _set_alloc_sizes() during mkfs() any more, which is
not necessary.

(2) don't update min_alloc_size during _set_alloc_sizes(), we instead
update min_alloc_size_order each time we successfully read
min_alloc_size from disk.

Signed-off-by: xie xingguo
---
 src/os/bluestore/BlueStore.cc | 11 ++++-------
 src/os/bluestore/BlueStore.h  | 16 +++++++++-------
 2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 81b64ea6f57..ad58ef7a668 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -3981,9 +3981,6 @@ int BlueStore::_check_or_set_bdev_label(
 
 void BlueStore::_set_alloc_sizes(void)
 {
-  min_alloc_size_order = ctz(min_alloc_size);
-  assert(min_alloc_size == 1u << min_alloc_size_order);
-
   max_alloc_size = cct->_conf->bluestore_max_alloc_size;
 
   if (cct->_conf->bluestore_prefer_deferred_size) {
@@ -5076,7 +5073,6 @@ int BlueStore::mkfs()
       min_alloc_size = cct->_conf->bluestore_min_alloc_size_ssd;
     }
   }
-  _set_alloc_sizes();
   {
     bufferlist bl;
     ::encode((uint64_t)min_alloc_size, bl);
@@ -7438,6 +7434,8 @@ int BlueStore::_open_super_meta()
       uint64_t val;
       ::decode(val, p);
       min_alloc_size = val;
+      min_alloc_size_order = ctz(val);
+      assert(min_alloc_size == 1u << min_alloc_size_order);
     } catch (buffer::error& e) {
       derr << __func__ << " unable to read min_alloc_size" << dendl;
       return -EIO;
@@ -10020,12 +10018,11 @@ void BlueStore::_choose_write_options(
     dout(20) << __func__ << " will prefer large blob and csum sizes"
	     << dendl;
 
-    auto order = min_alloc_size_order.load();
     if (o->onode.expected_write_size) {
-      wctx->csum_order = std::max(order,
+      wctx->csum_order = std::max(min_alloc_size_order,
				  (uint8_t)ctz(o->onode.expected_write_size));
     } else {
-      wctx->csum_order = order;
+      wctx->csum_order = min_alloc_size_order;
     }
 
     if (wctx->compress) {
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index 7a567c4f1b4..6eb13b1f3a5 100644
--- a/src/os/bluestore/BlueStore.h
+++ b/src/os/bluestore/BlueStore.h
@@ -1875,24 +1875,26 @@ private:
   size_t block_size_order = 0; ///< bits to shift to get block size
 
   uint64_t min_alloc_size = 0; ///< minimum allocation unit (power of 2)
-  std::atomic<int> deferred_batch_ops = {0}; ///< deferred batch size
-
   ///< bits for min_alloc_size
-  std::atomic<uint8_t> min_alloc_size_order = {0};
+  uint8_t min_alloc_size_order = 0;
   static_assert(std::numeric_limits<uint8_t>::max() >
		std::numeric_limits<uint64_t>::digits,
		"not enough bits for min_alloc_size");
 
-  ///< size threshold for forced deferred writes
-  std::atomic<uint64_t> prefer_deferred_size = {0};
-
   ///< maximum allocation unit (power of 2)
   std::atomic<uint64_t> max_alloc_size = {0};
 
+  ///< number threshold for forced deferred writes
+  std::atomic<int> deferred_batch_ops = {0};
+
+  ///< size threshold for forced deferred writes
+  std::atomic<uint64_t> prefer_deferred_size = {0};
+
   ///< approx cost per io, in bytes
   std::atomic<uint64_t> throttle_cost_per_io = {0};
 
-  std::atomic<int> comp_mode = {Compressor::COMP_NONE}; ///< compression mode
+  std::atomic<int> comp_mode =
+    {Compressor::COMP_NONE}; ///< compression mode
   CompressorRef compressor;
   std::atomic<uint64_t> comp_min_blob_size = {0};
   std::atomic<uint64_t> comp_max_blob_size = {0};
-- 
2.39.5