From 93c164a5d4a8bdd961a6ce9eb5f94eeaca752ca2 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Mon, 20 Jun 2016 15:22:51 -0400
Subject: [PATCH] os/bluestore: limit read-amp on overwrite

If we are doing a small overwrite over a blob with a large chunk size
(say, due to a large csum order), we are better off writing into a new
allocation than doing a large read/modify/write.  If the read amp will
be more than min_alloc_size, skip the read entirely and write into a
new blob.

Signed-off-by: Sage Weil
---
 src/os/bluestore/BlueStore.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 10ea00ba37bde..c42cc85666af6 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -5419,7 +5419,8 @@ void BlueStore::_do_write_small(
       uint64_t tail_read =
         ROUND_UP_TO(b_off + b_len, chunk_size) - (b_off + b_len);
       if ((head_read || tail_read) &&
-          (b->blob.get_ondisk_length() >= b_off + b_len + tail_read)) {
+          (b->blob.get_ondisk_length() >= b_off + b_len + tail_read) &&
+          head_read + tail_read < min_alloc_size) {
         dout(20) << __func__ << " reading head 0x" << std::hex << head_read
                  << " and tail 0x" << tail_read << std::dec << dendl;
         if (head_read) {
-- 
2.39.5
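
For readers without the surrounding _do_write_small() context handy, here is a
minimal, self-contained sketch of the decision this hunk implements. It is not
BlueStore code: the helpers p2align/p2roundup, the head_read computation, the
function name should_rmw, and the example numbers are assumptions made for
illustration; only the three-part condition mirrors the patched code.

// Sketch only (assumed helpers, not BlueStore source): given a write of
// b_len bytes at offset b_off within a blob whose checksum/IO granularity
// is chunk_size, decide whether to read/modify/write in place or to skip
// the read and write into a new blob instead.
#include <cstdint>
#include <iostream>

static uint64_t p2align(uint64_t x, uint64_t a)   { return x & ~(a - 1); }
static uint64_t p2roundup(uint64_t x, uint64_t a) { return p2align(x + a - 1, a); }

bool should_rmw(uint64_t b_off, uint64_t b_len,
                uint64_t chunk_size, uint64_t ondisk_length,
                uint64_t min_alloc_size)
{
  // Bytes that must be read back to pad the write out to chunk boundaries.
  uint64_t head_read = b_off - p2align(b_off, chunk_size);
  uint64_t tail_read = p2roundup(b_off + b_len, chunk_size) - (b_off + b_len);

  return (head_read || tail_read) &&                    // some padding is needed
         ondisk_length >= b_off + b_len + tail_read &&  // tail still inside the blob
         head_read + tail_read < min_alloc_size;        // read-amp cheaper than a new allocation
}

int main() {
  // 4 KiB overwrite inside a blob with 64 KiB chunks (e.g. a large csum order):
  // head_read + tail_read is ~60 KiB, well over a 16 KiB min_alloc_size,
  // so the read is skipped and the data goes into a new blob (prints 0).
  std::cout << should_rmw(/*b_off=*/8192, /*b_len=*/4096,
                          /*chunk_size=*/65536, /*ondisk_length=*/262144,
                          /*min_alloc_size=*/16384) << "\n";
}

In that example a 4 KiB overwrite would force roughly 60 KiB of reads just to
recompute whole-chunk checksums, which is exactly the read amplification the
added head_read + tail_read < min_alloc_size check avoids.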