wctx->buffered ? 0 : Buffer::FLAG_NOCACHE);
if (!g_conf()->bluestore_debug_omit_block_device_write) {
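+ // defer only writes strictly smaller than prefer_deferred_size;
+ // a write of exactly that size is sent straight to the block device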
- if (b_len <= prefer_deferred_size) {
+ if (b_len < prefer_deferred_size) {
dout(20) << __func__ << " deferring small 0x" << std::hex
<< b_len << std::dec << " unused write via deferred" << dendl;
bluestore_deferred_op_t *op = _get_deferred_op(txc);
ceph_assert(b_off % chunk_size == 0);
ceph_assert(blob_aligned_len() % chunk_size == 0);
- res = blob_aligned_len() <= prefer_deferred_size &&
+ res = blob_aligned_len() < prefer_deferred_size &&
blob_aligned_len() <= ondisk &&
blob.is_allocated(b_off, blob_aligned_len());
if (res) {
<< std::dec << dendl;
offset += l;
length -= l;
- logger->inc(l_bluestore_write_big_blobs, remaining ? 2 : 1);
+ logger->inc(l_bluestore_write_big_blobs, remaining ? 2 : 1);
logger->inc(l_bluestore_write_big_deferred, remaining ? 2 : 1);
continue;
}
// queue io
if (!g_conf()->bluestore_debug_omit_block_device_write) {
- if (l->length() <= prefer_deferred_size.load()) {
+ if (l->length() < prefer_deferred_size.load()) {
dout(20) << __func__ << " deferring 0x" << std::hex
<< l->length() << std::dec << " write via deferred" << dendl;
bluestore_deferred_op_t *op = _get_deferred_op(txc);
}
}
+TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite2) {
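+ // Check that only the overwrite goes through the deferred path: the initial
+ // big write is not deferred, while the overlapping overwrite of chunks smaller
+ // than prefer_deferred_size is.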
+
+ if (string(GetParam()) != "bluestore")
+ return;
+
+ size_t block_size = 4096;
+ StartDeferred(block_size);
+ SetVal(g_conf(), "bluestore_max_blob_size", "65536");
+ SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
+
+ g_conf().apply_changes(nullptr);
+
+ int r;
+ coll_t cid;
+ ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
+
+ PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters());
+
+ auto ch = store->create_new_collection(cid);
+ {
+ ObjectStore::Transaction t;
+ t.create_collection(cid, 0);
+ r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+ }
+ {
+ ObjectStore::Transaction t;
+ bufferlist bl;
+
+ bl.append(std::string(128 * 1024, 'c'));
+
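+ // initial 128KB write at offset 0x1000: regular big-write path, nothing deferred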
+ t.write(cid, hoid, 0x1000, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
+ r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+ ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
+ ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
+ ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 2u);
+ ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u);
+ ASSERT_EQ(logger->get(l_bluestore_write_deferred), 0u);
+ }
+
+ logger->reset();
+ {
+ ObjectStore::Transaction t;
+ bufferlist bl;
+
+ bl.append(std::string(128 * 1024, 'c'));
+
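+ // overwrite the same data shifted to 0x2000: the chunks hitting existing
+ // allocations are expected to take the deferred path (see the asserts below)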
+ t.write(cid, hoid, 0x2000, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
+ r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+
+ ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
+ ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
+ ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 4u);
+ ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 2u);
+ ASSERT_EQ(logger->get(l_bluestore_write_deferred), 2u);
+ }
+
+ {
+ ObjectStore::Transaction t;
+ t.remove(cid, hoid);
+ t.remove_collection(cid);
+ cerr << "Cleaning" << std::endl;
+ r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+ }
+}
+
+TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite3) {
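+ // Check that a large fresh write made of full-sized blobs is never deferred
+ // when the blob size equals prefer_deferred_size (strict '<' comparison).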
+
+ if (string(GetParam()) != "bluestore")
+ return;
+
+ size_t block_size = 4096;
+ StartDeferred(block_size);
+ SetVal(g_conf(), "bluestore_max_blob_size", "65536");
+ SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
+
+ g_conf().apply_changes(nullptr);
+
+ int r;
+ coll_t cid;
+ ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
+
+ PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters());
+
+ auto ch = store->create_new_collection(cid);
+ {
+ ObjectStore::Transaction t;
+ t.create_collection(cid, 0);
+ r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+ }
+
+ logger->reset();
+ {
+ ObjectStore::Transaction t;
+ bufferlist bl;
+
+ bl.append(std::string(4096 * 1024, 'c'));
+
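+ // 4MB write -> 64 full 64KB blobs, all written directly since 64KB is not
+ // strictly smaller than prefer_deferred_size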
+ t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
+ r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+
+ ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
+ ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
+ ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 64u);
+ ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u);
+ ASSERT_EQ(logger->get(l_bluestore_write_deferred), 0u);
+ }
+ {
+ ObjectStore::Transaction t;
+ t.remove(cid, hoid);
+ t.remove_collection(cid);
+ cerr << "Cleaning" << std::endl;
+ r = queue_transaction(store, ch, std::move(t));
+ ASSERT_EQ(r, 0);
+ }
+}
TEST_P(StoreTestSpecificAUSize, DeferredDifferentChunks) {
size_t alloc_size = 4096;
size_t large_object_size = 1 * 1024 * 1024;
+ size_t prefer_deferred_size = 65536;
StartDeferred(alloc_size);
SetVal(g_conf(), "bluestore_max_blob_size", "131072");
- SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
+ SetVal(g_conf(), "bluestore_prefer_deferred_size",
+ stringify(prefer_deferred_size).c_str());
g_conf().apply_changes(nullptr);
int r;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
- for (size_t expected_write_size = 1024; expected_write_size <= 65536; expected_write_size *= 2) {
+ for (size_t expected_write_size = 1024; expected_write_size <= prefer_deferred_size; expected_write_size *= 2) {
//create object with hint
ghobject_t hoid(hobject_t("test-"+to_string(expected_write_size), "", CEPH_NOSNAP, 0, -1, ""));
{
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
++exp_bluestore_write_big;
- ++exp_bluestore_write_big_deferred;
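+ // with the strict comparison a write of exactly prefer_deferred_size bytes
+ // is no longer deferred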
+ if (expected_write_size != prefer_deferred_size)
+ ++exp_bluestore_write_big_deferred;
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), exp_bluestore_write_big);