OPTION(bluestore_warn_on_bluefs_spillover, OPT_BOOL)
OPTION(bluestore_log_op_age, OPT_DOUBLE)
OPTION(bluestore_log_omap_iterator_age, OPT_DOUBLE)
+OPTION(bluestore_debug_enforce_settings, OPT_STR)
OPTION(kstore_max_ops, OPT_U64)
OPTION(kstore_max_bytes, OPT_U64)
.set_default(1)
.set_description("log omap iteration operation if it's slower than this age (seconds)"),
+ Option("bluestore_debug_enforce_settings", Option::TYPE_STR, Option::LEVEL_DEV)
+ .set_default("default")
+ .set_enum_allowed({"default", "hdd", "ssd"})
+ .set_description("Enforces specific hw profile settings")
+ .set_long_description("'hdd' enforces settings intended for BlueStore above a rotational drive. 'ssd' enforces settings intended for BlueStore above a solid drive. 'default' - using settings for the actual hardware."),
+
+
// -----------------------------------------
// kstore
comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size;
} else {
ceph_assert(bdev);
- if (bdev->is_rotational()) {
+ if (_use_rotational_settings()) {
comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_hdd;
} else {
comp_min_blob_size = cct->_conf->bluestore_compression_min_blob_size_ssd;
comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size;
} else {
ceph_assert(bdev);
- if (bdev->is_rotational()) {
+ if (_use_rotational_settings()) {
comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_hdd;
} else {
comp_max_blob_size = cct->_conf->bluestore_compression_max_blob_size_ssd;
throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io;
} else {
ceph_assert(bdev);
- if (bdev->is_rotational()) {
+ if (_use_rotational_settings()) {
throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_hdd;
} else {
throttle_cost_per_io = cct->_conf->bluestore_throttle_cost_per_io_ssd;
max_blob_size = cct->_conf->bluestore_max_blob_size;
} else {
ceph_assert(bdev);
- if (bdev->is_rotational()) {
+ if (_use_rotational_settings()) {
max_blob_size = cct->_conf->bluestore_max_blob_size_hdd;
} else {
max_blob_size = cct->_conf->bluestore_max_blob_size_ssd;
cache_size = cct->_conf->bluestore_cache_size;
} else {
// choose global cache size based on backend type
- if (bdev->is_rotational()) {
+ if (_use_rotational_settings()) {
cache_size = cct->_conf->bluestore_cache_size_hdd;
} else {
cache_size = cct->_conf->bluestore_cache_size_ssd;
prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size;
} else {
ceph_assert(bdev);
- if (bdev->is_rotational()) {
+ if (_use_rotational_settings()) {
prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_hdd;
} else {
prefer_deferred_size = cct->_conf->bluestore_prefer_deferred_size_ssd;
deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops;
} else {
ceph_assert(bdev);
- if (bdev->is_rotational()) {
+ if (_use_rotational_settings()) {
deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_hdd;
} else {
deferred_batch_ops = cct->_conf->bluestore_deferred_batch_ops_ssd;
return bluefs->wal_is_rotational();
}
+// Decide whether the HDD ("rotational") flavor of the bluestore_* tunables
+// should be applied to this store.  The debug option
+// bluestore_debug_enforce_settings overrides the hardware probe:
+//   "hdd" -> always use the *_hdd settings
+//   "ssd" -> always use the *_ssd settings
+//   anything else ("default") -> trust what the block device reports.
+// Used by the _set_* config helpers in place of bdev->is_rotational().
+bool BlueStore::_use_rotational_settings()
+{
+  if (cct->_conf->bluestore_debug_enforce_settings == "hdd") {
+    return true;
+  }
+  if (cct->_conf->bluestore_debug_enforce_settings == "ssd") {
+    return false;
+  }
+  // No override in effect: fall back to the main device's own report.
+  return bdev->is_rotational();
+}
+
bool BlueStore::test_mount_in_use()
{
// most error conditions mean the mount is not in use (e.g., because
int allocate_freespace(uint64_t size, PExtentVector& extents) override {
return allocate_bluefs_freespace(size, &extents);
};
+
+ inline bool _use_rotational_settings();
};
inline ostream& operator<<(ostream& out, const BlueStore::volatile_statfs& s) {
ASSERT_EQ(final_len, static_cast<uint64_t>(r));
}
}
+
+// Verify that bluestore_debug_enforce_settings="hdd" really forces the
+// *_hdd tunables, regardless of the actual (test) device type: a single
+// write of exactly bluestore_max_blob_size_hdd bytes must be stored as
+// exactly one big blob (l_bluestore_write_big_blobs == 1).
+TEST_P(StoreTestSpecificAUSize, BluestoreEnforceHWSettingsHdd) {
+  if (string(GetParam()) != "bluestore")
+    return;
+
+  // Force the HDD profile before the store is started so the
+  // blob-size limits are picked up at mount time.
+  SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd");
+  StartDeferred(0x1000);
+
+  int r;
+  coll_t cid;
+  ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
+  auto ch = store->create_new_collection(cid);
+  {
+    ObjectStore::Transaction t;
+    t.create_collection(cid, 0);
+    cerr << "Creating collection " << cid << std::endl;
+    r = queue_transaction(store, ch, std::move(t));
+    ASSERT_EQ(r, 0);
+  }
+  {
+    ObjectStore::Transaction t;
+    bufferlist bl, orig;
+    // Payload sized to exactly one HDD-profile max blob.
+    string s(g_ceph_context->_conf->bluestore_max_blob_size_hdd, '0');
+    bl.append(s);
+    t.write(cid, hoid, 0, bl.length(), bl);
+    cerr << "write" << std::endl;
+    r = queue_transaction(store, ch, std::move(t));
+    ASSERT_EQ(r, 0);
+
+    // With the hdd limit enforced, the write must produce one big blob.
+    const PerfCounters* logger = store->get_perf_counters();
+    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u);
+  }
+}
+
+// Counterpart of the Hdd test: bluestore_debug_enforce_settings="ssd"
+// must force the *_ssd tunables.  Writing 8x bluestore_max_blob_size_ssd
+// bytes must therefore be split into exactly 8 big blobs
+// (l_bluestore_write_big_blobs == 8).
+TEST_P(StoreTestSpecificAUSize, BluestoreEnforceHWSettingsSsd) {
+  if (string(GetParam()) != "bluestore")
+    return;
+
+  // Force the SSD profile before the store is started so the
+  // blob-size limits are picked up at mount time.
+  SetVal(g_conf(), "bluestore_debug_enforce_settings", "ssd");
+  StartDeferred(0x1000);
+
+  int r;
+  coll_t cid;
+  ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
+  auto ch = store->create_new_collection(cid);
+  {
+    ObjectStore::Transaction t;
+    t.create_collection(cid, 0);
+    cerr << "Creating collection " << cid << std::endl;
+    r = queue_transaction(store, ch, std::move(t));
+    ASSERT_EQ(r, 0);
+  }
+  {
+    ObjectStore::Transaction t;
+    bufferlist bl, orig;
+    // Payload sized to eight SSD-profile max blobs.
+    string s(g_ceph_context->_conf->bluestore_max_blob_size_ssd * 8, '0');
+    bl.append(s);
+    t.write(cid, hoid, 0, bl.length(), bl);
+    cerr << "write" << std::endl;
+    r = queue_transaction(store, ch, std::move(t));
+    ASSERT_EQ(r, 0);
+
+    // Each ssd-sized chunk becomes its own big blob -> expect 8.
+    const PerfCounters* logger = store->get_perf_counters();
+    ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 8u);
+  }
+}
+
#endif // WITH_BLUESTORE
int main(int argc, char **argv) {