// Trim the onode/buffer caches down to the given byte budgets.
// The '+' hunk below (from the objectstore_blackhole patch) makes trim a
// no-op while the blackhole is active: IOs are being dropped a layer down,
// so evicting cache entries would serve no purpose.
void BlueStore::Cache::trim(uint64_t onode_max, uint64_t buffer_max)
{
std::lock_guard l(lock);
+ if (cct->_conf->objectstore_blackhole) {
+ // do not trim if we are throwing away IOs a layer down
+ return;
+ }
_trim(onode_max, buffer_max);
}
void BlueStore::Cache::trim_all()
{
std::lock_guard l(lock);
+ // we should not be shutting down after the blackhole is enabled
+ assert(!cct->_conf->objectstore_blackhole);
_trim(0, 0);
}
ObjectStore::Transaction::collect_contexts(
tls, &on_applied, &on_commit, &on_applied_sync);
- if (cct->_conf->objectstore_blackhole) {
- dout(0) << __func__ << " objectstore_blackhole = TRUE, dropping transaction"
- << dendl;
- for (auto& l : { on_applied, on_commit, on_applied_sync }) {
- for (auto c : l) {
- delete c;
- }
- }
- return 0;
- }
auto start = mono_clock::now();
Collection *c = static_cast<Collection*>(ch.get());
<< (buffered ? " (buffered)" : " (direct)")
<< dendl;
ceph_assert(is_valid_io(off, len));
+ if (cct->_conf->objectstore_blackhole) {
+ lderr(cct) << __func__ << " objectstore_blackhole=true, throwing out IO"
+ << dendl;
+ return 0;
+ }
if ((!buffered || bl.get_num_buffers() >= IOV_MAX) &&
bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) {
<< (buffered ? " (buffered)" : " (direct)")
<< dendl;
ceph_assert(is_valid_io(off, len));
+ if (cct->_conf->objectstore_blackhole) {
+ lderr(cct) << __func__ << " objectstore_blackhole=true, throwing out IO"
+ << dendl;
+ return 0;
+ }
if ((!buffered || bl.get_num_buffers() >= IOV_MAX) &&
bl.rebuild_aligned_size_and_memory(block_size, block_size, IOV_MAX)) {
int KernelDevice::discard(uint64_t offset, uint64_t len)
{
int r = 0;
+ if (cct->_conf->objectstore_blackhole) {
+ lderr(cct) << __func__ << " objectstore_blackhole=true, throwing out IO"
+ << dendl;
+ return 0;
+ }
if (support_discard) {
dout(10) << __func__
<< " 0x" << std::hex << offset << "~" << len << std::dec