return shard_stores.invoke_on_all(
[](auto &local_store) {
return local_store.mount().handle_error(
- crimson::stateful_ec::handle([](const auto& ec) {
+ crimson::stateful_ec::assert_failure([](const auto& ec) {
crimson::get_logger(ceph_subsys_cyanstore).error(
"error mounting cyanstore: ({}) {}",
ec.value(), ec.message());
- std::exit(EXIT_FAILURE);
}));
});
}
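
This patch applies a handful of mechanical conversions, and the hunk above shows the first of them: a handle() callback that logs and then terminates by hand (std::exit() or ceph_abort()) becomes an assert_failure handler that keeps only the logging and leaves termination to the helper. A minimal before/after sketch of that shape, reusing the names from the hunk above; the assert_failure overloads (a callable taking the error, or a plain message string) are inferred from how this patch uses them, not from separate documentation:

    // Before: the lambda logs, then exits the process explicitly.
    return local_store.mount().handle_error(
      crimson::stateful_ec::handle([](const auto& ec) {
        crimson::get_logger(ceph_subsys_cyanstore).error(
          "error mounting cyanstore: ({}) {}", ec.value(), ec.message());
        std::exit(EXIT_FAILURE);
      }));

    // After: the lambda only logs; assert_failure aborts once the
    // callback returns, so no explicit exit/abort is needed.
    return local_store.mount().handle_error(
      crimson::stateful_ec::assert_failure([](const auto& ec) {
        crimson::get_logger(ceph_subsys_cyanstore).error(
          "error mounting cyanstore: ({}) {}", ec.value(), ec.message());
      }));
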
return do_with_transaction_intr<Func, true>(
Transaction::src_t::READ, name, std::forward<Func>(f)
).handle_error(
- crimson::ct_error::eagain::handle([] {
- ceph_assert(0 == "eagain impossible");
- }),
+ crimson::ct_error::eagain::assert_failure{"unexpected eagain"},
crimson::ct_error::pass_further_all{}
);
}
has_io_error = true;
wait_available_promise->set_value();
wait_available_promise.reset();
+ return seastar::now();
})
).handle_exception([FNAME, this](auto e) {
ERROR("{} got exception {}, available", get_name(), e);
ERROR("{} {} records, {}, got error {}",
get_name(), num, sizes, e);
finish_submit_batch(p_batch, std::nullopt);
+ return seastar::now();
})
).handle_exception([this, p_batch, FNAME, num, sizes=sizes](auto e) {
ERROR("{} {} records, {}, got exception {}",
return c.nm.get_super(c.t, root_tracker
).handle_error_interruptible(
eagain_iertr::pass_further{},
- crimson::ct_error::input_output_error::handle([FNAME, c] {
+ crimson::ct_error::input_output_error::assert_failure([FNAME, c] {
ERRORT("EIO during get_super()", c.t);
- ceph_abort("fatal error");
})
).si_then([c, &root_tracker, FNAME](auto&& _super) {
assert(_super);
return c.nm.read_extent(c.t, addr
).handle_error_interruptible(
eagain_iertr::pass_further{},
- crimson::ct_error::input_output_error::handle(
+ crimson::ct_error::input_output_error::assert_failure(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("EIO -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
- ceph_abort("fatal error");
}),
- crimson::ct_error::invarg::handle(
+ crimson::ct_error::invarg::assert_failure(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("EINVAL -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
- ceph_abort("fatal error");
}),
- crimson::ct_error::enoent::handle(
+ crimson::ct_error::enoent::assert_failure(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("ENOENT -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
- ceph_abort("fatal error");
}),
- crimson::ct_error::erange::handle(
+ crimson::ct_error::erange::assert_failure(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("ERANGE -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
- ceph_abort("fatal error");
})
).si_then([FNAME, c, addr, expect_is_level_tail](auto extent)
-> eagain_ifuture<Ref<Node>> {
return c.nm.get_super(c.t, root_tracker
).handle_error_interruptible(
eagain_iertr::pass_further{},
- crimson::ct_error::input_output_error::handle([FNAME, c] {
+ crimson::ct_error::input_output_error::assert_failure([FNAME, c] {
ERRORT("EIO during get_super()", c.t);
- ceph_abort("fatal error");
})
).si_then([c, root](auto&& super) {
assert(super);
return c.nm.alloc_extent(c.t, hint, alloc_size
).handle_error_interruptible(
eagain_iertr::pass_further{},
- crimson::ct_error::input_output_error::handle(
+ crimson::ct_error::input_output_error::assert_failure(
[FNAME, c, alloc_size, l_to_discard = extent->get_laddr()] {
SUBERRORT(seastore_onode,
"EIO during allocate -- node_size={}, to_discard={:x}",
c.t, alloc_size, l_to_discard);
- ceph_abort("fatal error");
})
).si_then([this, c, FNAME] (auto fresh_extent) {
SUBDEBUGT(seastore_onode,
return c.nm.retire_extent(c.t, to_discard
).handle_error_interruptible(
eagain_iertr::pass_further{},
- crimson::ct_error::input_output_error::handle(
+ crimson::ct_error::input_output_error::assert_failure(
[FNAME, c, l_to_discard = to_discard->get_laddr(),
l_fresh = fresh_extent->get_laddr()] {
SUBERRORT(seastore_onode,
"EIO during retire -- to_disgard={:x}, fresh={:x}",
c.t, l_to_discard, l_fresh);
- ceph_abort("fatal error");
}),
- crimson::ct_error::enoent::handle(
+ crimson::ct_error::enoent::assert_failure(
[FNAME, c, l_to_discard = to_discard->get_laddr(),
l_fresh = fresh_extent->get_laddr()] {
SUBERRORT(seastore_onode,
"ENOENT during retire -- to_disgard={:x}, fresh={:x}",
c.t, l_to_discard, l_fresh);
- ceph_abort("fatal error");
})
);
}).si_then([this, c] {
return c.nm.retire_extent(c.t, std::move(extent)
).handle_error_interruptible(
eagain_iertr::pass_further{},
- crimson::ct_error::input_output_error::handle(
+ crimson::ct_error::input_output_error::assert_failure(
[FNAME, c, addr] {
SUBERRORT(seastore_onode, "EIO -- addr={:x}", c.t, addr);
- ceph_abort("fatal error");
}),
- crimson::ct_error::enoent::handle(
+ crimson::ct_error::enoent::assert_failure(
[FNAME, c, addr] {
SUBERRORT(seastore_onode, "ENOENT -- addr={:x}", c.t, addr);
- ceph_abort("fatal error");
})
#ifndef NDEBUG
).si_then([c] {
return c.nm.alloc_extent(c.t, hint, extent_size
).handle_error_interruptible(
eagain_iertr::pass_further{},
- crimson::ct_error::input_output_error::handle(
+ crimson::ct_error::input_output_error::assert_failure(
[FNAME, c, extent_size, is_level_tail, level] {
SUBERRORT(seastore_onode,
"EIO -- extent_size={}, is_level_tail={}, level={}",
c.t, extent_size, is_level_tail, level);
- ceph_abort("fatal error");
})
).si_then([is_level_tail, level](auto extent) {
assert(extent);
return eagain_iertr::make_ready_future<BtreeCursor>(cursor);
#endif
}).handle_error_interruptible(
- [] (const crimson::ct_error::value_too_large& e) {
- ceph_abort("impossible path");
- },
+ crimson::ct_error::value_too_large::assert_failure{"impossible path"},
crimson::ct_error::pass_further_all{}
);
}
onode.get_metadata_hint(device->get_block_size())),
name);
}
- ).handle_error(crimson::ct_error::input_output_error::handle([FNAME] {
- ERROR("EIO when getting attrs");
- abort();
- }), crimson::ct_error::pass_further_all{});
+ ).handle_error(
+ crimson::ct_error::input_output_error::assert_failure{
+ "EIO when getting attrs"},
+ crimson::ct_error::pass_further_all{});
}
SeaStore::Shard::get_attrs_ertr::future<SeaStore::Shard::attrs_t>
return seastar::make_ready_future<omap_values_t>(std::move(attrs));
});
}
- ).handle_error(crimson::ct_error::input_output_error::handle([FNAME] {
- ERROR("EIO when getting attrs");
- abort();
- }), crimson::ct_error::pass_further_all{});
+ ).handle_error(
+ crimson::ct_error::input_output_error::assert_failure{
+ "EIO when getting attrs"},
+ crimson::ct_error::pass_further_all{});
}
seastar::future<struct stat> SeaStore::Shard::stat(
}).handle_error(
crimson::ct_error::all_same_way([&ctx](auto e) {
on_error(ctx.ext_transaction);
+ return seastar::now();
})
);
}).then([this, op_type, &ctx] {
INFO("completed");
}).handle_error(
mount_ertr::pass_further{},
- crimson::ct_error::all_same_way([] {
- ceph_assert(0 == "unhandled error");
- return mount_ertr::now();
- })
+ crimson::ct_error::assert_all{"unhandled error"}
);
}
return result.refcount;
}).handle_error_interruptible(
ref_iertr::pass_further{},
- ct_error::all_same_way([](auto e) {
- ceph_assert(0 == "unhandled error, TODO");
- }));
+ ct_error::assert_all{"unhandled error, TODO"});
}
TransactionManager::ref_ret TransactionManager::inc_ref(
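
When the removed lambda did nothing but assert, the conversion drops the callback entirely: all_same_way plus ceph_assert(0 == "...") collapses into assert_all{"..."}, which carries the message itself, and the same idea applies per error type as assert_failure{"..."}. A sketch of both shapes, lifted from hunks in this patch; treating these brace-constructed handlers as drop-in replacements for the old lambdas is an assumption based only on how the patch uses them:

    // Catch-all form: the message rides on assert_all, no lambda needed.
    }).handle_error_interruptible(
      ref_iertr::pass_further{},
      ct_error::assert_all{"unhandled error, TODO"});

    // Per-error-type form: assert on one error, pass the rest through.
    ).handle_error(
      crimson::ct_error::eagain::assert_failure{"unexpected eagain"},
      crimson::ct_error::pass_further_all{}
    );
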
});
}).handle_error(
submit_transaction_iertr::pass_further{},
- crimson::ct_error::all_same_way([](auto e) {
- ceph_assert(0 == "Hit error submitting to journal");
- })
+ crimson::ct_error::assert_all{"Hit error submitting to journal"}
);
});
}
{
return msgr.bind(addrs).safe_then([this, &msgr]() mutable {
return msgr.start({this});
- }, crimson::net::Messenger::bind_ertr::all_same_way(
+ }, crimson::net::Messenger::bind_ertr::assert_all_func(
[addrs] (const std::error_code& e) {
logger().error("heartbeat messenger bind({}): {}", addrs, e);
- ceph_abort();
}));
}
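
For the messenger bind() and listen() paths the log line is worth keeping, so all_same_way(log, then ceph_abort) becomes assert_all_func(log only); the abort moves into the helper. Sketch of the shape, reusing the hunk above (that assert_all_func invokes the callable and then asserts is assumed from this patch, not from separate documentation):

    // Before: log, then abort by hand.
    }, crimson::net::Messenger::bind_ertr::all_same_way(
      [addrs] (const std::error_code& e) {
        logger().error("heartbeat messenger bind({}): {}", addrs, e);
        ceph_abort();
      }));

    // After: the callback only logs; the assert/abort is implicit.
    }, crimson::net::Messenger::bind_ertr::assert_all_func(
      [addrs] (const std::error_code& e) {
        logger().error("heartbeat messenger bind({}): {}", addrs, e);
      }));
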
LOG_PREFIX(OSD::mkfs);
return store.start().then([&store, FNAME, osd_uuid] {
return store.mkfs(osd_uuid).handle_error(
- crimson::stateful_ec::handle([FNAME] (const auto& ec) {
+ crimson::stateful_ec::assert_failure([FNAME] (const auto& ec) {
ERROR("error creating empty object store in {}: ({}) {}",
local_conf().get_val<std::string>("osd_data"),
ec.value(), ec.message());
- std::exit(EXIT_FAILURE);
}));
}).then([&store, FNAME] {
return store.mount().handle_error(
- crimson::stateful_ec::handle([FNAME](const auto& ec) {
+ crimson::stateful_ec::assert_failure([FNAME](const auto& ec) {
ERROR("error mounting object store in {}: ({}) {}",
local_conf().get_val<std::string>("osd_data"),
ec.value(), ec.message());
- std::exit(EXIT_FAILURE);
}));
}).then([&store] {
return open_or_create_meta_coll(store);
whoami, get_shard_services(),
*monc, *hb_front_msgr, *hb_back_msgr});
return store.mount().handle_error(
- crimson::stateful_ec::handle([FNAME] (const auto& ec) {
+ crimson::stateful_ec::assert_failure([FNAME] (const auto& ec) {
ERROR("error mounting object store in {}: ({}) {}",
local_conf().get_val<std::string>("osd_data"),
ec.value(), ec.message());
- std::exit(EXIT_FAILURE);
}));
}).then([this] {
return open_meta_coll();
cluster_msgr->bind(pick_addresses(CEPH_PICK_ADDRESS_CLUSTER))
.safe_then([this, dispatchers]() mutable {
return cluster_msgr->start(dispatchers);
- }, crimson::net::Messenger::bind_ertr::all_same_way(
+ }, crimson::net::Messenger::bind_ertr::assert_all_func(
[FNAME] (const std::error_code& e) {
ERROR("cluster messenger bind(): {}", e);
- ceph_abort();
})),
public_msgr->bind(pick_addresses(CEPH_PICK_ADDRESS_PUBLIC))
.safe_then([this, dispatchers]() mutable {
return public_msgr->start(dispatchers);
- }, crimson::net::Messenger::bind_ertr::all_same_way(
+ }, crimson::net::Messenger::bind_ertr::assert_all_func(
[FNAME] (const std::error_code& e) {
ERROR("public messenger bind(): {}", e);
- ceph_abort();
})));
}).then_unpack([this] {
return seastar::when_all_succeed(monc->start(),
return store.read(coll,
osdmap_oid(e), 0, 0,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED).handle_error(
- read_errorator::all_same_way([e] {
+ read_errorator::assert_all_func([e](const auto&) {
ceph_abort_msg(fmt::format("{} read gave enoent on {}",
__func__, osdmap_oid(e)));
}));
std::make_tuple(std::move(pi),
std::move(name),
std::move(ec_profile)));
- },read_errorator::all_same_way([pool] {
+ }, read_errorator::assert_all_func([pool](const auto&) {
throw std::runtime_error(fmt::format("read gave enoent on {}",
final_pool_info_oid(pool)));
}));
ct_error::all_same_way([FNAME, &pg, &obj, &entry](auto e) {
DEBUGDPP("obj: {} stat error", pg, obj);
entry.stat_error = true;
+ return seastar::now();
})
).then_interruptible([FNAME, this, &pg, &obj] {
if (deep) {
ct_error::all_same_way([&progress, &entry](auto e) {
entry.read_error = true;
progress.offset = std::nullopt;
+ return seastar::now();
})
).then([] {
return interruptor::make_interruptible(
).safe_then([&progress](auto bl) {
progress.omap_hash << bl;
}).handle_error(
- ct_error::enodata::handle([] {}),
+ ct_error::enodata::handle([] { return seastar::now(); }),
ct_error::all_same_way([&entry](auto e) {
entry.read_error = true;
+ return seastar::now();
})
).then([&progress] {
progress.header_done = true;
pg, *this, obj, progress, e);
progress.keys_done = true;
entry.read_error = true;
+ return seastar::now();
})
).then([] {
return interruptor::make_interruptible(
// TODO: may need eio handling?
logger().error("recover_object saw error code {}, ignoring object {}",
code, soid);
+ return seastar::now();
}));
});
}
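
The remaining additions are the bare return seastar::now(); lines inside catch-all handlers that log or set a flag and then continue: lambdas that used to fall off the end returning void now finish with an explicit ready future. That these handlers are expected to return a future is inferred from the patch itself; the sketch below only shows the shape of the change:

    // Before: the handler returns void.
    ct_error::all_same_way([&entry](auto e) {
      entry.read_error = true;
    })

    // After: the handler ends with an explicit, already-resolved future.
    ct_error::all_same_way([&entry](auto e) {
      entry.read_error = true;
      return seastar::now();
    })
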
return server.msgr->bind(entity_addrvec_t{addr}
).safe_then([&server] {
return server.msgr->start({&server});
- }, crimson::net::Messenger::bind_ertr::all_same_way(
+ }, crimson::net::Messenger::bind_ertr::assert_all_func(
[addr] (const std::error_code& e) {
logger().error("Server: "
"there is another instance running at {}", addr);
- ceph_abort();
}));
});
}
uuid_d uuid;
uuid.generate_random();
return fs->mkfs(uuid).handle_error(
- crimson::stateful_ec::handle([] (const auto& ec) {
+ crimson::stateful_ec::assert_failure([] (const auto& ec) {
crimson::get_logger(ceph_subsys_test)
.error("error creating empty object store in {}: ({}) {}",
crimson::common::local_conf().get_val<std::string>("osd_data"),
ec.value(), ec.message());
- std::exit(EXIT_FAILURE);
}));
}).then([this] {
return fs->stop();
}).then([this] {
return fs->mount(
).handle_error(
- crimson::stateful_ec::handle([] (const auto& ec) {
+ crimson::stateful_ec::assert_failure([] (const auto& ec) {
crimson::get_logger(
ceph_subsys_test
).error(
crimson::common::local_conf().get_val<std::string>("osd_data"),
ec.value(),
ec.message());
- std::exit(EXIT_FAILURE);
}));
}).then([this] {
return seastar::do_for_each(
}).then([this] {
return fs->mount(
).handle_error(
- crimson::stateful_ec::handle([] (const auto& ec) {
+ crimson::stateful_ec::assert_failure([] (const auto& ec) {
crimson::get_logger(
ceph_subsys_test
).error(
crimson::common::local_conf().get_val<std::string>("osd_data"),
ec.value(),
ec.message());
- std::exit(EXIT_FAILURE);
}));
}).then([this] {
return seastar::do_for_each(
log_entry.is_delete());
return seastar::now();
}),
- crimson::ct_error::enodata::handle([] { ceph_abort("unexpected enodata"); })
+ crimson::ct_error::enodata::assert_failure{"unexpected enodata"}
).then([] {
return seastar::stop_iteration::no;
});
new UnboundedBtree(NodeExtentManager::create_dummy(IS_DUMMY_SYNC))
);
return INTR(tree->mkfs, *ref_t).handle_error(
- crimson::ct_error::all_same_way([] {
- ASSERT_FALSE("Unable to mkfs");
- })
+ crimson::ct_error::assert_all{"Unable to mkfs"}
);
}
});
});
}).handle_error(
- crimson::ct_error::all_same_way([] {
- ceph_assert(0 == "error");
- })
+ crimson::ct_error::assert_all{"error"}
);
}
epm.reset();
cache.reset();
}).handle_error(
- crimson::ct_error::all_same_way([] {
- ASSERT_FALSE("Unable to close");
- })
+ crimson::ct_error::assert_all{"Unable to close"}
);
}
};
SeaStoreShard::attrs_t get_attrs(
SeaStoreShard &sharded_seastore) {
return sharded_seastore.get_attrs(coll, oid)
- .handle_error(SeaStoreShard::get_attrs_ertr::discard_all{})
- .get();
+ .handle_error(
+ SeaStoreShard::get_attrs_ertr::assert_all{"unexpected error"})
+ .get();
}
ceph::bufferlist get_attr(
SeaStoreShard& sharded_seastore,
std::string_view name) {
return sharded_seastore.get_attr(coll, oid, name)
- .handle_error(
- SeaStoreShard::get_attr_errorator::discard_all{})
- .get();
+ .handle_error(
+ SeaStoreShard::get_attr_errorator::assert_all{"unexpected error"})
+ .get();
}
void check_omap_key(
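
In the test helpers the swap from discard_all{} to assert_all{"unexpected error"} is a behavioural change, not just a spelling one: errors that were previously discarded now trip an assertion and fail the test. Minimal before/after shape, taken from the hunk above:

    // Before: any error from get_attr is quietly discarded.
    return sharded_seastore.get_attr(coll, oid, name)
      .handle_error(
        SeaStoreShard::get_attr_errorator::discard_all{})
      .get();

    // After: any error aborts with the given message.
    return sharded_seastore.get_attr(coll, oid, name)
      .handle_error(
        SeaStoreShard::get_attr_errorator::assert_all{"unexpected error"})
      .get();
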
cache->complete_commit(*t, prev, seq /* TODO */);
return prev;
},
- crimson::ct_error::all_same_way([](auto e) {
- ASSERT_FALSE("failed to submit");
- })
+ crimson::ct_error::assert_all{"failed to submit"}
);
}
});
});
}).handle_error(
- crimson::ct_error::all_same_way([](auto e) {
- ASSERT_FALSE("failed to submit");
- })
+ crimson::ct_error::assert_all{"failed to submit"}
);
}
}).safe_then([this](auto) {
dummy_tail = journal_seq_t{0,
paddr_t::make_seg_paddr(segment_id_t(segment_manager->get_device_id(), 0), 0)};
- }, crimson::ct_error::all_same_way([] {
- ASSERT_FALSE("Unable to mount");
- }));
+ }, crimson::ct_error::assert_all{"Unable to mount"});
}
seastar::future<> tear_down_fut() final {
sms.reset();
journal.reset();
}).handle_error(
- crimson::ct_error::all_same_way([](auto e) {
- ASSERT_FALSE("Unable to close");
- })
+ crimson::ct_error::assert_all{"Unable to close"}
);
}
return server.msgr->bind(entity_addrvec_t{addr}
).safe_then([&server] {
return server.msgr->start({&server.dispatcher});
- }, crimson::net::Messenger::bind_ertr::all_same_way([](auto& e) {
- ceph_abort_msg("bind failed");
- })).then([&dispatcher=server.dispatcher, count] {
+ }, crimson::net::Messenger::bind_ertr::assert_all{"bind failed"}
+ ).then([&dispatcher=server.dispatcher, count] {
return dispatcher.on_reply.wait([&dispatcher, count] {
return dispatcher.count >= count;
});
return seastar::now();
}, errorator<ct_error::enoent>::all_same_way([] {
ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
+ return seastar::now();
})
);
}, [](std::exception_ptr) {}, false).get0();
return seastar::now();
}, errorator<ct_error::enoent>::all_same_way([] {
ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
+ return seastar::now();
}));
});
});
return seastar::now();
}, errorator<ct_error::enoent>::all_same_way([] {
ceph_assert(interruptible::interrupt_cond<TestInterruptCondition>.interrupt_cond);
+ return seastar::now();
}));
});
});
msgr->set_auth_server(&dummy_auth);
return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
return msgr->start({this});
- }, crimson::net::Messenger::bind_ertr::all_same_way(
+ }, crimson::net::Messenger::bind_ertr::assert_all_func(
[addr] (const std::error_code& e) {
logger().error("test_echo(): "
"there is another instance running at {}", addr);
- ceph_abort();
}));
}
seastar::future<> shutdown() {
msgr->set_auth_server(&dummy_auth);
return msgr->bind(entity_addrvec_t{addr}).safe_then([this] {
return msgr->start({this});
- }, crimson::net::Messenger::bind_ertr::all_same_way(
+ }, crimson::net::Messenger::bind_ertr::assert_all_func(
[addr] (const std::error_code& e) {
logger().error("test_preemptive_shutdown(): "
"there is another instance running at {}", addr);
- ceph_abort();
}));
}
entity_addr_t get_addr() const {
test_msgr->set_interceptor(&interceptor);
return test_msgr->bind(entity_addrvec_t{test_addr}).safe_then([this] {
return test_msgr->start({this});
- }, Messenger::bind_ertr::all_same_way([test_addr] (const std::error_code& e) {
+ }, Messenger::bind_ertr::assert_all_func(
+ [test_addr] (const std::error_code& e) {
logger().error("FailoverSuite: "
"there is another instance running at {}", test_addr);
- ceph_abort();
}));
}
peer_msgr->set_auth_server(&dummy_auth);
return peer_msgr->bind(entity_addrvec_t{test_peer_addr}).safe_then([this] {
return peer_msgr->start({this});
- }, Messenger::bind_ertr::all_same_way([test_peer_addr] (const std::error_code& e) {
+ }, Messenger::bind_ertr::assert_all_func(
+ [test_peer_addr] (const std::error_code& e) {
logger().error("FailoverSuitePeer: "
"there is another instance running at {}", test_peer_addr);
- ceph_abort();
}));
}
cmd_msgr->set_auth_server(&dummy_auth);
return cmd_msgr->bind(entity_addrvec_t{cmd_peer_addr}).safe_then([this] {
return cmd_msgr->start({this});
- }, Messenger::bind_ertr::all_same_way([cmd_peer_addr] (const std::error_code& e) {
+ }, Messenger::bind_ertr::assert_all_func(
+ [cmd_peer_addr] (const std::error_code& e) {
logger().error("FailoverTestPeer: "
"there is another instance running at {}", cmd_peer_addr);
- ceph_abort();
}));
}
return msgr->bind(entity_addrvec_t{addr}).safe_then(
[this, msgr] {
return msgr->start({&dispatcher});
- }, crimson::net::Messenger::bind_ertr::all_same_way(
+ }, crimson::net::Messenger::bind_ertr::assert_all_func(
[addr] (const std::error_code& e) {
logger().error("{} test_messenger_thrash(): "
"there is another instance running at {}",
__func__, addr);
- ceph_abort();
}));
}
return pss2->shutdown_destroy();
});
});
- }, listen_ertr::all_same_way(
+ }, listen_ertr::assert_all_func(
[saddr](const std::error_code& e) {
logger().error("test_bind_same(): there is another instance running at {}",
saddr);
- ceph_abort();
})).then([pss1] {
return pss1->shutdown_destroy();
}).handle_exception([](auto eptr) {
).finally([cleanup = std::move(socket)] {});
});
});
- }, listen_ertr::all_same_way(
+ }, listen_ertr::assert_all_func(
[saddr](const std::error_code& e) {
logger().error("test_accept(): there is another instance running at {}",
saddr);
- ceph_abort();
})).then([saddr] {
return seastar::when_all(
socket_connect(saddr).then([](auto socket) {
psf->pss = pss;
return pss->listen(saddr
).safe_then([] {
- }, listen_ertr::all_same_way([saddr](const std::error_code& e) {
+ }, listen_ertr::assert_all_func(
+ [saddr](const std::error_code& e) {
logger().error("dispatch_sockets(): there is another instance running at {}",
saddr);
- ceph_abort();
}));
});
}).then([psf, saddr] {