{
const auto oid = m->get_snapid() == CEPH_SNAPDIR ? m->get_hobj().get_head()
: m->get_hobj();
- return backend->get_object_state(oid).then([this, m](auto os) mutable {
+ return backend->get_object_state(oid).safe_then([this, m](auto os) mutable {
return crimson::do_with(OpsExecuter{std::move(os), *this/* as const& */, m},
[this, m] (auto& ox) {
return crimson::do_for_each(m->ops, [this, &ox](OSDOp& osd_op) {
}
});
}, OpsExecuter::osd_op_errorator::pass_further{});
- }).safe_then([m,this] {
- auto reply = make_message<MOSDOpReply>(m.get(), 0, get_osdmap_epoch(),
- 0, false);
- reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
+ });
+ }).safe_then([m,this] {
+ auto reply = make_message<MOSDOpReply>(m.get(), 0, get_osdmap_epoch(),
+ 0, false);
+ reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
+ return seastar::make_ready_future<Ref<MOSDOpReply>>(std::move(reply));
+ }, OpsExecuter::osd_op_errorator::all_same_way([=,&oid] (const std::error_code& e) {
+ assert(e.value() > 0);
+ logger().debug("got statical error code while handling object {}: {} ({})",
+ oid, e.value(), e.message());
+ return backend->evict_object_state(oid).then([=] {
+ auto reply = make_message<MOSDOpReply>(
+ m.get(), -e.value(), get_osdmap_epoch(), 0, false);
+ reply->set_enoent_reply_versions(peering_state.get_info().last_update,
+ peering_state.get_info().last_user_version);
return seastar::make_ready_future<Ref<MOSDOpReply>>(std::move(reply));
- }, OpsExecuter::osd_op_errorator::all_same_way([] (const std::error_code& err) {
- assert(err.value() > 0);
- throw crimson::osd::make_error(err.value());
- }));
- }).handle_exception_type([=,&oid](const crimson::osd::error& e) {
+ });
+ })).handle_exception_type([=,&oid](const crimson::osd::error& e) {
+ // We need this handler because some code paths still throw and aren't errorated yet.
logger().debug("got ceph::osd::error while handling object {}: {} ({})",
oid, e.code(), e.what());
return backend->evict_object_state(oid).then([=] {
store{store}
{}
-seastar::future<PGBackend::cached_os_t>
+PGBackend::get_os_errorator::future<PGBackend::cached_os_t>
PGBackend::get_object_state(const hobject_t& oid)
{
// want the head?
auto clone = std::upper_bound(begin(ss->clones), end(ss->clones),
oid.snap);
if (clone == end(ss->clones)) {
- return seastar::make_exception_future<PGBackend::cached_os_t>(
- crimson::osd::object_not_found{});
+ return get_os_errorator::make_plain_exception_future<cached_os_t>(
+ crimson::ct_error::enoent::make());
}
// clone
auto soid = oid;
assert(clone_snap != end(ss->clone_snaps));
if (clone_snap->second.empty()) {
logger().trace("find_object: {}@[] -- DNE", soid);
- return seastar::make_exception_future<PGBackend::cached_os_t>(
- crimson::osd::object_not_found{});
+ return get_os_errorator::make_plain_exception_future<cached_os_t>(
+ crimson::ct_error::enoent::make());
}
auto first = clone_snap->second.back();
auto last = clone_snap->second.front();
if (first > soid.snap) {
logger().trace("find_object: {}@[{},{}] -- DNE",
soid, first, last);
- return seastar::make_exception_future<PGBackend::cached_os_t>(
- crimson::osd::object_not_found{});
+ return get_os_errorator::make_plain_exception_future<cached_os_t>(
+ crimson::ct_error::enoent::make());
}
logger().trace("find_object: {}@[{},{}] -- HIT",
soid, first, last);
crimson::osd::ShardServices& shard_services,
const ec_profile_t& ec_profile);
using cached_os_t = boost::local_shared_ptr<ObjectState>;
- seastar::future<cached_os_t> get_object_state(const hobject_t& oid);
+ using get_os_errorator = crimson::errorator<crimson::ct_error::enoent>;
+ get_os_errorator::future<cached_os_t> get_object_state(const hobject_t& oid);
seastar::future<> evict_object_state(const hobject_t& oid);
using read_errorator = ll_read_errorator::extend<