crimson::stateful_ec,
crimson::ct_error::enoent,
crimson::ct_error::eexist,
+ // new error codes raised by the pool-full check in do_osd_ops_execute():
+ // enospc (pool is full), edquot (pool full because of a quota),
+ // eagain (returned when the full request is rejected/"dropped")
+ crimson::ct_error::enospc,
+ crimson::ct_error::edquot,
+ crimson::ct_error::eagain,
crimson::ct_error::invarg,
crimson::ct_error::erange,
crimson::ct_error::ecanceled,
virtual epoch_t get_map_epoch() const = 0;
virtual entity_inst_t get_orig_source_inst() const = 0;
virtual uint64_t get_features() const = 0;
+ // test a CEPH_OSD_FLAG_* bit on the request; added so the pool-full
+ // path can honor CEPH_OSD_FLAG_FULL_FORCE / CEPH_OSD_FLAG_FULL_TRY
+ virtual bool has_flag(uint32_t flag) const = 0;
};
template <class ImplT>
// pimpl adapter: forwards to the wrapped concrete message implementation
osd_reqid_t get_reqid() const final {
return pimpl->get_reqid();
}
+ // forward to the concrete message's has_flag(); lets the pool-full
+ // check see CEPH_OSD_FLAG_FULL_FORCE / CEPH_OSD_FLAG_FULL_TRY
+ bool has_flag(uint32_t flag) const final {
+ return pimpl->has_flag(flag);
+ }
// forward the op's mtime from the wrapped message
// NOTE(review): stray ';' after the body below — harmless, could be dropped
utime_t get_mtime() const final {
return pimpl->get_mtime();
};
::crimson::interruptible::interruptible_errorator<
IOInterruptCondition, osd_op_errorator>;
+ // moved up from the private section (see the '-' line below) so callers
+ // such as the pool-full check in do_osd_ops_execute() can inspect the
+ // op's pending space/object delta before it is applied
+ object_stat_sum_t delta_stats;
private:
// an operation can be divided into two stages: main and effect-exposing
// one. The former is performed immediately on call to `do_osd_op()` while
size_t num_read = 0; ///< count read ops
size_t num_write = 0; ///< count update ops
- object_stat_sum_t delta_stats;
// this gizmo could be wrapped in std::optional for the sake of lazy
// initialization. we don't need it for ops that don't have effects
logger().debug(
"do_osd_ops_execute: object {} all operations successful",
ox->get_target());
+ // check for full
+ // Only ops that would consume more space (positive object/byte delta)
+ // are subject to the pool FULL flag; reads and space-freeing ops pass.
+ if ((ox->delta_stats.num_bytes > 0 ||
+ ox->delta_stats.num_objects > 0) &&
+ get_pool().info.has_flag(pg_pool_t::FLAG_FULL)) {
+ const auto& m = ox->get_message();
+ // MDS and FULL_FORCE writers may proceed even on a full pool
+ if (m.get_reqid().name.is_mds() || // FIXME: ignore MDS for now
+ m.has_flag(CEPH_OSD_FLAG_FULL_FORCE)) {
+ logger().info(" full, but proceeding due to FULL_FORCE or MDS");
+ } else if (m.has_flag(CEPH_OSD_FLAG_FULL_TRY)) {
+ // they tried, they failed.
+ logger().info(" full, replying to FULL_TRY op");
+ // distinguish quota-full (EDQUOT) from space-full (ENOSPC)
+ if (get_pool().info.has_flag(pg_pool_t::FLAG_FULL_QUOTA))
+ return interruptor::make_ready_future<OpsExecuter::rep_op_fut_tuple>(
+ seastar::now(),
+ OpsExecuter::osd_op_ierrorator::future<>(
+ crimson::ct_error::edquot::make()));
+ else
+ return interruptor::make_ready_future<OpsExecuter::rep_op_fut_tuple>(
+ seastar::now(),
+ OpsExecuter::osd_op_ierrorator::future<>(
+ crimson::ct_error::enospc::make()));
+ } else {
+ // drop request
+ // NOTE(review): this path replies with EAGAIN rather than truly
+ // dropping the request — confirm that matches the "dropping" log
+ // message and the classic OSD's behaviour for non-FULL_TRY clients
+ logger().info(" full, dropping request (bad client)");
+ return interruptor::make_ready_future<OpsExecuter::rep_op_fut_tuple>(
+ seastar::now(),
+ OpsExecuter::osd_op_ierrorator::future<>(
+ crimson::ct_error::eagain::make()));
+ }
+ }
+
peering_state.apply_op_stats(ox->get_target(), ox->get_stats());
return std::move(*ox).flush_changes_n_do_ops_effects(ops,
[this] (auto&& txn,
// features advertised by this (internally generated) request
uint64_t get_features() const {
return features;
}
+ // Only used by InternalClientRequest, no op flags
+ // (always reports the flag as unset, so internal ops are never treated
+ // as FULL_FORCE/FULL_TRY; consider [[maybe_unused]] on the parameter)
+ bool has_flag(uint32_t flag) const {
+ return false;
+ }
crimson::net::ConnectionRef conn;
osd_reqid_t reqid;
utime_t mtime;