PGBackend::ll_read_ierrorator::future<ceph::bufferlist>
PGBackend::omap_get_header(
const crimson::os::CollectionRef& c,
- const ghobject_t& oid) const
+ const ghobject_t& oid,
+ uint32_t op_flags) const
{
- return store->omap_get_header(c, oid)
+ return store->omap_get_header(c, oid, op_flags)
.handle_error(
crimson::ct_error::enodata::handle([] {
return seastar::make_ready_future<bufferlist>();
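For context on the new op_flags argument: the CEPH_OSD_OP_FLAG_FADVISE_* hints are plain bit flags carried in one uint32_t, so several hints can be OR-ed into the same word. A minimal standalone sketch (not crimson code; the numeric values are assumed to mirror include/rados.h):

  // Standalone sketch, not crimson code: fadvise hints are OR-able bits in a
  // single op_flags word.  Values assumed from CEPH_OSD_OP_FLAG_FADVISE_*.
  #include <cstdint>
  #include <cstdio>

  constexpr uint32_t FADVISE_SEQUENTIAL = 0x8;   // assumed value
  constexpr uint32_t FADVISE_DONTNEED   = 0x20;  // assumed value

  int main() {
    uint32_t op_flags = FADVISE_SEQUENTIAL | FADVISE_DONTNEED;
    std::printf("dontneed requested: %d\n", (op_flags & FADVISE_DONTNEED) != 0);
    return 0;
  }
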
PGBackend::omap_get_header(
const ObjectState& os,
OSDOp& osd_op,
- object_stat_sum_t& delta_stats) const
+ object_stat_sum_t& delta_stats,
+ uint32_t op_flags) const
{
if (os.oi.is_omap()) {
- return omap_get_header(coll, ghobject_t{os.oi.soid}).safe_then_interruptible(
+ return omap_get_header(
+ coll, ghobject_t{os.oi.soid}, CEPH_OSD_OP_FLAG_FADVISE_DONTNEED
+ ).safe_then_interruptible(
[&delta_stats, &osd_op] (ceph::bufferlist&& header) {
osd_op.outdata = std::move(header);
delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
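The stats update above charges the read in whole KiB via shift_round_up(len, 10). A minimal standalone equivalent of the assumed semantics (right shift with round-up), shown only to make the accounting explicit:

  // Standalone sketch of the assumed shift_round_up semantics: divide by
  // 2^shift and round up, so any non-empty header is charged at least 1 KiB.
  #include <cassert>
  #include <cstdint>

  constexpr uint64_t shift_round_up_like(uint64_t x, unsigned shift) {
    return (x + (uint64_t{1} << shift) - 1) >> shift;
  }

  int main() {
    assert(shift_round_up_like(0, 10) == 0);
    assert(shift_round_up_like(1, 10) == 1);     // 1 byte -> 1 KiB charged
    assert(shift_round_up_like(4096, 10) == 4);  // 4096 bytes -> 4 KiB
    return 0;
  }
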
CollectionRef c,
const ghobject_t& oid,
uint64_t off,
- uint64_t len)
+ uint64_t len,
+ uint32_t op_flags)
{
return store->fiemap(c, oid, off, len);
}
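Note that in this hunk the new op_flags parameter is accepted but not forwarded to store->fiemap. A fiemap-style result maps extent offsets to lengths; the recovery path later clips it to the range it wants to copy. A rough standalone sketch of that clipping, with std::map standing in for interval_set and a hypothetical range_end:

  // Standalone sketch (plain C++): clip offset->length extents to a copy range.
  // std::map approximates interval_set; range_end is a hypothetical bound.
  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <map>

  int main() {
    std::map<uint64_t, uint64_t> fiemap_result{{0, 4096}, {65536, 8192}};
    const uint64_t range_end = 70000;
    std::map<uint64_t, uint64_t> extents;
    for (auto [off, len] : fiemap_result) {
      if (off >= range_end) break;                     // starts past the range
      extents[off] = std::min(len, range_end - off);   // clip the last extent
    }
    assert(extents.at(65536) == range_end - 65536);
    return 0;
  }
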
CollectionRef c,
const ghobject_t& oid,
uint64_t off,
- uint64_t len);
+ uint64_t len,
+ uint32_t op_flags = 0);
write_iertr::future<> tmapput(
ObjectState& os,
object_stat_sum_t& delta_stats);
ll_read_ierrorator::future<ceph::bufferlist> omap_get_header(
const crimson::os::CollectionRef& c,
- const ghobject_t& oid) const;
+ const ghobject_t& oid,
+ uint32_t op_flags = 0) const;
ll_read_ierrorator::future<> omap_get_header(
const ObjectState& os,
OSDOp& osd_op,
- object_stat_sum_t& delta_stats) const;
+ object_stat_sum_t& delta_stats,
+ uint32_t op_flags = 0) const;
interruptible_future<> omap_set_header(
ObjectState& os,
const OSDOp& osd_op,
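The declarations above default op_flags to 0, so call sites that predate the change keep compiling unchanged while new callers can pass fadvise hints. A minimal standalone sketch of that pattern, with hypothetical names:

  // Standalone sketch, hypothetical names: a trailing defaulted op_flags
  // parameter preserves existing callers; new callers may pass hints.
  #include <cstdint>
  #include <cstdio>

  void get_header_like(int oid, uint32_t op_flags = 0) {
    std::printf("oid=%d op_flags=0x%x\n", oid, static_cast<unsigned>(op_flags));
  }

  int main() {
    get_header_like(1);        // pre-existing caller: flags default to 0
    get_header_like(2, 0x20);  // new caller: passes a DONTNEED-style hint
    return 0;
  }
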
return seastar::make_ready_future<eversion_t>(ver);
}
return interruptor::make_interruptible(interruptor::when_all_succeed(
- backend->omap_get_header(coll, ghobject_t(oid)).handle_error_interruptible<false>(
+ backend->omap_get_header(
+ coll, ghobject_t(oid), CEPH_OSD_OP_FLAG_FADVISE_DONTNEED
+ ).handle_error_interruptible<false>(
crimson::os::FuturizedStore::Shard::read_errorator::all_same_way(
[oid] (const std::error_code& e) {
logger().debug("read_metadata_for_push_op, error {} when getting omap header: {}", e, oid);
return seastar::make_ready_future<bufferlist>();
})),
- interruptor::make_interruptible(store->get_attrs(coll, ghobject_t(oid)))
- .handle_error_interruptible<false>(
+ interruptor::make_interruptible(
+ store->get_attrs(coll, ghobject_t(oid), CEPH_OSD_OP_FLAG_FADVISE_DONTNEED)
+ ).handle_error_interruptible<false>(
crimson::os::FuturizedStore::Shard::get_attrs_ertr::all_same_way(
[oid] (const std::error_code& e) {
logger().debug("read_metadata_for_push_op, error {} when getting attrs: {}", e, oid);
return seastar::make_ready_future<uint64_t>(offset);
}
// 1. get the extents in the interested range
- return interruptor::make_interruptible(backend->fiemap(coll, ghobject_t{oid},
- 0, copy_subset.range_end())).safe_then_interruptible(
+ return interruptor::make_interruptible(
+ backend->fiemap(
+ coll,
+ ghobject_t{oid},
+ 0,
+ copy_subset.range_end(),
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED)
+ ).safe_then_interruptible(
[=, this](auto&& fiemap_included) mutable {
interval_set<uint64_t> extents;
try {
push_op->data_included.span_of(extents, offset, max_len);
// 3. read the truncated extents
// TODO: check if the returned extents are pruned
- return interruptor::make_interruptible(store->readv(coll, ghobject_t{oid},
- push_op->data_included, 0));
+ return interruptor::make_interruptible(
+ store->readv(
+ coll,
+ ghobject_t{oid},
+ push_op->data_included,
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED));
}).safe_then_interruptible([push_op, range_end=copy_subset.range_end()](auto &&bl) {
push_op->data.claim_append(std::move(bl));
uint64_t recovered_to = 0;
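The readv step reads only the extents listed in push_op->data_included, and claim_append concatenates the resulting pieces into one buffer. Roughly this pattern, in a standalone sketch with std::string standing in for ceph::bufferlist:

  // Standalone sketch (std::string in place of ceph::bufferlist): read only
  // the listed extents of an object and append them into one buffer, as the
  // readv/claim_append pair above does.
  #include <cassert>
  #include <cstdint>
  #include <map>
  #include <string>

  std::string readv_like(const std::string& object,
                         const std::map<uint64_t, uint64_t>& extents) {
    std::string out;
    for (const auto& [off, len] : extents) {  // extents: offset -> length
      out.append(object.substr(off, len));
    }
    return out;
  }

  int main() {
    const std::string object = "aaaaDATAbbbbTAIL";
    const std::map<uint64_t, uint64_t> data_included{{4, 4}, {12, 4}};
    assert(readv_like(object, data_included) == "DATATAIL");
    return 0;
  }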