.set_default(true)
.set_description("when writing a object, it will issue a hint to osd backend to indicate the expected size object need"),
+ Option("rbd_compression_hint", Option::TYPE_STR, Option::LEVEL_BASIC)
+ .set_enum_allowed({"none", "compressible", "incompressible"})
+ .set_default("none")
+ .set_description("Compression hint to send to the OSDs during writes")
+ .set_flag(Option::FLAG_RUNTIME),
+
Option("rbd_tracing", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
.set_default(false)
.set_description("true if LTTng-UST tracepoints should be enabled"),
discard_granularity_bytes = 0;
}
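+ // map the rbd_compression_hint string onto librados alloc hint flags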
+ alloc_hint_flags = 0;
+ auto compression_hint = config.get_val<std::string>("rbd_compression_hint");
+ if (compression_hint == "compressible") {
+ alloc_hint_flags |= librados::ALLOC_HINT_FLAG_COMPRESSIBLE;
+ } else if (compression_hint == "incompressible") {
+ alloc_hint_flags |= librados::ALLOC_HINT_FLAG_INCOMPRESSIBLE;
+ }
+
io_work_queue->apply_qos_schedule_tick_min(
config.get_val<uint64_t>("rbd_qos_schedule_tick_min"));
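Because set_enum_allowed() restricts the option to the three listed values, the fall-through above leaves alloc_hint_flags at zero only for the "none" case.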
uint64_t readahead_disable_after_bytes;
bool clone_copy_on_read;
bool enable_alloc_hint;
+ uint32_t alloc_hint_flags = 0U;
uint32_t discard_granularity_bytes = 0;
bool blkin_trace_all;
uint64_t mirroring_replay_delay;
void ObjectRequest<I>::add_write_hint(I& image_ctx,
librados::ObjectWriteOperation *wr) {
if (image_ctx.enable_alloc_hint) {
- wr->set_alloc_hint(image_ctx.get_object_size(),
- image_ctx.get_object_size());
+ wr->set_alloc_hint2(image_ctx.get_object_size(),
+ image_ctx.get_object_size(),
+ image_ctx.alloc_hint_flags);
+ } else if (image_ctx.alloc_hint_flags != 0U) {
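+ // size-based alloc hints disabled: still forward the compression hint flags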
+ wr->set_alloc_hint2(0, 0, image_ctx.alloc_hint_flags);
}
}
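For context, here is a minimal standalone librados sketch of what the new else-branch emits: a write carrying only the hint flags, with both expected sizes set to zero. The helper name, object name, and payload are illustrative, not part of the patch, and error handling is elided:

#include <rados/librados.hpp>

// Hypothetical helper: issue a write that carries only the compression
// hint flags and no expected-size hint, mirroring the else-branch in
// add_write_hint() above.
int write_with_compression_hint(librados::IoCtx& ioctx) {
  librados::bufferlist bl;
  bl.append("payload");

  librados::ObjectWriteOperation op;
  op.set_alloc_hint2(0, 0, librados::ALLOC_HINT_FLAG_COMPRESSIBLE);
  op.write_full(bl);
  return ioctx.operate("some-object", &op);  // object name is illustrative
}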
uint64_t expected_write_size) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
o->ops.push_back(boost::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2,
- expected_object_size, expected_write_size, _4));
+ expected_object_size, expected_write_size, 0,
+ _4));
}
+void ObjectWriteOperation::set_alloc_hint2(uint64_t expected_object_size,
+ uint64_t expected_write_size,
+ uint32_t flags) {
+ TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
+ o->ops.push_back(boost::bind(&TestIoCtxImpl::set_alloc_hint, _1, _2,
+ expected_object_size, expected_write_size, flags,
+ _4));
+}
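Note that the pre-existing set_alloc_hint() overload above now binds a literal zero for the flags argument, so both entry points funnel into the single mock implementation.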
void ObjectWriteOperation::tmap_update(const bufferlist& cmdbl) {
TestObjectOperationImpl *o = reinterpret_cast<TestObjectOperationImpl*>(impl);
int TestIoCtxImpl::set_alloc_hint(const std::string& oid,
uint64_t expected_object_size,
uint64_t expected_write_size,
+ uint32_t flags,
const SnapContext &snapc) {
return 0;
}
virtual int set_alloc_hint(const std::string& oid,
uint64_t expected_object_size,
uint64_t expected_write_size,
+ uint32_t flags,
const SnapContext &snapc);
virtual void set_snap_read(snap_t seq);
virtual int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
int TestMemIoCtxImpl::set_alloc_hint(const std::string& oid,
uint64_t expected_object_size,
uint64_t expected_write_size,
+ uint32_t flags,
const SnapContext &snapc) {
if (get_snap_read() != CEPH_NOSNAP) {
return -EROFS;
int selfmanaged_snap_rollback(const std::string& oid,
uint64_t snapid) override;
int set_alloc_hint(const std::string& oid, uint64_t expected_object_size,
- uint64_t expected_write_size,
+ uint64_t expected_write_size, uint32_t flags,
const SnapContext &snapc) override;
int sparse_read(const std::string& oid, uint64_t off, uint64_t len,
std::map<uint64_t,uint64_t> *m, bufferlist *data_bl) override;
non_blocking_aio(image_ctx.non_blocking_aio),
blkin_trace_all(image_ctx.blkin_trace_all),
enable_alloc_hint(image_ctx.enable_alloc_hint),
+ alloc_hint_flags(image_ctx.alloc_hint_flags),
ignore_migrating(image_ctx.ignore_migrating),
enable_sparse_copyup(image_ctx.enable_sparse_copyup),
mtime_update_interval(image_ctx.mtime_update_interval),
bool non_blocking_aio;
bool blkin_trace_all;
bool enable_alloc_hint;
+ uint32_t alloc_hint_flags;
bool ignore_migrating;
bool enable_sparse_copyup;
uint64_t mtime_update_interval;