ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
: ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
image_ctx.image_watcher, managed_lock::EXCLUSIVE,
- image_ctx.blacklist_on_break_lock,
- image_ctx.blacklist_expire_seconds),
+ image_ctx.config.template get_val<bool>("rbd_blacklist_on_break_lock"),
+ image_ctx.config.template get_val<uint64_t>("rbd_blacklist_expire_seconds")),
m_image_ctx(image_ctx) {
Mutex::Locker locker(ML<I>::m_lock);
ML<I>::set_state_uninitialized();
if (snap_id == LIBRADOS_SNAP_HEAD)
return flags;
- if (balance_snap_reads)
+ if (config.get_val<bool>("rbd_balance_snap_reads"))
flags |= librados::OPERATION_BALANCE_READS;
- else if (localize_snap_reads)
+ else if (config.get_val<bool>("rbd_localize_snap_reads"))
flags |= librados::OPERATION_LOCALIZE_READS;
return flags;
}
}
}
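For context, the hunk above moves the snap-read flag selection onto the image's ConfigProxy instead of cached booleans. A minimal self-contained sketch of the lookup pattern, assuming a free-function wrapper (the helper name is illustrative; the option keys and librados flag constants are the real ones used above):

// Sketch only: mirrors the logic in the hunk above.
int get_snap_read_flags(const ConfigProxy& config, librados::snap_t snap_id) {
  int flags = 0;
  if (snap_id == LIBRADOS_SNAP_HEAD) {
    return flags;
  }
  // Balance/localize are mutually exclusive; balance wins if both are set.
  if (config.get_val<bool>("rbd_balance_snap_reads")) {
    flags |= librados::OPERATION_BALANCE_READS;
  } else if (config.get_val<bool>("rbd_localize_snap_reads")) {
    flags |= librados::OPERATION_LOCALIZE_READS;
  }
  return flags;
}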
-#define ASSIGN_OPTION(param, type) \
- do { \
- string key = "rbd_"; \
- key = key + #param; \
- param = config.get_val<type>("rbd_"#param); \
- } while (0);
+#define ASSIGN_OPTION(param, type) \
+ param = config.get_val<type>("rbd_"#param)
ASSIGN_OPTION(non_blocking_aio, bool);
ASSIGN_OPTION(cache, bool);
ASSIGN_OPTION(cache_writethrough_until_flush, bool);
- ASSIGN_OPTION(cache_size, Option::size_t);
ASSIGN_OPTION(cache_max_dirty, Option::size_t);
- ASSIGN_OPTION(cache_target_dirty, Option::size_t);
- ASSIGN_OPTION(cache_max_dirty_age, double);
- ASSIGN_OPTION(cache_max_dirty_object, uint64_t);
- ASSIGN_OPTION(cache_block_writes_upfront, bool);
- ASSIGN_OPTION(concurrent_management_ops, uint64_t);
- ASSIGN_OPTION(balance_snap_reads, bool);
- ASSIGN_OPTION(localize_snap_reads, bool);
- ASSIGN_OPTION(balance_parent_reads, bool);
- ASSIGN_OPTION(localize_parent_reads, bool);
ASSIGN_OPTION(sparse_read_threshold_bytes, Option::size_t);
- ASSIGN_OPTION(readahead_trigger_requests, uint64_t);
ASSIGN_OPTION(readahead_max_bytes, Option::size_t);
ASSIGN_OPTION(readahead_disable_after_bytes, Option::size_t);
ASSIGN_OPTION(clone_copy_on_read, bool);
- ASSIGN_OPTION(blacklist_on_break_lock, bool);
- ASSIGN_OPTION(blacklist_expire_seconds, uint64_t);
- ASSIGN_OPTION(request_timed_out_seconds, uint64_t);
ASSIGN_OPTION(enable_alloc_hint, bool);
- ASSIGN_OPTION(journal_order, uint64_t);
- ASSIGN_OPTION(journal_splay_width, uint64_t);
- ASSIGN_OPTION(journal_commit_age, double);
- ASSIGN_OPTION(journal_object_flush_interval, uint64_t);
- ASSIGN_OPTION(journal_object_flush_bytes, Option::size_t);
- ASSIGN_OPTION(journal_object_flush_age, double);
- ASSIGN_OPTION(journal_object_max_in_flight_appends, uint64_t);
- ASSIGN_OPTION(journal_max_payload_bytes, Option::size_t);
- ASSIGN_OPTION(journal_max_concurrent_object_sets, uint64_t);
- ASSIGN_OPTION(mirroring_resync_after_disconnect, bool);
- ASSIGN_OPTION(mirroring_delete_delay, uint64_t);
ASSIGN_OPTION(mirroring_replay_delay, uint64_t);
ASSIGN_OPTION(mtime_update_interval, uint64_t);
ASSIGN_OPTION(atime_update_interval, uint64_t);
ASSIGN_OPTION(skip_partial_discard, bool);
ASSIGN_OPTION(blkin_trace_all, bool);
- ASSIGN_OPTION(qos_iops_limit, uint64_t);
- ASSIGN_OPTION(qos_bps_limit, uint64_t);
- ASSIGN_OPTION(qos_read_iops_limit, uint64_t);
- ASSIGN_OPTION(qos_write_iops_limit, uint64_t);
- ASSIGN_OPTION(qos_read_bps_limit, uint64_t);
- ASSIGN_OPTION(qos_write_bps_limit, uint64_t);
-
- if (thread_safe) {
- ASSIGN_OPTION(journal_pool, std::string);
- }
+
+#undef ASSIGN_OPTION
if (sparse_read_threshold_bytes == 0) {
sparse_read_threshold_bytes = get_object_size();
}
- io_work_queue->apply_qos_limit(qos_iops_limit, RBD_QOS_IOPS_THROTTLE);
- io_work_queue->apply_qos_limit(qos_bps_limit, RBD_QOS_BPS_THROTTLE);
- io_work_queue->apply_qos_limit(qos_read_iops_limit, RBD_QOS_READ_IOPS_THROTTLE);
- io_work_queue->apply_qos_limit(qos_write_iops_limit, RBD_QOS_WRITE_IOPS_THROTTLE);
- io_work_queue->apply_qos_limit(qos_read_bps_limit, RBD_QOS_READ_BPS_THROTTLE);
- io_work_queue->apply_qos_limit(qos_write_bps_limit, RBD_QOS_WRITE_BPS_THROTTLE);
+ io_work_queue->apply_qos_limit(
+ config.get_val<uint64_t>("rbd_qos_iops_limit"),
+ RBD_QOS_IOPS_THROTTLE);
+ io_work_queue->apply_qos_limit(
+ config.get_val<uint64_t>("rbd_qos_bps_limit"),
+ RBD_QOS_BPS_THROTTLE);
+ io_work_queue->apply_qos_limit(
+ config.get_val<uint64_t>("rbd_qos_read_iops_limit"),
+ RBD_QOS_READ_IOPS_THROTTLE);
+ io_work_queue->apply_qos_limit(
+ config.get_val<uint64_t>("rbd_qos_write_iops_limit"),
+ RBD_QOS_WRITE_IOPS_THROTTLE);
+ io_work_queue->apply_qos_limit(
+ config.get_val<uint64_t>("rbd_qos_read_bps_limit"),
+ RBD_QOS_READ_BPS_THROTTLE);
+ io_work_queue->apply_qos_limit(
+ config.get_val<uint64_t>("rbd_qos_write_bps_limit"),
+ RBD_QOS_WRITE_BPS_THROTTLE);
}
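For illustration, the trimmed ASSIGN_OPTION macro relies on preprocessor stringizing pasted onto the fixed "rbd_" prefix, so every surviving cached member maps 1:1 to an rbd_-prefixed config key (comment-only expansion, not new code):

// ASSIGN_OPTION(cache_max_dirty, Option::size_t);
// expands to:
//   cache_max_dirty = config.get_val<Option::size_t>("rbd_cache_max_dirty");
// "rbd_" #param concatenates the string literal with the stringized
// parameter name at preprocessing time.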
ExclusiveLock<ImageCtx> *ImageCtx::create_exclusive_lock() {
}
struct ImageCtx {
+ static const string METADATA_CONF_PREFIX;
+
CephContext *cct;
ConfigProxy config;
std::set<std::string> config_overrides;
bool ignore_migrating = false;
- // Configuration
- static const string METADATA_CONF_PREFIX;
+ /// Cached latency-sensitive configuration settings
bool non_blocking_aio;
bool cache;
bool cache_writethrough_until_flush;
- uint64_t cache_size;
uint64_t cache_max_dirty;
- uint64_t cache_target_dirty;
- double cache_max_dirty_age;
- uint32_t cache_max_dirty_object;
- bool cache_block_writes_upfront;
- uint32_t concurrent_management_ops;
- bool balance_snap_reads;
- bool localize_snap_reads;
- bool balance_parent_reads;
- bool localize_parent_reads;
uint64_t sparse_read_threshold_bytes;
- uint32_t readahead_trigger_requests;
uint64_t readahead_max_bytes;
uint64_t readahead_disable_after_bytes;
bool clone_copy_on_read;
- bool blacklist_on_break_lock;
- uint32_t blacklist_expire_seconds;
- uint32_t request_timed_out_seconds;
bool enable_alloc_hint;
- uint8_t journal_order;
- uint8_t journal_splay_width;
- double journal_commit_age;
- int journal_object_flush_interval;
- uint64_t journal_object_flush_bytes;
- double journal_object_flush_age;
- uint64_t journal_object_max_in_flight_appends;
- std::string journal_pool;
- uint32_t journal_max_payload_bytes;
- int journal_max_concurrent_object_sets;
- bool mirroring_resync_after_disconnect;
- uint64_t mirroring_delete_delay;
- int mirroring_replay_delay;
bool skip_partial_discard;
bool blkin_trace_all;
+ uint64_t mirroring_replay_delay;
uint64_t mtime_update_interval;
uint64_t atime_update_interval;
- uint64_t qos_iops_limit;
- uint64_t qos_bps_limit;
- uint64_t qos_read_iops_limit;
- uint64_t qos_write_iops_limit;
- uint64_t qos_read_bps_limit;
- uint64_t qos_write_bps_limit;
LibrbdAdminSocketHook *asok_hook;
Task task(TASK_CODE_ASYNC_REQUEST, id);
m_task_finisher->cancel(task);
- m_task_finisher->add_event_after(task, m_image_ctx.request_timed_out_seconds,
- ctx);
+ m_task_finisher->add_event_after(
+ task, m_image_ctx.config.template get_val<uint64_t>("rbd_request_timed_out_seconds"),
+ ctx);
}
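A note on the `.template` spelling that appears in the templated call sites: `m_image_ctx` has a type that depends on the enclosing template parameter, so the disambiguator is required for the compiler to parse `get_val<...>` as a member-template call rather than a less-than comparison. A standalone sketch (the helper function is hypothetical):

// Hypothetical helper; shows the required ".template" in a dependent context.
template <typename I>
uint64_t request_timeout_seconds(I& image_ctx) {
  return image_ctx.config.template get_val<uint64_t>(
    "rbd_request_timed_out_seconds");
}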
template <typename I>
transition_state(STATE_INITIALIZING, 0);
::journal::Settings settings;
- settings.commit_interval = m_image_ctx.journal_commit_age;
- settings.max_payload_bytes = m_image_ctx.journal_max_payload_bytes;
+ settings.commit_interval =
+ m_image_ctx.config.template get_val<double>("rbd_journal_commit_age");
+ settings.max_payload_bytes =
+ m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_max_payload_bytes");
settings.max_concurrent_object_sets =
- m_image_ctx.journal_max_concurrent_object_sets;
+ m_image_ctx.config.template get_val<uint64_t>("rbd_journal_max_concurrent_object_sets");
// TODO: a configurable filter to exclude certain peers from being
// disconnected.
settings.whitelisted_laggy_clients = {IMAGE_CLIENT_ID};
template <typename I>
void Journal<I>::start_append() {
ceph_assert(m_lock.is_locked());
- m_journaler->start_append(m_image_ctx.journal_object_flush_interval,
- m_image_ctx.journal_object_flush_bytes,
- m_image_ctx.journal_object_flush_age,
- m_image_ctx.journal_object_max_in_flight_appends);
+ m_journaler->start_append(
+ m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_flush_interval"),
+ m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_object_flush_bytes"),
+ m_image_ctx.config.template get_val<double>("rbd_journal_object_flush_age"),
+ m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_max_in_flight_appends"));
transition_state(STATE_READY, 0);
}
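Several of the lookups above ask for Option::size_t rather than a plain integer; that matches the declared type of the size-valued options, and the returned wrapper converts implicitly where an unsigned integer is expected. A small sketch (variable name illustrative):

// Option::size_t converts to an unsigned integer at the call sites above.
uint64_t flush_bytes = m_image_ctx.config.template get_val<Option::size_t>(
  "rbd_journal_object_flush_bytes");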
: callback(callback), callback_arg(callback_arg),
whole_object(_whole_object), from_snap_id(_from_snap_id),
end_snap_id(_end_snap_id),
- throttle(image_ctx.concurrent_management_ops, true) {
+ throttle(image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"), true) {
}
};
init_max_dirty = 0;
}
+ auto cache_size =
+ m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_size");
+ auto target_dirty =
+ m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_target_dirty");
+ auto max_dirty_age =
+ m_image_ctx->config.template get_val<double>("rbd_cache_max_dirty_age");
+ auto block_writes_upfront =
+ m_image_ctx->config.template get_val<bool>("rbd_cache_block_writes_upfront");
+ auto max_dirty_object =
+ m_image_ctx->config.template get_val<uint64_t>("rbd_cache_max_dirty_object");
+
ldout(cct, 5) << "Initial cache settings:"
- << " size=" << m_image_ctx->cache_size
+ << " size=" << cache_size
<< " num_objects=" << 10
<< " max_dirty=" << init_max_dirty
- << " target_dirty=" << m_image_ctx->cache_target_dirty
- << " max_dirty_age="
- << m_image_ctx->cache_max_dirty_age << dendl;
+ << " target_dirty=" << target_dirty
+ << " max_dirty_age=" << max_dirty_age << dendl;
m_object_cacher = new ObjectCacher(cct, m_image_ctx->perfcounter->get_name(),
*m_writeback_handler, m_cache_lock,
- nullptr, nullptr, m_image_ctx->cache_size,
- 10, /* reset this in init */
- init_max_dirty,
- m_image_ctx->cache_target_dirty,
- m_image_ctx->cache_max_dirty_age,
- m_image_ctx->cache_block_writes_upfront);
+ nullptr, nullptr, cache_size,
+ 10, /* reset this in init */
+ init_max_dirty, target_dirty,
+ max_dirty_age, block_writes_upfront);
// size object cache appropriately
- uint64_t obj = m_image_ctx->cache_max_dirty_object;
- if (!obj) {
- obj = std::min<uint64_t>(2000,
- std::max<uint64_t>(
- 10, m_image_ctx->cache_size / 100 /
+ if (max_dirty_object == 0) {
+ max_dirty_object = std::min<uint64_t>(
+ 2000, std::max<uint64_t>(10, cache_size / 100 /
sizeof(ObjectCacher::Object)));
}
- ldout(cct, 5) << " cache bytes " << m_image_ctx->cache_size
- << " -> about " << obj << " objects" << dendl;
- m_object_cacher->set_max_objects(obj);
+ ldout(cct, 5) << " cache bytes " << cache_size
+ << " -> about " << max_dirty_object << " objects" << dendl;
+ m_object_cacher->set_max_objects(max_dirty_object);
m_object_set = new ObjectCacher::ObjectSet(nullptr,
m_image_ctx->data_ctx.get_id(), 0);
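The sizing heuristic above budgets roughly 1% of the cache for per-object bookkeeping and clamps the object count to [10, 2000]. A worked example with illustrative numbers (the Object size is assumed purely for the arithmetic):

// cache_size = 32 MiB = 33554432 bytes
// sizeof(ObjectCacher::Object) assumed ~200 bytes for this example
// 33554432 / 100 / 200 = 1677 -> within [10, 2000], so ~1677 objects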
{
Mutex::Locker locker(m_lock);
for (uint64_t i = 0;
- i < m_cct->_conf.get_val<uint64_t>("rbd_concurrent_management_ops");
+ i < m_src_image_ctx->config.template get_val<uint64_t>("rbd_concurrent_management_ops");
++i) {
send_next_object_copy();
if (m_ret_val < 0 && m_current_ops == 0) {
// readahead requires the cache
m_image_ctx->readahead.set_trigger_requests(
- m_image_ctx->readahead_trigger_requests);
+ m_image_ctx->config.template get_val<uint64_t>("rbd_readahead_trigger_requests"));
m_image_ctx->readahead.set_max_readahead_size(
- m_image_ctx->readahead_max_bytes);
+ m_image_ctx->config.template get_val<Option::size_t>("rbd_readahead_max_bytes"));
return send_register_watch(result);
}
m_parent_image_ctx->child = &m_child_image_ctx;
// set rados flags for reading the parent image
- if (m_child_image_ctx.balance_parent_reads) {
+ if (m_child_image_ctx.config.template get_val<bool>("rbd_balance_parent_reads")) {
m_parent_image_ctx->set_read_flag(librados::OPERATION_BALANCE_READS);
- } else if (m_child_image_ctx.localize_parent_reads) {
+ } else if (m_child_image_ctx.config.template get_val<bool>("rbd_localize_parent_reads")) {
m_parent_image_ctx->set_read_flag(librados::OPERATION_LOCALIZE_READS);
}
}
RWLock::RLocker owner_lock(src->owner_lock);
- SimpleThrottle throttle(src->concurrent_management_ops, false);
+ SimpleThrottle throttle(src->config.get_val<uint64_t>("rbd_concurrent_management_ops"), false);
uint64_t period = src->get_stripe_period();
unsigned fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
LIBRADOS_OP_FLAG_FADVISE_NOCACHE;
return -EINVAL;
}
- if (ictx->blacklist_on_break_lock) {
+ if (ictx->config.get_val<bool>("rbd_blacklist_on_break_lock")) {
typedef std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> Lockers;
Lockers lockers;
RWLock::RLocker locker(ictx->md_lock);
librados::Rados rados(ictx->md_ctx);
- r = rados.blacklist_add(client_address,
- ictx->blacklist_expire_seconds);
+ r = rados.blacklist_add(
+ client_address,
+ ictx->config.get_val<uint64_t>("rbd_blacklist_expire_seconds"));
if (r < 0) {
lderr(ictx->cct) << "unable to blacklist client: " << cpp_strerror(r)
<< dendl;
AsyncObjectThrottle<> *throttle = new AsyncObjectThrottle<>(
NULL, *m_ictx, context_factory, util::create_context_callback(this),
NULL, 0, m_snap_ids.size());
- throttle->start_ops(m_ictx->concurrent_management_ops);
+ throttle->start_ops(
+ m_ictx->config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
return false;
}
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(bl.c_str(), bl.length())) {
int r = ictx->io_work_queue->discard(ofs, len, false);
tracepoint(librbd, writesame_exit, r);
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(bl.c_str(), bl.length())) {
ictx->io_work_queue->aio_discard(get_aio_completion(c), off, len, false);
tracepoint(librbd, aio_writesame_exit, 0);
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
int r = ictx->io_work_queue->discard(ofs, len, false);
tracepoint(librbd, writesame_exit, r);
return -EINVAL;
}
- bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+ bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
ictx->io_work_queue->aio_discard(get_aio_completion(comp), off, len, false);
tracepoint(librbd, aio_writesame_exit, 0);
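The switch from ictx->cct->_conf to ictx->config in the write-same paths is not just cosmetic: ictx->config is the image-level ConfigProxy, which layers pool- and image-level overrides (the conf_rbd_* metadata keys exercised in the tests below) on top of the cluster configuration. A hedged sketch of the effect, using set_val to stand in for the metadata-driven override path:

// Sketch: an image-level override changes the answer for this image only.
ictx->config.set_val("rbd_discard_on_zeroed_write_same", "false");
bool discard_zero =
  ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
// discard_zero is now false for this image; other images still see the
// cluster-wide default.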
&EnableFeaturesRequest<I>::handle_create_journal>(this);
journal::CreateRequest<I> *req = journal::CreateRequest<I>::create(
- image_ctx.md_ctx, image_ctx.id, image_ctx.journal_order,
- image_ctx.journal_splay_width, image_ctx.journal_pool,
+ image_ctx.md_ctx, image_ctx.id,
+ image_ctx.config.template get_val<uint64_t>("rbd_journal_order"),
+ image_ctx.config.template get_val<uint64_t>("rbd_journal_splay_width"),
+ image_ctx.config.template get_val<std::string>("rbd_journal_pool"),
cls::journal::Tag::TAG_CLASS_NEW, tag_data,
librbd::Journal<>::IMAGE_CLIENT_ID, image_ctx.op_work_queue, ctx);
boost::lambda::_1, &image_ctx, m_snapc, boost::lambda::_2));
AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, m_overlap_objects);
- throttle->start_ops(image_ctx.concurrent_management_ops);
+ throttle->start_ops(
+ image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
boost::lambda::_1, &image_ctx, image_ctx.snapc, boost::lambda::_2));
AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, overlap_objects);
- throttle->start_ops(image_ctx.concurrent_management_ops);
+ throttle->start_ops(
+ image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
this, m_image_ctx, context_factory, this->create_callback_context(),
&m_prog_ctx, 0, num_objects);
- throttle->start_ops(m_image_ctx.concurrent_management_ops);
+ throttle->start_ops(
+ m_image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
m_head_num_objects, m_snap_object_map));
AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, num_objects);
- throttle->start_ops(image_ctx.concurrent_management_ops);
+ throttle->start_ops(
+ image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
boost::lambda::_1, &image_ctx, pspec, pools, boost::lambda::_2));
AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
nullptr, image_ctx, context_factory, ctx, NULL, 0, pools.size());
- throttle->start_ops(image_ctx.concurrent_management_ops);
+ throttle->start_ops(
+ image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
this, image_ctx, context_factory, ctx, &m_prog_ctx, copyup_start,
copyup_end);
- throttle->start_ops(image_ctx.concurrent_management_ops);
+ throttle->start_ops(
+ image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
template <typename I>
AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
this, image_ctx, context_factory, ctx, &m_prog_ctx, m_delete_start,
m_num_objects);
- throttle->start_ops(image_ctx.concurrent_management_ops);
+ throttle->start_ops(
+ image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}
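Each throttle call site above samples rbd_concurrent_management_ops when the operation starts rather than once at image open; since ConfigProxy reflects runtime configuration changes, subsequent operations pick up an updated value without reopening the image. The repeated pattern, for reference:

// throttle->start_ops(
//   image_ctx.config.template get_val<uint64_t>(
//     "rbd_concurrent_management_ops"));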
template<typename I>
ASSERT_EQ(initial_tag + 1, current_tag);
ASSERT_EQ(1, current_entry);
- ASSERT_EQ(9876, ictx->mirroring_replay_delay);
+ ASSERT_EQ(9876U, ictx->mirroring_replay_delay);
std::string value;
ASSERT_EQ(0, librbd::metadata_get(ictx, "conf_rbd_mirroring_replay_delay",
get_journal_commit_position(ictx, &current_tag, &current_entry);
ASSERT_EQ(initial_tag, current_tag);
ASSERT_EQ(initial_entry + 2, current_entry);
- ASSERT_EQ(0, ictx->mirroring_replay_delay);
+ ASSERT_EQ(0U, ictx->mirroring_replay_delay);
std::string value;
ASSERT_EQ(-ENOENT,
image_watcher(NULL), object_map(NULL),
exclusive_lock(NULL), journal(NULL),
trace_endpoint(image_ctx.trace_endpoint),
- concurrent_management_ops(image_ctx.concurrent_management_ops),
- blacklist_on_break_lock(image_ctx.blacklist_on_break_lock),
- blacklist_expire_seconds(image_ctx.blacklist_expire_seconds),
sparse_read_threshold_bytes(image_ctx.sparse_read_threshold_bytes),
- journal_order(image_ctx.journal_order),
- journal_splay_width(image_ctx.journal_splay_width),
- journal_commit_age(image_ctx.journal_commit_age),
- journal_object_flush_interval(image_ctx.journal_object_flush_interval),
- journal_object_flush_bytes(image_ctx.journal_object_flush_bytes),
- journal_object_flush_age(image_ctx.journal_object_flush_age),
- journal_object_max_in_flight_appends(
- image_ctx.journal_object_max_in_flight_appends),
- journal_pool(image_ctx.journal_pool),
- journal_max_payload_bytes(image_ctx.journal_max_payload_bytes),
- journal_max_concurrent_object_sets(
- image_ctx.journal_max_concurrent_object_sets),
- mirroring_resync_after_disconnect(
- image_ctx.mirroring_resync_after_disconnect),
- mirroring_delete_delay(image_ctx.mirroring_delete_delay),
mirroring_replay_delay(image_ctx.mirroring_replay_delay),
non_blocking_aio(image_ctx.non_blocking_aio),
blkin_trace_all(image_ctx.blkin_trace_all),
ZTracer::Endpoint trace_endpoint;
- int concurrent_management_ops;
- bool blacklist_on_break_lock;
- uint32_t blacklist_expire_seconds;
uint64_t sparse_read_threshold_bytes;
- uint8_t journal_order;
- uint8_t journal_splay_width;
- double journal_commit_age;
- int journal_object_flush_interval;
- uint64_t journal_object_flush_bytes;
- double journal_object_flush_age;
- uint64_t journal_object_max_in_flight_appends;
- std::string journal_pool;
- uint32_t journal_max_payload_bytes;
- int journal_max_concurrent_object_sets;
- bool mirroring_resync_after_disconnect;
- uint64_t mirroring_delete_delay;
- int mirroring_replay_delay;
+ uint64_t mirroring_replay_delay;
bool non_blocking_aio;
bool blkin_trace_all;
librbd::ImageCtx *ictx;
ASSERT_EQ(0, open_image(m_image_name, &ictx));
- ictx->request_timed_out_seconds = 0;
+ ictx->config.set_val("rbd_request_timed_out_seconds", "0");
ASSERT_EQ(0, register_image_watch(*ictx));
ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE,
ASSERT_EQ(0, open_image(image_name, &ictx));
ASSERT_EQ(ictx->order, 17);
- ASSERT_EQ(ictx->journal_order, 13);
+ ASSERT_EQ(ictx->config.get_val<uint64_t>("rbd_journal_order"), 13U);
if (is_feature_enabled(RBD_FEATURE_JOURNALING)) {
uint8_t order;
"14"));
ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_JOURNALING,
true));
- ASSERT_EQ(ictx->journal_order, 14);
+ ASSERT_EQ(ictx->config.get_val<uint64_t>("rbd_journal_order"), 14U);
+
C_SaferCond cond1;
cls::journal::client::get_immutable_metadata(m_ioctx, "journal." + ictx->id,
&order, &splay_width, &pool_id,
TEST_F(TestMockImageDeleterTrashMoveRequest, DelayedDeletion) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
- mock_image_ctx.mirroring_delete_delay = 600;
+ mock_image_ctx.config.set_val("rbd_mirroring_delete_delay", "600");
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
if (m_client_meta.image_id == m_local_image_id &&
client.state != cls::journal::CLIENT_STATE_CONNECTED) {
dout(5) << "client flagged disconnected, stopping image replay" << dendl;
- if (m_local_image_ctx->mirroring_resync_after_disconnect) {
+ if (m_local_image_ctx->config.template get_val<bool>("rbd_mirroring_resync_after_disconnect")) {
m_resync_requested = true;
on_start_fail(-ENOTCONN, "disconnected: automatic resync");
} else {
utime_t delete_time{ceph_clock_now()};
utime_t deferment_end_time{delete_time};
- deferment_end_time += m_image_ctx->mirroring_delete_delay;
+ deferment_end_time +=
+ m_image_ctx->config.template get_val<uint64_t>("rbd_mirroring_delete_delay");
m_trash_image_spec = {
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, m_image_ctx->name, delete_time,