librbd: remove non-performance sensitive cached config values
author Jason Dillaman <dillaman@redhat.com>
Thu, 4 Oct 2018 17:55:01 +0000 (13:55 -0400)
committer Jason Dillaman <dillaman@redhat.com>
Fri, 5 Oct 2018 13:54:54 +0000 (09:54 -0400)
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
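
The change follows one pattern throughout: configuration values that are not read on the I/O hot path are no longer mirrored into ImageCtx member variables, and call sites instead fetch them on demand with config.get_val<T>("rbd_..."). The sketch below is illustrative only and is not code from this commit; FakeConfigProxy and FakeImageCtx are simplified stand-ins for librbd's real ConfigProxy and ImageCtx, used just to show the call-site shape the commit converges on.

// Illustrative sketch only -- not part of this commit.  FakeConfigProxy and
// FakeImageCtx are assumed stand-ins for librbd's ConfigProxy and ImageCtx.
#include <cstdint>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

struct FakeConfigProxy {
  std::map<std::string, std::string> values;

  template <typename T>
  T get_val(const std::string &key) const {
    std::istringstream in(values.at(key));
    T out{};
    in >> out;  // in this sketch, bools are stored as "0"/"1"
    return out;
  }
};

struct FakeImageCtx {
  FakeConfigProxy config;
};

int main() {
  FakeImageCtx image_ctx;
  image_ctx.config.values = {
    {"rbd_blacklist_on_break_lock", "1"},
    {"rbd_blacklist_expire_seconds", "120"},
  };

  // Before: values such as these were cached as ImageCtx members
  // (image_ctx.blacklist_on_break_lock, image_ctx.blacklist_expire_seconds).
  // After: non-performance-sensitive options are read when they are needed.
  bool blacklist_on_break_lock =
      image_ctx.config.get_val<bool>("rbd_blacklist_on_break_lock");
  auto blacklist_expire_seconds =
      image_ctx.config.get_val<uint64_t>("rbd_blacklist_expire_seconds");

  std::cout << "blacklist_on_break_lock=" << blacklist_on_break_lock
            << " blacklist_expire_seconds=" << blacklist_expire_seconds << "\n";
  return 0;
}

Latency-sensitive options (cache, readahead, sparse-read threshold, and similar) keep their cached members, as the ImageCtx.h hunk below shows.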
27 files changed:
src/librbd/ExclusiveLock.cc
src/librbd/ImageCtx.cc
src/librbd/ImageCtx.h
src/librbd/ImageWatcher.cc
src/librbd/Journal.cc
src/librbd/api/DiffIterate.cc
src/librbd/cache/ObjectCacherObjectDispatch.cc
src/librbd/deep_copy/ImageCopyRequest.cc
src/librbd/image/OpenRequest.cc
src/librbd/image/RefreshParentRequest.cc
src/librbd/internal.cc
src/librbd/io/CopyupRequest.cc
src/librbd/librbd.cc
src/librbd/operation/EnableFeaturesRequest.cc
src/librbd/operation/FlattenRequest.cc
src/librbd/operation/MigrateRequest.cc
src/librbd/operation/ObjectMapIterate.cc
src/librbd/operation/SnapshotRollbackRequest.cc
src/librbd/operation/SnapshotUnprotectRequest.cc
src/librbd/operation/TrimRequest.cc
src/test/librbd/journal/test_Replay.cc
src/test/librbd/mock/MockImageCtx.h
src/test/librbd/test_ImageWatcher.cc
src/test/librbd/test_internal.cc
src/test/rbd_mirror/image_deleter/test_mock_TrashMoveRequest.cc
src/tools/rbd_mirror/ImageReplayer.cc
src/tools/rbd_mirror/image_deleter/TrashMoveRequest.cc

diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc
index ba3591fb904e63523c99de81f65b07506607aa93..71d98c5b2e6496625ea44c2c5809601fdfd8d5b3 100644
@@ -29,8 +29,8 @@ template <typename I>
 ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
   : ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
           image_ctx.image_watcher, managed_lock::EXCLUSIVE,
-          image_ctx.blacklist_on_break_lock,
-          image_ctx.blacklist_expire_seconds),
+          image_ctx.config.template get_val<bool>("rbd_blacklist_on_break_lock"),
+          image_ctx.config.template get_val<uint64_t>("rbd_blacklist_expire_seconds")),
     m_image_ctx(image_ctx) {
   Mutex::Locker locker(ML<I>::m_lock);
   ML<I>::set_state_uninitialized();
diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc
index 1777456319ce489404b124c2eb12ed55ae5e01b9..ec9dc2d0d9913cd1f63ab21ff268f4a22b3c8bd5 100644
@@ -314,9 +314,9 @@ public:
     if (snap_id == LIBRADOS_SNAP_HEAD)
       return flags;
 
-    if (balance_snap_reads)
+    if (config.get_val<bool>("rbd_balance_snap_reads"))
       flags |= librados::OPERATION_BALANCE_READS;
-    else if (localize_snap_reads)
+    else if (config.get_val<bool>("rbd_localize_snap_reads"))
       flags |= librados::OPERATION_LOCALIZE_READS;
     return flags;
   }
@@ -785,73 +785,48 @@ public:
       }
     }
 
-#define ASSIGN_OPTION(param, type)                                             \
-    do {                                                                       \
-      string key = "rbd_";                                                    \
-      key = key + #param;                                                     \
-      param = config.get_val<type>("rbd_"#param);                              \
-    } while (0);
+#define ASSIGN_OPTION(param, type)              \
+    param = config.get_val<type>("rbd_"#param)
 
     ASSIGN_OPTION(non_blocking_aio, bool);
     ASSIGN_OPTION(cache, bool);
     ASSIGN_OPTION(cache_writethrough_until_flush, bool);
-    ASSIGN_OPTION(cache_size, Option::size_t);
     ASSIGN_OPTION(cache_max_dirty, Option::size_t);
-    ASSIGN_OPTION(cache_target_dirty, Option::size_t);
-    ASSIGN_OPTION(cache_max_dirty_age, double);
-    ASSIGN_OPTION(cache_max_dirty_object, uint64_t);
-    ASSIGN_OPTION(cache_block_writes_upfront, bool);
-    ASSIGN_OPTION(concurrent_management_ops, uint64_t);
-    ASSIGN_OPTION(balance_snap_reads, bool);
-    ASSIGN_OPTION(localize_snap_reads, bool);
-    ASSIGN_OPTION(balance_parent_reads, bool);
-    ASSIGN_OPTION(localize_parent_reads, bool);
     ASSIGN_OPTION(sparse_read_threshold_bytes, Option::size_t);
-    ASSIGN_OPTION(readahead_trigger_requests, uint64_t);
     ASSIGN_OPTION(readahead_max_bytes, Option::size_t);
     ASSIGN_OPTION(readahead_disable_after_bytes, Option::size_t);
     ASSIGN_OPTION(clone_copy_on_read, bool);
-    ASSIGN_OPTION(blacklist_on_break_lock, bool);
-    ASSIGN_OPTION(blacklist_expire_seconds, uint64_t);
-    ASSIGN_OPTION(request_timed_out_seconds, uint64_t);
     ASSIGN_OPTION(enable_alloc_hint, bool);
-    ASSIGN_OPTION(journal_order, uint64_t);
-    ASSIGN_OPTION(journal_splay_width, uint64_t);
-    ASSIGN_OPTION(journal_commit_age, double);
-    ASSIGN_OPTION(journal_object_flush_interval, uint64_t);
-    ASSIGN_OPTION(journal_object_flush_bytes, Option::size_t);
-    ASSIGN_OPTION(journal_object_flush_age, double);
-    ASSIGN_OPTION(journal_object_max_in_flight_appends, uint64_t);
-    ASSIGN_OPTION(journal_max_payload_bytes, Option::size_t);
-    ASSIGN_OPTION(journal_max_concurrent_object_sets, uint64_t);
-    ASSIGN_OPTION(mirroring_resync_after_disconnect, bool);
-    ASSIGN_OPTION(mirroring_delete_delay, uint64_t);
     ASSIGN_OPTION(mirroring_replay_delay, uint64_t);
     ASSIGN_OPTION(mtime_update_interval, uint64_t);
     ASSIGN_OPTION(atime_update_interval, uint64_t);
     ASSIGN_OPTION(skip_partial_discard, bool);
     ASSIGN_OPTION(blkin_trace_all, bool);
-    ASSIGN_OPTION(qos_iops_limit, uint64_t);
-    ASSIGN_OPTION(qos_bps_limit, uint64_t);
-    ASSIGN_OPTION(qos_read_iops_limit, uint64_t);
-    ASSIGN_OPTION(qos_write_iops_limit, uint64_t);
-    ASSIGN_OPTION(qos_read_bps_limit, uint64_t);
-    ASSIGN_OPTION(qos_write_bps_limit, uint64_t);
-
-    if (thread_safe) {
-      ASSIGN_OPTION(journal_pool, std::string);
-    }
+
+#undef ASSIGN_OPTION
 
     if (sparse_read_threshold_bytes == 0) {
       sparse_read_threshold_bytes = get_object_size();
     }
 
-    io_work_queue->apply_qos_limit(qos_iops_limit, RBD_QOS_IOPS_THROTTLE);
-    io_work_queue->apply_qos_limit(qos_bps_limit, RBD_QOS_BPS_THROTTLE);
-    io_work_queue->apply_qos_limit(qos_read_iops_limit, RBD_QOS_READ_IOPS_THROTTLE);
-    io_work_queue->apply_qos_limit(qos_write_iops_limit, RBD_QOS_WRITE_IOPS_THROTTLE);
-    io_work_queue->apply_qos_limit(qos_read_bps_limit, RBD_QOS_READ_BPS_THROTTLE);
-    io_work_queue->apply_qos_limit(qos_write_bps_limit, RBD_QOS_WRITE_BPS_THROTTLE);
+    io_work_queue->apply_qos_limit(
+      config.get_val<uint64_t>("rbd_qos_iops_limit"),
+      RBD_QOS_IOPS_THROTTLE);
+    io_work_queue->apply_qos_limit(
+      config.get_val<uint64_t>("rbd_qos_bps_limit"),
+      RBD_QOS_BPS_THROTTLE);
+    io_work_queue->apply_qos_limit(
+      config.get_val<uint64_t>("rbd_qos_read_iops_limit"),
+      RBD_QOS_READ_IOPS_THROTTLE);
+    io_work_queue->apply_qos_limit(
+      config.get_val<uint64_t>("rbd_qos_write_iops_limit"),
+      RBD_QOS_WRITE_IOPS_THROTTLE);
+    io_work_queue->apply_qos_limit(
+      config.get_val<uint64_t>("rbd_qos_read_bps_limit"),
+      RBD_QOS_READ_BPS_THROTTLE);
+    io_work_queue->apply_qos_limit(
+      config.get_val<uint64_t>("rbd_qos_write_bps_limit"),
+      RBD_QOS_WRITE_BPS_THROTTLE);
   }
 
   ExclusiveLock<ImageCtx> *ImageCtx::create_exclusive_lock() {
diff --git a/src/librbd/ImageCtx.h b/src/librbd/ImageCtx.h
index 911044202ecdc7982e6b08f710d152076fdb7f7d..3e97c712951cabed30e10f3c9fb1ae48d163e099 100644
@@ -63,6 +63,8 @@ namespace librbd {
   }
 
   struct ImageCtx {
+    static const string METADATA_CONF_PREFIX;
+
     CephContext *cct;
     ConfigProxy config;
     std::set<std::string> config_overrides;
@@ -170,54 +172,21 @@ namespace librbd {
 
     bool ignore_migrating = false;
 
-    // Configuration
-    static const string METADATA_CONF_PREFIX;
+    /// Cached latency-sensitive configuration settings
     bool non_blocking_aio;
     bool cache;
     bool cache_writethrough_until_flush;
-    uint64_t cache_size;
     uint64_t cache_max_dirty;
-    uint64_t cache_target_dirty;
-    double cache_max_dirty_age;
-    uint32_t cache_max_dirty_object;
-    bool cache_block_writes_upfront;
-    uint32_t concurrent_management_ops;
-    bool balance_snap_reads;
-    bool localize_snap_reads;
-    bool balance_parent_reads;
-    bool localize_parent_reads;
     uint64_t sparse_read_threshold_bytes;
-    uint32_t readahead_trigger_requests;
     uint64_t readahead_max_bytes;
     uint64_t readahead_disable_after_bytes;
     bool clone_copy_on_read;
-    bool blacklist_on_break_lock;
-    uint32_t blacklist_expire_seconds;
-    uint32_t request_timed_out_seconds;
     bool enable_alloc_hint;
-    uint8_t journal_order;
-    uint8_t journal_splay_width;
-    double journal_commit_age;
-    int journal_object_flush_interval;
-    uint64_t journal_object_flush_bytes;
-    double journal_object_flush_age;
-    uint64_t journal_object_max_in_flight_appends;
-    std::string journal_pool;
-    uint32_t journal_max_payload_bytes;
-    int journal_max_concurrent_object_sets;
-    bool mirroring_resync_after_disconnect;
-    uint64_t mirroring_delete_delay;
-    int mirroring_replay_delay;
     bool skip_partial_discard;
     bool blkin_trace_all;
+    uint64_t mirroring_replay_delay;
     uint64_t mtime_update_interval;
     uint64_t atime_update_interval;
-    uint64_t qos_iops_limit;
-    uint64_t qos_bps_limit;
-    uint64_t qos_read_iops_limit;
-    uint64_t qos_write_iops_limit;
-    uint64_t qos_read_bps_limit;
-    uint64_t qos_write_bps_limit;
 
     LibrbdAdminSocketHook *asok_hook;
 
diff --git a/src/librbd/ImageWatcher.cc b/src/librbd/ImageWatcher.cc
index fba8025fcc4d3525f7f26d754f5d97ad9ef5537f..4fead7161f3448344a6dd7ca4590d247cefa18b2 100644
@@ -488,8 +488,9 @@ void ImageWatcher<I>::schedule_async_request_timed_out(const AsyncRequestId &id)
   Task task(TASK_CODE_ASYNC_REQUEST, id);
   m_task_finisher->cancel(task);
 
-  m_task_finisher->add_event_after(task, m_image_ctx.request_timed_out_seconds,
-                                   ctx);
+  m_task_finisher->add_event_after(
+    task, m_image_ctx.config.template get_val<uint64_t>("rbd_request_timed_out_seconds"),
+    ctx);
 }
 
 template <typename I>
diff --git a/src/librbd/Journal.cc b/src/librbd/Journal.cc
index 233de8fb5252599b5e7023741984c14da4c8ec99..45d103f43277d12c8747522c966198f85b6ae473 100644
@@ -1062,10 +1062,12 @@ void Journal<I>::create_journaler() {
 
   transition_state(STATE_INITIALIZING, 0);
   ::journal::Settings settings;
-  settings.commit_interval = m_image_ctx.journal_commit_age;
-  settings.max_payload_bytes = m_image_ctx.journal_max_payload_bytes;
+  settings.commit_interval =
+    m_image_ctx.config.template get_val<double>("rbd_journal_commit_age");
+  settings.max_payload_bytes =
+    m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_max_payload_bytes");
   settings.max_concurrent_object_sets =
-    m_image_ctx.journal_max_concurrent_object_sets;
+    m_image_ctx.config.template get_val<uint64_t>("rbd_journal_max_concurrent_object_sets");
   // TODO: a configurable filter to exclude certain peers from being
   // disconnected.
   settings.whitelisted_laggy_clients = {IMAGE_CLIENT_ID};
@@ -1166,10 +1168,11 @@ void Journal<I>::complete_event(typename Events::iterator it, int r) {
 template <typename I>
 void Journal<I>::start_append() {
   ceph_assert(m_lock.is_locked());
-  m_journaler->start_append(m_image_ctx.journal_object_flush_interval,
-                           m_image_ctx.journal_object_flush_bytes,
-                           m_image_ctx.journal_object_flush_age,
-                            m_image_ctx.journal_object_max_in_flight_appends);
+  m_journaler->start_append(
+    m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_flush_interval"),
+    m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_object_flush_bytes"),
+    m_image_ctx.config.template get_val<double>("rbd_journal_object_flush_age"),
+    m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_max_in_flight_appends"));
   transition_state(STATE_READY, 0);
 }
 
diff --git a/src/librbd/api/DiffIterate.cc b/src/librbd/api/DiffIterate.cc
index a3af7fda4b9f4775f5ffccd711f1bbda31d17713..0e620cb5adb74c04ea6aceaaa2d251664940e28e 100644
@@ -52,7 +52,7 @@ struct DiffContext {
     : callback(callback), callback_arg(callback_arg),
       whole_object(_whole_object), from_snap_id(_from_snap_id),
       end_snap_id(_end_snap_id),
-      throttle(image_ctx.concurrent_management_ops, true) {
+      throttle(image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"), true) {
   }
 };
 
diff --git a/src/librbd/cache/ObjectCacherObjectDispatch.cc b/src/librbd/cache/ObjectCacherObjectDispatch.cc
index 3aec9edcc9c61d2652a930a99f2e6ddeeb5a62e4..f108247b2542350c4a246953bc560b8f939bc9bf 100644
@@ -102,34 +102,40 @@ void ObjectCacherObjectDispatch<I>::init() {
     init_max_dirty = 0;
   }
 
+  auto cache_size =
+    m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_size");
+  auto target_dirty =
+    m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_target_dirty");
+  auto max_dirty_age =
+    m_image_ctx->config.template get_val<double>("rbd_cache_max_dirty_age");
+  auto block_writes_upfront =
+    m_image_ctx->config.template get_val<bool>("rbd_cache_block_writes_upfront");
+  auto max_dirty_object =
+    m_image_ctx->config.template get_val<uint64_t>("rbd_cache_max_dirty_object");
+
   ldout(cct, 5) << "Initial cache settings:"
-                << " size=" << m_image_ctx->cache_size
+                << " size=" << cache_size
                 << " num_objects=" << 10
                 << " max_dirty=" << init_max_dirty
-                << " target_dirty=" << m_image_ctx->cache_target_dirty
-                << " max_dirty_age="
-                << m_image_ctx->cache_max_dirty_age << dendl;
+                << " target_dirty=" << target_dirty
+                << " max_dirty_age=" << max_dirty_age << dendl;
 
   m_object_cacher = new ObjectCacher(cct, m_image_ctx->perfcounter->get_name(),
                                      *m_writeback_handler, m_cache_lock,
-                                     nullptr, nullptr, m_image_ctx->cache_size,
-                                    10,  /* reset this in init */
-                                    init_max_dirty,
-                                    m_image_ctx->cache_target_dirty,
-                                    m_image_ctx->cache_max_dirty_age,
-                                     m_image_ctx->cache_block_writes_upfront);
+                                     nullptr, nullptr, cache_size,
+                                     10,  /* reset this in init */
+                                     init_max_dirty, target_dirty,
+                                     max_dirty_age, block_writes_upfront);
 
   // size object cache appropriately
-  uint64_t obj = m_image_ctx->cache_max_dirty_object;
-  if (!obj) {
-    obj = std::min<uint64_t>(2000,
-                             std::max<uint64_t>(
-                               10, m_image_ctx->cache_size / 100 /
+  if (max_dirty_object == 0) {
+    max_dirty_object = std::min<uint64_t>(
+      2000, std::max<uint64_t>(10, cache_size / 100 /
                                  sizeof(ObjectCacher::Object)));
   }
-  ldout(cct, 5) << " cache bytes " << m_image_ctx->cache_size
-                << " -> about " << obj << " objects" << dendl;
-  m_object_cacher->set_max_objects(obj);
+  ldout(cct, 5) << " cache bytes " << cache_size
+                << " -> about " << max_dirty_object << " objects" << dendl;
+  m_object_cacher->set_max_objects(max_dirty_object);
 
   m_object_set = new ObjectCacher::ObjectSet(nullptr,
                                              m_image_ctx->data_ctx.get_id(), 0);
diff --git a/src/librbd/deep_copy/ImageCopyRequest.cc b/src/librbd/deep_copy/ImageCopyRequest.cc
index f9be8276a75baf440b7d6dc3b1259459c65b4769..bcf9970bf589d251389afb38ceecb39c2578110c 100644
@@ -83,7 +83,7 @@ void ImageCopyRequest<I>::send_object_copies() {
   {
     Mutex::Locker locker(m_lock);
     for (uint64_t i = 0;
-         i < m_cct->_conf.get_val<uint64_t>("rbd_concurrent_management_ops");
+         i < m_src_image_ctx->config.template get_val<uint64_t>("rbd_concurrent_management_ops");
          ++i) {
       send_next_object_copy();
       if (m_ret_val < 0 && m_current_ops == 0) {
diff --git a/src/librbd/image/OpenRequest.cc b/src/librbd/image/OpenRequest.cc
index 7ea98408924d5fed3ce099bba518c0189ae0d71c..dd9e603dc98bdae7d4270eabc9270b702215ddfd 100644
@@ -528,9 +528,9 @@ Context *OpenRequest<I>::send_init_cache(int *result) {
 
   // readahead requires the cache
   m_image_ctx->readahead.set_trigger_requests(
-    m_image_ctx->readahead_trigger_requests);
+    m_image_ctx->config.template get_val<uint64_t>("rbd_readahead_trigger_requests"));
   m_image_ctx->readahead.set_max_readahead_size(
-    m_image_ctx->readahead_max_bytes);
+    m_image_ctx->config.template get_val<Option::size_t>("rbd_readahead_max_bytes"));
 
   return send_register_watch(result);
 }
diff --git a/src/librbd/image/RefreshParentRequest.cc b/src/librbd/image/RefreshParentRequest.cc
index 5afb245867ef44a405c78142c8b32d5f6cb02e90..668ad56bc14df6f1b69439fbbddb2d34326d2da6 100644
@@ -136,9 +136,9 @@ void RefreshParentRequest<I>::send_open_parent() {
   m_parent_image_ctx->child = &m_child_image_ctx;
 
   // set rados flags for reading the parent image
-  if (m_child_image_ctx.balance_parent_reads) {
+  if (m_child_image_ctx.config.template get_val<bool>("rbd_balance_parent_reads")) {
     m_parent_image_ctx->set_read_flag(librados::OPERATION_BALANCE_READS);
-  } else if (m_child_image_ctx.localize_parent_reads) {
+  } else if (m_child_image_ctx.config.template get_val<bool>("rbd_localize_parent_reads")) {
     m_parent_image_ctx->set_read_flag(librados::OPERATION_LOCALIZE_READS);
   }
 
diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc
index d57ad9567109184692a8278f06d3950579e5afd1..cbe6574aba1b5f96bf445d685fa9d78bf6f1bdfa 100644
@@ -2032,7 +2032,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2)
     }
 
     RWLock::RLocker owner_lock(src->owner_lock);
-    SimpleThrottle throttle(src->concurrent_management_ops, false);
+    SimpleThrottle throttle(src->config.get_val<uint64_t>("rbd_concurrent_management_ops"), false);
     uint64_t period = src->get_stripe_period();
     unsigned fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
                             LIBRADOS_OP_FLAG_FADVISE_NOCACHE;
@@ -2183,7 +2183,7 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2)
       return -EINVAL;
     }
 
-    if (ictx->blacklist_on_break_lock) {
+    if (ictx->config.get_val<bool>("rbd_blacklist_on_break_lock")) {
       typedef std::map<rados::cls::lock::locker_id_t,
                       rados::cls::lock::locker_info_t> Lockers;
       Lockers lockers;
@@ -2212,8 +2212,9 @@ bool compare_by_name(const child_info_t& c1, const child_info_t& c2)
 
       RWLock::RLocker locker(ictx->md_lock);
       librados::Rados rados(ictx->md_ctx);
-      r = rados.blacklist_add(client_address,
-                             ictx->blacklist_expire_seconds);
+      r = rados.blacklist_add(
+        client_address,
+        ictx->config.get_val<uint64_t>("rbd_blacklist_expire_seconds"));
       if (r < 0) {
         lderr(ictx->cct) << "unable to blacklist client: " << cpp_strerror(r)
                       << dendl;
diff --git a/src/librbd/io/CopyupRequest.cc b/src/librbd/io/CopyupRequest.cc
index 0def795f1a845bdeb7c09183b85be28958266cc1..7168cafda07f4fd59ef187cd47b0a7e18278f763 100644
@@ -482,7 +482,8 @@ bool CopyupRequest<I>::send_object_map() {
     AsyncObjectThrottle<> *throttle = new AsyncObjectThrottle<>(
       NULL, *m_ictx, context_factory, util::create_context_callback(this),
       NULL, 0, m_snap_ids.size());
-    throttle->start_ops(m_ictx->concurrent_management_ops);
+    throttle->start_ops(
+      m_ictx->config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
   }
   return false;
 }
diff --git a/src/librbd/librbd.cc b/src/librbd/librbd.cc
index 75413110ba0f9dde2d8498aa9a74a6c285483acb..1962179258eccc94dedc20ad91bdd641f30f00a0 100644
@@ -2051,7 +2051,7 @@ namespace librbd {
       return -EINVAL;
     }
 
-    bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+    bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
     if (discard_zero && mem_is_zero(bl.c_str(), bl.length())) {
       int r = ictx->io_work_queue->discard(ofs, len, false);
       tracepoint(librbd, writesame_exit, r);
@@ -2188,7 +2188,7 @@ namespace librbd {
       return -EINVAL;
     }
 
-    bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+    bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
     if (discard_zero && mem_is_zero(bl.c_str(), bl.length())) {
       ictx->io_work_queue->aio_discard(get_aio_completion(c), off, len, false);
       tracepoint(librbd, aio_writesame_exit, 0);
@@ -4559,7 +4559,7 @@ extern "C" ssize_t rbd_writesame(rbd_image_t image, uint64_t ofs, size_t len,
     return -EINVAL;
   }
 
-  bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+  bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
   if (discard_zero && mem_is_zero(buf, data_len)) {
     int r = ictx->io_work_queue->discard(ofs, len, false);
     tracepoint(librbd, writesame_exit, r);
@@ -4781,7 +4781,7 @@ extern "C" int rbd_aio_writesame(rbd_image_t image, uint64_t off, size_t len,
     return -EINVAL;
   }
 
-  bool discard_zero = ictx->cct->_conf.get_val<bool>("rbd_discard_on_zeroed_write_same");
+  bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
   if (discard_zero && mem_is_zero(buf, data_len)) {
     ictx->io_work_queue->aio_discard(get_aio_completion(comp), off, len, false);
     tracepoint(librbd, aio_writesame_exit, 0);
diff --git a/src/librbd/operation/EnableFeaturesRequest.cc b/src/librbd/operation/EnableFeaturesRequest.cc
index 8cd3f00b55ebb1c698f0b90c85a701151109248f..44938ffee66d5a93620368d17f3cc7dd0d8b5704 100644
@@ -228,8 +228,10 @@ void EnableFeaturesRequest<I>::send_create_journal() {
     &EnableFeaturesRequest<I>::handle_create_journal>(this);
 
   journal::CreateRequest<I> *req = journal::CreateRequest<I>::create(
-    image_ctx.md_ctx, image_ctx.id, image_ctx.journal_order,
-    image_ctx.journal_splay_width, image_ctx.journal_pool,
+    image_ctx.md_ctx, image_ctx.id,
+    image_ctx.config.template get_val<uint64_t>("rbd_journal_order"),
+    image_ctx.config.template get_val<uint64_t>("rbd_journal_splay_width"),
+    image_ctx.config.template get_val<std::string>("rbd_journal_pool"),
     cls::journal::Tag::TAG_CLASS_NEW, tag_data,
     librbd::Journal<>::IMAGE_CLIENT_ID, image_ctx.op_work_queue, ctx);
 
diff --git a/src/librbd/operation/FlattenRequest.cc b/src/librbd/operation/FlattenRequest.cc
index e55daf2670b80107d688e0a475346cccb0cfab64..af21bd4e6a520f066249cde377908bc79a68c086 100644
@@ -108,7 +108,8 @@ void FlattenRequest<I>::flatten_objects() {
       boost::lambda::_1, &image_ctx, m_snapc, boost::lambda::_2));
   AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
     this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, m_overlap_objects);
-  throttle->start_ops(image_ctx.concurrent_management_ops);
+  throttle->start_ops(
+    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
 }
 
 template <typename I>
diff --git a/src/librbd/operation/MigrateRequest.cc b/src/librbd/operation/MigrateRequest.cc
index 7bcff805a5e139ac9371c61ee00ff0ecf198c4f9..71a6f93288efa38e09c230befe1e5138860cc66e 100644
@@ -197,7 +197,8 @@ void MigrateRequest<I>::migrate_objects() {
       boost::lambda::_1, &image_ctx, image_ctx.snapc, boost::lambda::_2));
   AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
     this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, overlap_objects);
-  throttle->start_ops(image_ctx.concurrent_management_ops);
+  throttle->start_ops(
+    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
 }
 
 template <typename I>
diff --git a/src/librbd/operation/ObjectMapIterate.cc b/src/librbd/operation/ObjectMapIterate.cc
index 5ab35e907ad6912307fe7c13c09632caac5d417a..658dbb4a73762d5c8adbf0a1e81f9df449c4c60e 100644
@@ -259,7 +259,8 @@ void ObjectMapIterateRequest<I>::send_verify_objects() {
   AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
     this, m_image_ctx, context_factory, this->create_callback_context(),
     &m_prog_ctx, 0, num_objects);
-  throttle->start_ops(m_image_ctx.concurrent_management_ops);
+  throttle->start_ops(
+    m_image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
 }
 
 template <typename I>
diff --git a/src/librbd/operation/SnapshotRollbackRequest.cc b/src/librbd/operation/SnapshotRollbackRequest.cc
index 006dc4bb761c2797f307e55cebcd73726b4607d7..596570a397b97ceb9dcc61a1e1ed9082ae6de02d 100644
@@ -298,7 +298,8 @@ void SnapshotRollbackRequest<I>::send_rollback_objects() {
       m_head_num_objects, m_snap_object_map));
   AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
     this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, num_objects);
-  throttle->start_ops(image_ctx.concurrent_management_ops);
+  throttle->start_ops(
+    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
 }
 
 template <typename I>
diff --git a/src/librbd/operation/SnapshotUnprotectRequest.cc b/src/librbd/operation/SnapshotUnprotectRequest.cc
index e8f5e6f4201850936bea57a5f7ff8d2139379620..4a58dbc0bbacbd15eec3809ab82c28d1c4c8c21d 100644
@@ -263,7 +263,8 @@ void SnapshotUnprotectRequest<I>::send_scan_pool_children() {
       boost::lambda::_1, &image_ctx, pspec, pools, boost::lambda::_2));
   AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
     nullptr, image_ctx, context_factory, ctx, NULL, 0, pools.size());
-  throttle->start_ops(image_ctx.concurrent_management_ops);
+  throttle->start_ops(
+    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
 }
 
 template <typename I>
diff --git a/src/librbd/operation/TrimRequest.cc b/src/librbd/operation/TrimRequest.cc
index 8609235e558ef986f3443672b3a440656ab82d2c..cecf37b3e6138936b3764f6993051b1e2926d204 100644
@@ -253,7 +253,8 @@ void TrimRequest<I>::send_copyup_objects() {
   AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
     this, image_ctx, context_factory, ctx, &m_prog_ctx, copyup_start,
     copyup_end);
-  throttle->start_ops(image_ctx.concurrent_management_ops);
+  throttle->start_ops(
+    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
 }
 
 template <typename I>
@@ -273,7 +274,8 @@ void TrimRequest<I>::send_remove_objects() {
   AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
     this, image_ctx, context_factory, ctx, &m_prog_ctx, m_delete_start,
     m_num_objects);
-  throttle->start_ops(image_ctx.concurrent_management_ops);
+  throttle->start_ops(
+    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
 }
 
 template<typename I>
diff --git a/src/test/librbd/journal/test_Replay.cc b/src/test/librbd/journal/test_Replay.cc
index 3ab6fb9d548261c076f02ac1a0e196c32750cb47..82e862381f8a3dda32d0d05b069bd5ee10c2e088 100644
@@ -755,7 +755,7 @@ TEST_F(TestJournalReplay, MetadataSet) {
   ASSERT_EQ(initial_tag + 1, current_tag);
   ASSERT_EQ(1, current_entry);
 
-  ASSERT_EQ(9876, ictx->mirroring_replay_delay);
+  ASSERT_EQ(9876U, ictx->mirroring_replay_delay);
 
   std::string value;
   ASSERT_EQ(0, librbd::metadata_get(ictx, "conf_rbd_mirroring_replay_delay",
@@ -797,7 +797,7 @@ TEST_F(TestJournalReplay, MetadataRemove) {
   get_journal_commit_position(ictx, &current_tag, &current_entry);
   ASSERT_EQ(initial_tag, current_tag);
   ASSERT_EQ(initial_entry + 2, current_entry);
-  ASSERT_EQ(0, ictx->mirroring_replay_delay);
+  ASSERT_EQ(0U, ictx->mirroring_replay_delay);
 
   std::string value;
   ASSERT_EQ(-ENOENT,
diff --git a/src/test/librbd/mock/MockImageCtx.h b/src/test/librbd/mock/MockImageCtx.h
index d027d2dac4003ee425ad4be73814cc288a74aff9..88e06b7e3b1b14abf4aee686ac7f7cc1a47000f7 100644
@@ -92,25 +92,7 @@ struct MockImageCtx {
       image_watcher(NULL), object_map(NULL),
       exclusive_lock(NULL), journal(NULL),
       trace_endpoint(image_ctx.trace_endpoint),
-      concurrent_management_ops(image_ctx.concurrent_management_ops),
-      blacklist_on_break_lock(image_ctx.blacklist_on_break_lock),
-      blacklist_expire_seconds(image_ctx.blacklist_expire_seconds),
       sparse_read_threshold_bytes(image_ctx.sparse_read_threshold_bytes),
-      journal_order(image_ctx.journal_order),
-      journal_splay_width(image_ctx.journal_splay_width),
-      journal_commit_age(image_ctx.journal_commit_age),
-      journal_object_flush_interval(image_ctx.journal_object_flush_interval),
-      journal_object_flush_bytes(image_ctx.journal_object_flush_bytes),
-      journal_object_flush_age(image_ctx.journal_object_flush_age),
-      journal_object_max_in_flight_appends(
-          image_ctx.journal_object_max_in_flight_appends),
-      journal_pool(image_ctx.journal_pool),
-      journal_max_payload_bytes(image_ctx.journal_max_payload_bytes),
-      journal_max_concurrent_object_sets(
-          image_ctx.journal_max_concurrent_object_sets),
-      mirroring_resync_after_disconnect(
-          image_ctx.mirroring_resync_after_disconnect),
-      mirroring_delete_delay(image_ctx.mirroring_delete_delay),
       mirroring_replay_delay(image_ctx.mirroring_replay_delay),
       non_blocking_aio(image_ctx.non_blocking_aio),
       blkin_trace_all(image_ctx.blkin_trace_all),
@@ -316,22 +298,7 @@ struct MockImageCtx {
 
   ZTracer::Endpoint trace_endpoint;
 
-  int concurrent_management_ops;
-  bool blacklist_on_break_lock;
-  uint32_t blacklist_expire_seconds;
   uint64_t sparse_read_threshold_bytes;
-  uint8_t journal_order;
-  uint8_t journal_splay_width;
-  double journal_commit_age;
-  int journal_object_flush_interval;
-  uint64_t journal_object_flush_bytes;
-  double journal_object_flush_age;
-  uint64_t journal_object_max_in_flight_appends;
-  std::string journal_pool;
-  uint32_t journal_max_payload_bytes;
-  int journal_max_concurrent_object_sets;
-  bool mirroring_resync_after_disconnect;
-  uint64_t mirroring_delete_delay;
   int mirroring_replay_delay;
   bool non_blocking_aio;
   bool blkin_trace_all;
diff --git a/src/test/librbd/test_ImageWatcher.cc b/src/test/librbd/test_ImageWatcher.cc
index 9def43e28e97f110cc0a53190facf898bfd8cecd..9343f74d78d565171d07f6e3614fd306ad38338a 100644
@@ -677,7 +677,7 @@ TEST_F(TestImageWatcher, NotifyAsyncRequestTimedOut) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
 
-  ictx->request_timed_out_seconds = 0;
+  ictx->config.set_val("rbd_request_timed_out_seconds", "0");
 
   ASSERT_EQ(0, register_image_watch(*ictx));
   ASSERT_EQ(0, lock_image(*ictx, LOCK_EXCLUSIVE,
diff --git a/src/test/librbd/test_internal.cc b/src/test/librbd/test_internal.cc
index c7e71681aed0d125a0792e788e6e83a41f84f9e4..326c87435a70e117a9301774f9031f7283aca64f 100644
@@ -1305,7 +1305,7 @@ TEST_F(TestInternal, PoolMetadataConfApply) {
 
   ASSERT_EQ(0, open_image(image_name, &ictx));
   ASSERT_EQ(ictx->order, 17);
-  ASSERT_EQ(ictx->journal_order, 13);
+  ASSERT_EQ(ictx->config.get_val<uint64_t>("rbd_journal_order"), 13U);
 
   if (is_feature_enabled(RBD_FEATURE_JOURNALING)) {
     uint8_t order;
@@ -1324,7 +1324,8 @@ TEST_F(TestInternal, PoolMetadataConfApply) {
                                                   "14"));
     ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_JOURNALING,
                                                    true));
-    ASSERT_EQ(ictx->journal_order, 14);
+    ASSERT_EQ(ictx->config.get_val<uint64_t>("rbd_journal_order"), 14U);
+
     C_SaferCond cond1;
     cls::journal::client::get_immutable_metadata(m_ioctx, "journal." + ictx->id,
                                                  &order, &splay_width, &pool_id,
diff --git a/src/test/rbd_mirror/image_deleter/test_mock_TrashMoveRequest.cc b/src/test/rbd_mirror/image_deleter/test_mock_TrashMoveRequest.cc
index d82a316472ec638ce266314015c510bd7db53dee..b3f204c574983ad1b416a025b59868a60de65584 100644
@@ -692,7 +692,7 @@ TEST_F(TestMockImageDeleterTrashMoveRequest, CloseImageError) {
 TEST_F(TestMockImageDeleterTrashMoveRequest, DelayedDelation) {
   librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
   librbd::MockExclusiveLock mock_exclusive_lock;
-  mock_image_ctx.mirroring_delete_delay = 600;
+  mock_image_ctx.config.set_val("rbd_mirroring_delete_delay", "600");
   mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
 
   InSequence seq;
diff --git a/src/tools/rbd_mirror/ImageReplayer.cc b/src/tools/rbd_mirror/ImageReplayer.cc
index 2a9a26f40fa1312a3063a1a6d21632c2befe3f71..aeb91d50dea7e910581daaeca47218a4d911b34a 100644
@@ -605,7 +605,7 @@ void ImageReplayer<I>::handle_init_remote_journaler(int r) {
   if (m_client_meta.image_id == m_local_image_id &&
       client.state != cls::journal::CLIENT_STATE_CONNECTED) {
     dout(5) << "client flagged disconnected, stopping image replay" << dendl;
-    if (m_local_image_ctx->mirroring_resync_after_disconnect) {
+    if (m_local_image_ctx->config.template get_val<bool>("rbd_mirroring_resync_after_disconnect")) {
       m_resync_requested = true;
       on_start_fail(-ENOTCONN, "disconnected: automatic resync");
     } else {
diff --git a/src/tools/rbd_mirror/image_deleter/TrashMoveRequest.cc b/src/tools/rbd_mirror/image_deleter/TrashMoveRequest.cc
index 4e9a4beac1b28bcd95686a601224f615859078f4..ad96e2504b5b040e70676184f5c6b5ecf8f35903 100644
@@ -255,7 +255,8 @@ void TrashMoveRequest<I>::trash_move() {
 
   utime_t delete_time{ceph_clock_now()};
   utime_t deferment_end_time{delete_time};
-  deferment_end_time += m_image_ctx->mirroring_delete_delay;
+  deferment_end_time +=
+    m_image_ctx->config.template get_val<uint64_t>("rbd_mirroring_delete_delay");
 
   m_trash_image_spec = {
     cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, m_image_ctx->name, delete_time,