osd: teach mclock about peering_event
author     Sage Weil <sage@redhat.com>
           Thu, 30 Nov 2017 15:19:37 +0000 (09:19 -0600)
committer  Sage Weil <sage@redhat.com>
           Mon, 4 Dec 2017 18:45:17 +0000 (12:45 -0600)
Signed-off-by: Sage Weil <sage@redhat.com>
src/common/legacy_config_opts.h
src/common/options.cc
src/osd/mClockOpClassSupport.cc
src/osd/mClockOpClassSupport.h

diff --git a/src/common/legacy_config_opts.h b/src/common/legacy_config_opts.h
index ec32f3d25567f9e7e05f6ca7579ee79212d96709..66b9c31ed12f95044583361f70b848bd33c3fe66 100644
--- a/src/common/legacy_config_opts.h
+++ b/src/common/legacy_config_opts.h
@@ -670,6 +670,9 @@ OPTION(osd_op_queue_mclock_recov_lim, OPT_DOUBLE)
 OPTION(osd_op_queue_mclock_scrub_res, OPT_DOUBLE)
 OPTION(osd_op_queue_mclock_scrub_wgt, OPT_DOUBLE)
 OPTION(osd_op_queue_mclock_scrub_lim, OPT_DOUBLE)
+OPTION(osd_op_queue_mclock_peering_event_res, OPT_DOUBLE)
+OPTION(osd_op_queue_mclock_peering_event_wgt, OPT_DOUBLE)
+OPTION(osd_op_queue_mclock_peering_event_lim, OPT_DOUBLE)
 
 OPTION(osd_ignore_stale_divergent_priors, OPT_BOOL) // do not assert on divergent_prior entries which aren't in the log and whose on-disk objects are newer
 
diff --git a/src/common/options.cc b/src/common/options.cc
index e75f67b08b7f0cca799553622746307efb59f2ae..548e886392fa226931de956b8911817580376fee 100644
--- a/src/common/options.cc
+++ b/src/common/options.cc
@@ -2298,6 +2298,66 @@ std::vector<Option> get_global_options() {
     .add_see_also("osd_op_queue_mclock_scrub_res")
     .add_see_also("osd_op_queue_mclock_scrub_wgt"),
 
+    Option("osd_op_queue_mclock_peering_event_res", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
+    .set_default(0.0)
+    .set_description("mclock reservation of peering events")
+    .set_long_description("mclock reservation of peering events when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the reservation")
+    .add_see_also("osd_op_queue")
+    .add_see_also("osd_op_queue_mclock_client_op_res")
+    .add_see_also("osd_op_queue_mclock_client_op_wgt")
+    .add_see_also("osd_op_queue_mclock_client_op_lim")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
+    .add_see_also("osd_op_queue_mclock_snap_res")
+    .add_see_also("osd_op_queue_mclock_snap_wgt")
+    .add_see_also("osd_op_queue_mclock_snap_lim")
+    .add_see_also("osd_op_queue_mclock_recov_res")
+    .add_see_also("osd_op_queue_mclock_recov_wgt")
+    .add_see_also("osd_op_queue_mclock_recov_lim")
+    .add_see_also("osd_op_queue_mclock_scrub_wgt")
+    .add_see_also("osd_op_queue_mclock_scrub_lim"),
+
+    Option("osd_op_queue_mclock_peering_event_wgt", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
+    .set_default(1.0)
+    .set_description("mclock weight of peering events")
+    .set_long_description("mclock weight of peering events when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the weight")
+    .add_see_also("osd_op_queue")
+    .add_see_also("osd_op_queue_mclock_client_op_res")
+    .add_see_also("osd_op_queue_mclock_client_op_wgt")
+    .add_see_also("osd_op_queue_mclock_client_op_lim")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
+    .add_see_also("osd_op_queue_mclock_snap_res")
+    .add_see_also("osd_op_queue_mclock_snap_wgt")
+    .add_see_also("osd_op_queue_mclock_snap_lim")
+    .add_see_also("osd_op_queue_mclock_recov_res")
+    .add_see_also("osd_op_queue_mclock_recov_wgt")
+    .add_see_also("osd_op_queue_mclock_recov_lim")
+    .add_see_also("osd_op_queue_mclock_scrub_res")
+    .add_see_also("osd_op_queue_mclock_scrub_lim"),
+
+    Option("osd_op_queue_mclock_peering_event_lim", Option::TYPE_FLOAT, Option::LEVEL_ADVANCED)
+    .set_default(0.001)
+    .set_description("mclock limit of peering events")
+    .set_long_description("mclock limit of peering events when osd_op_queue is either 'mclock_opclass' or 'mclock_client'; higher values increase the limit")
+    .add_see_also("osd_op_queue")
+    .add_see_also("osd_op_queue_mclock_client_op_res")
+    .add_see_also("osd_op_queue_mclock_client_op_wgt")
+    .add_see_also("osd_op_queue_mclock_client_op_lim")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_res")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_wgt")
+    .add_see_also("osd_op_queue_mclock_osd_rep_op_lim")
+    .add_see_also("osd_op_queue_mclock_snap_res")
+    .add_see_also("osd_op_queue_mclock_snap_wgt")
+    .add_see_also("osd_op_queue_mclock_snap_lim")
+    .add_see_also("osd_op_queue_mclock_recov_res")
+    .add_see_also("osd_op_queue_mclock_recov_wgt")
+    .add_see_also("osd_op_queue_mclock_recov_lim")
+    .add_see_also("osd_op_queue_mclock_scrub_res")
+    .add_see_also("osd_op_queue_mclock_scrub_wgt"),
+
     Option("osd_ignore_stale_divergent_priors", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
     .set_default(false)
     .set_description(""),
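
For reference, an illustrative ceph.conf fragment for tuning the options added above. The values are arbitrary examples, not recommendations; the options only apply when osd_op_queue is set to one of the mclock schedulers named in the descriptions.

    [osd]
    osd_op_queue = mclock_opclass
    # arbitrary example values; the defaults added above are
    # res 0.0, wgt 1.0, lim 0.001
    osd_op_queue_mclock_peering_event_res = 0.0
    osd_op_queue_mclock_peering_event_wgt = 2.0
    osd_op_queue_mclock_peering_event_lim = 0.001
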
diff --git a/src/osd/mClockOpClassSupport.cc b/src/osd/mClockOpClassSupport.cc
index afbc2f739c58d8d47650d6aa4848dbf6032a0caf..3849ad9c680350c2f49ba758f2db2ec35f5afb08 100644
--- a/src/osd/mClockOpClassSupport.cc
+++ b/src/osd/mClockOpClassSupport.cc
@@ -38,7 +38,10 @@ namespace ceph {
            cct->_conf->osd_op_queue_mclock_recov_lim),
       scrub(cct->_conf->osd_op_queue_mclock_scrub_res,
            cct->_conf->osd_op_queue_mclock_scrub_wgt,
-           cct->_conf->osd_op_queue_mclock_scrub_lim)
+           cct->_conf->osd_op_queue_mclock_scrub_lim),
+      peering_event(cct->_conf->osd_op_queue_mclock_peering_event_res,
+                   cct->_conf->osd_op_queue_mclock_peering_event_wgt,
+                   cct->_conf->osd_op_queue_mclock_peering_event_lim)
     {
       constexpr int rep_ops[] = {
        MSG_OSD_REPOP,
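
As a reading aid, a minimal standalone sketch of the QoS triple the new initializer wires up, using the defaults defined in options.cc above. QosTriple is a hypothetical stand-in, not the real crimson::dmclock::ClientInfo declared in the header diff below; the field meanings follow the usual mClock model (minimum reservation, proportional weight, upper limit).

    // Hypothetical stand-in for the dmclock per-class client info; not the
    // real crimson::dmclock::ClientInfo used in the constructor above.
    struct QosTriple {
      double reservation;  // minimum share reserved for the class
      double weight;       // proportional share once reservations are met
      double limit;        // upper bound on the class's share
    };

    // With the defaults added in options.cc, the new peering_event class is
    // effectively constructed as:
    const QosTriple peering_event_defaults{0.0, 1.0, 0.001};
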
diff --git a/src/osd/mClockOpClassSupport.h b/src/osd/mClockOpClassSupport.h
index 8f57c9b0641438efe563423cfa7def118940564b..a33e57a5d6168cea4158f936d59dd9302d980902 100644
--- a/src/osd/mClockOpClassSupport.h
+++ b/src/osd/mClockOpClassSupport.h
@@ -28,7 +28,7 @@ namespace ceph {
     using op_item_type_t = OpQueueItem::OpQueueable::op_type_t;
     
     enum class osd_op_type_t {
-      client_op, osd_rep_op, bg_snaptrim, bg_recovery, bg_scrub
+      client_op, osd_rep_op, bg_snaptrim, bg_recovery, bg_scrub, peering_event
     };
 
     class OpClassClientInfoMgr {
@@ -37,6 +37,7 @@ namespace ceph {
       crimson::dmclock::ClientInfo snaptrim;
       crimson::dmclock::ClientInfo recov;
       crimson::dmclock::ClientInfo scrub;
+      crimson::dmclock::ClientInfo peering_event;
 
       static constexpr std::size_t rep_op_msg_bitset_size = 128;
       std::bitset<rep_op_msg_bitset_size> rep_op_msg_bitset;
@@ -59,6 +60,8 @@ namespace ceph {
          return &recov;
        case osd_op_type_t::bg_scrub:
          return &scrub;
+       case osd_op_type_t::peering_event:
+         return &peering_event;
        default:
          ceph_abort();
          return nullptr;
@@ -77,6 +80,8 @@ namespace ceph {
          return osd_op_type_t::bg_recovery;
        case op_item_type_t::bg_scrub:
          return osd_op_type_t::bg_scrub;
+       case op_item_type_t::peering_event:
+         return osd_op_type_t::peering_event;
        default:
          ceph_abort();
        }
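
To make the two-level mapping above easier to follow, here is a compilable toy that mirrors it end to end: a queued item type is first classified into an osd_op_type_t, which is then resolved to the per-class QoS parameters. classify(), lookup(), and QosTriple are hypothetical stand-ins; op_item_type_t, osd_op_type_t, and the peering_event defaults come from the diff above.

    #include <cassert>

    // Hypothetical simplified enums mirroring the ones in mClockOpClassSupport.h.
    enum class op_item_type_t { client_op, bg_scrub, peering_event };
    enum class osd_op_type_t  { client_op, bg_scrub, peering_event };

    // Hypothetical stand-in for crimson::dmclock::ClientInfo.
    struct QosTriple { double reservation, weight, limit; };

    // Mirrors the op_item_type_t -> osd_op_type_t switch added above.
    osd_op_type_t classify(op_item_type_t t) {
      switch (t) {
      case op_item_type_t::peering_event: return osd_op_type_t::peering_event;
      case op_item_type_t::bg_scrub:      return osd_op_type_t::bg_scrub;
      default:                            return osd_op_type_t::client_op;
      }
    }

    // Mirrors the ClientInfo lookup switch added above; peering_event uses
    // the options.cc defaults (res 0.0, wgt 1.0, lim 0.001), the other
    // classes get placeholder values here.
    const QosTriple* lookup(osd_op_type_t t) {
      static const QosTriple peering_event{0.0, 1.0, 0.001};
      static const QosTriple fallback{0.0, 1.0, 0.0};
      return t == osd_op_type_t::peering_event ? &peering_event : &fallback;
    }

    int main() {
      const QosTriple* qos = lookup(classify(op_item_type_t::peering_event));
      assert(qos->weight == 1.0 && qos->limit == 0.001);
      return 0;
    }
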