git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
OSD: add config option for peering_wq batch size
author: Samuel Just <sam.just@inktank.com>
Mon, 15 Jul 2013 20:44:20 +0000 (13:44 -0700)
committer: Samuel Just <sam.just@inktank.com>
Mon, 15 Jul 2013 22:15:17 +0000 (15:15 -0700)
Large peering_wq batch sizes may excessively delay
peering messages, resulting in unreasonably long
peering.  Making the batch size configurable (and
lowering the default from 200 to 20) may speed up
peering.

Backport: cuttlefish
Related: #5084
Signed-off-by: Samuel Just <sam.just@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
src/common/config_opts.h
src/osd/OSD.cc
src/osd/OSD.h

index a959b4db401445279fd75d295e92e26d01b13a69..485a683692b47e4b843d16877d5aff30830d13cc 100644 (file)
@@ -393,6 +393,7 @@ OPTION(osd_map_cache_size, OPT_INT, 500)
 OPTION(osd_map_message_max, OPT_INT, 100)  // max maps per MOSDMap message
 OPTION(osd_map_share_max_epochs, OPT_INT, 100)  // cap on # of inc maps we send to peers, clients
 OPTION(osd_op_threads, OPT_INT, 2)    // 0 == no threading
+OPTION(osd_peering_wq_batch_size, OPT_U64, 20)
 OPTION(osd_op_pq_max_tokens_per_priority, OPT_U64, 4194304)
 OPTION(osd_op_pq_min_cost, OPT_U64, 65536)
 OPTION(osd_disk_threads, OPT_INT, 1)
index f2cb90756642bb1adde94fe0465d233f2ede06be..e0f00b92536ecdacbeeb5aedf1bdb2bccb0dbb45 100644 (file)
@@ -918,7 +918,7 @@ OSD::OSD(int id, Messenger *internal_messenger, Messenger *external_messenger,
   finished_lock("OSD::finished_lock"),
   test_ops_hook(NULL),
   op_wq(this, g_conf->osd_op_thread_timeout, &op_tp),
-  peering_wq(this, g_conf->osd_op_thread_timeout, &op_tp, 200),
+  peering_wq(this, g_conf->osd_op_thread_timeout, &op_tp),
   map_lock("OSD::map_lock"),
   peer_map_epoch_lock("OSD::peer_map_epoch_lock"),
   debug_drop_pg_create_probability(g_conf->osd_debug_drop_pg_create_probability),
index 5114c99b66a39bd2d5ae8ec745055f994b7b9e6c..cbd61b0a7bcbcffdba394967a6491674e67293b7 100644 (file)
@@ -901,10 +901,9 @@ private:
     list<PG*> peering_queue;
     OSD *osd;
     set<PG*> in_use;
-    const size_t batch_size;
-    PeeringWQ(OSD *o, time_t ti, ThreadPool *tp, size_t batch_size)
+    PeeringWQ(OSD *o, time_t ti, ThreadPool *tp)
       : ThreadPool::BatchWorkQueue<PG>(
-       "OSD::PeeringWQ", ti, ti*10, tp), osd(o), batch_size(batch_size) {}
+       "OSD::PeeringWQ", ti, ti*10, tp), osd(o) {}
 
     void _dequeue(PG *pg) {
       for (list<PG*>::iterator i = peering_queue.begin();
@@ -929,7 +928,8 @@ private:
     void _dequeue(list<PG*> *out) {
       set<PG*> got;
       for (list<PG*>::iterator i = peering_queue.begin();
-          i != peering_queue.end() && out->size() < batch_size;
+          i != peering_queue.end() &&
+            out->size() < g_conf->osd_peering_wq_batch_size;
           ) {
        if (in_use.count(*i)) {
          ++i;