OSD: add config option for peering_wq batch size
author     Samuel Just <sam.just@inktank.com>
           Mon, 15 Jul 2013 20:44:20 +0000 (13:44 -0700)
committer  Sage Weil <sage@inktank.com>
           Wed, 24 Jul 2013 23:20:34 +0000 (16:20 -0700)
Large peering_wq batch sizes may excessively delay
peering messages, resulting in unreasonably long
peering.  Making the batch size configurable (default
20, down from the previously hard-coded 200) may speed
up peering.

Backport: cuttlefish
Related: #5084
Signed-off-by: Samuel Just <sam.just@inktank.com>
Reviewed-by: Sage Weil <sage@inktank.com>
(cherry picked from commit 39e5a2a406b77fa82e9a78c267b679d49927e3c3)

src/common/config_opts.h
src/osd/OSD.cc
src/osd/OSD.h

diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index c31282c1997e26228192bc4d0f0c264585a68b6d..4fe4277a50ddeeb16f79e2fc8fb64cb163763dfe 100644
--- a/src/common/config_opts.h
+++ b/src/common/config_opts.h
@@ -388,6 +388,7 @@ OPTION(osd_map_cache_size, OPT_INT, 500)
 OPTION(osd_map_message_max, OPT_INT, 100)  // max maps per MOSDMap message
 OPTION(osd_map_share_max_epochs, OPT_INT, 100)  // cap on # of inc maps we send to peers, clients
 OPTION(osd_op_threads, OPT_INT, 2)    // 0 == no threading
+OPTION(osd_peering_wq_batch_size, OPT_U64, 20)
 OPTION(osd_op_pq_max_tokens_per_priority, OPT_U64, 4194304)
 OPTION(osd_op_pq_min_cost, OPT_U64, 65536)
 OPTION(osd_disk_threads, OPT_INT, 1)
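For context, config_opts.h in this era is an X-macro style header: each includer defines OPTION() to mean something different (declare a member, set a default, register the name), so the single added line above is all that is needed to introduce osd_peering_wq_batch_size with a default of 20. Below is a simplified, self-contained sketch of that pattern; the names (CONFIG_OPTS, md_config_sketch) are hypothetical and the real md_config_t machinery is more involved.

// Simplified X-macro sketch; hypothetical names, not the real md_config_t.
// In Ceph the option list lives in config_opts.h and is #included rather
// than kept in a macro, but the multi-pass expansion idea is the same.
#include <cstdint>
#include <iostream>

#define OPT_INT int
#define OPT_U64 uint64_t

#define CONFIG_OPTS \
  OPTION(osd_op_threads, OPT_INT, 2) \
  OPTION(osd_peering_wq_batch_size, OPT_U64, 20)

struct md_config_sketch {
  // Pass 1: declare one member per option.
#define OPTION(name, type, def) type name;
  CONFIG_OPTS
#undef OPTION

  md_config_sketch() {
    // Pass 2: initialize every member to its default.
#define OPTION(name, type, def) name = def;
    CONFIG_OPTS
#undef OPTION
  }
};

int main() {
  md_config_sketch conf;
  std::cout << conf.osd_peering_wq_batch_size << "\n";  // prints 20
}
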
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index 825a2fea99fcf0ebb4d5807e52685c33bd4b343b..6202bac3461d9d07db4791f68b75b27855620a8f 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -908,7 +908,7 @@ OSD::OSD(int id, Messenger *internal_messenger, Messenger *external_messenger,
   finished_lock("OSD::finished_lock"),
   test_ops_hook(NULL),
   op_wq(this, g_conf->osd_op_thread_timeout, &op_tp),
-  peering_wq(this, g_conf->osd_op_thread_timeout, &op_tp, 200),
+  peering_wq(this, g_conf->osd_op_thread_timeout, &op_tp),
   map_lock("OSD::map_lock"),
   peer_map_epoch_lock("OSD::peer_map_epoch_lock"),
   debug_drop_pg_create_probability(g_conf->osd_debug_drop_pg_create_probability),
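Dropping the hard-coded 200 from the constructor also changes when the value is read: the batch size is no longer frozen at OSD construction but looked up from g_conf on each dequeue pass, so the option can in principle be retuned on a running OSD through the usual config injection paths. A minimal sketch of that distinction, with hypothetical names:

// Hypothetical sketch of "ctor-time constant" vs "live config read"; not Ceph code.
#include <cstddef>
#include <iostream>

struct ConfigSketch { std::size_t batch_size = 20; };  // mutable at runtime
ConfigSketch g_conf_sketch;

// Before this commit: the limit is captured once, at construction.
struct FixedBatchWQ {
  const std::size_t batch_size;
  explicit FixedBatchWQ(std::size_t bs) : batch_size(bs) {}
  std::size_t limit() const { return batch_size; }        // always the ctor value
};

// After this commit: the limit follows the current config value.
struct TunableBatchWQ {
  std::size_t limit() const { return g_conf_sketch.batch_size; }
};

int main() {
  FixedBatchWQ fixed(200);
  TunableBatchWQ tunable;
  g_conf_sketch.batch_size = 5;                           // e.g. injected at runtime
  std::cout << fixed.limit() << " vs " << tunable.limit() << "\n";  // 200 vs 5
}
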
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index ac2c634c1f2780c90a9cd9d747972770788e2534..2db9d3b8c44df25c1807aa3d3ca208b92aa842c1 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -798,10 +798,9 @@ private:
     list<PG*> peering_queue;
     OSD *osd;
     set<PG*> in_use;
-    const size_t batch_size;
-    PeeringWQ(OSD *o, time_t ti, ThreadPool *tp, size_t batch_size)
+    PeeringWQ(OSD *o, time_t ti, ThreadPool *tp)
       : ThreadPool::BatchWorkQueue<PG>(
-       "OSD::PeeringWQ", ti, ti*10, tp), osd(o), batch_size(batch_size) {}
+       "OSD::PeeringWQ", ti, ti*10, tp), osd(o) {}
 
     void _dequeue(PG *pg) {
       for (list<PG*>::iterator i = peering_queue.begin();
@@ -826,7 +825,8 @@ private:
     void _dequeue(list<PG*> *out) {
       set<PG*> got;
       for (list<PG*>::iterator i = peering_queue.begin();
-          i != peering_queue.end() && out->size() < batch_size;
+          i != peering_queue.end() &&
+            out->size() < g_conf->osd_peering_wq_batch_size;
           ) {
        if (in_use.count(*i)) {
          ++i;
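Putting the pieces together, _dequeue now pulls at most g_conf->osd_peering_wq_batch_size PGs per pass, skipping any PG already claimed by another worker thread. The standalone sketch below uses simplified types and hypothetical names, and assumes the untouched tail of the loop (not shown in this hunk) keeps its pre-change behaviour of moving a PG into the output list and recording it as in use.

// Standalone sketch of a batched dequeue with an in-use set; not the actual Ceph classes.
#include <cstddef>
#include <iostream>
#include <list>
#include <set>

struct PG { int id; };

struct PeeringQueueSketch {
  std::list<PG*> peering_queue;    // PGs waiting for peering work
  std::set<PG*>  in_use;           // PGs currently held by a worker thread
  std::size_t    batch_size = 20;  // stand-in for g_conf->osd_peering_wq_batch_size

  // Move up to batch_size PGs that are not already in use into *out,
  // and mark them in_use so a concurrent pass cannot grab them again.
  void dequeue_batch(std::list<PG*> *out) {
    std::set<PG*> got;
    for (auto i = peering_queue.begin();
         i != peering_queue.end() && out->size() < batch_size;
         /* advanced in the body */) {
      if (in_use.count(*i)) {
        ++i;                        // busy elsewhere, leave it queued
      } else {
        out->push_back(*i);
        got.insert(*i);
        i = peering_queue.erase(i);
      }
    }
    in_use.insert(got.begin(), got.end());
  }
};

int main() {
  PeeringQueueSketch q;
  PG pgs[3] = {{1}, {2}, {3}};
  for (auto &pg : pgs) q.peering_queue.push_back(&pg);
  q.batch_size = 2;

  std::list<PG*> batch;
  q.dequeue_batch(&batch);
  std::cout << "dequeued " << batch.size() << " PGs\n";  // prints 2
}
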