OSD: add op_wq suicide timeout
author    Samuel Just <sjust@redhat.com>
          Wed, 6 May 2015 18:02:19 +0000 (11:02 -0700)
committer Abhishek Lekshmanan <abhishek.lekshmanan@ril.com>
          Wed, 15 Jul 2015 16:32:09 +0000 (22:02 +0530)
Signed-off-by: Samuel Just <sjust@redhat.com>
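
This change concerns the OSD's internal watchdog: every work-queue thread
reports progress to a heartbeat map. A thread that stalls past the ordinary
timeout only marks the heartbeat unhealthy; a thread that stalls past the much
larger suicide timeout causes the daemon to abort itself so a supervisor can
restart it. Until now the op work queues derived the suicide grace as a
hardwired ten times the thread timeout; this commit promotes it to its own
option. A minimal self-contained sketch of the two-tier pattern follows
(illustrative only; Watchdog, tick, and check are hypothetical names, not
Ceph's HeartbeatMap API):

#include <cassert>
#include <cstdio>
#include <ctime>

// Illustrative two-tier watchdog; not Ceph's HeartbeatMap implementation.
struct Watchdog {
  time_t grace;          // soft timeout: report the thread as unhealthy
  time_t suicide_grace;  // hard timeout: abort the whole process
  time_t last_progress;

  void tick() { last_progress = time(nullptr); }  // call as work advances

  bool check() {
    time_t stalled = time(nullptr) - last_progress;
    if (stalled > suicide_grace) {
      fprintf(stderr, "worker stalled %lds, hit suicide timeout\n", (long)stalled);
      assert(0 == "hit suicide timeout");  // die; let the init system restart us
    }
    if (stalled > grace) {
      fprintf(stderr, "worker stalled %lds, heartbeat unhealthy\n", (long)stalled);
      return false;  // unhealthy but still alive
    }
    return true;
  }
};
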
src/common/config_opts.h
src/osd/OSD.cc
src/osd/OSD.h

diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index aee900d4bb03feb68fc94887c591d0cea8e36be9..06be0f55038a2dbebaa1b15b9ebca4b8795c3248 100644
@@ -564,6 +564,7 @@ OPTION(osd_recover_clone_overlap_limit, OPT_INT, 10)
 OPTION(osd_backfill_scan_min, OPT_INT, 64)
 OPTION(osd_backfill_scan_max, OPT_INT, 512)
 OPTION(osd_op_thread_timeout, OPT_INT, 15)
+OPTION(osd_op_thread_suicide_timeout, OPT_INT, 150)
 OPTION(osd_recovery_thread_timeout, OPT_INT, 30)
 OPTION(osd_snap_trim_thread_timeout, OPT_INT, 60*60*1)
 OPTION(osd_snap_trim_sleep, OPT_FLOAT, 0)
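
The new option's default of 150 seconds matches the value previously derived
as ten times the 15-second osd_op_thread_timeout, so default behaviour is
unchanged; the two graces can now be tuned independently. Given the usual
mapping of config_opts.h names to ceph.conf keys, an operator override would
look like:

[osd]
    osd op thread timeout = 15
    osd op thread suicide timeout = 150
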
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index c126c3b105d0df775aa473bd3f2701af0b5050c1..3a58b9196bd0d7f9edcd6d189d9f2dbb0466aa30 100644
@@ -1543,9 +1543,17 @@ OSD::OSD(CephContext *cct_, ObjectStore *store_,
   op_tracker(cct, cct->_conf->osd_enable_op_tracker, 
                   cct->_conf->osd_num_op_tracker_shard),
   test_ops_hook(NULL),
-  op_shardedwq(cct->_conf->osd_op_num_shards, this, 
-    cct->_conf->osd_op_thread_timeout, &osd_op_tp),
-  peering_wq(this, cct->_conf->osd_op_thread_timeout, &osd_tp),
+  op_shardedwq(
+    cct->_conf->osd_op_num_shards,
+    this,
+    cct->_conf->osd_op_thread_timeout,
+    cct->_conf->osd_op_thread_suicide_timeout,
+    &osd_op_tp),
+  peering_wq(
+    this,
+    cct->_conf->osd_op_thread_timeout,
+    cct->_conf->osd_op_thread_suicide_timeout,
+    &osd_tp),
   map_lock("OSD::map_lock"),
   pg_map_lock("OSD::pg_map_lock"),
   debug_drop_pg_create_probability(cct->_conf->osd_debug_drop_pg_create_probability),
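
The constructor change is pure plumbing: both configured values are now
threaded into each work queue instead of only the soft timeout. Elsewhere (not
part of this diff) the thread pool re-arms the watchdog around every dequeued
item, so the graces bound the latency of a single work item rather than total
thread lifetime. A hedged sketch of that per-item pattern, reusing the
hypothetical Watchdog above (process_one_item is a made-up stand-in for the
queue's real processing hook):

void process_one_item();  // hypothetical; stands in for the queue's _process()

void worker_loop(Watchdog &wd, time_t timeout, time_t suicide_timeout) {
  wd.grace = timeout;
  wd.suicide_grace = suicide_timeout;
  for (;;) {
    wd.tick();           // progress mark: both deadlines run from here
    process_one_item();  // one slow item is what trips the timeouts
  }
}
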
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index c8e5b8fbe580830600d8690ed3b2c4b0ebf21ee9..ac7f2ce213ea378609c80ff1da8f7a9fe3efe204 100644
@@ -1452,8 +1452,8 @@ private:
     uint32_t num_shards;
 
     public:
-      ShardedOpWQ(uint32_t pnum_shards, OSD *o, time_t ti, ShardedThreadPool* tp):
-        ShardedThreadPool::ShardedWQ < pair <PGRef, OpRequestRef> >(ti, ti*10, tp),
+      ShardedOpWQ(uint32_t pnum_shards, OSD *o, time_t ti, time_t si, ShardedThreadPool* tp):
+        ShardedThreadPool::ShardedWQ < pair <PGRef, OpRequestRef> >(ti, si, tp),
         osd(o), num_shards(pnum_shards) {
         for(uint32_t i = 0; i < num_shards; i++) {
           char lock_name[32] = {0};
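
The interesting part of this hunk is the base-class call: ShardedWQ used to
receive (ti, ti*10, tp), hardwiring the suicide grace to ten times the soft
timeout, and now receives the explicit si. With defaults the two agree, so
only non-default configurations change behaviour:

// Illustrative equivalence check of the defaults (15 s soft, 150 s suicide):
static_assert(15 * 10 == 150, "old derived suicide grace == new default");
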
@@ -1563,9 +1563,9 @@ private:
     list<PG*> peering_queue;
     OSD *osd;
     set<PG*> in_use;
-    PeeringWQ(OSD *o, time_t ti, ThreadPool *tp)
+    PeeringWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
       : ThreadPool::BatchWorkQueue<PG>(
-       "OSD::PeeringWQ", ti, ti*10, tp), osd(o) {}
+       "OSD::PeeringWQ", ti, si, tp), osd(o) {}
 
     void _dequeue(PG *pg) {
       for (list<PG*>::iterator i = peering_queue.begin();
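
PeeringWQ gets the identical treatment through ThreadPool::BatchWorkQueue<PG>:
the hardwired ti*10 becomes the explicit si. When either grace is exceeded at
runtime, the OSD log carries heartbeat_map lines roughly of the form below
(paraphrased from memory, not quoted from this tree):

heartbeat_map is_healthy 'OSD::osd_op_tp thread 0x7f...' had timed out after 15
heartbeat_map is_healthy 'OSD::osd_op_tp thread 0x7f...' had suicide timed out after 150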