From: Samuel Just Date: Wed, 6 May 2015 18:02:19 +0000 (-0700) Subject: OSD: add op_wq suicide timeout X-Git-Tag: v9.0.2~188^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=3f2946aa9f9b4b432e8c018283503153a2b1eddc;p=ceph.git OSD: add op_wq suicide timeout Signed-off-by: Samuel Just --- diff --git a/src/common/config_opts.h b/src/common/config_opts.h index e7c26122918..88c09f004dc 100644 --- a/src/common/config_opts.h +++ b/src/common/config_opts.h @@ -583,6 +583,7 @@ OPTION(osd_recover_clone_overlap_limit, OPT_INT, 10) OPTION(osd_backfill_scan_min, OPT_INT, 64) OPTION(osd_backfill_scan_max, OPT_INT, 512) OPTION(osd_op_thread_timeout, OPT_INT, 15) +OPTION(osd_op_thread_suicide_timeout, OPT_INT, 150) OPTION(osd_recovery_thread_timeout, OPT_INT, 30) OPTION(osd_recovery_thread_suicide_timeout, OPT_INT, 300) OPTION(osd_recovery_sleep, OPT_FLOAT, 0) // seconds to sleep between recovery ops diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index 1c2b2edc04e..2afaae51cb4 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -1501,9 +1501,17 @@ OSD::OSD(CephContext *cct_, ObjectStore *store_, op_tracker(cct, cct->_conf->osd_enable_op_tracker, cct->_conf->osd_num_op_tracker_shard), test_ops_hook(NULL), - op_shardedwq(cct->_conf->osd_op_num_shards, this, - cct->_conf->osd_op_thread_timeout, &osd_op_tp), - peering_wq(this, cct->_conf->osd_op_thread_timeout, &osd_tp), + op_shardedwq( + cct->_conf->osd_op_num_shards, + this, + cct->_conf->osd_op_thread_timeout, + cct->_conf->osd_op_thread_suicide_timeout, + &osd_op_tp), + peering_wq( + this, + cct->_conf->osd_op_thread_timeout, + cct->_conf->osd_op_thread_suicide_timeout, + &osd_tp), map_lock("OSD::map_lock"), pg_map_lock("OSD::pg_map_lock"), debug_drop_pg_create_probability(cct->_conf->osd_debug_drop_pg_create_probability), diff --git a/src/osd/OSD.h b/src/osd/OSD.h index 7c5f05b16c9..56ccb4ffc13 100644 --- a/src/osd/OSD.h +++ b/src/osd/OSD.h @@ -1477,8 +1477,8 @@ private: uint32_t num_shards; 
public: - ShardedOpWQ(uint32_t pnum_shards, OSD *o, time_t ti, ShardedThreadPool* tp): - ShardedThreadPool::ShardedWQ < pair <PGRef, OpRequestRef> >(ti, ti*10, tp), + ShardedOpWQ(uint32_t pnum_shards, OSD *o, time_t ti, time_t si, ShardedThreadPool* tp): + ShardedThreadPool::ShardedWQ < pair <PGRef, OpRequestRef> >(ti, si, tp), osd(o), num_shards(pnum_shards) { for(uint32_t i = 0; i < num_shards; i++) { char lock_name[32] = {0}; @@ -1588,9 +1588,9 @@ private: list<PG*> peering_queue; OSD *osd; set<PG*> in_use; - PeeringWQ(OSD *o, time_t ti, ThreadPool *tp) + PeeringWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp) : ThreadPool::BatchWorkQueue<PG*>( - "OSD::PeeringWQ", ti, ti*10, tp), osd(o) {} + "OSD::PeeringWQ", ti, si, tp), osd(o) {} void _dequeue(PG *pg) { for (list<PG*>::iterator i = peering_queue.begin();