threads: add thread names (5882/head)
author Igor Podoski <igor.podoski@ts.fujitsu.com>
Tue, 22 Dec 2015 08:54:42 +0000 (09:54 +0100)
committer Igor Podoski <igor.podoski@ts.fujitsu.com>
Tue, 5 Jan 2016 14:15:36 +0000 (15:15 +0100)
Adding names to threads simplifies real-time CPU usage tracking,
e.g. top -H -p <OSD_PID>

This commit changes the Thread::create() method to require callers to pass a thread name.

Signed-off-by: Igor Podoski <igor.podoski@ts.fujitsu.com>
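
For context, the mechanism behind the new Thread::create(name) signature is pthread_setname_np(). The following standalone sketch is illustrative only (it is not part of this commit) and assumes a Linux/glibc target, where thread names are limited to 15 characters plus the terminating NUL, the same limit the strlen(name) < 16 assertion in Thread::create() enforces; compile with g++ -pthread.

// Illustrative sketch, not from this commit. Assumes Linux/glibc, where
// pthread_setname_np(pthread_t, const char *) is available and names are
// limited to 15 characters plus the terminating NUL.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <pthread.h>
#include <unistd.h>
#include <cassert>
#include <cstring>

static void *entry(void *) {
  sleep(60);                            // keep the thread alive so its name can be observed
  return nullptr;
}

int main() {
  pthread_t tid;
  pthread_create(&tid, nullptr, entry, nullptr);
  const char *name = "osd_srv_agent";   // same style as the names added by this commit
  assert(std::strlen(name) < 16);       // mirrors the check added to Thread::create()
  pthread_setname_np(tid, name);        // shows up in top -H and /proc/<pid>/task/<tid>/comm
  pthread_join(tid, nullptr);
  return 0;
}

With names set this way, top -H -p <OSD_PID> (or cat /proc/<OSD_PID>/task/*/comm) lists each thread under its own name, e.g. tp_osd_tp or osd_srv_heartbt, instead of a column of identical ceph-osd entries, which is the tracking use case described above.
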
63 files changed:
src/ceph_fuse.cc
src/client/SyntheticClient.cc
src/common/Finisher.cc
src/common/Finisher.h
src/common/OutputDataSocket.cc
src/common/Thread.cc
src/common/Thread.h
src/common/Timer.cc
src/common/WorkQueue.cc
src/common/WorkQueue.h
src/common/admin_socket.cc
src/common/ceph_context.cc
src/common/obj_bencher.cc
src/compressor/AsyncCompressor.cc
src/global/signal_handler.cc
src/kv/LevelDBStore.cc
src/kv/RocksDBStore.cc
src/librbd/ImageCtx.cc
src/log/Log.cc
src/mds/MDLog.cc
src/mds/MDSRank.cc
src/mon/MonitorDBStore.h
src/msg/async/AsyncMessenger.cc
src/msg/simple/Accepter.cc
src/msg/simple/DispatchQueue.cc
src/msg/simple/Pipe.cc
src/msg/simple/SimpleMessenger.cc
src/os/bluestore/BlockDevice.cc
src/os/bluestore/BlueStore.cc
src/os/filestore/FileJournal.cc
src/os/filestore/FileStore.cc
src/os/filestore/JournalingObjectStore.h
src/os/filestore/WBThrottle.cc
src/os/keyvaluestore/KeyValueStore.cc
src/os/kstore/KStore.cc
src/osd/OSD.cc
src/osdc/ObjectCacher.h
src/rgw/rgw_bucket.h
src/rgw/rgw_gc.cc
src/rgw/rgw_main.cc
src/rgw/rgw_object_expirer_core.cc
src/rgw/rgw_quota.cc
src/rgw/rgw_swift.cc
src/test/bench/dumb_backend.h
src/test/bench/small_io_bench_fs.cc
src/test/bench/tp_bench.cc
src/test/bench_log.cc
src/test/common/Throttle.cc
src/test/common/test_shared_cache.cc
src/test/common/test_sharedptr_registry.cc
src/test/erasure-code/TestErasureCodePlugin.cc
src/test/librbd/test_librbd.cc
src/test/msgr/perf_msgr_client.cc
src/test/msgr/perf_msgr_server.cc
src/test/msgr/test_async_driver.cc
src/test/osd/types.cc
src/test/perf_local.cc
src/test/test_snap_mapper.cc
src/test/test_stress_watch.cc
src/test/test_trans.cc
src/test/test_workqueue.cc
src/tools/cephfs/MDSUtility.cc
src/tools/rbd_nbd/rbd-nbd.cc

index e593d0b2c91507b47b4ec09e53a8043980ec6b5c..70d8845f6166045146fadb843901ce3aa4a10f5f 100644 (file)
@@ -244,7 +244,7 @@ int main(int argc, const char **argv, const char *envp[]) {
 
     cerr << "ceph-fuse[" << getpid() << "]: starting fuse" << std::endl;
     tester.init(cfuse, client);
-    tester.create();
+    tester.create("tester");
     r = cfuse->loop();
     tester.join(&tester_rp);
     tester_r = static_cast<int>(reinterpret_cast<uint64_t>(tester_rp));
index 5371ac01ee24a6bb1127a1fc391ca545bc969d09..d2ab7ecc576fc1eac27448918faa40cd93536e20 100644 (file)
@@ -934,6 +934,7 @@ int SyntheticClient::start_thread()
 
   pthread_create(&thread_id, NULL, synthetic_client_thread_entry, this);
   assert(thread_id);
+  pthread_setname_np(thread_id, "client");
   return 0;
 }
 
index 9b39dc888e3ce8ca661b688ad3ecfeff019925e1..73900ddc20dbda1f33c3641c833a4f4417fd4dc0 100644 (file)
@@ -12,7 +12,7 @@
 void Finisher::start()
 {
   ldout(cct, 10) << __func__ << dendl;
-  finisher_thread.create();
+  finisher_thread.create(thread_name.c_str());
 }
 
 void Finisher::stop()
index 95db977a0ea2d460a36282cedf188f7cdf35221f..26a41b7461e3594bfafe238a979838517858ada1 100644 (file)
@@ -48,6 +48,8 @@ class Finisher {
   /// should be completed in that place instead.
   vector<Context*> finisher_queue;
 
+  string thread_name;
+
   /// Queue for contexts for which the complete function will be called
   /// with a parameter other than 0.
   list<pair<Context*,int> > finisher_queue_rval;
@@ -135,14 +137,14 @@ class Finisher {
   Finisher(CephContext *cct_) :
     cct(cct_), finisher_lock("Finisher::finisher_lock"),
     finisher_stop(false), finisher_running(false),
-    logger(0),
+    thread_name("fn_anonymous"), logger(0),
     finisher_thread(this) {}
 
   /// Construct a named Finisher that logs its queue length.
-  Finisher(CephContext *cct_, string name) :
+  Finisher(CephContext *cct_, string name, string tn) :
     cct(cct_), finisher_lock("Finisher::finisher_lock"),
     finisher_stop(false), finisher_running(false),
-    logger(0),
+    thread_name(tn), logger(0),
     finisher_thread(this) {
     PerfCountersBuilder b(cct, string("finisher-") + name,
                          l_finisher_first, l_finisher_last);
index e43f5cf95cb894216c66ae6b4446ef9df617df29..a1c368b40aad51477c9203e4f8537cbc4639e71b 100644 (file)
@@ -371,7 +371,7 @@ bool OutputDataSocket::init(const std::string &path)
   m_shutdown_rd_fd = pipe_rd;
   m_shutdown_wr_fd = pipe_wr;
   m_path = path;
-  create();
+  create("out_data_socket");
   add_cleanup_file(m_path.c_str());
   return true;
 }
index b917838f45bb538caa915762d32ef813b8e54e14..4d40016a3c998a5c10e6b97458d57563042575e4 100644 (file)
@@ -143,7 +143,7 @@ int Thread::try_create(size_t stacksize)
   return r;
 }
 
-void Thread::create(size_t stacksize)
+void Thread::create(const char *name, size_t stacksize)
 {
   int ret = try_create(stacksize);
   if (ret != 0) {
@@ -152,6 +152,9 @@ void Thread::create(size_t stacksize)
             "failed with error %d", ret);
     dout_emergency(buf);
     assert(ret == 0);
+  } else if (thread_id > 0) {
+      assert(strlen(name) < 16);
+      pthread_setname_np(thread_id, name);
   }
 }
 
index 98b76fde3b27ec22518b261c271d165b05681928..deced8f46cc320939109564bf30053f0bda5b79e 100644 (file)
@@ -48,7 +48,7 @@ class Thread {
   bool am_self() const;
   int kill(int signal);
   int try_create(size_t stacksize);
-  void create(size_t stacksize = 0);
+  void create(const char *name, size_t stacksize = 0);
   int join(void **prval = 0);
   int detach();
   int set_ioprio(int cls, int prio);
index e49a70093c70c2b312c72866d239a07d67588433..1160541acae32df0193bd54486a7daa8341fb17e 100644 (file)
@@ -62,7 +62,7 @@ void SafeTimer::init()
 {
   ldout(cct,10) << "init" << dendl;
   thread = new SafeTimerThread(this);
-  thread->create();
+  thread->create("safe_timer");
 }
 
 void SafeTimer::shutdown()
index 00666faca334365726847a8f2899a09de68c94a6..9d99fd1cbfd7ca7b4b827373f10fc9bb64099b7b 100644 (file)
@@ -27,8 +27,8 @@
 #define dout_prefix *_dout << name << " "
 
 
-ThreadPool::ThreadPool(CephContext *cct_, string nm, int n, const char *option)
-  : cct(cct_), name(nm),
+ThreadPool::ThreadPool(CephContext *cct_, string nm, string tn, int n, const char *option)
+  : cct(cct_), name(nm), thread_name(tn),
     lockname(nm + "::lock"),
     _lock(lockname.c_str()),  // this should be safe due to declaration order
     _stop(false),
@@ -169,7 +169,7 @@ void ThreadPool::start_threads()
     if (r < 0)
       lderr(cct) << " set_ioprio got " << cpp_strerror(r) << dendl;
 
-    wt->create();
+    wt->create(thread_name.c_str());
   }
 }
 
@@ -286,8 +286,8 @@ void ThreadPool::set_ioprio(int cls, int priority)
   }
 }
 
-ShardedThreadPool::ShardedThreadPool(CephContext *pcct_, string nm, 
-  uint32_t pnum_threads): cct(pcct_),name(nm),lockname(nm + "::lock"), 
+ShardedThreadPool::ShardedThreadPool(CephContext *pcct_, string nm, string tn,
+  uint32_t pnum_threads): cct(pcct_),name(nm),thread_name(tn),lockname(nm + "::lock"),
   shardedpool_lock(lockname.c_str()),num_threads(pnum_threads),stop_threads(0), 
   pause_threads(0),drain_threads(0), num_paused(0), num_drained(0), wq(NULL) {}
 
@@ -356,7 +356,7 @@ void ShardedThreadPool::start_threads()
     WorkThreadSharded *wt = new WorkThreadSharded(this, thread_index);
     ldout(cct, 10) << "start_threads creating and starting " << wt << dendl;
     threads_shardedpool.push_back(wt);
-    wt->create();
+    wt->create(thread_name.c_str());
     thread_index++;
   }
 }
index a6e7972408f62a476649a35c2362373f4918b425..a9a753378b43028074d0ab3fe01c90402371847d 100644 (file)
@@ -28,6 +28,7 @@ class CephContext;
 class ThreadPool : public md_config_obs_t {
   CephContext *cct;
   string name;
+  string thread_name;
   string lockname;
   Mutex _lock;
   Cond _cond;
@@ -459,7 +460,7 @@ private:
   void worker(WorkThread *wt);
 
 public:
-  ThreadPool(CephContext *cct_, string nm, int n, const char *option = NULL);
+  ThreadPool(CephContext *cct_, string nm, string tn, int n, const char *option = NULL);
   virtual ~ThreadPool();
 
   /// return number of threads currently running
@@ -618,6 +619,7 @@ class ShardedThreadPool {
 
   CephContext *cct;
   string name;
+  string thread_name;
   string lockname;
   Mutex shardedpool_lock;
   Cond shardedpool_cond;
@@ -698,7 +700,7 @@ private:
 
 public:
 
-  ShardedThreadPool(CephContext *cct_, string nm, uint32_t pnum_threads);
+  ShardedThreadPool(CephContext *cct_, string nm, string tn, uint32_t pnum_threads);
 
   ~ShardedThreadPool(){};
 
index 07a2246e9e552479eafe60e69b0ca214fee052de..1d51ef54227a40e9882113a1414c47245cb8dde0 100644 (file)
@@ -556,7 +556,7 @@ bool AdminSocket::init(const std::string &path)
   register_command("get_command_descriptions", "get_command_descriptions",
                   m_getdescs_hook, "list available commands");
 
-  create();
+  create("admin_socket");
   add_cleanup_file(m_path.c_str());
   return true;
 }
index 4ead871e9b41f7394cf73a8ddab890c9a47aa0d4..d37f631e9876df312ee5b6510b7d273e726feee3 100644 (file)
@@ -547,7 +547,7 @@ void CephContext::start_service_thread()
     return;
   }
   _service_thread = new CephContextServiceThread(this);
-  _service_thread->create();
+  _service_thread->create("service");
   ceph_spin_unlock(&_service_thread_lock);
 
   // make logs flush on_exit()
index 9ecfbec4d31d42897ddda61f3bdab8379509f213..525a54264121400118cd8a40176a8a65ea734bd9 100644 (file)
@@ -390,6 +390,7 @@ int ObjBencher::write_bench(int secondsToRun,
   pthread_t print_thread;
 
   pthread_create(&print_thread, NULL, ObjBencher::status_printer, (void *)this);
+  pthread_setname_np(print_thread, "write_stat");
   lock.Lock();
   data.finished = 0;
   data.start_time = ceph_clock_now(cct);
@@ -610,6 +611,7 @@ int ObjBencher::seq_read_bench(int seconds_to_run, int num_objects, int concurre
 
   pthread_t print_thread;
   pthread_create(&print_thread, NULL, status_printer, (void *)this);
+  pthread_setname_np(print_thread, "seq_read_stat");
 
   utime_t finish_time = data.start_time + time_to_run;
   //start initial reads
@@ -830,6 +832,7 @@ int ObjBencher::rand_read_bench(int seconds_to_run, int num_objects, int concurr
 
   pthread_t print_thread;
   pthread_create(&print_thread, NULL, status_printer, (void *)this);
+  pthread_setname_np(print_thread, "rand_read_stat");
 
   utime_t finish_time = data.start_time + time_to_run;
   //start initial reads
index 564d61473305fe0a26ef26afef5b5ee373d3b943..cdd666701e2a7d59ca7585f7c5fad2c949b65133 100644 (file)
@@ -23,7 +23,7 @@
 AsyncCompressor::AsyncCompressor(CephContext *c):
   compressor(Compressor::create(c->_conf->async_compressor_type)), cct(c),
   job_id(0),
-  compress_tp(g_ceph_context, "AsyncCompressor::compressor_tp", cct->_conf->async_compressor_threads, "async_compressor_threads"),
+  compress_tp(g_ceph_context, "AsyncCompressor::compressor_tp", "tp_async_compr", cct->_conf->async_compressor_threads, "async_compressor_threads"),
   job_lock("AsyncCompressor::job_lock"),
   compress_wq(this, c->_conf->async_compressor_thread_timeout, c->_conf->async_compressor_thread_suicide_timeout, &compress_tp) {
 }
index 120767650eb386dbd89803bf4d39af47c260b53e..6254646865d431d9d2d0fc9545a4bfbf9cf10b7c 100644 (file)
@@ -192,7 +192,7 @@ struct SignalHandler : public Thread {
     assert(r == 0);
 
     // create thread
-    create();
+    create("signal_handler");
   }
 
   ~SignalHandler() {
index 481fbf16d044b4648f4b873c447fa67de9939641..a3d4617c65dba646ebd7377e674218699c859fcb 100644 (file)
@@ -378,6 +378,6 @@ void LevelDBStore::compact_range_async(const string& start, const string& end)
   }
   compact_queue_cond.Signal();
   if (!compact_thread.is_started()) {
-    compact_thread.create();
+    compact_thread.create("levdbst_compact");
   }
 }
index d03d913b3204e51d1107230d3e54c4d0f92e7209..93c8ee9bbe40ab46947c09a510cc6530bf5d924c 100644 (file)
@@ -514,7 +514,7 @@ void RocksDBStore::compact_range_async(const string& start, const string& end)
   }
   compact_queue_cond.Signal();
   if (!compact_thread.is_started()) {
-    compact_thread.create();
+    compact_thread.create("rstore_compact");
   }
 }
 bool RocksDBStore::check_omap_dir(string &omap_dir)
index ef15e2df602dde81de16b50d9c93ba3c49c14c3e..84d74d2772cd23f67c85e0f71afc8fa54811cd1e 100644 (file)
@@ -47,7 +47,7 @@ namespace {
 class ThreadPoolSingleton : public ThreadPool {
 public:
   ThreadPoolSingleton(CephContext *cct)
-    : ThreadPool(cct, "librbd::thread_pool", cct->_conf->rbd_op_threads,
+    : ThreadPool(cct, "librbd::thread_pool", "tp_librbd", cct->_conf->rbd_op_threads,
                  "rbd_op_threads") {
     start();
   }
index 860b2c7cdb96f8e53734538518bf69f054c3fe93..8be66115c9a2ac6bcc84087d94c5f1d886efe358 100644 (file)
@@ -343,7 +343,7 @@ void Log::start()
   pthread_mutex_lock(&m_queue_mutex);
   m_stop = false;
   pthread_mutex_unlock(&m_queue_mutex);
-  create();
+  create("log");
 }
 
 void Log::stop()
index 43cdc48503548bb4b9a5250d04cea635cf5886f2..c8817b5eb4a54831c66f9e3aec9a732d8e662f09 100644 (file)
@@ -169,7 +169,7 @@ void MDLog::create(MDSInternalContextBase *c)
   logger->set(l_mdl_expos, journaler->get_expire_pos());
   logger->set(l_mdl_wrpos, journaler->get_write_pos());
 
-  submit_thread.create();
+  submit_thread.create("md_submit");
 }
 
 void MDLog::open(MDSInternalContextBase *c)
@@ -177,9 +177,9 @@ void MDLog::open(MDSInternalContextBase *c)
   dout(5) << "open discovering log bounds" << dendl;
 
   recovery_thread.set_completion(c);
-  recovery_thread.create();
+  recovery_thread.create("md_recov_open");
 
-  submit_thread.create();
+  submit_thread.create("md_submit");
   // either append() or replay() will follow.
 }
 
@@ -216,7 +216,7 @@ void MDLog::reopen(MDSInternalContextBase *c)
   journaler = NULL;
 
   recovery_thread.set_completion(new C_ReopenComplete(this, c));
-  recovery_thread.create();
+  recovery_thread.create("md_recov_reopen");
 }
 
 void MDLog::append()
@@ -842,7 +842,7 @@ void MDLog::replay(MDSInternalContextBase *c)
   assert(num_events == 0 || already_replayed);
   already_replayed = true;
 
-  replay_thread.create();
+  replay_thread.create("md_log_replay");
 }
 
 
index fcb8efdda4bd765b01729f7510bfca972e44fe2a..d284296b313f219d7941cc8e460efa58af707bd4 100644 (file)
@@ -142,7 +142,7 @@ void MDSRankDispatcher::init()
   // who is interested in it.
   handle_osd_map();
 
-  progress_thread.create();
+  progress_thread.create("mds_rank_progr");
 
   finisher->start();
 }
index 81016417ec93437a0d33a0493bd8e29741865a4a..6d44e6adc06d682eccb31cd74a62df4481cbe698 100644 (file)
@@ -630,7 +630,7 @@ class MonitorDBStore
       do_dump(false),
       dump_fd_binary(-1),
       dump_fmt(true),
-      io_work(g_ceph_context, "monstore"),
+      io_work(g_ceph_context, "monstore", "fn_monstore"),
       is_open(false) {
     string::const_reverse_iterator rit;
     int pos = 0;
index 524c6015e249fa7eaa3613c951a669d8a5c73020..258e8dd5dc4a0ae1fef361dcb1fac94b1c2d5e94 100644 (file)
@@ -344,7 +344,7 @@ void WorkerPool::start()
 {
   if (!started) {
     for (uint64_t i = 0; i < workers.size(); ++i) {
-      workers[i]->create();
+      workers[i]->create("ms_async_worker");
     }
     started = true;
   }
index a8aa4955a98ffc661c845c962acdd20786b311fe..3d6f1f7a5e7990ae264289a74452446ba66aec32 100644 (file)
@@ -208,7 +208,7 @@ int Accepter::start()
   ldout(msgr->cct,1) << "accepter.start" << dendl;
 
   // start thread
-  create();
+  create("ms_accepter");
 
   return 0;
 }
index 500239f29ab1b832cb807436c9966862400f9016..6f7495ef520151d31e48a04e48ba338429bf40c1 100644 (file)
@@ -217,8 +217,8 @@ void DispatchQueue::start()
 {
   assert(!stop);
   assert(!dispatch_thread.is_started());
-  dispatch_thread.create();
-  local_delivery_thread.create();
+  dispatch_thread.create("ms_dispatch");
+  local_delivery_thread.create("ms_local");
 }
 
 void DispatchQueue::wait()
index 33884c81cda4e68d5392234d48d19bc3e4556ea2..9658ffba836da931ae247a203f7d044006b878c7 100644 (file)
@@ -135,7 +135,7 @@ void Pipe::start_reader()
     reader_needs_join = false;
   }
   reader_running = true;
-  reader_thread.create(msgr->cct->_conf->ms_rwthread_stack_bytes);
+  reader_thread.create("ms_pipe_read", msgr->cct->_conf->ms_rwthread_stack_bytes);
 }
 
 void Pipe::maybe_start_delay_thread()
@@ -144,7 +144,7 @@ void Pipe::maybe_start_delay_thread()
       msgr->cct->_conf->ms_inject_delay_type.find(ceph_entity_type_name(connection_state->peer_type)) != string::npos) {
     lsubdout(msgr->cct, ms, 1) << "setting up a delay queue on Pipe " << this << dendl;
     delay_thread = new DelayedDelivery(this);
-    delay_thread->create();
+    delay_thread->create("ms_pipe_delay");
   }
 }
 
@@ -153,7 +153,7 @@ void Pipe::start_writer()
   assert(pipe_lock.is_locked());
   assert(!writer_running);
   writer_running = true;
-  writer_thread.create(msgr->cct->_conf->ms_rwthread_stack_bytes);
+  writer_thread.create("ms_pipe_write", msgr->cct->_conf->ms_rwthread_stack_bytes);
 }
 
 void Pipe::join_reader()
index fdb7278292d48a631c508d2966746e233fb455ab..9d5f3c9c0cf887e51f51cce1491a7f22ed6c5dad 100644 (file)
@@ -329,7 +329,7 @@ int SimpleMessenger::start()
   lock.Unlock();
 
   reaper_started = true;
-  reaper_thread.create();
+  reaper_thread.create("ms_reaper");
   return 0;
 }
 
index 073a02cdc479950bb4d732758edb754a65660110..80d828208ff2845d6dc16c17a8c933534c541c66 100644 (file)
@@ -194,7 +194,7 @@ int BlockDevice::_aio_start()
       derr << __func__ << " failed: " << cpp_strerror(r) << dendl;
       return r;
     }
-    aio_thread.create();
+    aio_thread.create("bstore_aio");
   }
   return 0;
 }
index bb033fd9496ba74a1f52fd20aff74df57a3405d4..91cf311f5f8e2f96d74389920465b329a8c43820 100644 (file)
@@ -771,6 +771,7 @@ BlueStore::BlueStore(CephContext *cct, const string& path)
     wal_seq(0),
     wal_tp(cct,
           "BlueStore::wal_tp",
+           "tp_wal",
           cct->_conf->bluestore_wal_threads,
           "bluestore_wal_threads"),
     wal_wq(this,
@@ -1690,7 +1691,7 @@ int BlueStore::mount()
 
   finisher.start();
   wal_tp.start();
-  kv_sync_thread.create();
+  kv_sync_thread.create("bstore_kv_sync");
 
   r = _wal_replay();
   if (r < 0)
index f9e0cc5131b0154e74f3937afe77446826ef3358..fa4751bf66e165af8eba80d6a270099cbd11d434 100644 (file)
@@ -691,10 +691,10 @@ void FileJournal::start_writer()
 {
   write_stop = false;
   aio_stop = false;
-  write_thread.create();
+  write_thread.create("journal_write");
 #ifdef HAVE_LIBAIO
   if (aio)
-    write_finish_thread.create();
+    write_finish_thread.create("journal_wrt_fin");
 #endif
 }
 
index de2df61a0c4cf6a98fe3509429728c503c27b663..fa3db26f3ce21f24cc43cda16c10806a97b936f0 100644 (file)
@@ -518,11 +518,11 @@ FileStore::FileStore(const std::string &base, const std::string &jdev, osflagbit
   fdcache(g_ceph_context),
   wbthrottle(g_ceph_context),
   next_osr_id(0),
-  throttle_ops(g_ceph_context, "filestore_ops",g_conf->filestore_queue_max_ops),
-  throttle_bytes(g_ceph_context, "filestore_bytes",g_conf->filestore_queue_max_bytes),
+  throttle_ops(g_ceph_context, "filestore_ops", g_conf->filestore_queue_max_ops),
+  throttle_bytes(g_ceph_context, "filestore_bytes", g_conf->filestore_queue_max_bytes),
   m_ondisk_finisher_num(g_conf->filestore_ondisk_finisher_threads),
   m_apply_finisher_num(g_conf->filestore_apply_finisher_threads),
-  op_tp(g_ceph_context, "FileStore::op_tp", g_conf->filestore_op_threads, "filestore_op_threads"),
+  op_tp(g_ceph_context, "FileStore::op_tp", "tp_fstore_op", g_conf->filestore_op_threads, "filestore_op_threads"),
   op_wq(this, g_conf->filestore_op_thread_timeout,
        g_conf->filestore_op_thread_suicide_timeout, &op_tp),
   logger(NULL),
@@ -559,13 +559,13 @@ FileStore::FileStore(const std::string &base, const std::string &jdev, osflagbit
   for (int i = 0; i < m_ondisk_finisher_num; ++i) {
     ostringstream oss;
     oss << "filestore-ondisk-" << i;
-    Finisher *f = new Finisher(g_ceph_context, oss.str());
+    Finisher *f = new Finisher(g_ceph_context, oss.str(), "fn_odsk_fstore");
     ondisk_finishers.push_back(f);
   }
   for (int i = 0; i < m_apply_finisher_num; ++i) {
     ostringstream oss;
     oss << "filestore-apply-" << i;
-    Finisher *f = new Finisher(g_ceph_context, oss.str());
+    Finisher *f = new Finisher(g_ceph_context, oss.str(), "fn_appl_fstore");
     apply_finishers.push_back(f);
   }
 
@@ -1592,7 +1592,7 @@ int FileStore::mount()
   }
 
   wbthrottle.start();
-  sync_thread.create();
+  sync_thread.create("filestore_sync");
 
   if (!(generic_flags & SKIP_JOURNAL_REPLAY)) {
     ret = journal_replay(initial_op_seq);
index e757526b749d66f3e5667c40d742e8fc59a7cf5a..f384ba6a1b2da12a43bcb08961a8a42872a0889a 100644 (file)
@@ -132,7 +132,7 @@ public:
   JournalingObjectStore(const std::string& path)
     : ObjectStore(path),
       journal(NULL),
-      finisher(g_ceph_context, "JournalObjectStore"),
+      finisher(g_ceph_context, "JournalObjectStore", "fn_jrn_objstore"),
       apply_manager(journal, finisher),
       replaying(false) {}
 
index 6eb559dc0164a2015612b3f9b04ff0bf0a9ccfba..fb98c97536ce8f7f497e45d20595ec6bbd5c26a3 100644 (file)
@@ -49,7 +49,7 @@ void WBThrottle::start()
     Mutex::Locker l(lock);
     stopping = false;
   }
-  create();
+  create("wb_throttle");
 }
 
 void WBThrottle::stop()
index 35eb6f4489f6f86aabe4396255d020eca5975c2e..b0f98f13c5a6ab524977c7bffe2e9c7b30a7157a 100644 (file)
@@ -533,7 +533,7 @@ KeyValueStore::KeyValueStore(const std::string &base,
   throttle_ops(g_ceph_context, "keyvaluestore_ops", g_conf->keyvaluestore_queue_max_ops),
   throttle_bytes(g_ceph_context, "keyvaluestore_bytes", g_conf->keyvaluestore_queue_max_bytes),
   op_finisher(g_ceph_context),
-  op_tp(g_ceph_context, "KeyValueStore::op_tp",
+  op_tp(g_ceph_context, "KeyValueStore::op_tp", "tp_kvstore",
         g_conf->keyvaluestore_op_threads, "keyvaluestore_op_threads"),
   op_wq(this, g_conf->keyvaluestore_op_thread_timeout,
         g_conf->keyvaluestore_op_thread_suicide_timeout, &op_tp),
index 039f06981e951a9569cf4536934577c66de85433..0a4658d37a822a2f4c4e41f951317df1a4d41a46 100644 (file)
@@ -1011,7 +1011,7 @@ int KStore::mount()
     goto out_db;
 
   finisher.start();
-  kv_sync_thread.create();
+  kv_sync_thread.create("kstore_kv_sync");
 
   mounted = true;
   return 0;
index 70a59810b1924cfa52b56c4356c6ca620355d2fa..cdb138154f75006e8a9524b2950ac9aa81a0b4a4 100644 (file)
@@ -482,7 +482,7 @@ void OSDService::init()
   watch_timer.init();
   agent_timer.init();
 
-  agent_thread.create();
+  agent_thread.create("osd_srv_agent");
 }
 
 void OSDService::final_init()
@@ -1553,12 +1553,12 @@ OSD::OSD(CephContext *cct_, ObjectStore *store_,
   asok_hook(NULL),
   osd_compat(get_osd_compat_set()),
   state(STATE_INITIALIZING),
-  osd_tp(cct, "OSD::osd_tp", cct->_conf->osd_op_threads, "osd_op_threads"),
-  osd_op_tp(cct, "OSD::osd_op_tp", 
+  osd_tp(cct, "OSD::osd_tp", "tp_osd", cct->_conf->osd_op_threads, "osd_op_threads"),
+  osd_op_tp(cct, "OSD::osd_op_tp", "tp_osd_tp",
     cct->_conf->osd_op_num_threads_per_shard * cct->_conf->osd_op_num_shards),
-  recovery_tp(cct, "OSD::recovery_tp", cct->_conf->osd_recovery_threads, "osd_recovery_threads"),
-  disk_tp(cct, "OSD::disk_tp", cct->_conf->osd_disk_threads, "osd_disk_threads"),
-  command_tp(cct, "OSD::command_tp", 1),
+  recovery_tp(cct, "OSD::recovery_tp", "tp_osd_recov", cct->_conf->osd_recovery_threads, "osd_recovery_threads"),
+  disk_tp(cct, "OSD::disk_tp", "tp_osd_disk", cct->_conf->osd_disk_threads, "osd_disk_threads"),
+  command_tp(cct, "OSD::command_tp", "tp_osd_cmd", 1),
   paused_recovery(false),
   session_waiting_lock("OSD::session_waiting_lock"),
   heartbeat_lock("OSD::heartbeat_lock"),
@@ -1960,7 +1960,7 @@ int OSD::init()
   set_disk_tp_priority();
 
   // start the heartbeat
-  heartbeat_thread.create();
+  heartbeat_thread.create("osd_srv_heartbt");
 
   // tick
   tick_timer.add_event_after(cct->_conf->osd_heartbeat_interval, new C_Tick(this));
index 841ee1fd2e82b90951b7a9dd6fa3d955314bdf73..f9283f87fd7148f6a604abe68950e1df03501ccd 100644 (file)
@@ -613,7 +613,7 @@ class ObjectCacher {
   ~ObjectCacher();
 
   void start() {
-    flusher_thread.create();
+    flusher_thread.create("flusher");
   }
   void stop() {
     assert(flusher_thread.is_started());
index 9191fbe8651ccc97353ab781c6b3f31d1236901a..6216a3b09deea2f9fd983d9093a2cb2bd4d0eb52 100644 (file)
@@ -375,7 +375,7 @@ public:
     }
 
     renew_thread = new ChangesRenewThread(cct, this);
-    renew_thread->create();
+    renew_thread->create("rgw_dt_lg_renew");
   }
 
   ~RGWDataChangesLog();
index 3a949b6821aa44d7a78079505c3b8949534cb3e4..05fcbe3e2ffc273fe1943da0d8ef085c7a15b049 100644 (file)
@@ -259,7 +259,7 @@ bool RGWGC::going_down()
 void RGWGC::start_processor()
 {
   worker = new GCWorker(cct, this);
-  worker->create();
+  worker->create("rgw_gc");
 }
 
 void RGWGC::stop_processor()
index 2246b6a40ea11b598e09a422e8dde714b57afd1a..aa0d11bce8b7d8327c4de10caeb69582237e9cce 100644 (file)
@@ -243,7 +243,7 @@ protected:
 
 public:
   RGWProcess(CephContext *cct, RGWProcessEnv *pe, int num_threads, RGWFrontendConfig *_conf)
-    : store(pe->store), olog(pe->olog), m_tp(cct, "RGWProcess::m_tp", num_threads),
+    : store(pe->store), olog(pe->olog), m_tp(cct, "RGWProcess::m_tp", "tp_rgw_process", num_threads),
       req_throttle(cct, "rgw_ops", num_threads * 2),
       rest(pe->rest),
       conf(_conf),
@@ -884,7 +884,7 @@ public:
   int run() {
     assert(pprocess); /* should have initialized by init() */
     thread = new RGWProcessControlThread(pprocess);
-    thread->create();
+    thread->create("rgw_frontend");
     return 0;
   }
 
index 14957d7448cae63922599a951b4ea4498bc2dba7..56bb59f1735b0d01ae2ed0fec762a78611d7ee74 100644 (file)
@@ -218,7 +218,7 @@ bool RGWObjectExpirer::going_down()
 void RGWObjectExpirer::start_processor()
 {
   worker = new OEWorker(store->ctx(), this);
-  worker->create();
+  worker->create("rgw_obj_expirer");
 }
 
 void RGWObjectExpirer::stop_processor()
index 03c152db9df3c092067efde1ec8739929cb79469..c300c4432a0040cc75b5eb6373ee6ea755e45195 100644 (file)
@@ -516,9 +516,9 @@ public:
                                         rwlock("RGWUserStatsCache::rwlock") {
     if (quota_threads) {
       buckets_sync_thread = new BucketsSyncThread(store->ctx(), this);
-      buckets_sync_thread->create();
+      buckets_sync_thread->create("rgw_buck_st_syn");
       user_sync_thread = new UserSyncThread(store->ctx(), this);
-      user_sync_thread->create();
+      user_sync_thread->create("rgw_user_st_syn");
     } else {
       buckets_sync_thread = NULL;
       user_sync_thread = NULL;
index 8d73602858bdd69ffd2a130ec7b31fddd4d73390..b51d37f8afa6ea9b67122609a80c2586f4d24ae5 100644 (file)
@@ -716,7 +716,7 @@ void RGWSwift::init_keystone()
   keystone_token_cache = new RGWKeystoneTokenCache(cct, cct->_conf->rgw_keystone_token_cache_size);
 
   keystone_revoke_thread = new KeystoneRevokeThread(cct, this);
-  keystone_revoke_thread->create();
+  keystone_revoke_thread->create("rgw_swift_k_rev");
 }
 
 
index b3a19a7b9367563240b1374810e62e5b772d51fb..38e160db76f7ac5bd7721c9729333f411df332bd 100644 (file)
@@ -123,13 +123,13 @@ public:
       do_fadvise(do_fadvise),
       sync_interval(sync_interval),
       sync_fd(sync_fd),
-      tp(cct, "DumbBackend::tp", worker_threads),
+      tp(cct, "DumbBackend::tp", "tp_dumb_backend", worker_threads),
       thread(this),
       sync_loop_mutex("DumbBackend::sync_loop_mutex"),
       sync_loop_stop(0),
       pending_commit_mutex("DumbBackend::pending_commit_mutex"),
       queue(this, 20, &tp) {
-    thread.create();
+    thread.create("thread");
     tp.start();
     for (unsigned i = 0; i < 10*worker_threads; ++i) {
       sem.Put();
index 55b2aaeba7b8fd6ba2b32b8f4e09dfd1c6c96f4d..75ec051649383d443bf784065d9f0566b794c08e 100644 (file)
@@ -236,7 +236,7 @@ int main(int argc, char **argv)
   for (vector<ceph::shared_ptr<Bencher> >::iterator i = benchers.begin();
        i != benchers.end();
        ++i) {
-    (*i)->create();
+    (*i)->create("bencher");
   }
   for (vector<ceph::shared_ptr<Bencher> >::iterator i = benchers.begin();
        i != benchers.end();
index 23185a769777c527c19e4b891ba74a59450bc596..6bc6be10a18118c8aa26f0a5a3f5579056f7cf58 100644 (file)
@@ -173,7 +173,7 @@ int main(int argc, char **argv)
     if (*i == 'q') {
       ThreadPool *tp =
        new ThreadPool(
-         g_ceph_context, ss.str(), vm["num-threads"].as<unsigned>(), 0);
+         g_ceph_context, ss.str(), "tp_test", vm["num-threads"].as<unsigned>(), 0);
       wqs.push_back(
        new WQWrapper(
          new PassAlong(tp, wqs.back()),
index 2cbfa1e77d27c96c0ce3de972c587d052e9b69da..a80e14a5e81158240aed0d3c8af4a5bdd080aa13 100644 (file)
@@ -46,7 +46,7 @@ int main(int argc, const char **argv)
   list<T*> ls;
   for (int i=0; i<threads; i++) {
     T *t = new T(num);
-    t->create();
+    t->create("t");
     ls.push_back(t);
   }
 
index 7ae98b51609d8daedff9bab4ec99fdd34e3f5e8e..bd30471a809a76a07c3740b1bf0ef571745a94ed 100644 (file)
@@ -107,7 +107,7 @@ TEST_F(ThrottleTest, get) {
     ASSERT_FALSE(throttle.get_or_fail(throttle_max));
 
     Thread_get t(throttle, 7);
-    t.create();
+    t.create("t_throttle_1");
     usleep(delay);
     ASSERT_EQ(throttle.put(throttle_max), 0);
     t.join();
@@ -123,11 +123,11 @@ TEST_F(ThrottleTest, get) {
     ASSERT_FALSE(throttle.get_or_fail(throttle_max));
 
     Thread_get t(throttle, throttle_max);
-    t.create();
+    t.create("t_throttle_2");
     usleep(delay);
 
     Thread_get u(throttle, 1);
-    u.create();
+    u.create("u_throttle_2");
     usleep(delay);
 
     throttle.put(throttle_max / 2);
@@ -188,7 +188,7 @@ TEST_F(ThrottleTest, wait) {
     ASSERT_FALSE(throttle.get_or_fail(throttle_max));
 
     Thread_get t(throttle, throttle_max);
-    t.create();
+    t.create("t_throttle_3");
     usleep(delay);
 
     //
@@ -223,7 +223,7 @@ TEST_F(ThrottleTest, destructor) {
     ASSERT_FALSE(throttle->get(5));
 
     t = new Thread_get(*throttle, 7);
-    t->create();
+    t->create("t_throttle");
     bool blocked;
     useconds_t delay = 1;
     do {
index 09f6fb1dc17a0dfb9ed2d4e6156cb14872d26e2a..cd921f527077697b0cea2fe84585f718b87167cb 100644 (file)
@@ -175,7 +175,7 @@ TEST_F(SharedLRU_all, wait_lookup) {
   EXPECT_FALSE(cache.get_weak_refs()[key].first.lock());
 
   Thread_wait t(cache, key, value, Thread_wait::LOOKUP);
-  t.create();
+  t.create("wait_lookup_1");
   ASSERT_TRUE(wait_for(cache, 1));
   EXPECT_EQ(value, *t.ptr);
   // waiting on a key does not block lookups on other keys
@@ -201,7 +201,7 @@ TEST_F(SharedLRU_all, wait_lookup_or_create) {
   EXPECT_FALSE(cache.get_weak_refs()[key].first.lock());
 
   Thread_wait t(cache, key, value, Thread_wait::LOOKUP);
-  t.create();
+  t.create("wait_lookup_2");
   ASSERT_TRUE(wait_for(cache, 1));
   EXPECT_EQ(value, *t.ptr);
   // waiting on a key does not block lookups on other keys
@@ -246,7 +246,7 @@ TEST_F(SharedLRU_all, wait_lower_bound) {
   EXPECT_FALSE(cache.get_weak_refs()[key].first.lock());
 
   Thread_wait t(cache, key, value, Thread_wait::LOWER_BOUND);
-  t.create();
+  t.create("wait_lower_bnd");
   ASSERT_TRUE(wait_for(cache, 1));
   EXPECT_FALSE(t.ptr);
   // waiting on a key does not block getting lower_bound on other keys
index 42bc8e607acac8ffadab46c8549a3957356acaf7..98379c856aa707092cd39354c81bd3fdd3012808 100644 (file)
@@ -130,7 +130,7 @@ TEST_F(SharedPtrRegistry_all, wait_lookup_or_create) {
     EXPECT_FALSE(registry.get_contents()[key].first.lock());
 
     Thread_wait t(registry, key, 0, Thread_wait::LOOKUP_OR_CREATE);
-    t.create();
+    t.create("wait_lookcreate");
     ASSERT_TRUE(wait_for(registry, 1));
     EXPECT_FALSE(t.ptr);
     // waiting on a key does not block lookups on other keys
@@ -150,7 +150,7 @@ TEST_F(SharedPtrRegistry_all, wait_lookup_or_create) {
     EXPECT_FALSE(registry.get_contents()[key].first.lock());
 
     Thread_wait t(registry, key, value, Thread_wait::LOOKUP_OR_CREATE);
-    t.create();
+    t.create("wait_lookcreate");
     ASSERT_TRUE(wait_for(registry, 1));
     EXPECT_FALSE(t.ptr);
     // waiting on a key does not block lookups on other keys
@@ -193,7 +193,7 @@ TEST_F(SharedPtrRegistry_all, wait_lookup) {
   EXPECT_FALSE(registry.get_contents()[key].first.lock());
 
   Thread_wait t(registry, key, value, Thread_wait::LOOKUP);
-  t.create();
+  t.create("wait_lookup");
   ASSERT_TRUE(wait_for(registry, 1));
   EXPECT_EQ(value, *t.ptr);
   // waiting on a key does not block lookups on other keys
index 6cf241e448b9e90dd6f5c8a64e3c9d85b9ea9ef3..4691ff6e23dc7cb822186b70c17918c966abc246 100644 (file)
@@ -55,7 +55,7 @@ TEST_F(ErasureCodePluginRegistryTest, factory_mutex) {
   useconds_t delay = 0;
   const useconds_t DELAY_MAX = 20 * 1000 * 1000;
   Thread_factory sleep_forever;
-  sleep_forever.create();
+  sleep_forever.create("sleep_forever");
   do {
     cout << "Trying (1) with delay " << delay << "us\n";
     if (delay > 0)
index 1f072b773204b81990323d89ab11e6c3cc0489f3..15f5084f9e1289d110760a1fff9d518ccf23b9f9 100644 (file)
@@ -3223,7 +3223,7 @@ TEST_F(TestLibRBD, ObjectMapConsistentSnap)
   ASSERT_EQ(0, rbd.open(ioctx, image1, name.c_str(), NULL));
 
   RBDWriter writer(image1);
-  writer.create();
+  writer.create("rbd_writer");
 
   int num_snaps = 10;
   for (int i = 0; i < num_snaps; ++i) {
index 17c4aca3e038ba4baf9feae9c96e95b8bf213704..fa21b493119f3c23e492bff2dbbda27cfd257465 100644 (file)
@@ -142,7 +142,7 @@ class MessengerClient {
   }
   void start() {
     for (uint64_t i = 0; i < clients.size(); ++i)
-      clients[i]->create();
+      clients[i]->create("client");
     for (uint64_t i = 0; i < msgrs.size(); ++i)
       msgrs[i]->wait();
   }
index b38a76ab1a2039a71940884807fb2644eb51d69b..3989cd234905e24e7c5e5fb0ec49021c718fb89e 100644 (file)
@@ -72,7 +72,7 @@ class ServerDispatcher : public Dispatcher {
 
  public:
   ServerDispatcher(int threads, uint64_t delay): Dispatcher(g_ceph_context), think_time(delay),
-    op_tp(g_ceph_context, "ServerDispatcher::op_tp", threads, "serverdispatcher_op_threads"),
+    op_tp(g_ceph_context, "ServerDispatcher::op_tp", "tp_serv_disp", threads, "serverdispatcher_op_threads"),
     op_wq(30, 30, &op_tp) {
     op_tp.start();
   }
index 1d6d2900b7cb617c228fb63fc58c1a6cf4116c49..a25eef337bbd27c517251e31808ded02c97db667 100644 (file)
@@ -307,8 +307,8 @@ TEST(EventCenterTest, DispatchTest) {
   atomic_t count(0);
   Mutex lock("DispatchTest::lock");
   Cond cond;
-  worker1.create();
-  worker2.create();
+  worker1.create("worker_1");
+  worker2.create("worker_2");
   for (int i = 0; i < 10000; ++i) {
     count.inc();
     worker1.center.dispatch_event_external(EventCallbackRef(new CountEvent(&count, &lock, &cond)));
index c28ed0df05acb9b0077cc4d092d8dc9bec7f913c..ec4fc3d02b5d617f834d1916557544663a9043bf 100644 (file)
@@ -1161,7 +1161,7 @@ TEST_F(ObjectContextTest, read_write_lock)
     EXPECT_EQ(1, obc.unstable_writes);
 
     Thread_read_lock t(obc);
-    t.create();
+    t.create("obc_read");
 
     do {
       cout << "Trying (1) with delay " << delay << "us\n";
@@ -1218,7 +1218,7 @@ TEST_F(ObjectContextTest, read_write_lock)
     EXPECT_EQ(0, obc.unstable_writes);
 
     Thread_write_lock t(obc);
-    t.create();
+    t.create("obc_write");
 
     do {
       cout << "Trying (3) with delay " << delay << "us\n";
index 9672be2553b0b9f855dde0a08a193adcc62ccff6..fe642588f4c660742a24f12c7e8080535c831093 100644 (file)
@@ -340,7 +340,7 @@ class CondPingPong {
   CondPingPong(): mutex("CondPingPong::mutex"), prod(0), cons(0), count(10000), consumer(this) {}
 
   double run() {
-    consumer.create();
+    consumer.create("consumer");
     uint64_t start = Cycles::rdtsc();
     produce();
     uint64_t stop = Cycles::rdtsc();
@@ -498,7 +498,7 @@ double eventcenter_dispatch()
 
   CenterWorker worker(g_ceph_context);
   atomic_t flag(1);
-  worker.create();
+  worker.create("evt_center_disp");
   EventCallbackRef count_event(new CountEvent(&flag));
 
   worker.center.dispatch_event_external(count_event);
@@ -759,7 +759,7 @@ double spawn_thread()
   ThreadHelper thread;
   uint64_t start = Cycles::rdtsc();
   for (int i = 0; i < count; i++) {
-    thread.create();
+    thread.create("thread_helper");
     thread.join();
   }
   uint64_t stop = Cycles::rdtsc();
index 873e88cc3aee610e1c1231843a85bd2cd94b9fcf..17cb898c7608d89a9ac09f9806aafad15b5cd4db 100644 (file)
@@ -167,7 +167,7 @@ private:
 
 public:
   PausyAsyncMap() : lock("PausyAsyncMap"), doer(this) {
-    doer.create();
+    doer.create("doer");
   }
   ~PausyAsyncMap() {
     doer.join();
index 6ddfee5047f9d0e8bbe44945d7f208fbcd3c51ba..cafdc20e6e8e1344691d3bad7bc99a4520d25ed4 100644 (file)
@@ -76,7 +76,7 @@ TEST_P(WatchStress, Stress1) {
   WatchNotifyTestCtx ctx;
 
   WatcherUnwatcher *thr = new WatcherUnwatcher(pool_name);
-  thr->create();
+  thr->create("watcher_unwatch");
   ASSERT_EQ(0, nioctx.create("foo", false));
 
   for (unsigned i = 0; i < 75; ++i) {
index b55de59ba83955f842808c3e371566d074b5fde3..415dea34dc71cba52b8026f2c64eb998251d9005 100644 (file)
@@ -70,7 +70,7 @@ int main(int argc, const char **argv)
   }
   
   dout(0) << "starting thread" << dendl;
-  foo.create();
+  foo.create("foo");
   dout(0) << "starting op" << dendl;
   fs->apply_transaction(&osr, t);
 
index 602a0ec1eaed6568bfc25a47e377fc87a89fc119..c7ff4335472ce6bcec9781d18abff172aa3870d9 100644 (file)
@@ -8,7 +8,7 @@
 
 TEST(WorkQueue, StartStop)
 {
-  ThreadPool tp(g_ceph_context, "foo", 10, "");
+  ThreadPool tp(g_ceph_context, "foo", "tp_foo", 10, "");
   
   tp.start();
   tp.pause();
@@ -21,7 +21,7 @@ TEST(WorkQueue, StartStop)
 
 TEST(WorkQueue, Resize)
 {
-  ThreadPool tp(g_ceph_context, "bar", 2, "osd_op_threads");
+  ThreadPool tp(g_ceph_context, "bar", "tp_bar", 2, "osd_op_threads");
   
   tp.start();
 
index 708b57c85595d34cf873660eb08ad885a04060cb..d335d92fda0f4ed4d25e27475d48802abf5906d5 100644 (file)
@@ -22,7 +22,7 @@ MDSUtility::MDSUtility() :
   objecter(NULL),
   lock("MDSUtility::lock"),
   timer(g_ceph_context, lock),
-  finisher(g_ceph_context, "MDSUtility"),
+  finisher(g_ceph_context, "MDSUtility", "fn_mds_utility"),
   waiting_for_mds_map(NULL)
 {
   monc = new MonClient(g_ceph_context);
index b86bcebdeee43556d9234237e8fe7225fb6a1b05..a6363d4a62c8792224dd52b0feda39a916bc6ced 100644 (file)
@@ -262,8 +262,8 @@ public:
     if (!started) {
       started = true;
 
-      reader_thread.create();
-      writer_thread.create();
+      reader_thread.create("rbd_reader");
+      writer_thread.create("rbd_writer");
     }
   }