crimson/os/alienstore: scatter alienstore's threads to different cores
author     Xuehan Xu <xxhdx1985126@gmail.com>
           Mon, 1 Mar 2021 11:23:27 +0000 (19:23 +0800)
committer  Xuehan Xu <xxhdx1985126@gmail.com>
           Wed, 17 Mar 2021 02:39:21 +0000 (10:39 +0800)
This is a performance optimization: according to tests, there are
circumstances in which the single CPU core holding all of alienstore's
threads becomes the performance bottleneck.

Signed-off-by: Xuehan Xu <xxhdx1985126@gmail.com>
src/crimson/os/alienstore/alien_store.cc
src/crimson/os/alienstore/alien_store.h
src/crimson/os/alienstore/thread_pool.cc
src/crimson/os/alienstore/thread_pool.h

src/crimson/os/alienstore/alien_store.cc
index 827d9f9af2906b925e4c0c46f7d12506cc01a7ec..6b789e4974288ff93db5c42954bb5e1f2ffb307e 100644
@@ -63,17 +63,10 @@ AlienStore::AlienStore(const std::string& path, const ConfigValues& values)
   g_ceph_context = cct.get();
   cct->_conf.set_config_values(values);
   store = std::make_unique<BlueStore>(cct.get(), path);
-
-  long cpu_id = 0;
-  if (long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); nr_cpus != -1) {
-    cpu_id = nr_cpus - 1;
-  } else {
-    logger().error("{}: unable to get nproc: {}", __func__, errno);
-    cpu_id = -1;
-  }
   const auto num_threads =
     cct->_conf.get_val<uint64_t>("crimson_alien_op_num_threads");
-  tp = std::make_unique<crimson::os::ThreadPool>(num_threads, 128, cpu_id);
+  std::vector<uint64_t> cpu_cores = _parse_cpu_cores();
+  tp = std::make_unique<crimson::os::ThreadPool>(num_threads, 128, cpu_cores);
 }
 
 seastar::future<> AlienStore::start()
@@ -567,4 +560,26 @@ int AlienStore::AlienOmapIterator::status() const
   return iter->status();
 }
 
+std::vector<uint64_t> AlienStore::_parse_cpu_cores()
+{
+  std::vector<uint64_t> cpu_cores;
+  auto cpu_string =
+    cct->_conf.get_val<std::string>("crimson_alien_thread_cpu_cores");
+
+  std::string token;
+  std::istringstream token_stream(cpu_string);
+  while (std::getline(token_stream, token, ',')) {
+    std::istringstream cpu_stream(token);
+    std::string cpu;
+    std::getline(cpu_stream, cpu, '-');
+    uint64_t start_cpu = std::stoull(cpu);
+    std::getline(cpu_stream, cpu, '-');
+    uint64_t end_cpu = std::stoull(cpu);
+    for (uint64_t i = start_cpu; i < end_cpu; i++) {
+      cpu_cores.push_back(i);
+    }
+  }
+  return cpu_cores;
+}
+
 }
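
The helper added above reads the crimson_alien_thread_cpu_cores option and expands it into a flat list of core ids. As written, each comma-separated token is split on '-' into a start and an end, and the cores in the half-open range [start, end) are collected. Below is a minimal standalone sketch of the same parsing, with parse_cpu_cores as a free function and an arbitrary example string standing in for the config value:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Parse a string such as "0-3,6-8" the same way the patch does:
// each comma-separated token is split on '-' into a start and an end,
// and the cores in the half-open range [start, end) are collected.
std::vector<uint64_t> parse_cpu_cores(const std::string& cpu_string)
{
  std::vector<uint64_t> cpu_cores;
  std::string token;
  std::istringstream token_stream(cpu_string);
  while (std::getline(token_stream, token, ',')) {
    std::istringstream cpu_stream(token);
    std::string cpu;
    std::getline(cpu_stream, cpu, '-');
    uint64_t start_cpu = std::stoull(cpu);
    std::getline(cpu_stream, cpu, '-');
    uint64_t end_cpu = std::stoull(cpu);
    for (uint64_t i = start_cpu; i < end_cpu; i++) {
      cpu_cores.push_back(i);
    }
  }
  return cpu_cores;
}

int main()
{
  // "0-3,6-8" is only an example value for crimson_alien_thread_cpu_cores.
  for (auto core : parse_cpu_cores("0-3,6-8")) {
    std::cout << core << ' ';   // prints: 0 1 2 6 7
  }
  std::cout << '\n';
  return 0;
}
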
src/crimson/os/alienstore/alien_store.h
index ae4830981e6e59a370414c59823f5762ebf87000..c94da3ee580b9e5f8c62c4f7a1460be92697fb2d 100644
@@ -119,5 +119,6 @@ private:
   std::unique_ptr<CephContext> cct;
   seastar::gate transaction_gate;
   std::unordered_map<coll_t, CollectionRef> coll_map;
+  std::vector<uint64_t> _parse_cpu_cores();
 };
 }
src/crimson/os/alienstore/thread_pool.cc
index e127d87d524fc9cca0ee290335372a18de52a780..c42947bfa2fb24b0169311d0175d57b189d7b5cb 100644
@@ -1,3 +1,6 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+// vim: ts=8 sw=2 smarttab expandtab
+
 #include "thread_pool.h"
 
 #include <chrono>
@@ -12,15 +15,15 @@ namespace crimson::os {
 
 ThreadPool::ThreadPool(size_t n_threads,
                        size_t queue_sz,
-                       long cpu_id)
+                       std::vector<uint64_t> cpus)
   : queue_size{round_up_to(queue_sz, seastar::smp::count)},
     pending{queue_size}
 {
   auto queue_max_wait = std::chrono::seconds(local_conf()->threadpool_empty_queue_max_wait);
   for (size_t i = 0; i < n_threads; i++) {
-    threads.emplace_back([this, cpu_id, queue_max_wait] {
-      if (cpu_id >= 0) {
-        pin(cpu_id);
+    threads.emplace_back([this, cpus, queue_max_wait] {
+      if (!cpus.empty()) {
+        pin(cpus);
       }
       loop(queue_max_wait);
     });
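
With this change every worker thread captures the whole core list by value and, when the list is non-empty, pins itself to that entire set before entering its work loop; the threads are not assigned one core each, so the kernel scheduler is left to balance them across the listed cores. A simplified, seastar-free sketch of that pattern, with a print-only stand-in for pin() (the real affinity call is shown after the pin() hunk below) and a stub work loop:

#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>
#include <vector>

// Print-only stand-in for ThreadPool::pin(); the real version builds a
// cpu_set_t from the list and calls pthread_setaffinity_np().
void pin(const std::vector<uint64_t>& cpus)
{
  std::cout << "pinning thread to " << cpus.size() << " cores\n";
}

// Stub standing in for ThreadPool::loop().
void loop()
{
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
}

int main()
{
  const int n_threads = 4;                    // example thread count
  const std::vector<uint64_t> cpus{0, 1, 2};  // example core ids
  std::vector<std::thread> threads;
  for (int i = 0; i < n_threads; i++) {
    // Each worker captures the full core list, mirroring the lambda above.
    threads.emplace_back([cpus] {
      if (!cpus.empty()) {
        pin(cpus);
      }
      loop();
    });
  }
  for (auto& t : threads) {
    t.join();
  }
  return 0;
}
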
@@ -34,11 +37,13 @@ ThreadPool::~ThreadPool()
   }
 }
 
-void ThreadPool::pin(unsigned cpu_id)
+void ThreadPool::pin(const std::vector<uint64_t>& cpus)
 {
   cpu_set_t cs;
   CPU_ZERO(&cs);
-  CPU_SET(cpu_id, &cs);
+  for (auto cpu : cpus) {
+    CPU_SET(cpu, &cs);
+  }
   [[maybe_unused]] auto r = pthread_setaffinity_np(pthread_self(),
                                                    sizeof(cs), &cs);
   ceph_assert(r == 0);
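
pin() now builds a single affinity mask containing every listed core and applies it to the calling thread. A small standalone illustration of that call, using arbitrary example core ids and reading the mask back with pthread_getaffinity_np() to confirm the result (Linux, compile with -pthread):

#include <pthread.h>
#include <sched.h>
#include <cstdint>
#include <cstdio>
#include <vector>

// Pin the calling thread to every core in `cpus`, as ThreadPool::pin() does.
void pin_to(const std::vector<uint64_t>& cpus)
{
  cpu_set_t cs;
  CPU_ZERO(&cs);
  for (auto cpu : cpus) {
    CPU_SET(cpu, &cs);   // add each requested core to one shared affinity mask
  }
  if (int r = pthread_setaffinity_np(pthread_self(), sizeof(cs), &cs); r != 0) {
    std::fprintf(stderr, "pthread_setaffinity_np failed: %d\n", r);
  }
}

int main()
{
  pin_to({0, 1});   // arbitrary example core ids; adjust to the local machine

  // Read the mask back to see which cores the thread may now run on.
  cpu_set_t cs;
  CPU_ZERO(&cs);
  pthread_getaffinity_np(pthread_self(), sizeof(cs), &cs);
  for (int i = 0; i < CPU_SETSIZE; ++i) {
    if (CPU_ISSET(i, &cs)) {
      std::printf("may run on core %d\n", i);
    }
  }
  return 0;
}
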
src/crimson/os/alienstore/thread_pool.h
index 27840da189101713043561c5fbe0302f99d29938..8b66725dd70953b2f6bd8d7baaf86b01b9f5bb29 100644
@@ -86,7 +86,7 @@ class ThreadPool {
   bool is_stopping() const {
     return stopping.load(std::memory_order_relaxed);
   }
-  static void pin(unsigned cpu_id);
+  static void pin(const std::vector<uint64_t>& cpus);
   seastar::semaphore& local_free_slots() {
     return submit_queue.local().free_slots;
   }
@@ -102,7 +102,7 @@ public:
    * @note each @c Task has its own crimson::thread::Condition, which possesses
    * an fd, so we should keep the size of queue under a reasonable limit.
    */
-  ThreadPool(size_t n_threads, size_t queue_sz, long cpu);
+  ThreadPool(size_t n_threads, size_t queue_sz, std::vector<uint64_t> cpus);
   ~ThreadPool();
   seastar::future<> start();
   seastar::future<> stop();