g_ceph_context = cct.get();
cct->_conf.set_config_values(values);
store = std::make_unique<BlueStore>(cct.get(), path);
- tp = std::make_unique<crimson::os::ThreadPool>(1, 128, seastar::this_shard_id() + 10);
+
+ long cpu_id = 0;
+ if (long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); nr_cpus != -1) {
+ cpu_id = nr_cpus - 1;
+ } else {
+ logger().error("{}: unable to get nproc: {}", __func__, errno);
+ cpu_id = -1;
+ }
+ tp = std::make_unique<crimson::os::ThreadPool>(1, 128, cpu_id);
}
seastar::future<> AlienStore::start()
// Construct the pool and spawn @c n_threads worker threads.
//
// @param n_threads number of worker threads to create
// @param queue_sz  requested task-queue capacity; rounded up to a
//                  multiple of seastar::smp::count so each shard gets
//                  an equal share
// @param cpu_id    CPU to pin every worker thread to; a negative value
//                  (e.g. when the caller could not determine the CPU
//                  count) disables pinning entirely
ThreadPool::ThreadPool(size_t n_threads,
                       size_t queue_sz,
                       long cpu_id)
  : queue_size{round_up_to(queue_sz, seastar::smp::count)},
    pending{queue_size}
{
  auto queue_max_wait = std::chrono::seconds(local_conf()->threadpool_empty_queue_max_wait);
  for (size_t i = 0; i < n_threads; i++) {
    threads.emplace_back([this, cpu_id, queue_max_wait] {
      // A negative cpu_id is the "no affinity requested" sentinel, so
      // only pin when we were given a real CPU number.
      if (cpu_id >= 0) {
        pin(cpu_id);
      }
      crimson::os::AlienStore::configure_thread_memory();
      loop(queue_max_wait);
    });
* @note each @c Task has its own crimson::thread::Condition, which possesses
* an fd, so we should keep the size of the queue under a reasonable limit.
*/
// @param cpu CPU to pin the worker threads to; pass a negative value
//            (conventionally -1) to disable pinning.
ThreadPool(size_t n_threads, size_t queue_sz, long cpu);
~ThreadPool();
seastar::future<> start();
seastar::future<> stop();