//===--------------------- TaskPool.cpp -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "lldb/Host/TaskPool.h"
#include "lldb/Host/ThreadLauncher.h"

#include <cstdint>
#include <queue>
#include <thread>
namespace lldb_private {

namespace {
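// Global thread pool implementation. Worker threads are spawned lazily (up to
// the hardware concurrency hint) as tasks are added, and exit once the task
// queue drains.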
class TaskPoolImpl {
public:
  static TaskPoolImpl &GetInstance();

  void AddTask(std::function<void()> &&task_fn);

private:
  TaskPoolImpl();

  static lldb::thread_result_t WorkerPtr(void *pool);

  static void Worker(TaskPoolImpl *pool);

  std::queue<std::function<void()>> m_tasks;
  std::mutex m_tasks_mutex;
  uint32_t m_thread_count;
};

} // end of anonymous namespace

TaskPoolImpl &TaskPoolImpl::GetInstance() {
  static TaskPoolImpl g_task_pool_impl;
  return g_task_pool_impl;
}

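// Type-erased entry point for the templated TaskPool::AddTask in TaskPool.h.
// A hypothetical call site might look like this (ExpensiveWork is a
// placeholder for the caller's own function):
//
//   std::future<int> f = TaskPool::AddTask([] { return ExpensiveWork(); });
//   int result = f.get();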
void TaskPool::AddTaskImpl(std::function<void()> &&task_fn) {
  TaskPoolImpl::GetInstance().AddTask(std::move(task_fn));
}

TaskPoolImpl::TaskPoolImpl() : m_thread_count(0) {}

unsigned GetHardwareConcurrencyHint() {
  // std::thread::hardware_concurrency may return 0 if the value is not well
  // defined or not computable. Treat that case as a single hardware thread.
  static const unsigned g_hardware_concurrency =
      std::max(1u, std::thread::hardware_concurrency());
  return g_hardware_concurrency;
}

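// Queues a task and, while the pool is below the hardware concurrency limit,
// launches one more detached worker thread to service the queue.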
void TaskPoolImpl::AddTask(std::function<void()> &&task_fn) {
  const size_t min_stack_size = 8 * 1024 * 1024;

  std::unique_lock<std::mutex> lock(m_tasks_mutex);
  m_tasks.emplace(std::move(task_fn));
  if (m_thread_count < GetHardwareConcurrencyHint()) {
    m_thread_count++;
    // Note that this detach call needs to happen with the m_tasks_mutex held.
    // This prevents the thread from exiting prematurely and triggering a Linux
    // libc bug (https://sourceware.org/bugzilla/show_bug.cgi?id=19951).
    lldb_private::ThreadLauncher::LaunchThread("task-pool.worker", WorkerPtr,
                                               this, nullptr, min_stack_size)
        .Release();
  }
}

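// C-style thread entry point handed to ThreadLauncher; forwards to Worker().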
lldb::thread_result_t TaskPoolImpl::WorkerPtr(void *pool) {
  Worker(static_cast<TaskPoolImpl *>(pool));
  return 0;
}

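// Worker loop: pop and run tasks until the queue is empty, then decrement the
// pool's thread count and exit. The next AddTask call spawns a replacement
// worker if needed.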
void TaskPoolImpl::Worker(TaskPoolImpl *pool) {
  while (true) {
    std::unique_lock<std::mutex> lock(pool->m_tasks_mutex);
    if (pool->m_tasks.empty()) {
      pool->m_thread_count--;
      break;
    }

    std::function<void()> f = std::move(pool->m_tasks.front());
    pool->m_tasks.pop();
    lock.unlock();

    f();
  }
}

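// Calls func(i) for every i in [begin, end) in parallel; each worker claims
// the next unprocessed index via an atomic counter. A minimal usage sketch
// (ComputeValue is a hypothetical callback):
//
//   std::vector<int> results(count);
//   TaskMapOverInt(0, count,
//                  [&](size_t i) { results[i] = ComputeValue(i); });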
void TaskMapOverInt(size_t begin, size_t end,
                    const llvm::function_ref<void(size_t)> &func) {
  const size_t num_workers =
      std::min<size_t>(end, GetHardwareConcurrencyHint());
  std::atomic<size_t> idx{begin};

  auto wrapper = [&idx, end, &func]() {
    while (true) {
      size_t i = idx.fetch_add(1);
      if (i >= end)
        break;
      func(i);
    }
  };

  std::vector<std::future<void>> futures;
  futures.reserve(num_workers);
  for (size_t i = 0; i < num_workers; i++)
    futures.push_back(TaskPool::AddTask(wrapper));
  for (size_t i = 0; i < num_workers; i++)
    futures[i].wait();
}

} // namespace lldb_private