1 //===--- Threading.cpp - Abstractions for multithreading ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "support/Threading.h"
10 #include "support/Trace.h"
11 #include "llvm/ADT/ScopeExit.h"
12 #include "llvm/Support/Threading.h"
13 #include "llvm/Support/thread.h"
14 #include <atomic>
15 #include <thread>
16 #ifdef __USE_POSIX
17 #include <pthread.h>
18 #elif defined(__APPLE__)
19 #include <sys/resource.h>
20 #elif defined(_WIN32)
21 #include <windows.h>
22 #endif
23
24 namespace clang {
25 namespace clangd {
26
notify()27 void Notification::notify() {
28 {
29 std::lock_guard<std::mutex> Lock(Mu);
30 Notified = true;
31 // Broadcast with the lock held. This ensures that it's safe to destroy
32 // a Notification after wait() returns, even from another thread.
33 CV.notify_all();
34 }
35 }
36
wait(Deadline D) const37 bool Notification::wait(Deadline D) const {
38 std::unique_lock<std::mutex> Lock(Mu);
39 return clangd::wait(Lock, CV, D, [&] { return Notified; });
40 }
41
// Create a semaphore with MaxLocks slots initially available.
Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}
43
try_lock()44 bool Semaphore::try_lock() {
45 std::unique_lock<std::mutex> Lock(Mutex);
46 if (FreeSlots > 0) {
47 --FreeSlots;
48 return true;
49 }
50 return false;
51 }
52
lock()53 void Semaphore::lock() {
54 trace::Span Span("WaitForFreeSemaphoreSlot");
55 // trace::Span can also acquire locks in ctor and dtor, we make sure it
56 // happens when Semaphore's own lock is not held.
57 {
58 std::unique_lock<std::mutex> Lock(Mutex);
59 SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
60 --FreeSlots;
61 }
62 }
63
unlock()64 void Semaphore::unlock() {
65 std::unique_lock<std::mutex> Lock(Mutex);
66 ++FreeSlots;
67 Lock.unlock();
68
69 SlotsChanged.notify_one();
70 }
71
// Block until every task launched via runAsync() has completed: the detached
// worker threads capture `this`, so the runner must outlive them.
AsyncTaskRunner::~AsyncTaskRunner() { wait(); }
73
wait(Deadline D) const74 bool AsyncTaskRunner::wait(Deadline D) const {
75 std::unique_lock<std::mutex> Lock(Mutex);
76 return clangd::wait(Lock, TasksReachedZero, D,
77 [&] { return InFlightTasks == 0; });
78 }
79
// Run Action on a detached worker thread named Name, tracking it in
// InFlightTasks so that wait() can rendezvous with completion.
void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
  // Register the task before the thread exists, so wait() can't observe a
  // transient zero between launch and the thread's first instruction.
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  // Decrements the counter when it is destroyed; it is moved into the task
  // below, so it fires on the worker thread once Action has finished.
  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Make sure function stored by ThreadFunc is destroyed before Cleanup runs.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::thread Thread(
      /*clang::DesiredStackSize*/ llvm::Optional<unsigned>(8 << 20),
      std::move(Task));
  // Detached: the destructor's wait() (not join) provides the rendezvous.
  Thread.detach();
}
111
timeoutSeconds(llvm::Optional<double> Seconds)112 Deadline timeoutSeconds(llvm::Optional<double> Seconds) {
113 using namespace std::chrono;
114 if (!Seconds)
115 return Deadline::infinity();
116 return steady_clock::now() +
117 duration_cast<steady_clock::duration>(duration<double>(*Seconds));
118 }
119
wait(std::unique_lock<std::mutex> & Lock,std::condition_variable & CV,Deadline D)120 void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
121 Deadline D) {
122 if (D == Deadline::zero())
123 return;
124 if (D == Deadline::infinity())
125 return CV.wait(Lock);
126 CV.wait_until(Lock, D.time());
127 }
128
// Returns true at most once per Period across all threads: the caller that
// wins the compare-exchange race runs; concurrent losers and callers arriving
// before the next deadline get false.
bool PeriodicThrottler::operator()() {
  // Current time and the target time, in the clock's raw tick count (Rep).
  Rep Now = Stopwatch::now().time_since_epoch().count();
  Rep OldNext = Next.load(std::memory_order_acquire);
  // Too early: the previously scheduled deadline hasn't arrived yet.
  if (Now < OldNext)
    return false;
  // We're ready to run (but may be racing other threads).
  // Work out the updated target time, and run if we successfully bump it.
  Rep NewNext = Now + Period;
  return Next.compare_exchange_strong(OldNext, NewNext,
                                      std::memory_order_acq_rel);
}
140
141 } // namespace clangd
142 } // namespace clang
143