1 #include "support/Threading.h"
2 #include "support/Trace.h"
3 #include "llvm/ADT/ScopeExit.h"
4 #include "llvm/Support/FormatVariadic.h"
5 #include "llvm/Support/Threading.h"
6 #include "llvm/Support/thread.h"
7 #include <atomic>
8 #include <thread>
9 #ifdef __USE_POSIX
10 #include <pthread.h>
11 #elif defined(__APPLE__)
12 #include <sys/resource.h>
13 #elif defined(_WIN32)
14 #include <windows.h>
15 #endif
16 
17 namespace clang {
18 namespace clangd {
19 
20 void Notification::notify() {
21   {
22     std::lock_guard<std::mutex> Lock(Mu);
23     Notified = true;
24     // Broadcast with the lock held. This ensures that it's safe to destroy
25     // a Notification after wait() returns, even from another thread.
26     CV.notify_all();
27   }
28 }
29 
30 void Notification::wait() const {
31   std::unique_lock<std::mutex> Lock(Mu);
32   CV.wait(Lock, [this] { return Notified; });
33 }
34 
// Create a counting semaphore with MaxLocks slots initially free.
Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}
36 
37 bool Semaphore::try_lock() {
38   std::unique_lock<std::mutex> Lock(Mutex);
39   if (FreeSlots > 0) {
40     --FreeSlots;
41     return true;
42   }
43   return false;
44 }
45 
46 void Semaphore::lock() {
47   trace::Span Span("WaitForFreeSemaphoreSlot");
48   // trace::Span can also acquire locks in ctor and dtor, we make sure it
49   // happens when Semaphore's own lock is not held.
50   {
51     std::unique_lock<std::mutex> Lock(Mutex);
52     SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
53     --FreeSlots;
54   }
55 }
56 
57 void Semaphore::unlock() {
58   std::unique_lock<std::mutex> Lock(Mutex);
59   ++FreeSlots;
60   Lock.unlock();
61 
62   SlotsChanged.notify_one();
63 }
64 
65 AsyncTaskRunner::~AsyncTaskRunner() { wait(); }
66 
67 bool AsyncTaskRunner::wait(Deadline D) const {
68   std::unique_lock<std::mutex> Lock(Mutex);
69   return clangd::wait(Lock, TasksReachedZero, D,
70                       [&] { return InFlightTasks == 0; });
71 }
72 
// Run Action on a new detached thread named Name, tracking it in
// InFlightTasks so wait()/the destructor can block until it completes.
void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
  // Register the task up front, before the thread exists, so a concurrent
  // wait() can't observe a zero count while this task is still pending.
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  // Runs when the thread's closure is destroyed (i.e. after Task finishes,
  // or if thread creation itself fails and the closure is dropped).
  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Make sure function stored by ThreadFunc is destroyed before Cleanup runs.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::thread Thread(
      /*clang::DesiredStackSize*/ llvm::Optional<unsigned>(8 << 20),
      std::move(Task));
  Thread.detach();
}
104 
105 Deadline timeoutSeconds(llvm::Optional<double> Seconds) {
106   using namespace std::chrono;
107   if (!Seconds)
108     return Deadline::infinity();
109   return steady_clock::now() +
110          duration_cast<steady_clock::duration>(duration<double>(*Seconds));
111 }
112 
113 void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
114           Deadline D) {
115   if (D == Deadline::zero())
116     return;
117   if (D == Deadline::infinity())
118     return CV.wait(Lock);
119   CV.wait_until(Lock, D.time());
120 }
121 
// Lock-free rate limiter: returns true at most roughly once per Period
// across all threads; the caller should perform its periodic work only on a
// true return.
bool PeriodicThrottler::operator()() {
  Rep Now = Stopwatch::now().time_since_epoch().count();
  Rep OldNext = Next.load(std::memory_order_acquire);
  // Next run time hasn't arrived yet: fast-path refusal.
  if (Now < OldNext)
    return false;
  // We're ready to run (but may be racing other threads).
  // Work out the updated target time, and run if we successfully bump it.
  // Exactly one of the racing threads wins the CAS; the losers see a fresh
  // OldNext and (correctly) return false.
  Rep NewNext = Now + Period;
  return Next.compare_exchange_strong(OldNext, NewNext,
                                      std::memory_order_acq_rel);
}
133 
134 } // namespace clangd
135 } // namespace clang
136