1 //===--- Threading.cpp - Abstractions for multithreading ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "support/Threading.h"
10 #include "support/Trace.h"
11 #include "llvm/ADT/ScopeExit.h"
12 #include "llvm/Support/Threading.h"
13 #include "llvm/Support/thread.h"
14 #include <atomic>
15 #include <optional>
16 #include <thread>
17 #ifdef __USE_POSIX
18 #include <pthread.h>
19 #elif defined(__APPLE__)
20 #include <sys/resource.h>
21 #elif defined(_WIN32)
22 #include <windows.h>
23 #endif
24
25 namespace clang {
26 namespace clangd {
27
notify()28 void Notification::notify() {
29 {
30 std::lock_guard<std::mutex> Lock(Mu);
31 Notified = true;
32 // Broadcast with the lock held. This ensures that it's safe to destroy
33 // a Notification after wait() returns, even from another thread.
34 CV.notify_all();
35 }
36 }
37
wait(Deadline D) const38 bool Notification::wait(Deadline D) const {
39 std::unique_lock<std::mutex> Lock(Mu);
40 return clangd::wait(Lock, CV, D, [&] { return Notified; });
41 }
42
// A Semaphore starts with MaxLocks free slots; each lock() consumes one.
Semaphore::Semaphore(std::size_t MaxLocks) : FreeSlots(MaxLocks) {}
44
try_lock()45 bool Semaphore::try_lock() {
46 std::unique_lock<std::mutex> Lock(Mutex);
47 if (FreeSlots > 0) {
48 --FreeSlots;
49 return true;
50 }
51 return false;
52 }
53
lock()54 void Semaphore::lock() {
55 trace::Span Span("WaitForFreeSemaphoreSlot");
56 // trace::Span can also acquire locks in ctor and dtor, we make sure it
57 // happens when Semaphore's own lock is not held.
58 {
59 std::unique_lock<std::mutex> Lock(Mutex);
60 SlotsChanged.wait(Lock, [&]() { return FreeSlots > 0; });
61 --FreeSlots;
62 }
63 }
64
unlock()65 void Semaphore::unlock() {
66 std::unique_lock<std::mutex> Lock(Mutex);
67 ++FreeSlots;
68 Lock.unlock();
69
70 SlotsChanged.notify_one();
71 }
72
// Block until every task scheduled via runAsync() has finished.
AsyncTaskRunner::~AsyncTaskRunner() { wait(); }
74
wait(Deadline D) const75 bool AsyncTaskRunner::wait(Deadline D) const {
76 std::unique_lock<std::mutex> Lock(Mutex);
77 return clangd::wait(Lock, TasksReachedZero, D,
78 [&] { return InFlightTasks == 0; });
79 }
80
// Run Action on a new detached thread named Name.
// The task is counted as in-flight from before the thread is spawned until
// after Action (and its captured state) has been destroyed, so wait() only
// returns once all task state is gone.
void AsyncTaskRunner::runAsync(const llvm::Twine &Name,
                               llvm::unique_function<void()> Action) {
  // Bump the count *before* spawning: a wait() racing with this call must not
  // observe zero while the task is still pending.
  {
    std::lock_guard<std::mutex> Lock(Mutex);
    ++InFlightTasks;
  }

  // Runs when the Task lambda below is destroyed (scope_exit is captured by
  // the lambda), i.e. strictly after Action has finished and been destroyed.
  auto CleanupTask = llvm::make_scope_exit([this]() {
    std::lock_guard<std::mutex> Lock(Mutex);
    int NewTasksCnt = --InFlightTasks;
    if (NewTasksCnt == 0) {
      // Note: we can't unlock here because we don't want the object to be
      // destroyed before we notify.
      TasksReachedZero.notify_one();
    }
  });

  auto Task = [Name = Name.str(), Action = std::move(Action),
               Cleanup = std::move(CleanupTask)]() mutable {
    llvm::set_thread_name(Name);
    Action();
    // Make sure function stored by ThreadFunc is destroyed before Cleanup runs.
    Action = nullptr;
  };

  // Ensure our worker threads have big enough stacks to run clang.
  llvm::thread Thread(
      /*clang::DesiredStackSize*/ std::optional<unsigned>(8 << 20),
      std::move(Task));
  // Detached: lifetime is tracked via the InFlightTasks counter, not join().
  Thread.detach();
}
112
timeoutSeconds(std::optional<double> Seconds)113 Deadline timeoutSeconds(std::optional<double> Seconds) {
114 using namespace std::chrono;
115 if (!Seconds)
116 return Deadline::infinity();
117 return steady_clock::now() +
118 duration_cast<steady_clock::duration>(duration<double>(*Seconds));
119 }
120
wait(std::unique_lock<std::mutex> & Lock,std::condition_variable & CV,Deadline D)121 void wait(std::unique_lock<std::mutex> &Lock, std::condition_variable &CV,
122 Deadline D) {
123 if (D == Deadline::zero())
124 return;
125 if (D == Deadline::infinity())
126 return CV.wait(Lock);
127 CV.wait_until(Lock, D.time());
128 }
129
// Returns true at most once per Period: the caller that wins the race gets
// true and pushes the next allowed time forward; everyone else gets false.
bool PeriodicThrottler::operator()() {
  Rep Now = Stopwatch::now().time_since_epoch().count();
  Rep OldNext = Next.load(std::memory_order_acquire);
  // Still inside the throttle window — nothing to do.
  if (Now < OldNext)
    return false;
  // We're ready to run (but may be racing other threads).
  // Work out the updated target time, and run if we successfully bump it.
  // A losing CAS means another thread claimed this period; report false.
  Rep NewNext = Now + Period;
  return Next.compare_exchange_strong(OldNext, NewNext,
                                      std::memory_order_acq_rel);
}
141
142 } // namespace clangd
143 } // namespace clang
144