//===-- ThreadingTests.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "support/Threading.h"
#include "llvm/ADT/DenseMap.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <algorithm>
#include <atomic>
#include <chrono>
#include <mutex>
#include <string>

namespace clang {
namespace clangd {
class ThreadingTest : public ::testing::Test {};

TEST_F(ThreadingTest, TaskRunner) {
  const int TasksCnt = 100;
  // This should be const, but MSVC does not allow the use of const variables
  // in lambdas without capture. On the other hand, clang warns that capturing
  // a const variable is not required.
  // Making it non-const keeps both compilers happy.
  int IncrementsPerTask = 1000;

  std::mutex Mutex;
  int Counter(0); /* GUARDED_BY(Mutex) */
  {
    AsyncTaskRunner Tasks;
    auto ScheduleIncrements = [&]() {
      for (int TaskI = 0; TaskI < TasksCnt; ++TaskI) {
        Tasks.runAsync("task", [&Counter, &Mutex, IncrementsPerTask]() {
          for (int Increment = 0; Increment < IncrementsPerTask; ++Increment) {
            std::lock_guard<std::mutex> Lock(Mutex);
            ++Counter;
          }
        });
      }
    };

    {
      // Make sure runAsync is not running tasks synchronously on the same
      // thread by locking the Mutex used for increments.
      std::lock_guard<std::mutex> Lock(Mutex);
      ScheduleIncrements();
    }

    Tasks.wait();
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      ASSERT_EQ(Counter, TasksCnt * IncrementsPerTask);
    }

    {
      std::lock_guard<std::mutex> Lock(Mutex);
      Counter = 0;
      ScheduleIncrements();
    }
  }
  // Check that the destructor has waited for tasks to finish.
  std::lock_guard<std::mutex> Lock(Mutex);
  ASSERT_EQ(Counter, TasksCnt * IncrementsPerTask);
}

TEST_F(ThreadingTest, Memoize) {
  const unsigned NumThreads = 5;
  const unsigned NumKeys = 100;
  const unsigned NumIterations = 100;

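  // Memoize may compute a key more than once under contention, but every
  // caller must observe a single consistent value for it; the checks below
  // encode exactly that contract.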
  Memoize<llvm::DenseMap<int, int>> Cache;
  std::atomic<unsigned> ComputeCount(0);
  std::atomic<int> ComputeResult[NumKeys];
  std::fill(std::begin(ComputeResult), std::end(ComputeResult), -1);

  AsyncTaskRunner Tasks;
  for (unsigned I = 0; I < NumThreads; ++I)
    Tasks.runAsync("worker" + std::to_string(I), [&] {
      for (unsigned J = 0; J < NumIterations; J++)
        for (unsigned K = 0; K < NumKeys; K++) {
          int Result = Cache.get(K, [&] { return ++ComputeCount; });
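          // exchange() returns the previously stored value: -1 on the first
          // observation of this key, and the cached Result on every later one.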
          EXPECT_THAT(ComputeResult[K].exchange(Result),
                      testing::AnyOf(-1, Result))
              << "Got inconsistent results from memoize";
        }
    });
  Tasks.wait();
  EXPECT_GE(ComputeCount, NumKeys) << "Computed each key at least once";
  EXPECT_LE(ComputeCount, NumThreads * NumKeys)
      << "Worst case, computed each key in every thread";
  for (int Result : ComputeResult)
    EXPECT_GT(Result, 0) << "All results in expected domain";
}

TEST_F(ThreadingTest, MemoizeDeterministic) {
  Memoize<llvm::DenseMap<int, char>> Cache;

  // Spawn two parallel computations, A and B.
  // Force concurrency: neither can finish until both have started.
  // Verify that the cache returns consistent results.
  AsyncTaskRunner Tasks;
  std::atomic<char> ValueA(0), ValueB(0);
  Notification ReleaseA, ReleaseB;
  Tasks.runAsync("A", [&] {
    ValueA = Cache.get(0, [&] {
      ReleaseB.notify();
      ReleaseA.wait();
      return 'A';
    });
  });
  Tasks.runAsync("B", [&] {
    ValueB = Cache.get(0, [&] {
      ReleaseA.notify();
      ReleaseB.wait();
      return 'B';
    });
  });
  Tasks.wait();

  ASSERT_EQ(ValueA, ValueB);
  ASSERT_THAT(ValueA.load(), testing::AnyOf('A', 'B'));
}

// It's hard to write a real test of this class, as std::chrono is awkward to
// mock. But test some degenerate cases at least.
TEST(PeriodicThrottlerTest, Minimal) {
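  // With a 24-hour period, only the first call fires.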
  PeriodicThrottler Once(std::chrono::hours(24));
  EXPECT_TRUE(Once());
  EXPECT_FALSE(Once());
  EXPECT_FALSE(Once());

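  // With an initial 24-hour delay, even the first call does not fire.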
  PeriodicThrottler Later(std::chrono::hours(24),
                          /*Delay=*/std::chrono::hours(24));
  EXPECT_FALSE(Later());
  EXPECT_FALSE(Later());
  EXPECT_FALSE(Later());

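  // A zero period never throttles.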
  PeriodicThrottler Always(std::chrono::seconds(0));
  EXPECT_TRUE(Always());
  EXPECT_TRUE(Always());
  EXPECT_TRUE(Always());
}

} // namespace clangd
} // namespace clang