1 //===-- tsan_clock_test.cc ------------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //
12 //===----------------------------------------------------------------------===//
13 #include "tsan_clock.h"
14 #include "tsan_rtl.h"
15 #include "gtest/gtest.h"
16 #include <sys/time.h>
17 #include <time.h>
18
19 namespace __tsan {
20
21 ClockCache cache;
22
TEST(Clock, VectorBasic) {
  // A freshly created thread clock holds a single element (its own slot).
  ThreadClock tc(0);
  ASSERT_EQ(tc.size(), 1U);
  // tick() bumps only the thread's own epoch; the size is unchanged.
  tc.tick();
  ASSERT_EQ(tc.size(), 1U);
  ASSERT_EQ(tc.get(0), 1U);
  // Setting an element past the end grows the clock; the gap reads as zero.
  tc.set(&cache, 3, tc.get(3) + 1);
  ASSERT_EQ(tc.size(), 4U);
  ASSERT_EQ(tc.get(0), 1U);
  ASSERT_EQ(tc.get(1), 0U);
  ASSERT_EQ(tc.get(2), 0U);
  ASSERT_EQ(tc.get(3), 1U);
  // A second increment of the same element is observed.
  tc.set(&cache, 3, tc.get(3) + 1);
  ASSERT_EQ(tc.get(3), 2U);
}
38
TEST(Clock, ChunkedBasic) {
  // Basic interaction between a thread clock and an (initially empty)
  // chunked sync clock.
  ThreadClock thr(0);
  SyncClock sc;
  ASSERT_EQ(thr.size(), 1U);
  ASSERT_EQ(sc.size(), 0U);
  // Acquiring from an empty sync clock changes neither side.
  thr.acquire(&cache, &sc);
  ASSERT_EQ(thr.size(), 1U);
  ASSERT_EQ(sc.size(), 0U);
  // A release materializes the thread's clock in the sync clock.
  thr.release(&cache, &sc);
  ASSERT_EQ(thr.size(), 1U);
  ASSERT_EQ(sc.size(), 1U);
  // acq_rel keeps both sizes stable.
  thr.acq_rel(&cache, &sc);
  ASSERT_EQ(thr.size(), 1U);
  ASSERT_EQ(sc.size(), 1U);
  sc.Reset(&cache);
}
55
// Clock sizes used to exercise SyncClock growth; the clustered values
// (61..66, 124..130, 188..193, 254/255) presumably sit around the chunked
// storage's block boundaries — confirm against SyncClock's chunk size.
static const uptr interesting_sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66,
    100, 124, 125, 126, 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254,
    255};
59
TEST(Clock, Iter) {
  // For every interesting size, release a fully-populated thread clock
  // into a sync clock and check that range-for iteration visits exactly
  // `size` elements, in index order, matching get_clean().
  for (uptr idx = 0; idx < ARRAY_SIZE(interesting_sizes); idx++) {
    const uptr sz = interesting_sizes[idx];
    SyncClock sc;
    ThreadClock thr(0);
    for (uptr i = 0; i < sz; i++)
      thr.set(&cache, i, i + 1);
    if (sz != 0)
      thr.release(&cache, &sc);
    uptr visited = 0;
    for (ClockElem &elem : sc) {
      ASSERT_LT(visited, sz);
      ASSERT_EQ(sc.get_clean(visited), elem.epoch);
      visited++;
    }
    ASSERT_EQ(visited, sz);
    sc.Reset(&cache);
  }
}
80
TEST(Clock, AcquireRelease) {
  // Thread 100 ticks and releases; the sync clock must cover slots 0..100.
  ThreadClock releaser(100);
  releaser.tick();
  SyncClock sc;
  releaser.release(&cache, &sc);
  ASSERT_EQ(sc.size(), 101U);
  // Thread 0 acquires: it grows to the released size and picks up only
  // the releaser's epoch; every other slot stays zero.
  ThreadClock acquirer(0);
  acquirer.acquire(&cache, &sc);
  ASSERT_EQ(acquirer.size(), 101U);
  ASSERT_EQ(acquirer.get(0), 0U);
  ASSERT_EQ(acquirer.get(1), 0U);
  ASSERT_EQ(acquirer.get(99), 0U);
  ASSERT_EQ(acquirer.get(100), 1U);
  sc.Reset(&cache);
}
96
TEST(Clock, RepeatedAcquire) {
  // Acquiring the same sync clock twice in a row must be harmless
  // (the second acquire is a no-op on an already-acquired clock).
  ThreadClock thr1(1);
  thr1.tick();
  ThreadClock thr2(2);
  thr2.tick();

  SyncClock sc;
  thr1.ReleaseStore(&cache, &sc);

  thr2.acquire(&cache, &sc);
  thr2.acquire(&cache, &sc);

  sc.Reset(&cache);
}
111
TEST(Clock, ManyThreads) {
  // 200 threads each tick once and release into the same sync clock;
  // the clock must grow one slot per thread and retain each epoch.
  SyncClock chunked;
  for (unsigned i = 0; i < 200; i++) {
    ThreadClock vector(0);
    vector.tick();
    vector.set(&cache, i, i + 1);
    vector.release(&cache, &chunked);
    ASSERT_EQ(i + 1, chunked.size());
    vector.acquire(&cache, &chunked);
    ASSERT_EQ(i + 1, vector.size());
  }

  // Every released epoch is still present in the sync clock.
  // (Leftover per-iteration debug printf removed: it printed 200 lines
  // per run and used %d for an unsigned argument.)
  for (unsigned i = 0; i < 200; i++)
    ASSERT_EQ(i + 1, chunked.get(i));

  // A fresh thread acquiring the clock sees all 200 epochs.
  ThreadClock vector(1);
  vector.acquire(&cache, &chunked);
  ASSERT_EQ(200U, vector.size());
  for (unsigned i = 0; i < 200; i++)
    ASSERT_EQ(i + 1, vector.get(i));

  chunked.Reset(&cache);
}
137
TEST(Clock, DifferentSizes) {
  // Mix releases/acquires between clocks of different sizes (11 vs 21).
  ThreadClock small_thr(10);
  small_thr.tick();
  ThreadClock big_thr(20);
  big_thr.tick();
  {
    // Small release first, then large: the sync clock grows.
    SyncClock sc;
    small_thr.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 11U);
    big_thr.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 21U);
    sc.Reset(&cache);
  }
  {
    // Large release first, then small: size stays at the larger value.
    SyncClock sc;
    big_thr.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 21U);
    small_thr.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 21U);
    sc.Reset(&cache);
  }
  {
    // Large thread acquiring a small release keeps its own size.
    SyncClock sc;
    small_thr.release(&cache, &sc);
    big_thr.acquire(&cache, &sc);
    ASSERT_EQ(big_thr.size(), 21U);
    sc.Reset(&cache);
  }
  {
    // Small thread acquiring a large release grows to match.
    SyncClock sc;
    big_thr.release(&cache, &sc);
    small_thr.acquire(&cache, &sc);
    ASSERT_EQ(small_thr.size(), 21U);
    sc.Reset(&cache);
  }
}
176
TEST(Clock, Growth) {
  {
    // Sparse set plus the releaser's own tick: untouched slots are zero.
    ThreadClock thr(10);
    thr.tick();
    thr.set(&cache, 5, 42);
    SyncClock sc;
    thr.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 11U);
    ASSERT_EQ(sc.get(0), 0ULL);
    ASSERT_EQ(sc.get(1), 0ULL);
    ASSERT_EQ(sc.get(5), 42ULL);
    ASSERT_EQ(sc.get(9), 0ULL);
    ASSERT_EQ(sc.get(10), 1ULL);
    sc.Reset(&cache);
  }
  {
    // Two releases of different sizes merge into the larger clock.
    ThreadClock thr1(10);
    thr1.tick();
    ThreadClock thr2(20);
    thr2.tick();
    SyncClock sc;
    thr1.release(&cache, &sc);
    thr2.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 21U);
    ASSERT_EQ(sc.get(0), 0ULL);
    ASSERT_EQ(sc.get(10), 1ULL);
    ASSERT_EQ(sc.get(19), 0ULL);
    ASSERT_EQ(sc.get(20), 1ULL);
    sc.Reset(&cache);
  }
  {
    // Same as the first case at a larger size (101 slots).
    ThreadClock thr(100);
    thr.tick();
    thr.set(&cache, 5, 42);
    thr.set(&cache, 90, 84);
    SyncClock sc;
    thr.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 101U);
    ASSERT_EQ(sc.get(0), 0ULL);
    ASSERT_EQ(sc.get(1), 0ULL);
    ASSERT_EQ(sc.get(5), 42ULL);
    ASSERT_EQ(sc.get(60), 0ULL);
    ASSERT_EQ(sc.get(70), 0ULL);
    ASSERT_EQ(sc.get(90), 84ULL);
    ASSERT_EQ(sc.get(99), 0ULL);
    ASSERT_EQ(sc.get(100), 1ULL);
    sc.Reset(&cache);
  }
  {
    // Two releases with a big size jump (11 -> 101).
    ThreadClock thr1(10);
    thr1.tick();
    ThreadClock thr2(100);
    thr2.tick();
    SyncClock sc;
    thr1.release(&cache, &sc);
    thr2.release(&cache, &sc);
    ASSERT_EQ(sc.size(), 101U);
    ASSERT_EQ(sc.get(0), 0ULL);
    ASSERT_EQ(sc.get(10), 1ULL);
    ASSERT_EQ(sc.get(99), 0ULL);
    ASSERT_EQ(sc.get(100), 1ULL);
    sc.Reset(&cache);
  }
}
241
TEST(Clock, Growth2) {
  // Exercise clock growth for every ordered pair of interesting sizes.
  const uptr n = ARRAY_SIZE(interesting_sizes);
  for (uptr lo = 0; lo < n; lo++) {
    for (uptr hi = lo + 1; hi < n; hi++) {
      const uptr from = interesting_sizes[lo];
      const uptr to = interesting_sizes[hi];
      SyncClock sc;
      ThreadClock thr(0);
      // Fill to the smaller size and release.
      for (uptr i = 0; i < from; i++)
        thr.set(&cache, i, i + 1);
      if (from != 0)
        thr.release(&cache, &sc);
      ASSERT_EQ(sc.size(), from);
      for (uptr i = 0; i < from; i++)
        ASSERT_EQ(sc.get(i), i + 1);
      // Grow to the larger size and release again.
      for (uptr i = 0; i < to; i++)
        thr.set(&cache, i, i + 1);
      thr.release(&cache, &sc);
      ASSERT_EQ(sc.size(), to);
      for (uptr i = 0; i < to; i++)
        ASSERT_EQ(sc.get(i), i + 1);
      // One more element past the end leaves a zero gap at index |to|.
      thr.set(&cache, to + 1, to + 1);
      thr.release(&cache, &sc);
      ASSERT_EQ(sc.size(), to + 2);
      for (uptr i = 0; i < to; i++)
        ASSERT_EQ(sc.get(i), i + 1);
      ASSERT_EQ(sc.get(to), 0U);
      ASSERT_EQ(sc.get(to + 1), to + 1);
      sc.Reset(&cache);
    }
  }
}
275
// Dimensions of the fuzzer state below: number of thread clocks and
// number of sync clocks exercised by ClockFuzzer.
const uptr kThreads = 4;
const uptr kClocks = 4;
278
279 // SimpleSyncClock and SimpleThreadClock implement the same thing as
280 // SyncClock and ThreadClock, but in a very simple way.
281 struct SimpleSyncClock {
282 u64 clock[kThreads];
283 uptr size;
284
SimpleSyncClock__tsan::SimpleSyncClock285 SimpleSyncClock() {
286 Reset();
287 }
288
Reset__tsan::SimpleSyncClock289 void Reset() {
290 size = 0;
291 for (uptr i = 0; i < kThreads; i++)
292 clock[i] = 0;
293 }
294
verify__tsan::SimpleSyncClock295 bool verify(const SyncClock *other) const {
296 for (uptr i = 0; i < min(size, other->size()); i++) {
297 if (clock[i] != other->get(i))
298 return false;
299 }
300 for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
301 if (i < size && clock[i] != 0)
302 return false;
303 if (i < other->size() && other->get(i) != 0)
304 return false;
305 }
306 return true;
307 }
308 };
309
310 struct SimpleThreadClock {
311 u64 clock[kThreads];
312 uptr size;
313 unsigned tid;
314
SimpleThreadClock__tsan::SimpleThreadClock315 explicit SimpleThreadClock(unsigned tid) {
316 this->tid = tid;
317 size = tid + 1;
318 for (uptr i = 0; i < kThreads; i++)
319 clock[i] = 0;
320 }
321
tick__tsan::SimpleThreadClock322 void tick() {
323 clock[tid]++;
324 }
325
acquire__tsan::SimpleThreadClock326 void acquire(const SimpleSyncClock *src) {
327 if (size < src->size)
328 size = src->size;
329 for (uptr i = 0; i < kThreads; i++)
330 clock[i] = max(clock[i], src->clock[i]);
331 }
332
release__tsan::SimpleThreadClock333 void release(SimpleSyncClock *dst) const {
334 if (dst->size < size)
335 dst->size = size;
336 for (uptr i = 0; i < kThreads; i++)
337 dst->clock[i] = max(dst->clock[i], clock[i]);
338 }
339
acq_rel__tsan::SimpleThreadClock340 void acq_rel(SimpleSyncClock *dst) {
341 acquire(dst);
342 release(dst);
343 }
344
ReleaseStore__tsan::SimpleThreadClock345 void ReleaseStore(SimpleSyncClock *dst) const {
346 if (dst->size < size)
347 dst->size = size;
348 for (uptr i = 0; i < kThreads; i++)
349 dst->clock[i] = clock[i];
350 }
351
verify__tsan::SimpleThreadClock352 bool verify(const ThreadClock *other) const {
353 for (uptr i = 0; i < min(size, other->size()); i++) {
354 if (clock[i] != other->get(i))
355 return false;
356 }
357 for (uptr i = min(size, other->size()); i < max(size, other->size()); i++) {
358 if (i < size && clock[i] != 0)
359 return false;
360 if (i < other->size() && other->get(i) != 0)
361 return false;
362 }
363 return true;
364 }
365 };
366
ClockFuzzer(bool printing)367 static bool ClockFuzzer(bool printing) {
368 // Create kThreads thread clocks.
369 SimpleThreadClock *thr0[kThreads];
370 ThreadClock *thr1[kThreads];
371 unsigned reused[kThreads];
372 for (unsigned i = 0; i < kThreads; i++) {
373 reused[i] = 0;
374 thr0[i] = new SimpleThreadClock(i);
375 thr1[i] = new ThreadClock(i, reused[i]);
376 }
377
378 // Create kClocks sync clocks.
379 SimpleSyncClock *sync0[kClocks];
380 SyncClock *sync1[kClocks];
381 for (unsigned i = 0; i < kClocks; i++) {
382 sync0[i] = new SimpleSyncClock();
383 sync1[i] = new SyncClock();
384 }
385
386 // Do N random operations (acquire, release, etc) and compare results
387 // for SimpleThread/SyncClock and real Thread/SyncClock.
388 for (int i = 0; i < 10000; i++) {
389 unsigned tid = rand() % kThreads;
390 unsigned cid = rand() % kClocks;
391 thr0[tid]->tick();
392 thr1[tid]->tick();
393
394 switch (rand() % 6) {
395 case 0:
396 if (printing)
397 printf("acquire thr%d <- clk%d\n", tid, cid);
398 thr0[tid]->acquire(sync0[cid]);
399 thr1[tid]->acquire(&cache, sync1[cid]);
400 break;
401 case 1:
402 if (printing)
403 printf("release thr%d -> clk%d\n", tid, cid);
404 thr0[tid]->release(sync0[cid]);
405 thr1[tid]->release(&cache, sync1[cid]);
406 break;
407 case 2:
408 if (printing)
409 printf("acq_rel thr%d <> clk%d\n", tid, cid);
410 thr0[tid]->acq_rel(sync0[cid]);
411 thr1[tid]->acq_rel(&cache, sync1[cid]);
412 break;
413 case 3:
414 if (printing)
415 printf("rel_str thr%d >> clk%d\n", tid, cid);
416 thr0[tid]->ReleaseStore(sync0[cid]);
417 thr1[tid]->ReleaseStore(&cache, sync1[cid]);
418 break;
419 case 4:
420 if (printing)
421 printf("reset clk%d\n", cid);
422 sync0[cid]->Reset();
423 sync1[cid]->Reset(&cache);
424 break;
425 case 5:
426 if (printing)
427 printf("reset thr%d\n", tid);
428 u64 epoch = thr0[tid]->clock[tid] + 1;
429 reused[tid]++;
430 delete thr0[tid];
431 thr0[tid] = new SimpleThreadClock(tid);
432 thr0[tid]->clock[tid] = epoch;
433 delete thr1[tid];
434 thr1[tid] = new ThreadClock(tid, reused[tid]);
435 thr1[tid]->set(epoch);
436 break;
437 }
438
439 if (printing) {
440 for (unsigned i = 0; i < kThreads; i++) {
441 printf("thr%d: ", i);
442 thr1[i]->DebugDump(printf);
443 printf("\n");
444 }
445 for (unsigned i = 0; i < kClocks; i++) {
446 printf("clk%d: ", i);
447 sync1[i]->DebugDump(printf);
448 printf("\n");
449 }
450
451 printf("\n");
452 }
453
454 if (!thr0[tid]->verify(thr1[tid]) || !sync0[cid]->verify(sync1[cid])) {
455 if (!printing)
456 return false;
457 printf("differs with model:\n");
458 for (unsigned i = 0; i < kThreads; i++) {
459 printf("thr%d: clock=[", i);
460 for (uptr j = 0; j < thr0[i]->size; j++)
461 printf("%s%llu", j == 0 ? "" : ",", thr0[i]->clock[j]);
462 printf("]\n");
463 }
464 for (unsigned i = 0; i < kClocks; i++) {
465 printf("clk%d: clock=[", i);
466 for (uptr j = 0; j < sync0[i]->size; j++)
467 printf("%s%llu", j == 0 ? "" : ",", sync0[i]->clock[j]);
468 printf("]\n");
469 }
470 return false;
471 }
472 }
473
474 for (unsigned i = 0; i < kClocks; i++) {
475 sync1[i]->Reset(&cache);
476 }
477 return true;
478 }
479
TEST(Clock, Fuzzer) {
  // Seed from wall-clock time so each run explores a different trace;
  // print the seed so a failure can be reproduced by hand.
  struct timeval tv;
  gettimeofday(&tv, NULL);
  int seed = tv.tv_sec + tv.tv_usec;
  printf("seed=%d\n", seed);
  srand(seed);
  bool ok = ClockFuzzer(false);
  if (ok)
    return;
  // Replay the identical operation sequence, this time logging each step,
  // then fail the test.
  srand(seed);
  ClockFuzzer(true);
  ASSERT_TRUE(false);
}
493
494 } // namespace __tsan
495