//===-- tsan_test_util_posix.cc -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Test utils, Linux, FreeBSD, NetBSD and Darwin implementation.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "tsan_interface.h"
#include "tsan_posix_util.h"
#include "tsan_test_util.h"
#include "tsan_report.h"

#include "gtest/gtest.h"

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>

using namespace __tsan;  // NOLINT

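// Per-thread expectation state consumed by __tsan::OnReport() below: a test
// step records whether it expects a report (and of what type) before
// performing the operation, and OnReport() notes whether a matching report
// actually arrived.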
static __thread bool expect_report;
static __thread bool expect_report_reported;
static __thread ReportType expect_report_type;

static void *BeforeInitThread(void *param) {
  (void)param;
  return 0;
}

static void AtExit() {
}

void TestMutexBeforeInit() {
  // Mutexes must be usable before __tsan_init();
  pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
  __interceptor_pthread_mutex_lock(&mtx);
  __interceptor_pthread_mutex_unlock(&mtx);
  __interceptor_pthread_mutex_destroy(&mtx);
  pthread_t thr;
  __interceptor_pthread_create(&thr, 0, BeforeInitThread, 0);
  __interceptor_pthread_join(thr, 0);
  atexit(AtExit);
}

namespace __tsan {
bool OnReport(const ReportDesc *rep, bool suppressed) {
  if (expect_report) {
    if (rep->typ != expect_report_type) {
      printf("Expected report of type %d, got type %d\n",
             (int)expect_report_type, (int)rep->typ);
      EXPECT_TRUE(false) << "Wrong report type";
      return false;
    }
  } else {
    EXPECT_TRUE(false) << "Unexpected report";
    return false;
  }
  expect_report_reported = true;
  return true;
}
}  // namespace __tsan

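// Hands out fresh, 16-byte-aligned fake addresses (optionally offset from the
// alignment boundary) so tests can refer to distinct memory locations without
// performing real allocations.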
static void* allocate_addr(int size, int offset_from_aligned = 0) {
  static uintptr_t foo;
  static atomic_uintptr_t uniq = {(uintptr_t)&foo};  // Some real address.
  const int kAlign = 16;
  CHECK(offset_from_aligned < kAlign);
  size = (size + 2 * kAlign) & ~(kAlign - 1);
  uintptr_t addr = atomic_fetch_add(&uniq, size, memory_order_relaxed);
  return (void*)(addr + offset_from_aligned);
}

MemLoc::MemLoc(int offset_from_aligned)
    : loc_(allocate_addr(16, offset_from_aligned)) {
}

MemLoc::~MemLoc() {
}

Mutex::Mutex(Type type)
    : alive_()
    , type_(type) {
}

Mutex::~Mutex() {
  CHECK(!alive_);
}

void Mutex::Init() {
  CHECK(!alive_);
  alive_ = true;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_init((pthread_mutex_t*)mtx_, 0), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_init((pthread_spinlock_t*)mtx_, 0), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_init((pthread_rwlock_t*)mtx_, 0), 0);
  else
    CHECK(0);
}

void Mutex::StaticInit() {
  CHECK(!alive_);
  CHECK(type_ == Normal);
  alive_ = true;
  pthread_mutex_t tmp = PTHREAD_MUTEX_INITIALIZER;
  memcpy(mtx_, &tmp, sizeof(tmp));
}

void Mutex::Destroy() {
  CHECK(alive_);
  alive_ = false;
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_destroy((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_destroy((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::Lock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_lock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryLock() {
  CHECK(alive_);
  if (type_ == Normal)
    return __interceptor_pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0;
#ifndef __APPLE__
  else if (type_ == Spin)
    return pthread_spin_trylock((pthread_spinlock_t*)mtx_) == 0;
#endif
  else if (type_ == RW)
    return __interceptor_pthread_rwlock_trywrlock((pthread_rwlock_t*)mtx_) == 0;
  return false;
}

void Mutex::Unlock() {
  CHECK(alive_);
  if (type_ == Normal)
    CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0);
#ifndef __APPLE__
  else if (type_ == Spin)
    CHECK_EQ(pthread_spin_unlock((pthread_spinlock_t*)mtx_), 0);
#endif
  else if (type_ == RW)
    CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

void Mutex::ReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0);
}

bool Mutex::TryReadLock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  return __interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0;
}

void Mutex::ReadUnlock() {
  CHECK(alive_);
  CHECK(type_ == RW);
  CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0);
}

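// An Event is a single command sent from the test body to a ScopedThread
// worker: which operation to perform, its operands, where to store the
// result, and whether a race report is expected while executing it.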
struct Event {
  enum Type {
    SHUTDOWN,
    READ,
    WRITE,
    VPTR_UPDATE,
    CALL,
    RETURN,
    MUTEX_CREATE,
    MUTEX_DESTROY,
    MUTEX_LOCK,
    MUTEX_TRYLOCK,
    MUTEX_UNLOCK,
    MUTEX_READLOCK,
    MUTEX_TRYREADLOCK,
    MUTEX_READUNLOCK,
    MEMCPY,
    MEMSET
  };
  Type type;
  void *ptr;
  uptr arg;
  uptr arg2;
  bool res;
  bool expect_report;
  ReportType report_type;

  Event(Type type, const void *ptr = 0, uptr arg = 0, uptr arg2 = 0)
      : type(type)
      , ptr(const_cast<void*>(ptr))
      , arg(arg)
      , arg2(arg2)
      , res()
      , expect_report()
      , report_type() {
  }

  void ExpectReport(ReportType type) {
    expect_report = true;
    report_type = type;
  }
};

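// Per-thread implementation state: the worker pthread itself plus a
// single-slot "mailbox" (the event atomic) through which the test body passes
// one Event at a time to the worker.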
struct ScopedThread::Impl {
  pthread_t thread;
  bool main;
  bool detached;
  atomic_uintptr_t event;  // Event*

  static void *ScopedThreadCallback(void *arg);
  void send(Event *ev);
  void HandleEvent(Event *ev);
};

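// Executes a single Event on the current thread, arming the report
// expectation before the operation and verifying afterwards that an expected
// report was actually produced.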
void ScopedThread::Impl::HandleEvent(Event *ev) {
  CHECK_EQ(expect_report, false);
  expect_report = ev->expect_report;
  expect_report_reported = false;
  expect_report_type = ev->report_type;
  switch (ev->type) {
  case Event::READ:
  case Event::WRITE: {
    void (*tsan_mop)(void *addr) = 0;
    if (ev->type == Event::READ) {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_read1; break;
      case 2: tsan_mop = __tsan_read2; break;
      case 4: tsan_mop = __tsan_read4; break;
      case 8: tsan_mop = __tsan_read8; break;
      case 16: tsan_mop = __tsan_read16; break;
      }
    } else {
      switch (ev->arg /*size*/) {
      case 1: tsan_mop = __tsan_write1; break;
      case 2: tsan_mop = __tsan_write2; break;
      case 4: tsan_mop = __tsan_write4; break;
      case 8: tsan_mop = __tsan_write8; break;
      case 16: tsan_mop = __tsan_write16; break;
      }
    }
    CHECK_NE(tsan_mop, 0);
#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__NetBSD__)
    const int ErrCode = ESOCKTNOSUPPORT;
#else
    const int ErrCode = ECHRNG;
#endif
    errno = ErrCode;
    tsan_mop(ev->ptr);
    CHECK_EQ(ErrCode, errno);  // In no case must errno be changed.
    break;
  }
  case Event::VPTR_UPDATE:
    __tsan_vptr_update((void**)ev->ptr, (void*)ev->arg);
    break;
  case Event::CALL:
    __tsan_func_entry((void*)((uptr)ev->ptr));
    break;
  case Event::RETURN:
    __tsan_func_exit();
    break;
  case Event::MUTEX_CREATE:
    static_cast<Mutex*>(ev->ptr)->Init();
    break;
  case Event::MUTEX_DESTROY:
    static_cast<Mutex*>(ev->ptr)->Destroy();
    break;
  case Event::MUTEX_LOCK:
    static_cast<Mutex*>(ev->ptr)->Lock();
    break;
  case Event::MUTEX_TRYLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryLock();
    break;
  case Event::MUTEX_UNLOCK:
    static_cast<Mutex*>(ev->ptr)->Unlock();
    break;
  case Event::MUTEX_READLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadLock();
    break;
  case Event::MUTEX_TRYREADLOCK:
    ev->res = static_cast<Mutex*>(ev->ptr)->TryReadLock();
    break;
  case Event::MUTEX_READUNLOCK:
    static_cast<Mutex*>(ev->ptr)->ReadUnlock();
    break;
  case Event::MEMCPY:
    __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2);
    break;
  case Event::MEMSET:
    __interceptor_memset(ev->ptr, ev->arg, ev->arg2);
    break;
  default: CHECK(0);
  }
  if (expect_report && !expect_report_reported) {
    printf("Missed expected report of type %d\n", (int)ev->report_type);
    EXPECT_TRUE(false) << "Missed expected race";
  }
  expect_report = false;
}

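// Worker thread body: spin-waits for an Event to appear in the mailbox,
// executes it, and acknowledges completion by clearing the slot; a SHUTDOWN
// event terminates the loop.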
void *ScopedThread::Impl::ScopedThreadCallback(void *arg) {
  __tsan_func_entry(__builtin_return_address(0));
  Impl *impl = (Impl*)arg;
  for (;;) {
    Event* ev = (Event*)atomic_load(&impl->event, memory_order_acquire);
    if (ev == 0) {
      sched_yield();
      continue;
    }
    if (ev->type == Event::SHUTDOWN) {
      atomic_store(&impl->event, 0, memory_order_release);
      break;
    }
    impl->HandleEvent(ev);
    atomic_store(&impl->event, 0, memory_order_release);
  }
  __tsan_func_exit();
  return 0;
}

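// Synchronous hand-off: the event is published to the worker and the caller
// spins until the worker clears the slot. For the "main" pseudo-thread the
// event is simply executed inline on the calling thread.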
void ScopedThread::Impl::send(Event *e) {
  if (main) {
    HandleEvent(e);
  } else {
    CHECK_EQ(atomic_load(&event, memory_order_relaxed), 0);
    atomic_store(&event, (uintptr_t)e, memory_order_release);
    while (atomic_load(&event, memory_order_acquire) != 0)
      sched_yield();
  }
}

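// Unless this ScopedThread represents the main thread, spawn a worker with a
// small (64KB) stack in the requested detach state; all subsequent operations
// on this object are forwarded to that worker via send().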
ScopedThread::ScopedThread(bool detached, bool main) {
  impl_ = new Impl;
  impl_->main = main;
  impl_->detached = detached;
  atomic_store(&impl_->event, 0, memory_order_relaxed);
  if (!main) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(
        &attr, detached ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE);
    pthread_attr_setstacksize(&attr, 64*1024);
    __interceptor_pthread_create(&impl_->thread, &attr,
        ScopedThread::Impl::ScopedThreadCallback, impl_);
  }
}

ScopedThread::~ScopedThread() {
  if (!impl_->main) {
    Event event(Event::SHUTDOWN);
    impl_->send(&event);
    if (!impl_->detached)
      __interceptor_pthread_join(impl_->thread, 0);
  }
  delete impl_;
}

void ScopedThread::Detach() {
  CHECK(!impl_->main);
  CHECK(!impl_->detached);
  impl_->detached = true;
  __interceptor_pthread_detach(impl_->thread);
}

void ScopedThread::Access(void *addr, bool is_write,
                          int size, bool expect_race) {
  Event event(is_write ? Event::WRITE : Event::READ, addr, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::VptrUpdate(const MemLoc &vptr,
                              const MemLoc &new_val,
                              bool expect_race) {
  Event event(Event::VPTR_UPDATE, vptr.loc(), (uptr)new_val.loc());
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Call(void(*pc)()) {
  Event event(Event::CALL, (void*)((uintptr_t)pc));
  impl_->send(&event);
}

void ScopedThread::Return() {
  Event event(Event::RETURN);
  impl_->send(&event);
}

void ScopedThread::Create(const Mutex &m) {
  Event event(Event::MUTEX_CREATE, &m);
  impl_->send(&event);
}

void ScopedThread::Destroy(const Mutex &m) {
  Event event(Event::MUTEX_DESTROY, &m);
  impl_->send(&event);
}

void ScopedThread::Lock(const Mutex &m) {
  Event event(Event::MUTEX_LOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::Unlock(const Mutex &m) {
  Event event(Event::MUTEX_UNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::ReadLock(const Mutex &m) {
  Event event(Event::MUTEX_READLOCK, &m);
  impl_->send(&event);
}

bool ScopedThread::TryReadLock(const Mutex &m) {
  Event event(Event::MUTEX_TRYREADLOCK, &m);
  impl_->send(&event);
  return event.res;
}

void ScopedThread::ReadUnlock(const Mutex &m) {
  Event event(Event::MUTEX_READUNLOCK, &m);
  impl_->send(&event);
}

void ScopedThread::Memcpy(void *dst, const void *src, int size,
                          bool expect_race) {
  Event event(Event::MEMCPY, dst, (uptr)src, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}

void ScopedThread::Memset(void *dst, int val, int size,
                          bool expect_race) {
  Event event(Event::MEMSET, dst, val, size);
  if (expect_race)
    event.ExpectReport(ReportTypeRace);
  impl_->send(&event);
}
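
// A minimal usage sketch (illustrative only; it assumes the default
// constructor arguments and the MemLoc::loc() accessor declared in
// tsan_test_util.h): a race test typically allocates a MemLoc and drives two
// ScopedThreads at it, marking the second access as expected to race.
//
//   MemLoc l;
//   ScopedThread t1, t2;
//   t1.Access(l.loc(), true /*is_write*/, 4 /*size*/, false /*expect_race*/);
//   t2.Access(l.loc(), true /*is_write*/, 4 /*size*/, true /*expect_race*/);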