//===-- tsan_platform_linux.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
#endif

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
#endif

#if INIT_LONGJMP_XOR_KEY
#include "interception/interception.h"
// Must be declared outside of other namespaces.
DECLARE_REAL(int, _setjmp, void *env)
#endif

namespace __tsan {

#if INIT_LONGJMP_XOR_KEY
static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif

// Runtime detected VMA size.
uptr vmaSize;

enum {
  MemTotal,
  MemShadow,
  MemMeta,
  MemFile,
  MemMmap,
  MemTrace,
  MemHeap,
  MemOther,
  MemCount,
};

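// Called by GetMemoryProfile for each resident region (on Linux this data
// comes from /proc/self/smaps); attributes the region's RSS to the bucket
// its start address falls into.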
void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
  else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
           (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
           (p >= HiAppMemBeg() && p < HiAppMemEnd()))
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else if (p >= TraceMemBeg() && p < TraceMemEnd())
    mem[MemTrace] += rss;
  else
    mem[MemOther] += rss;
}

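// Formats a single human-readable profile line, e.g. (values illustrative
// only):
//   5s: RSS 117 MB: shadow:90 meta:8 file:2 mmap:5 trace:8 heap:2 other:0
//   intalloc:3 memblocks:1 syncobj:0 stacks=1[2048] nthr=1/5
// All sizes are reported in MB (>> 20).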
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem));
  GetMemoryProfile(FillProfileCallback, mem);
  auto meta = ctx->metamap.GetMemoryStats();
  StackDepotStats stacks = StackDepotGetStats();
  uptr nthread, nlive;
  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
  uptr internal_stats[AllocatorStatCount];
  internal_allocator()->GetStats(internal_stats);
  // All these are allocated from the common mmap region.
  mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks.allocated +
                  internal_stats[AllocatorStatMapped];
  if (s64(mem[MemMmap]) < 0)
    mem[MemMmap] = 0;
  internal_snprintf(
      buf, buf_size,
      "%llus: RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
      " trace:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
      " stacks=%zd[%zd] nthr=%zd/%zd\n",
      uptime_ns / (1000 * 1000 * 1000), mem[MemTotal] >> 20,
      mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20,
      mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20,
      mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
      meta.mem_block >> 20, meta.sync_obj >> 20, stacks.allocated >> 20,
      stacks.n_uniq_ids, nlive, nthread);
}

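// Note: the callback below runs with all other threads suspended
// (StopTheWorld); releasing shadow pages while threads keep writing shadow
// could otherwise silently drop in-flight updates.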
#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
    void *argument) {
  ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif

void FlushShadowMemory() {
#if SANITIZER_LINUX
  StopTheWorld(FlushShadowMemoryCallback, 0);
#endif
}

#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with kShadowRodata.
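  // One marker buffer covers 512KB of shadow; below, the file is mapped
  // repeatedly (MAP_FIXED, PROT_READ) over the shadow of each .rodata
  // segment, letting the kernel share the backing pages among all of them.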
  const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
  InternalMmapVector<RawShadow> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
       p++)
    *p = kShadowRodata;
  internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
  // Map the file into memory (MAP_ANONYMOUS requires fd == -1).
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  // Reusing the buffer 'name'.
  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
  while (proc_maps.Next(&segment)) {
    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
        segment.IsReadable() && segment.IsExecutable() &&
        !segment.IsWritable() && IsAppMem(segment.start)) {
      // Assume it's .rodata
      char *shadow_start = (char *)MemToShadow(segment.start);
      char *shadow_end = (char *)MemToShadow(segment.end);
      for (char *p = shadow_start; p < shadow_end;
           p += marker.size() * sizeof(RawShadow)) {
        internal_mmap(
            p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
            PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}

void InitializeShadowMemoryPlatform() {
  MapRodata();
}

#endif  // #if !SANITIZER_GO

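// The current stack frame lives near the top of the userspace address range,
// so the index of its most significant set bit (+1) yields the number of
// virtual address bits the process is actually running with.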
void InitializePlatformEarly() {
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
    Die();
  }
# endif
#elif defined(__powerpc64__)
# if !SANITIZER_GO
  if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44, 46 and 47\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 46 and 47\n", vmaSize);
    Die();
  }
# endif
#elif defined(__mips64)
# if !SANITIZER_GO
  if (vmaSize != 40) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
    Die();
  }
# endif
#endif
}

void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
#if !SANITIZER_GO
  {
    bool reexec = false;
    // TSan doesn't play well with unlimited stack size (as stack
    // overlaps with shadow memory). If we detect unlimited stack size,
    // we re-exec the program with limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }

    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
#if SANITIZER_LINUX && defined(__aarch64__)
    // After the patch "arm64: mm: support ARCH_MMAP_RND_BITS." was introduced
    // in the Linux kernel, the random gap between stack and mapped area was
    // increased from 128M to 36G on 39-bit aarch64. As it is almost impossible
    // to cover this big range, we disable randomized virtual space on aarch64.
    int old_personality = personality(0xffffffff);
    if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
      VReport(1, "WARNING: Program is run with randomized virtual address "
                 "space, which wouldn't work with ThreadSanitizer.\n"
                 "Re-execing with fixed virtual address space.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      reexec = true;
    }
    // Initialize the xor key used in {sig}{set,long}jmp.
    InitializeLongjmpXorKey();
#endif
    if (reexec)
      ReExec();
  }

  CheckAndProtect();
  InitTlsSize();
#endif  // !SANITIZER_GO
}

#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  struct __res_state *statp = (struct __res_state*)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}

// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// See 'man recvmsg' and 'man 3 cmsg'.
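// For example, a message carrying two descriptors arrives with
// cmsg_level == SOL_SOCKET, cmsg_type == SCM_RIGHTS,
// cmsg_len == CMSG_LEN(2 * sizeof(int)), and CMSG_DATA() pointing at int[2];
// the computation of 'n' below inverts CMSG_LEN to recover that count.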
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}

// Reverse operation of libc stack pointer mangling
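// (glibc's PTR_MANGLE): the saved SP is XORed with a per-process guard value
// and, on some targets, additionally rotated. Each branch below applies the
// inverse transformation for its architecture.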
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#if defined(__x86_64__)
# if SANITIZER_LINUX
  // Reverse of:
  //     xor  %fs:0x30, %rsi
  //     rol  $0x11, %rsi
  uptr sp;
  asm("ror $0x11, %0 \n"
      "xor %%fs:0x30, %0 \n"
      : "=r" (sp)
      : "0" (mangled_sp));
  return sp;
# else
  return mangled_sp;
# endif
#elif defined(__aarch64__)
# if SANITIZER_LINUX
  return mangled_sp ^ longjmp_xor_key;
# else
  return mangled_sp;
# endif
#elif defined(__powerpc64__)
  // Reverse of:
  //     ld   r4, -28696(r13)
  //     xor  r4, r3, r4
  uptr xor_key;
  asm("ld %0, -28696(%%r13)" : "=r" (xor_key));
  return mangled_sp ^ xor_key;
#elif defined(__mips__)
  return mangled_sp;
#elif defined(__s390x__)
  // tcbhead_t.stack_guard
  uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
  return mangled_sp ^ xor_key;
#else
#error "Unknown platform"
#endif
}

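// LONG_JMP_SP_ENV_SLOT is the index of the saved stack pointer within the
// opaque jmp_buf word array; it differs per libc and architecture (e.g. in
// glibc's x86_64 layout the slot is JB_RSP == 6).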
#if SANITIZER_NETBSD
# ifdef __x86_64__
#  define LONG_JMP_SP_ENV_SLOT 6
# elif defined(__aarch64__)
#  define LONG_JMP_SP_ENV_SLOT 1
# else
#  error unsupported
# endif
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# define LONG_JMP_SP_ENV_SLOT 2
#elif SANITIZER_LINUX
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__mips64)
#  define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__s390x__)
#  define LONG_JMP_SP_ENV_SLOT 9
# else
#  define LONG_JMP_SP_ENV_SLOT 6
# endif
#endif

uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  return UnmangleLongJmpSp(mangled_sp);
}

#if INIT_LONGJMP_XOR_KEY
// GLIBC mangles the pointers saved in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random key. For AArch64 it is a global
// variable rather than a TCB one (as for x86_64/powerpc). We obtain the key by
// issuing a setjmp and XORing the SP pointer values to derive the key.
static void InitializeLongjmpXorKey() {
  // 1. Call REAL(setjmp), which stores the mangled SP in env.
  jmp_buf env;
  REAL(_setjmp)(env);

  // 2. Retrieve vanilla/mangled SP.
  uptr sp;
  asm("mov %0, sp" : "=r" (sp));
  uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];

  // 3. XOR the SPs to obtain the key.
  longjmp_xor_key = mangled_sp ^ sp;
}
#endif

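// Dummy function: its address is used as the PC for the imitated TLS writes
// below, so that reports about them point at a recognizably named symbol.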
extern "C" void __tsan_tls_initialization() {}

void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // Check that the thr object is in TLS.
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  const uptr pc = StackTrace::GetNextInstructionPc(
      reinterpret_cast<uptr>(__tsan_tls_initialization));
  MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
  MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
}

// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
                                     void (*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are a hardcore macro mess: we can't intercept
  // them, nor call them without including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(arg);
  pthread_cleanup_pop(0);
  return res;
}
#endif  // !SANITIZER_GO

#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif

#if !SANITIZER_GO
#if SANITIZER_ANDROID
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;

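// All signals are blocked during the slow path below: an async signal handler
// running on this thread could otherwise observe (or race to install) a
// half-initialized ThreadState.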
ThreadState *cur_thread() {
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr == nullptr) {
    __sanitizer_sigset_t emptyset;
    internal_sigfillset(&emptyset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
    thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
                                                     "ThreadState"));
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState*>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<u32*>(&dead_thread_state->tid) = -1;
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}

void set_cur_thread(ThreadState *thr) {
  *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
}

void cur_thread_finalize() {
  __sanitizer_sigset_t emptyset;
  internal_sigfillset(&emptyset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr != dead_thread_state) {
    *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif  // SANITIZER_ANDROID
#endif  // if !SANITIZER_GO

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD