//===-- tsan_fd.cc --------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_fd.h"
#include "tsan_rtl.h"
#include "sanitizer_common/sanitizer_atomic.h"

namespace __tsan {

const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;

struct FdSync {
  atomic_uint64_t rc;
};

struct FdDesc {
  FdSync *sync;
  int creation_tid;
  u32 creation_stack;
};

struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;
  FdSync filesync;
  FdSync socksync;
  u64 connectsync;
};

static FdContext fdctx;

static FdSync *allocsync() {
  FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
  atomic_store(&s->rc, 1, memory_order_relaxed);
  return s;
}

static FdSync *ref(FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
  return s;
}

static void unref(ThreadState *thr, uptr pc, FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
      CHECK_NE(s, &fdctx.globsync);
      CHECK_NE(s, &fdctx.filesync);
      CHECK_NE(s, &fdctx.socksync);
      SyncVar *v = CTX()->synctab.GetAndRemove(thr, pc, (uptr)s);
      if (v)
        DestroyAndFree(v);
      internal_free(s);
    }
  }
}

static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    void *p = internal_alloc(MBlockFD, size);
    internal_memset(p, 0, size);
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      internal_free(p);
  }
  return &((FdDesc*)l1)[fd % kTableSizeL2];  // NOLINT
}
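
// Worked example of the two-level table lookup above (derived from
// fddesc()): with kTableSizeL2 = 1024, fd = 1537 lives in the L2 block at
// tab[1] (1537 / 1024), slot 513 (1537 % 1024). L2 blocks are allocated
// lazily on first use and published via compare-exchange, so two threads
// racing to create the same block are safe: the loser frees its copy.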

// s must already be ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  // To catch races between fd usage and open.
  MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
}

void FdInit() {
  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}

void FdOnFork(ThreadState *thr, uptr pc) {
  // On fork() we need to reset all fd's, because the child is going
  // to close all of them, and that would cause races between the previous
  // read/write and the close.
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
      FdDesc *d = &tab[l2];
      MemoryResetRange(thr, pc, (uptr)d, 8);
    }
  }
}

bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
      FdDesc *d = &tab[l2];
      *fd = l1 * kTableSizeL2 + l2;  // Inverse of the fddesc() decomposition.
      *tid = d->creation_tid;
      *stack = d->creation_stack;
      return true;
    }
  }
  return false;
}

void FdAcquire(ThreadState *thr, uptr pc, int fd) {
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Acquire(thr, pc, (uptr)s);
}

void FdRelease(ThreadState *thr, uptr pc, int fd) {
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
  if (s)
    Release(thr, pc, (uptr)s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
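
// Intended pairing (illustrative; the actual call sites live in the
// interceptors, not in this file): a write()-style interceptor calls
// FdRelease(fd) before performing the write, and a read()-style
// interceptor calls FdAcquire(fd) after a successful read. Everything the
// writer did before the write then happens-before the reader's subsequent
// accesses, through the shared d->sync object.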

void FdAccess(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
  FdDesc *d = fddesc(thr, pc, fd);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}

void FdClose(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
  FdDesc *d = fddesc(thr, pc, fd);
  // To catch races between fd usage and close.
  MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
  // We need to clear it, because if we do not intercept any call out there
  // that creates fd, we will hit false positives.
  MemoryResetRange(thr, pc, (uptr)d, 8);
  unref(thr, pc, d->sync);
  d->sync = 0;
  d->creation_tid = 0;
  d->creation_stack = 0;
}

void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, &fdctx.filesync);
}

void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
  // Ignore the case when a user dups a not yet connected socket.
  FdDesc *od = fddesc(thr, pc, oldfd);
  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
  FdClose(thr, pc, newfd);
  init(thr, pc, newfd, ref(od->sync));
}

void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
  DPrintf("#%d: FdPipeCreate(%d, %d)\n", thr->tid, rfd, wfd);
  FdSync *s = allocsync();
  init(thr, pc, rfd, ref(s));
  init(thr, pc, wfd, ref(s));
  unref(thr, pc, s);
}

void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, allocsync());
}

void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, 0);
}

void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, 0);
}

void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  init(thr, pc, fd, allocsync());
}

void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}

void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}

void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}

void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  init(thr, pc, fd, &fdctx.socksync);
}

uptr File2addr(char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

uptr Dir2addr(char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

}  // namespace __tsan
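
// Illustration (user code, not part of the runtime): FdPipeCreate() gives
// both pipe ends one shared FdSync, so TSan sees the classic pipe handshake
// as synchronized rather than racy:
//
//   int fds[2], data;
//   pipe(fds);               // -> FdPipeCreate(fds[0], fds[1])
//   // Thread 1:
//   data = 42;
//   write(fds[1], "x", 1);   // -> FdRelease(fds[1])
//   // Thread 2:
//   char c;
//   read(fds[0], &c, 1);     // -> FdAcquire(fds[0]); Thread 2's reads of
//   // ... use data ...      //    `data` are now happens-before ordered.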