/*	$OpenBSD: rthread_libc.c,v 1.3 2019/01/10 18:45:33 otto Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved.  Marco S Hyman <marc@snafu.org> */

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include "rthread.h"
#include "rthread_cb.h"

/*
 * A thread tag is a pointer to a structure of this type.  An opaque
 * tag is used to decouple libc from the thread library.
 */
struct _thread_tag {
	pthread_mutex_t	m;	/* the tag's mutex */
	pthread_key_t	k;	/* a key for private data */
};

/*
 * local mutex to protect against tag creation races.
 */
static pthread_mutex_t _thread_tag_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Initialize a thread tag structure once.  This function is called
 * if the tag is null.  Allocation and initialization are controlled
 * by a mutex.  If the tag is not null when the mutex is obtained
 * the caller lost a race -- some other thread initialized the tag.
 * This function will never return NULL.
 */
static void
_thread_tag_init(void **tag)
{
	struct _thread_tag *tt;
	int result;

	result = pthread_mutex_lock(&_thread_tag_mutex);
	if (result == 0) {
		if (*tag == NULL) {
			tt = malloc(sizeof *tt);
			if (tt != NULL) {
				result = pthread_mutex_init(&tt->m, NULL);
				result |= pthread_key_create(&tt->k, free);
				*tag = tt;
			}
		}
		result |= pthread_mutex_unlock(&_thread_tag_mutex);
	}
	if (result != 0)
		_rthread_debug(1, "tag init failure");
}

/*
 * lock the mutex associated with the given tag
 */
void
_thread_tag_lock(void **tag)
{
	struct _thread_tag *tt;

	if (__isthreaded) {
		if (*tag == NULL)
			_thread_tag_init(tag);
		tt = *tag;
		if (pthread_mutex_lock(&tt->m) != 0)
			_rthread_debug(1, "tag mutex lock failure");
	}
}

/*
 * unlock the mutex associated with the given tag
 */
void
_thread_tag_unlock(void **tag)
{
	struct _thread_tag *tt;

	if (__isthreaded) {
		if (*tag == NULL)
			_thread_tag_init(tag);
		tt = *tag;
		if (pthread_mutex_unlock(&tt->m) != 0)
			_rthread_debug(1, "tag mutex unlock failure");
	}
}

/*
 * return the thread specific data for the given tag.  If there
 * is no data for this thread initialize it from 'storage'.
 * On any error return 'err'.
 */
void *
_thread_tag_storage(void **tag, void *storage, size_t sz, void *err)
{
	struct _thread_tag *tt;
	void *ret;

	if (*tag == NULL)
		_thread_tag_init(tag);
	tt = *tag;

	ret = pthread_getspecific(tt->k);
	if (ret == NULL) {
		ret = malloc(sz);
		if (ret == NULL)
			ret = err;
		else {
			if (pthread_setspecific(tt->k, ret) == 0)
				memcpy(ret, storage, sz);
			else {
				free(ret);
				ret = err;
			}
		}
	}
	return ret;
}

/*
 * lock, unlock, and destroy a mutex that libc stores behind an
 * opaque void * handle
 */
void
_thread_mutex_lock(void **mutex)
{
	pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

	if (pthread_mutex_lock(pmutex) != 0)
		_rthread_debug(1, "mutex lock failure");
}

void
_thread_mutex_unlock(void **mutex)
{
	pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

	if (pthread_mutex_unlock(pmutex) != 0)
		_rthread_debug(1, "mutex unlock failure");
}

void
_thread_mutex_destroy(void **mutex)
{
	pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;

	if (pthread_mutex_destroy(pmutex) != 0)
		_rthread_debug(1, "mutex destroy failure");
}

/*
 * the malloc lock
 */
#ifndef FUTEX
#define MALLOC_LOCK_INITIALIZER(n) { \
	_SPINLOCK_UNLOCKED,					\
	TAILQ_HEAD_INITIALIZER(malloc_lock[n].lockers),		\
	PTHREAD_MUTEX_DEFAULT,					\
	NULL,							\
	0,							\
	-1 }
#else
#define MALLOC_LOCK_INITIALIZER(n) { \
	_SPINLOCK_UNLOCKED,					\
	PTHREAD_MUTEX_DEFAULT,					\
	NULL,							\
	0,							\
	-1 }
#endif

static struct pthread_mutex malloc_lock[_MALLOC_MUTEXES] = {
	MALLOC_LOCK_INITIALIZER(0),
	MALLOC_LOCK_INITIALIZER(1),
	MALLOC_LOCK_INITIALIZER(2),
	MALLOC_LOCK_INITIALIZER(3),
	MALLOC_LOCK_INITIALIZER(4),
	MALLOC_LOCK_INITIALIZER(5),
	MALLOC_LOCK_INITIALIZER(6),
	MALLOC_LOCK_INITIALIZER(7),
	MALLOC_LOCK_INITIALIZER(8),
	MALLOC_LOCK_INITIALIZER(9),
	MALLOC_LOCK_INITIALIZER(10),
	MALLOC_LOCK_INITIALIZER(11),
	MALLOC_LOCK_INITIALIZER(12),
	MALLOC_LOCK_INITIALIZER(13),
	MALLOC_LOCK_INITIALIZER(14),
	MALLOC_LOCK_INITIALIZER(15),
	MALLOC_LOCK_INITIALIZER(16),
	MALLOC_LOCK_INITIALIZER(17),
	MALLOC_LOCK_INITIALIZER(18),
	MALLOC_LOCK_INITIALIZER(19),
	MALLOC_LOCK_INITIALIZER(20),
	MALLOC_LOCK_INITIALIZER(21),
	MALLOC_LOCK_INITIALIZER(22),
	MALLOC_LOCK_INITIALIZER(23),
	MALLOC_LOCK_INITIALIZER(24),
	MALLOC_LOCK_INITIALIZER(25),
	MALLOC_LOCK_INITIALIZER(26),
	MALLOC_LOCK_INITIALIZER(27),
	MALLOC_LOCK_INITIALIZER(28),
	MALLOC_LOCK_INITIALIZER(29),
	MALLOC_LOCK_INITIALIZER(30),
	MALLOC_LOCK_INITIALIZER(31)
};

static pthread_mutex_t malloc_mutex[_MALLOC_MUTEXES] = {
	&malloc_lock[0],
	&malloc_lock[1],
	&malloc_lock[2],
	&malloc_lock[3],
	&malloc_lock[4],
	&malloc_lock[5],
	&malloc_lock[6],
	&malloc_lock[7],
	&malloc_lock[8],
	&malloc_lock[9],
	&malloc_lock[10],
	&malloc_lock[11],
	&malloc_lock[12],
	&malloc_lock[13],
	&malloc_lock[14],
	&malloc_lock[15],
	&malloc_lock[16],
	&malloc_lock[17],
	&malloc_lock[18],
	&malloc_lock[19],
	&malloc_lock[20],
	&malloc_lock[21],
	&malloc_lock[22],
	&malloc_lock[23],
	&malloc_lock[24],
	&malloc_lock[25],
	&malloc_lock[26],
	&malloc_lock[27],
	&malloc_lock[28],
	&malloc_lock[29],
	&malloc_lock[30],
	&malloc_lock[31]
};

void
_thread_malloc_lock(int i)
{
	pthread_mutex_lock(&malloc_mutex[i]);
}

void
_thread_malloc_unlock(int i)
{
	pthread_mutex_unlock(&malloc_mutex[i]);
}

static void
_thread_malloc_reinit(void)
{
	int i;

	for (i = 0; i < _MALLOC_MUTEXES; i++) {
		malloc_lock[i].lock = _SPINLOCK_UNLOCKED;
#ifndef FUTEX
		TAILQ_INIT(&malloc_lock[i].lockers);
#endif
		malloc_lock[i].owner = NULL;
		malloc_lock[i].count = 0;
	}
}

/*
 * atexit lock
 */
static _atomic_lock_t atexit_lock = _SPINLOCK_UNLOCKED;

void
_thread_atexit_lock(void)
{
	_spinlock(&atexit_lock);
}

void
_thread_atexit_unlock(void)
{
	_spinunlock(&atexit_lock);
}

/*
 * atfork lock
 */
static _atomic_lock_t atfork_lock = _SPINLOCK_UNLOCKED;

void
_thread_atfork_lock(void)
{
	_spinlock(&atfork_lock);
}

void
_thread_atfork_unlock(void)
{
	_spinunlock(&atfork_lock);
}

/*
 * arc4random lock
 */
static _atomic_lock_t arc4_lock = _SPINLOCK_UNLOCKED;

void
_thread_arc4_lock(void)
{
	_spinlock(&arc4_lock);
}

void
_thread_arc4_unlock(void)
{
	_spinunlock(&arc4_lock);
}

/*
 * fork wrapper: take the libc locks around the supplied fork system
 * call.  The child reinitializes the malloc locks instead of
 * unlocking them, since only the forking thread exists there.
 */
pid_t
_thread_dofork(pid_t (*sys_fork)(void))
{
	int i;
	pid_t newid;

	_thread_atexit_lock();
	for (i = 0; i < _MALLOC_MUTEXES; i++)
		_thread_malloc_lock(i);
	_thread_arc4_lock();

	newid = sys_fork();

	_thread_arc4_unlock();
	if (newid == 0)
		_thread_malloc_reinit();
	else
		for (i = 0; i < _MALLOC_MUTEXES; i++)
			_thread_malloc_unlock(i);
	_thread_atexit_unlock();

	return newid;
}