/*	$OpenBSD: thread_private.h,v 1.35 2019/02/13 13:22:14 mpi Exp $ */

/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */

#ifndef _THREAD_PRIVATE_H_
#define _THREAD_PRIVATE_H_

#include <stdio.h>		/* for FILE and __isthreaded */

#define _MALLOC_MUTEXES 32
void	_malloc_init(int);
#ifdef __LIBC__
PROTO_NORMAL(_malloc_init);
#endif /* __LIBC__ */

/*
 * The callbacks needed by libc to handle the threaded case.
 * NOTE: Bump the version when you change the struct contents!
 *
 * tc_canceled:
 *	If not NULL, what to do when canceled (otherwise _exit(0))
 *
 * tc_flockfile, tc_ftrylockfile, and tc_funlockfile:
 *	If not NULL, these implement the flockfile() family.
 *	XXX In theory, you should be able to lock a FILE before
 *	XXX loading libpthread and have that be a real lock on it,
 *	XXX but that doesn't work without the libc base version
 *	XXX tracking the recursion count.
 *
 * tc_malloc_lock and tc_malloc_unlock:
 * tc_atexit_lock and tc_atexit_unlock:
 * tc_atfork_lock and tc_atfork_unlock:
 * tc_arc4_lock and tc_arc4_unlock:
 *	The locks used by the malloc, atexit, atfork, and arc4 subsystems.
 *	These have to be ordered specially in the fork/vfork wrappers
 *	and may be implemented differently than the general mutexes
 *	in the callbacks below.
 *
 * tc_mutex_lock and tc_mutex_unlock:
 *	Lock and unlock the given mutex.  If the given mutex is NULL
 *	a mutex is allocated and initialized automatically.
 *
 * tc_mutex_destroy:
 *	Destroy/deallocate the given mutex.
 *
 * tc_tag_lock and tc_tag_unlock:
 *	Lock and unlock the mutex associated with the given tag.
 *	If the given tag is NULL a tag is allocated and initialized
 *	automatically.
 *
 * tc_tag_storage:
 *	Returns a pointer to the per-thread instance of data associated
 *	with the given tag.  If the given tag is NULL a tag is
 *	allocated and initialized automatically.
 *
 * tc_fork, tc_vfork:
 *	If not NULL, they are called instead of the syscall stub, so that
 *	the thread library can do necessary locking and reinitialization.
 *
 * tc_thread_release:
 *	Handles the release of a thread's TIB and struct pthread and the
 *	notification of other threads...when there are other threads.
 *
 * tc_thread_key_zero:
 *	For each thread, zero out the key data associated with the given key.
 *
 * If <machine/tcb.h> doesn't define TCB_GET(), then locating the TCB in a
 * threaded process requires a syscall (__get_tcb(2)) which is too much
 * overhead for single-threaded processes.  For those archs, there are two
 * additional callbacks, though they are placed first in the struct for
 * convenience in ASM:
 *
 * tc_errnoptr:
 *	Returns the address of the thread's errno.
 *
 * tc_tcb:
 *	Returns the address of the thread's TCB.
 */

struct pthread;
struct thread_callbacks {
	int	*(*tc_errnoptr)(void);		/* MUST BE FIRST */
	void	*(*tc_tcb)(void);
	__dead void	(*tc_canceled)(void);
	void	(*tc_flockfile)(FILE *);
	int	(*tc_ftrylockfile)(FILE *);
	void	(*tc_funlockfile)(FILE *);
	void	(*tc_malloc_lock)(int);
	void	(*tc_malloc_unlock)(int);
	void	(*tc_atexit_lock)(void);
	void	(*tc_atexit_unlock)(void);
	void	(*tc_atfork_lock)(void);
	void	(*tc_atfork_unlock)(void);
	void	(*tc_arc4_lock)(void);
	void	(*tc_arc4_unlock)(void);
	void	(*tc_mutex_lock)(void **);
	void	(*tc_mutex_unlock)(void **);
	void	(*tc_mutex_destroy)(void **);
	void	(*tc_tag_lock)(void **);
	void	(*tc_tag_unlock)(void **);
	void	*(*tc_tag_storage)(void **, void *, size_t, void *);
	__pid_t	(*tc_fork)(void);
	__pid_t	(*tc_vfork)(void);
	void	(*tc_thread_release)(struct pthread *);
	void	(*tc_thread_key_zero)(int);
};

__BEGIN_PUBLIC_DECLS
/*
 * Set the callbacks used by libc
 */
void	_thread_set_callbacks(const struct thread_callbacks *_cb, size_t _len);
__END_PUBLIC_DECLS
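
/*
 * Illustrative sketch, not part of this header: a thread library makes
 * itself known by filling in a struct thread_callbacks and handing it to
 * _thread_set_callbacks().  The _example_* names below are hypothetical
 * and only a few members are shown; a real thread library fills in every
 * member it implements (unnamed members stay NULL).
 *
 *	static int *_example_errnoptr(void);
 *	static void *_example_tcb(void);
 *	static void _example_mutex_lock(void **);
 *	static void _example_mutex_unlock(void **);
 *
 *	static const struct thread_callbacks _example_cb = {
 *		.tc_errnoptr	 = _example_errnoptr,
 *		.tc_tcb		 = _example_tcb,
 *		.tc_mutex_lock	 = _example_mutex_lock,
 *		.tc_mutex_unlock = _example_mutex_unlock,
 *	};
 *
 *	void
 *	_example_thread_init(void)
 *	{
 *		_thread_set_callbacks(&_example_cb, sizeof(_example_cb));
 *	}
 */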

#ifdef __LIBC__
__BEGIN_HIDDEN_DECLS
/* the current set */
extern struct thread_callbacks _thread_cb;
__END_HIDDEN_DECLS
#endif /* __LIBC__ */

/*
 * helper macro to make unique names in the thread namespace
 */
#define __THREAD_NAME(name)	__CONCAT(_thread_tagname_,name)

/*
 * Macros used in libc to access thread mutexes, keys, and per-thread storage.
 * _THREAD_PRIVATE_KEY and _THREAD_PRIVATE_MUTEX are different macros for
 * historical reasons.  They do the same thing: define a static variable,
 * keyed by 'name', that identifies a mutex and a key used to locate
 * per-thread data.
 */
#define _THREAD_PRIVATE_KEY(name)					\
	static void *__THREAD_NAME(name)
#define _THREAD_PRIVATE_MUTEX(name)					\
	static void *__THREAD_NAME(name)


#ifndef __LIBC__	/* building something other than libc, e.g., the thread library */

#define _THREAD_PRIVATE_MUTEX_LOCK(name)		do {} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name)		do {} while (0)
#define _THREAD_PRIVATE(keyname, storage, error)	&(storage)
#define _MUTEX_LOCK(mutex)				do {} while (0)
#define _MUTEX_UNLOCK(mutex)				do {} while (0)
#define _MUTEX_DESTROY(mutex)				do {} while (0)
#define _MALLOC_LOCK(n)					do {} while (0)
#define _MALLOC_UNLOCK(n)				do {} while (0)
#define _ATEXIT_LOCK()					do {} while (0)
#define _ATEXIT_UNLOCK()				do {} while (0)
#define _ATFORK_LOCK()					do {} while (0)
#define _ATFORK_UNLOCK()				do {} while (0)
#define _ARC4_LOCK()					do {} while (0)
#define _ARC4_UNLOCK()					do {} while (0)

#else	/* building libc */
#define _THREAD_PRIVATE_MUTEX_LOCK(name)				\
	do {								\
		if (_thread_cb.tc_tag_lock != NULL)			\
			_thread_cb.tc_tag_lock(&(__THREAD_NAME(name)));	\
	} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name)				\
	do {								\
		if (_thread_cb.tc_tag_unlock != NULL)			\
			_thread_cb.tc_tag_unlock(&(__THREAD_NAME(name))); \
	} while (0)
#define _THREAD_PRIVATE(keyname, storage, error)			\
	(_thread_cb.tc_tag_storage == NULL ? &(storage) :		\
	    _thread_cb.tc_tag_storage(&(__THREAD_NAME(keyname)),	\
	    &(storage), sizeof(storage), error))
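
/*
 * Illustrative sketch, not part of this header: how libc code typically
 * uses the tag macros for per-thread storage.  The names ex_key, ex_state,
 * and _example_state() are hypothetical.
 *
 *	struct ex_state { int count; };
 *	static struct ex_state ex_state;
 *	_THREAD_PRIVATE_KEY(ex_key);
 *
 *	struct ex_state *
 *	_example_state(void)
 *	{
 *		struct ex_state *p;
 *
 *		p = _THREAD_PRIVATE(ex_key, ex_state, NULL);
 *		if (p == NULL)
 *			return (NULL);
 *		p->count++;
 *		return (p);
 *	}
 *
 * In a threaded process p points at the calling thread's copy of ex_state;
 * in a single-threaded process it is simply &ex_state.  By convention the
 * thread library hands back the third argument ('error', NULL here) when
 * the per-thread copy cannot be set up.  _THREAD_PRIVATE_MUTEX() and
 * _THREAD_PRIVATE_MUTEX_LOCK()/_UNLOCK() follow the same tag pattern for a
 * named mutex.
 */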

/*
 * Macros used in libc to access mutexes.
 */
#define _MUTEX_LOCK(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_lock(mutex);		\
	} while (0)
#define _MUTEX_UNLOCK(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_unlock(mutex);		\
	} while (0)
#define _MUTEX_DESTROY(mutex)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_mutex_destroy(mutex);		\
	} while (0)

/*
 * malloc lock/unlock prototypes and definitions
 */
#define _MALLOC_LOCK(n)							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_malloc_lock(n);			\
	} while (0)
#define _MALLOC_UNLOCK(n)						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_malloc_unlock(n);			\
	} while (0)

#define _ATEXIT_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atexit_lock();			\
	} while (0)
#define _ATEXIT_UNLOCK()						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atexit_unlock();			\
	} while (0)

#define _ATFORK_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atfork_lock();			\
	} while (0)
#define _ATFORK_UNLOCK()						\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_atfork_unlock();			\
	} while (0)

#define _ARC4_LOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_arc4_lock();			\
	} while (0)
#define _ARC4_UNLOCK()							\
	do {								\
		if (__isthreaded)					\
			_thread_cb.tc_arc4_unlock();			\
	} while (0)
#endif /* __LIBC__ */
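
/*
 * Illustrative sketch, not part of this header: guarding shared libc state
 * with the generic mutex macros.  The names ex_mutex, ex_count, and
 * _example_bump() are hypothetical.  The mutex pointer starts out NULL and
 * tc_mutex_lock() allocates and initializes it on first use; in an
 * unthreaded process the macros expand to no locking at all.
 *
 *	static void *ex_mutex;
 *	static int ex_count;
 *
 *	int
 *	_example_bump(void)
 *	{
 *		int n;
 *
 *		_MUTEX_LOCK(&ex_mutex);
 *		n = ++ex_count;
 *		_MUTEX_UNLOCK(&ex_mutex);
 *		return (n);
 *	}
 */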

/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Private data structures that back up the typedefs in pthread.h.
 * Since only the thread library cares about their size or arrangement,
 * it should be possible to switch libraries without relinking.
 *
 * Do not reorder _atomic_lock_t and sem_t variables in the structs.
 * This is due to alignment requirements of certain arches like hppa.
 * The current requirement is 16 bytes.
 *
 * THE MACHINE DEPENDENT CERROR CODE HAS HARD CODED OFFSETS INTO PTHREAD_T!
 */

#include <sys/queue.h>
#include <pthread.h>
#include <semaphore.h>
#include <machine/spinlock.h>

#define	_SPINLOCK_UNLOCKED	_ATOMIC_LOCK_UNLOCKED

struct __sem {
	_atomic_lock_t lock;
	volatile int waitcount;
	volatile int value;
	int shared;
};

TAILQ_HEAD(pthread_queue, pthread);

#ifdef FUTEX

struct pthread_mutex {
	volatile unsigned int lock;
	int type;
	pthread_t owner;
	int count;
	int prioceiling;
};

struct pthread_cond {
	volatile unsigned int seq;
	clockid_t clock;
	struct pthread_mutex *mutex;
};

struct pthread_rwlock {
	volatile unsigned int value;
};

#else

struct pthread_mutex {
	_atomic_lock_t lock;
	struct pthread_queue lockers;
	int type;
	pthread_t owner;
	int count;
	int prioceiling;
};

struct pthread_cond {
	_atomic_lock_t lock;
	struct pthread_queue waiters;
	struct pthread_mutex *mutex;
	clockid_t clock;
};

struct pthread_rwlock {
	_atomic_lock_t lock;
	pthread_t owner;
	struct pthread_queue writers;
	int readers;
};
#endif /* FUTEX */

struct pthread_mutex_attr {
	int ma_type;
	int ma_protocol;
	int ma_prioceiling;
};

struct pthread_cond_attr {
	clockid_t ca_clock;
};

struct pthread_attr {
	void *stack_addr;
	size_t stack_size;
	size_t guard_size;
	int detach_state;
	int contention_scope;
	int sched_policy;
	struct sched_param sched_param;
	int sched_inherit;
};

struct rthread_storage {
	int keyid;
	struct rthread_storage *next;
	void *data;
};

struct rthread_cleanup_fn {
	void (*fn)(void *);
	void *arg;
	struct rthread_cleanup_fn *next;
};

struct tib;
struct stack;
struct pthread {
	struct __sem donesem;
	unsigned int flags;
	_atomic_lock_t flags_lock;
	struct tib *tib;
	void *retval;
	void *(*fn)(void *);
	void *arg;
	char name[32];
	struct stack *stack;
	LIST_ENTRY(pthread) threads;
	TAILQ_ENTRY(pthread) waiting;
	pthread_cond_t blocking_cond;
	struct pthread_attr attr;
	struct rthread_storage *local_storage;
	struct rthread_cleanup_fn *cleanup_fns;

	/* cancel received in a delayed cancel block? */
	int delayed_cancel;
};
/* flags in pthread->flags */
#define	THREAD_DONE		0x001
#define	THREAD_DETACHED		0x002

/* flags in tib->tib_thread_flags */
#define	TIB_THREAD_ASYNC_CANCEL		0x001
#define	TIB_THREAD_INITIAL_STACK	0x002	/* has stack from exec */

#define ENTER_DELAYED_CANCEL_POINT(tib, self)				\
	(self)->delayed_cancel = 0;					\
	ENTER_CANCEL_POINT_INNER(tib, 1, 1)

/*
 * Internal functions exported from libc's thread bits for use by libpthread
 */
void	_spinlock(volatile _atomic_lock_t *);
int	_spinlocktry(volatile _atomic_lock_t *);
void	_spinunlock(volatile _atomic_lock_t *);
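
/*
 * Illustrative sketch, not part of this header: the spinlock interface
 * declared above works on a statically initialized _atomic_lock_t.  The
 * names ex_slock and _example_spin() are hypothetical.
 *
 *	static _atomic_lock_t ex_slock = _SPINLOCK_UNLOCKED;
 *
 *	void
 *	_example_spin(void)
 *	{
 *		_spinlock(&ex_slock);
 *		... short critical section ...
 *		_spinunlock(&ex_slock);
 *	}
 */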

void	_rthread_debug(int, const char *, ...)
		__attribute__((__format__ (printf, 2, 3)));
pid_t	_thread_dofork(pid_t (*_sys_fork)(void));
void	_thread_finalize(void);

/*
 * Threading syscalls not declared in system headers
 */
__dead void	__threxit(pid_t *);
int	__thrsleep(const volatile void *, clockid_t,
	    const struct timespec *, volatile void *, const int *);
int	__thrwakeup(const volatile void *, int n);
int	__thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);

#endif /* _THREAD_PRIVATE_H_ */