1*2c53affbSjmc /* $OpenBSD: rthread.c,v 1.100 2022/12/27 17:10:07 jmc Exp $ */
21a251377Stedu /*
355aa0b8cStedu * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
41a251377Stedu * All Rights Reserved.
51a251377Stedu *
61a251377Stedu * Permission to use, copy, modify, and distribute this software for any
71a251377Stedu * purpose with or without fee is hereby granted, provided that the above
81a251377Stedu * copyright notice and this permission notice appear in all copies.
91a251377Stedu *
101a251377Stedu * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
111a251377Stedu * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
121a251377Stedu * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
131a251377Stedu * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
141a251377Stedu * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
151a251377Stedu * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
161a251377Stedu * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
171a251377Stedu */
181a251377Stedu /*
191a251377Stedu * The heart of rthreads. Basic functions like creating and joining
201a251377Stedu * threads.
211a251377Stedu */
221a251377Stedu
23abaa7c77Sguenther #include <sys/types.h>
24b8ec2dbdSguenther #ifndef NO_PIC
259f79a698Smpi #include <elf.h>
2628e55baeSmatthew #pragma weak _DYNAMIC
2728e55baeSmatthew #endif
281a251377Stedu
291a251377Stedu #include <stdlib.h>
301a251377Stedu #include <unistd.h>
311a251377Stedu #include <signal.h>
321a251377Stedu #include <stdio.h>
331a251377Stedu #include <string.h>
341a251377Stedu #include <errno.h>
3595853897Sart #include <dlfcn.h>
36fe38b55cSguenther #include <tib.h>
371a251377Stedu
381a251377Stedu #include <pthread.h>
391a251377Stedu
40fe38b55cSguenther #include "cancel.h" /* in libc/include */
411a251377Stedu #include "rthread.h"
42fe38b55cSguenther #include "rthread_cb.h"
431a251377Stedu
/*
 * Call nonstandard functions via names in the reserved namespace:
 *	dlctl() -> _dlctl()
 *	getthrid -> _thread_sys_getthrid
 */
typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
REDIRECT_SYSCALL(getthrid);

/*
 * weak stub to be overridden by ld.so; the arguments are ignored
 * here, so statically linked programs get a harmless no-op
 */
int dlctl(void *handle, int cmd, void *data) { return 0; }

/*
 * libc's signal wrappers hide SIGTHR; we need to call the real syscall
 * stubs _thread_sys_* directly.
 */
REDIRECT_SYSCALL(sigaction);
REDIRECT_SYSCALL(sigprocmask);
REDIRECT_SYSCALL(thrkill);
6229088be8Sguenther
/* value stored by pthread_setconcurrency(); purely advisory */
static int concurrency_level;	/* not used */

int _threads_ready;		/* nonzero once _rthread_init() has run */
/* NOTE(review): set elsewhere; checked in pthread_join() — presumably
 * flags a forked child of a threaded process; confirm against caller */
int _post_threaded;
size_t _thread_pagesize;	/* cached sysconf(_SC_PAGESIZE) */
/* list of all live threads, protected by _thread_lock */
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
/* dead threads awaiting _rthread_reaper(), protected by _thread_gc_lock */
static struct pthread_queue _thread_gc_list
    = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
static struct pthread _initial_thread;

/* attributes used when pthread_create() is given attr == NULL */
struct pthread_attr _rthread_attr_default = {
	.stack_addr			= NULL,
	.stack_size			= RTHREAD_STACK_SIZE_DEF,
/*	.guard_size		set in _rthread_init */
	.detach_state			= PTHREAD_CREATE_JOINABLE,
	.contention_scope		= PTHREAD_SCOPE_SYSTEM,
	.sched_policy			= SCHED_OTHER,
	.sched_param			= { .sched_priority = 0 },
	.sched_inherit			= PTHREAD_INHERIT_SCHED,
};
8558dbb15cSguenther
861a251377Stedu /*
871a251377Stedu * internal support functions
881a251377Stedu */
891a251377Stedu
901a251377Stedu static void
_rthread_start(void * v)91eaffd144Stedu _rthread_start(void *v)
921a251377Stedu {
931a251377Stedu pthread_t thread = v;
941a251377Stedu void *retval;
951a251377Stedu
961a251377Stedu retval = thread->fn(thread->arg);
971a251377Stedu pthread_exit(retval);
981a251377Stedu }
991a251377Stedu
/*
 * Handler for SIGTHR, the signal used internally for cancelation
 * and suspension.  Runs in async-signal context: it only inspects
 * flags in the TIB and either returns or exits the thread.
 */
static void
sigthr_handler(__unused int sig)
{
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;

	/*
	 * Do nothing unless
	 * 1) pthread_cancel() has been called on this thread,
	 * 2) cancelation is enabled for it, and
	 * 3) we're not already in cancelation processing
	 */
	if (!tib->tib_canceled || tib->tib_cantcancel)
		return;

	/*
	 * If delaying cancels inside complex ops (pthread_cond_wait,
	 * pthread_join, etc), just mark that this has happened to
	 * prevent a race with going to sleep
	 */
	if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
		self->delayed_cancel = 1;
		return;
	}

	/*
	 * otherwise, if in a cancel point or async cancels are
	 * enabled, then exit
	 */
	if (tib->tib_cancel_point ||
	    (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
		pthread_exit(PTHREAD_CANCELED);
}
13377995a42Sguenther
134fe38b55cSguenther
/*
 * A few basic callbacks for libc.  The first couple are only used
 * on archs where there isn't a fast TCB_GET()
 */
#ifndef TCB_HAVE_MD_GET
/* return the address of the calling thread's errno slot in its TIB */
static int *
multi_threaded_errnoptr(void)
{
	return (&TIB_GET()->tib_errno);
}

/* return the calling thread's TCB pointer */
static void *
multi_threaded_tcb(void)
{
	return (TCB_GET());
}
#endif /* TCB_HAVE_MD_GET */
152fe38b55cSguenther
/*
 * Queue a finished thread on the GC list; _rthread_reaper() frees
 * its stack and TIB later, once its tid has been cleared.
 */
static void
_rthread_free(pthread_t thread)
{
	_spinlock(&_thread_gc_lock);
	TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
	_spinunlock(&_thread_gc_lock);
}
160a5511fa9Sguenther
/*
 * libc callback (tc_thread_release): called when a thread exits.
 * Remove it from the global list, then either queue it for reaping
 * (detached) or mark it THREAD_DONE and wake a joiner via donesem.
 */
static void
_thread_release(pthread_t thread)
{
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		/* drop the lock before handing the thread to the GC list */
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DONE;
		_spinunlock(&thread->flags_lock);
		_sem_post(&thread->donesem);
	}
}
178a5511fa9Sguenther
179a5511fa9Sguenther static void
_thread_key_zero(int key)180a5511fa9Sguenther _thread_key_zero(int key)
181a5511fa9Sguenther {
182a5511fa9Sguenther pthread_t thread;
183a5511fa9Sguenther struct rthread_storage *rs;
184a5511fa9Sguenther
185a5511fa9Sguenther LIST_FOREACH(thread, &_thread_list, threads) {
186a5511fa9Sguenther for (rs = thread->local_storage; rs; rs = rs->next) {
187a5511fa9Sguenther if (rs->keyid == key)
188a5511fa9Sguenther rs->data = NULL;
189a5511fa9Sguenther }
190a5511fa9Sguenther }
191fe38b55cSguenther }
192fe38b55cSguenther
/*
 * One-time setup when the process is about to become multi-threaded:
 * register the initial thread, hand libc our callbacks, hook the
 * dynamic linker's lock, and install the SIGTHR handler.
 * Idempotent: returns immediately once _threads_ready is set.
 */
void
_rthread_init(void)
{
	pthread_t thread = pthread_self();
	struct sigaction sa;

	if (_threads_ready)
		return;

	LIST_INSERT_HEAD(&_thread_list, thread, threads);

	_thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
	_rthread_attr_default.guard_size = _thread_pagesize;
	thread->attr = _rthread_attr_default;

	/* get libc to start using our callbacks */
	{
		struct thread_callbacks cb = { 0 };

#ifndef TCB_HAVE_MD_GET
		cb.tc_errnoptr	= multi_threaded_errnoptr;
		cb.tc_tcb	= multi_threaded_tcb;
#endif
		cb.tc_fork	= _thread_fork;
		cb.tc_vfork	= _thread_vfork;
		cb.tc_thread_release	= _thread_release;
		cb.tc_thread_key_zero	= _thread_key_zero;
		_thread_set_callbacks(&cb, sizeof(cb));
	}

#ifndef NO_PIC
	/* if dynamically linked, give ld.so our recursive lock */
	if (_DYNAMIC) {
		dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
	}
#endif

	/*
	 * Set the handler on the signal used for cancelation and
	 * suspension, and make sure it's unblocked
	 */
	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = sigthr_handler;
	sigaction(SIGTHR, &sa, NULL);
	sigaddset(&sa.sa_mask, SIGTHR);
	sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);

	_threads_ready = 1;

	_malloc_init(1);

	_rthread_debug(1, "rthread init\n");
}
2461a251377Stedu
/*
 * Free the resources of threads on the GC list whose tib_tid has
 * returned to 0, i.e. the thread is completely gone.  The scan is
 * restarted after every removal because the GC lock must be dropped
 * while freeing.
 */
static void
_rthread_reaper(void)
{
	pthread_t thread;

restart:
	_spinlock(&_thread_gc_lock);
	TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
		/* nonzero tid: the thread hasn't fully exited yet */
		if (thread->tib->tib_tid != 0)
			continue;
		TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
		_spinunlock(&_thread_gc_lock);
		if (thread != &_initial_thread) {
			_rthread_debug(3, "rthread reaping %p stack %p\n",
			    (void *)thread, (void *)thread->stack);
			_rthread_free_stack(thread->stack);
			_dl_free_tib(thread->tib, sizeof(*thread));
		} else {
			/* initial thread isn't part of TIB allocation */
			_rthread_debug(3, "rthread reaping %p (initial)\n",
			    (void *)thread);
			_dl_free_tib(thread->tib, 0);
		}
		goto restart;
	}
	_spinunlock(&_thread_gc_lock);
}
274c5891344Sguenther
27577995a42Sguenther /*
276a5511fa9Sguenther * real pthread functions
27777995a42Sguenther */
2781a251377Stedu
/*
 * Wait for `thread` to terminate and optionally collect its exit
 * value.  Acts as a delayed cancelation point.  Returns 0 on
 * success, EINVAL/EDEADLK on misuse, or the _sem_wait() error.
 */
int
pthread_join(pthread_t thread, void **retval)
{
	int e;
	struct tib *tib = TIB_GET();
	pthread_t self;
	PREP_CANCEL_POINT(tib);

	if (_post_threaded) {
#define GREATSCOTT "great scott! serious repercussions on future events!\n"
		write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
		abort();
	}
	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	e = 0;
	ENTER_DELAYED_CANCEL_POINT(tib, self);
	if (thread == NULL)
		e = EINVAL;
	else if (thread == self)
		e = EDEADLK;
	else if (thread->flags & THREAD_DETACHED)
		e = EINVAL;
	else if ((e = _sem_wait(&thread->donesem, 0, NULL,
	    &self->delayed_cancel)) == 0) {
		if (retval)
			*retval = thread->retval;

		/*
		 * We should be the last having a ref to this thread,
		 * but someone stupid or evil might haved detached it;
		 * in that case the thread will clean up itself
		 */
		if ((thread->flags & THREAD_DETACHED) == 0)
			_rthread_free(thread);
	}

	LEAVE_CANCEL_POINT_INNER(tib, e);
	/* opportunistically free any fully-dead threads */
	_rthread_reaper();
	return (e);
}
3221a251377Stedu
3231a251377Stedu int
pthread_detach(pthread_t thread)3241a251377Stedu pthread_detach(pthread_t thread)
3251a251377Stedu {
326e02c9995Sotto int rc = 0;
327e02c9995Sotto
328e02c9995Sotto _spinlock(&thread->flags_lock);
329e02c9995Sotto if (thread->flags & THREAD_DETACHED) {
330e02c9995Sotto rc = EINVAL;
331e02c9995Sotto _spinunlock(&thread->flags_lock);
332e02c9995Sotto } else if (thread->flags & THREAD_DONE) {
333e02c9995Sotto _spinunlock(&thread->flags_lock);
334e02c9995Sotto _rthread_free(thread);
335e02c9995Sotto } else {
3361a251377Stedu thread->flags |= THREAD_DETACHED;
337e02c9995Sotto _spinunlock(&thread->flags_lock);
338e02c9995Sotto }
339e02c9995Sotto _rthread_reaper();
340e02c9995Sotto return (rc);
3411a251377Stedu }
3421a251377Stedu
/*
 * Create a new thread running start_routine(arg).  The pthread
 * structure lives inside a single _dl_allocate_tib() allocation
 * together with the TIB; the stack comes from _rthread_alloc_stack().
 * On success *threadp gets the new handle and 0 is returned;
 * on failure an errno value is returned and everything is torn down.
 */
int
pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	extern int __isthreaded;
	struct tib *tib;
	pthread_t thread;
	struct __tfork param;
	int rc;

	if (!_threads_ready)
		_rthread_init();

	/* opportunistically free threads that have finished */
	_rthread_reaper();

	tib = _dl_allocate_tib(sizeof(*thread));
	if (tib == NULL)
		return (ENOMEM);
	thread = tib->tib_thread;
	memset(thread, 0, sizeof(*thread));
	thread->tib = tib;
	thread->donesem.lock = _SPINLOCK_UNLOCKED;
	thread->flags_lock = _SPINLOCK_UNLOCKED;
	thread->fn = start_routine;
	thread->arg = arg;
	/* tid not known yet; the kernel stores it via param.tf_tid below */
	tib->tib_tid = -1;

	thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
	if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
		pthread_t self = pthread_self();

		thread->attr.sched_policy = self->attr.sched_policy;
		thread->attr.sched_param = self->attr.sched_param;
	}
	if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
		thread->flags |= THREAD_DETACHED;

	thread->stack = _rthread_alloc_stack(thread);
	if (!thread->stack) {
		rc = errno;
		goto fail1;
	}

	param.tf_tcb = TIB_TO_TCB(tib);
	param.tf_tid = &tib->tib_tid;
	param.tf_stack = thread->stack->sp;

	_spinlock(&_thread_lock);
	LIST_INSERT_HEAD(&_thread_list, thread, threads);
	_spinunlock(&_thread_lock);

	/* we're going to be multi-threaded real soon now */
	__isthreaded = 1;
	rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
	if (rc != -1) {
		/* success */
		*threadp = thread;
		return (0);
	}

	rc = errno;

	/* creation failed: undo the bookkeeping done above */
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);
	_rthread_free_stack(thread->stack);
fail1:
	_dl_free_tib(tib, sizeof(*thread));

	return (rc);
}
4141a251377Stedu
4151a251377Stedu int
pthread_kill(pthread_t thread,int sig)416ed469d5cStedu pthread_kill(pthread_t thread, int sig)
417ed469d5cStedu {
418fe38b55cSguenther struct tib *tib = thread->tib;
419fe38b55cSguenther
42060d49506Sguenther if (sig == SIGTHR)
42160d49506Sguenther return (EINVAL);
422fe38b55cSguenther if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
42360d49506Sguenther return (errno);
42460d49506Sguenther return (0);
425ed469d5cStedu }
426ed469d5cStedu
427ed469d5cStedu int
pthread_cancel(pthread_t thread)428a36f0e58Stedu pthread_cancel(pthread_t thread)
429a36f0e58Stedu {
430fe38b55cSguenther struct tib *tib = thread->tib;
431fe38b55cSguenther pid_t tid = tib->tib_tid;
432a36f0e58Stedu
433fe38b55cSguenther if (tib->tib_canceled == 0 && tid != 0 &&
434fe38b55cSguenther (tib->tib_cantcancel & CANCEL_DYING) == 0) {
435fe38b55cSguenther tib->tib_canceled = 1;
436b63f3a34Sguenther
437fe38b55cSguenther if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
438fe38b55cSguenther thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
439b63f3a34Sguenther return (0);
440b63f3a34Sguenther }
441b63f3a34Sguenther }
442a36f0e58Stedu return (0);
443a36f0e58Stedu }
444a36f0e58Stedu
445a36f0e58Stedu void
pthread_testcancel(void)446a36f0e58Stedu pthread_testcancel(void)
447a36f0e58Stedu {
448fe38b55cSguenther struct tib *tib = TIB_GET();
449a36f0e58Stedu
450fe38b55cSguenther if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
451fe38b55cSguenther pthread_exit(PTHREAD_CANCELED);
452a36f0e58Stedu }
453a36f0e58Stedu
454a36f0e58Stedu int
pthread_setcancelstate(int state,int * oldstatep)455a36f0e58Stedu pthread_setcancelstate(int state, int *oldstatep)
456a36f0e58Stedu {
457fe38b55cSguenther struct tib *tib = TIB_GET();
458a36f0e58Stedu int oldstate;
459a36f0e58Stedu
460fe38b55cSguenther oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
461fe38b55cSguenther PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
462a36f0e58Stedu if (state == PTHREAD_CANCEL_ENABLE) {
463fe38b55cSguenther tib->tib_cantcancel &= ~CANCEL_DISABLED;
464a36f0e58Stedu } else if (state == PTHREAD_CANCEL_DISABLE) {
465fe38b55cSguenther tib->tib_cantcancel |= CANCEL_DISABLED;
466a36f0e58Stedu } else {
467a36f0e58Stedu return (EINVAL);
468a36f0e58Stedu }
469a36f0e58Stedu if (oldstatep)
470a36f0e58Stedu *oldstatep = oldstate;
471a36f0e58Stedu
472a36f0e58Stedu return (0);
473a36f0e58Stedu }
4747567a0bfSguenther DEF_STD(pthread_setcancelstate);
475a36f0e58Stedu
476a36f0e58Stedu int
pthread_setcanceltype(int type,int * oldtypep)477a36f0e58Stedu pthread_setcanceltype(int type, int *oldtypep)
478a36f0e58Stedu {
479fe38b55cSguenther struct tib *tib = TIB_GET();
480a36f0e58Stedu int oldtype;
481a36f0e58Stedu
482fe38b55cSguenther oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
483fe38b55cSguenther PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
484a36f0e58Stedu if (type == PTHREAD_CANCEL_DEFERRED) {
485fe38b55cSguenther tib->tib_thread_flags &=~ TIB_THREAD_ASYNC_CANCEL;
486a36f0e58Stedu } else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
487fe38b55cSguenther tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
488a36f0e58Stedu } else {
489a36f0e58Stedu return (EINVAL);
490a36f0e58Stedu }
491a36f0e58Stedu if (oldtypep)
492a36f0e58Stedu *oldtypep = oldtype;
493a36f0e58Stedu
494a36f0e58Stedu return (0);
495a36f0e58Stedu }
496a36f0e58Stedu
/*
 * Push a cancelation cleanup handler onto the calling thread's
 * handler stack (popped/run by pthread_cleanup_pop()).
 * NOTE(review): the POSIX signature is void, so if calloc() fails
 * the handler is silently dropped and will never run.
 */
void
pthread_cleanup_push(void (*fn)(void *), void *arg)
{
	struct rthread_cleanup_fn *clfn;
	pthread_t self = pthread_self();

	clfn = calloc(1, sizeof(*clfn));
	if (!clfn)
		return;		/* silent failure, see note above */
	clfn->fn = fn;
	clfn->arg = arg;
	clfn->next = self->cleanup_fns;
	self->cleanup_fns = clfn;
}
511a318b99dSjca
512a318b99dSjca void
pthread_cleanup_pop(int execute)513a318b99dSjca pthread_cleanup_pop(int execute)
514a318b99dSjca {
515a318b99dSjca struct rthread_cleanup_fn *clfn;
516a318b99dSjca pthread_t self = pthread_self();
517a318b99dSjca
518a318b99dSjca clfn = self->cleanup_fns;
519a318b99dSjca if (clfn) {
520a318b99dSjca self->cleanup_fns = clfn->next;
521a318b99dSjca if (execute)
522a318b99dSjca clfn->fn(clfn->arg);
523a318b99dSjca free(clfn);
524a318b99dSjca }
525a318b99dSjca }
526a318b99dSjca
/* return the advisory level stored by pthread_setconcurrency() */
int
pthread_getconcurrency(void)
{
	return (concurrency_level);
}
532ee620e50Stedu
533ee620e50Stedu int
pthread_setconcurrency(int new_level)534f236048dSbrad pthread_setconcurrency(int new_level)
535ee620e50Stedu {
536ee620e50Stedu if (new_level < 0)
537ee620e50Stedu return (EINVAL);
538ee620e50Stedu concurrency_level = new_level;
539ee620e50Stedu return (0);
540ee620e50Stedu }
541ee620e50Stedu
/*
 * compat debug stuff: print tid, flags, and name of every thread
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	/* hold _thread_lock so the list can't change underneath us */
	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
		    thread->tib->tib_thread_flags, thread->name);
	_spinunlock(&_thread_lock);
}
55695853897Sart
#ifndef NO_PIC
/*
 * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
 * the function called via atexit() to invoke all destructors.  The latter
 * two call shared-object destructors, which may need to call dlclose(),
 * so this lock needs to permit recursive locking.
 * The specific code here was extracted from _rthread_mutex_lock() and
 * pthread_mutex_unlock() and simplified to use the static variables.
 *
 * what == 0: lock, what == 1: unlock, anything else: reinit (after fork).
 */
void
_rthread_dl_lock(int what)
{
	static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
	static pthread_t owner = NULL;
	static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
	static int count = 0;

	if (what == 0) {
		pthread_t self = pthread_self();

		/* lock, possibly recursive */
		_spinlock(&lock);
		if (owner == NULL) {
			owner = self;
		} else if (owner != self) {
			/* contended: queue up and sleep until handed the lock */
			TAILQ_INSERT_TAIL(&lockers, self, waiting);
			while (owner != self) {
				/* __thrsleep releases the spinlock for us */
				__thrsleep(self, 0, NULL, &lock, NULL);
				_spinlock(&lock);
			}
		}
		count++;
		_spinunlock(&lock);
	} else if (what == 1) {
		/* unlock, possibly recursive */
		if (--count == 0) {
			pthread_t next;

			/* hand ownership directly to the first waiter */
			_spinlock(&lock);
			owner = next = TAILQ_FIRST(&lockers);
			if (next != NULL)
				TAILQ_REMOVE(&lockers, next, waiting);
			_spinunlock(&lock);
			if (next != NULL)
				__thrwakeup(next, 1);
		}
	} else {
		/* reinit: used in child after fork to clear the queue */
		lock = _SPINLOCK_UNLOCKED;
		if (--count == 0)
			owner = NULL;
		TAILQ_INIT(&lockers);
	}
}
#endif
612