/*	$OpenBSD: rthread.h,v 1.50 2014/08/31 04:02:08 guenther Exp $ */
/*
 * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
 * All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Private data structures that back up the typedefs in pthread.h.
 * Since only the thread library cares about their size or arrangement,
 * it should be possible to switch libraries without relinking.
 *
 * Do not reorder the struct _spinlock and sem_t members within these
 * structs: some architectures (hppa, for example) impose alignment
 * requirements on them, currently 16 bytes.
 *
 * THE MACHINE DEPENDENT CERROR CODE HAS HARD CODED OFFSETS INTO PTHREAD_T!
 */

#include <sys/queue.h>
#include <semaphore.h>
#include <machine/spinlock.h>
#include <machine/tcb.h>		/* for TLS_VARIANT */

#ifdef __LP64__
#define RTHREAD_STACK_SIZE_DEF (512 * 1024)
#else
#define RTHREAD_STACK_SIZE_DEF (256 * 1024)
#endif

#define _USING_TICKETS 0
/*
 * Ticket locks don't seem to work yet (or appear much slower, with lots
 * of system time).  Until that changes, keep the struct around to avoid
 * excessive churn from switching back and forth.
 */
struct _spinlock {
	_atomic_lock_t ticket;
};

#define	_SPINLOCK_UNLOCKED { _ATOMIC_LOCK_UNLOCKED }
extern struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN;

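/*
 * Illustrative only, not part of the interface: a typical pattern for
 * guarding a short critical section with the spinlock primitives
 * declared later in this file (the variable name and the guarded state
 * are made up for this sketch):
 *
 *	static struct _spinlock example_lock = _SPINLOCK_UNLOCKED;
 *
 *	_spinlock(&example_lock);
 *	... touch the shared state ...
 *	_spinunlock(&example_lock);
 */
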
struct stack {
	SLIST_ENTRY(stack)	link;	/* link for free default stacks */
	void	*sp;			/* machine stack pointer */
	void	*base;			/* bottom of allocated area */
	size_t	guardsize;		/* size of PROT_NONE zone or */
					/* ==1 if application allocated */
	size_t	len;			/* total size of allocated stack */
};

struct __sem {
	struct _spinlock lock;
	volatile int waitcount;
	volatile int value;
	int shared;
};

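/*
 * Illustrative only: a struct __sem is the low-level blocking object the
 * rest of the library builds on.  A waiter blocks in _sem_wait() and is
 * released by _sem_post() (both declared near the end of this file); the
 * extra _sem_wait() arguments are elided here because their semantics
 * belong to the implementation, not to this sketch:
 *
 *	struct __sem done = { _SPINLOCK_UNLOCKED, 0, 0, 0 };
 *
 *	_sem_wait(&done, ...);		wait until the count goes positive
 *	_sem_post(&done);		wake one waiter
 */
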
TAILQ_HEAD(pthread_queue, pthread);

struct pthread_mutex {
	struct _spinlock lock;
	struct pthread_queue lockers;
	int type;
	pthread_t owner;
	int count;
	int prioceiling;
};

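/*
 * Illustrative only: a rough sketch of how the pthread_mutex fields fit
 * together on the lock path.  The real logic lives elsewhere in
 * librthread and also handles timeouts, the errorcheck and recursive
 * types, and waking the next locker; this is not that code:
 *
 *	_spinlock(&mutex->lock);
 *	if (mutex->owner == NULL) {
 *		mutex->owner = self;		grab an idle mutex
 *	} else {
 *		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
 *		block until we are made the owner
 *	}
 *	_spinunlock(&mutex->lock);
 */
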
struct pthread_mutex_attr {
	int ma_type;
	int ma_protocol;
	int ma_prioceiling;
};

struct pthread_cond {
	struct _spinlock lock;
	struct pthread_queue waiters;
	struct pthread_mutex *mutex;
	clockid_t clock;
};

struct pthread_cond_attr {
	clockid_t ca_clock;
};

struct pthread_rwlock {
	struct _spinlock lock;
	pthread_t owner;
	struct pthread_queue writers;
	int readers;
};

struct pthread_rwlockattr {
	int pshared;
};

struct pthread_attr {
	void *stack_addr;
	size_t stack_size;
	size_t guard_size;
	int detach_state;
	int contention_scope;
	int sched_policy;
	struct sched_param sched_param;
	int sched_inherit;
};

#define	PTHREAD_MIN_PRIORITY	0
#define	PTHREAD_MAX_PRIORITY	31

struct rthread_key {
	int used;
	void (*destructor)(void *);
};

struct rthread_storage {
	int keyid;
	struct rthread_storage *next;
	void *data;
};

struct rthread_cleanup_fn {
	void (*fn)(void *);
	void *arg;
	struct rthread_cleanup_fn *next;
};

struct pthread_barrier {
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	int threshold;
	int sofar;
	int generation;
};

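/*
 * Illustrative only: these fields support the common generation-count
 * barrier pattern.  The real implementation lives elsewhere in librthread
 * and also reports PTHREAD_BARRIER_SERIAL_THREAD and errors; roughly:
 *
 *	pthread_mutex_lock(&b->mutex);
 *	if (++b->sofar == b->threshold) {
 *		b->sofar = 0;
 *		b->generation++;		last arrival releases everyone
 *		pthread_cond_broadcast(&b->cond);
 *	} else {
 *		int gen = b->generation;
 *		while (gen == b->generation)	wait for the generation to turn over
 *			pthread_cond_wait(&b->cond, &b->mutex);
 *	}
 *	pthread_mutex_unlock(&b->mutex);
 */
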
struct pthread_barrierattr {
	int pshared;
};

struct pthread_spinlock {
	struct _spinlock lock;
	pthread_t owner;
};

struct pthread {
	struct __sem donesem;
#if TLS_VARIANT == 1
	int *errno_ptr;
#endif
	pid_t tid;
	unsigned int flags;
	struct _spinlock flags_lock;
	void *retval;
	void *(*fn)(void *);
	void *arg;
	char name[32];
	struct stack *stack;
	LIST_ENTRY(pthread) threads;
	TAILQ_ENTRY(pthread) waiting;
	pthread_cond_t blocking_cond;
	int sched_policy;
	struct pthread_attr attr;
	struct sched_param sched_param;
	struct rthread_storage *local_storage;
	struct rthread_cleanup_fn *cleanup_fns;
	int myerrno;

	/* currently in a cancel point? */
	int cancel_point;

	/* cancel received in a delayed cancel block? */
	int delayed_cancel;
};
#define	THREAD_DONE		0x001
#define	THREAD_DETACHED		0x002
#define	THREAD_CANCELED		0x004
#define	THREAD_CANCEL_ENABLE	0x008
#define	THREAD_CANCEL_DEFERRED	0x010
#define	THREAD_CANCEL_DELAY	0x020
#define	THREAD_DYING		0x040
#define	THREAD_ORIGINAL		0x080	/* original thread from fork */
#define	THREAD_INITIAL_STACK	0x100	/* thread with stack from exec */

#define	IS_CANCELED(thread) \
	(((thread)->flags & (THREAD_CANCELED|THREAD_DYING)) == THREAD_CANCELED)
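
/*
 * Illustrative only: a cancellation point marks itself and then tests
 * the flag word through IS_CANCELED(), roughly like this (the detailed
 * checks live in rthread_cancel.c, whose prototypes appear below):
 *
 *	_enter_cancel(self);
 *	if (IS_CANCELED(self))
 *		pthread_exit(PTHREAD_CANCELED);
 *	...
 *	_leave_cancel(self);
 */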


extern int _threads_ready;
extern size_t _thread_pagesize;
extern LIST_HEAD(listhead, pthread) _thread_list;
extern struct _spinlock _thread_lock;
extern struct pthread_attr _rthread_attr_default;

#define	ROUND_TO_PAGE(size) \
	(((size) + (_thread_pagesize - 1)) & ~(_thread_pagesize - 1))
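
/*
 * Example: with a 4096-byte page size, ROUND_TO_PAGE(1) == 4096,
 * ROUND_TO_PAGE(5000) == 8192, and page-aligned sizes come back
 * unchanged.  The mask trick assumes _thread_pagesize is a power of two.
 */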

void	_spinlock(volatile struct _spinlock *);
int	_spinlocktry(volatile struct _spinlock *);
void	_spinunlock(volatile struct _spinlock *);
int	_sem_wait(sem_t, int, const struct timespec *, int *);
int	_sem_post(sem_t);

int	_rthread_init(void);
void	_rthread_setflag(pthread_t, int);
void	_rthread_clearflag(pthread_t, int);
struct stack *_rthread_alloc_stack(pthread_t);
void	_rthread_free_stack(struct stack *);
void	_rthread_tls_destructors(pthread_t);
void	_rthread_debug(int, const char *, ...)
		__attribute__((__format__ (printf, 2, 3)));
void	_rthread_debug_init(void);
#if defined(__ELF__)
void	_rthread_dl_lock(int what);
void	_rthread_bind_lock(int);
#endif

/* rthread_cancel.c */
void	_enter_cancel(pthread_t);
void	_leave_cancel(pthread_t);
void	_enter_delayed_cancel(pthread_t);
void	_leave_delayed_cancel(pthread_t, int);

void	_thread_dump_info(void);

/* syscalls */
void	__threxit(pid_t *);
int	__thrsleep(const volatile void *, clockid_t, const struct timespec *,
	    volatile void *, const int *);
int	__thrwakeup(const volatile void *, int n);
int	__thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);
int	sched_yield(void);
int	_thread_sys_sigaction(int, const struct sigaction *,
	    struct sigaction *);
int	_thread_sys_sigprocmask(int, const sigset_t *, sigset_t *);
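
/*
 * Illustrative only: __thrsleep() and __thrwakeup() are the kernel
 * sleep/wakeup primitives the blocking paths above are built on.  A
 * waiter sleeps on an arbitrary identifying address (optionally with a
 * timeout and a userland lock that the kernel releases for it); a waker
 * wakes up to n sleepers on the same address.  With the optional
 * arguments left out:
 *
 *	__thrsleep(&obj, CLOCK_REALTIME, NULL, NULL, NULL);
 *	...
 *	__thrwakeup(&obj, 1);		wake one thread sleeping on &obj
 */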