xref: /netbsd-src/lib/libpthread/pthread_int.h (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: pthread_int.h,v 1.14 2003/06/26 01:26:11 nathanw Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 #ifndef _LIB_PTHREAD_INT_H
40 #define _LIB_PTHREAD_INT_H
41 
42 #define PTHREAD__DEBUG
43 #define ERRORCHECK
44 
45 #include "pthread_types.h"
46 #include "pthread_queue.h"
47 #include "pthread_debug.h"
48 #include "pthread_md.h"
49 
50 #include <sa.h>
51 #include <signal.h>
52 
53 /*
54  * The size of this structure needs to be no larger than struct
55  * __pthread_cleanup_store, defined in pthread.h.
56  */
57 struct pt_clean_t {
58 	PTQ_ENTRY(pt_clean_t)	ptc_next;	/* Link on the owning thread's pt_cleanup_stack */
59 	void	(*ptc_cleanup)(void *);		/* Cleanup handler to invoke on cancellation/exit */
60 	void	*ptc_arg;			/* Argument passed to ptc_cleanup */
61 };
62 
/*
 * One pending alarm, registered with pthread__alarm_add() and removed
 * with pthread__alarm_del() (declarations below).
 */
63 struct pt_alarm_t {
64 	PTQ_ENTRY(pt_alarm_t)	pta_next;	/* Link on the alarm queue */
65 	pthread_spin_t	pta_lock;		/* Protects this entry */
66 	const struct timespec	*pta_time;	/* Expiry time; absolute vs. relative not shown here -- confirm in alarm code */
67 	void	(*pta_func)(void *);		/* Called when the alarm fires */
68 	void	*pta_arg;			/* Argument passed to pta_func */
69 	int	pta_fired;			/* Nonzero once fired; read via pthread__alarm_fired() */
70 };
71 
/*
 * Per-thread state block: identity, scheduling state, signal state,
 * join/exit bookkeeping, and thread-specific data.
 */
72 struct	pthread_st {
73 	unsigned int	pt_magic;	/* PT_MAGIC while valid; see PT_MAGIC/PT_DEAD below */
74 	/* Identifier, for debugging and for preventing recycling. */
75 	int		pt_num;
76 
77 	int	pt_type;	/* normal, upcall, or idle */
78 	int	pt_state;	/* running, blocked, etc. */
79 	pthread_spin_t pt_statelock;	/* lock on pt_state */
80 	int	pt_flags;	/* see PT_FLAG_* below */
81 	int	pt_cancel;	/* Deferred cancellation */
82 	int	pt_spinlocks;	/* Number of spinlocks held. */
83 	int	pt_blockedlwp;	/* LWP/SA number when blocked */
84 
85 	int	pt_errno;	/* Thread-specific errno. */
86 
87 	/* Entry on the run queue */
88 	PTQ_ENTRY(pthread_st)	pt_runq;
89 	/* Entry on the list of all threads */
90 	PTQ_ENTRY(pthread_st)	pt_allq;
91 	/* Entry on the sleep queue (xxx should be same as run queue?) */
92 	PTQ_ENTRY(pthread_st)	pt_sleep;
93 	/* Object we're sleeping on */
94 	void			*pt_sleepobj;
95 	/* Queue we're sleeping on */
96 	struct pthread_queue_t	*pt_sleepq;
97 	/* Lock protecting that queue */
98 	pthread_spin_t		*pt_sleeplock;
99 
100 	stack_t		pt_stack;	/* Our stack */
101 	ucontext_t	*pt_uc;		/* Saved context when we're stopped */
102 	ucontext_t	*pt_trapuc;   	/* Kernel-saved context */
103 
104 	sigset_t	pt_sigmask;	/* Signals we won't take. */
105 	sigset_t	pt_siglist;	/* Signals pending for us. */
106 	sigset_t	pt_sigblocked;	/* Signals delivered while blocked. */
107 	sigset_t	*pt_sigwait;	/* Signals waited for in sigwait */
108 	siginfo_t	*pt_wsig;	/* siginfo destination for sigwait -- NOTE(review): confirm in signal code */
109 	pthread_spin_t	pt_siglock;	/* Lock on above */
110 
111 	void *		pt_exitval;	/* Read by pthread_join() */
112 
113 	/* Stack of cancellation cleanup handlers and their arguments */
114 	PTQ_HEAD(, pt_clean_t)	pt_cleanup_stack;
115 
116 	/* Thread's name, set by the application. */
117 	char*		pt_name;
118 
119 	/* Other threads trying to pthread_join() us. */
120 	struct pthread_queue_t	pt_joiners;
121 	/* Lock for above, and for changing pt_state to ZOMBIE or DEAD,
122 	 * and for setting the DETACHED flag.  Also protects pt_name.
123 	 */
124 	pthread_spin_t	pt_join_lock;
125 
126 	/* Thread we were going to switch to before we were preempted
127 	 * ourselves. Will be used by the upcall that's continuing us.
128 	 */
129 	pthread_t	pt_switchto;
130 	ucontext_t*	pt_switchtouc;
131 
132 	/* Threads that are preempted with spinlocks held will be
133 	 * continued until they unlock their spinlock. When they do
134 	 * so, they should jump ship to the thread pointed to by
135 	 * pt_next.
136 	 */
137 	pthread_t	pt_next;
138 
139 	/* The upcall that is continuing this thread */
140 	pthread_t	pt_parent;
141 
142 	/* A queue lock that this thread held while trying to
143 	 * context switch to another process.
144 	 */
145 	pthread_spin_t*	pt_heldlock;
146 
147 	/* Thread-specific data */
148 	void*		pt_specific[PTHREAD_KEYS_MAX];
149 
150 #ifdef PTHREAD__DEBUG
151 	/* Scheduling statistics, kept only in debug builds. */
152 	int	blocks;
153 	int	preempts;
154 	int	rescheds;
155 #endif
156 };
156 
/*
 * Machine-dependent spinlock primitives, dispatched through the
 * pthread__lock_ops pointer via the pthread__simple_* macros below.
 */
157 struct pthread_lock_ops {
158 	void	(*plo_init)(__cpu_simple_lock_t *);	/* Initialize the lock */
159 	int	(*plo_try)(__cpu_simple_lock_t *);	/* Try-acquire; return convention is MD -- confirm in lock code */
160 	void	(*plo_unlock)(__cpu_simple_lock_t *);	/* Release the lock */
161 };
162 
163 /* Thread types */
164 #define PT_THREAD_NORMAL	1
165 #define PT_THREAD_UPCALL	2
166 #define PT_THREAD_IDLE		3
167 
168 /* Thread states */
169 #define PT_STATE_RUNNING	1
170 #define PT_STATE_RUNNABLE	2
171 #define PT_STATE_BLOCKED_SYS	3
172 #define PT_STATE_BLOCKED_QUEUE	4
173 #define PT_STATE_ZOMBIE		5
174 #define PT_STATE_DEAD		6
175 #define PT_STATE_RECYCLABLE	7
176 
177 /* Flag values */
178 
179 #define PT_FLAG_DETACHED	0x0001
180 #define PT_FLAG_IDLED		0x0002
181 #define PT_FLAG_CS_DISABLED	0x0004	/* Cancellation disabled */
182 #define PT_FLAG_CS_ASYNC	0x0008  /* Cancellation is async */
183 #define PT_FLAG_CS_PENDING	0x0010
184 #define PT_FLAG_SIGDEFERRED     0x0020	/* There are signals to take */
185 
186 #define PT_MAGIC	0x11110001
187 #define PT_DEAD		0xDEAD0001
188 
189 #define PT_ATTR_MAGIC	0x22220002
190 #define PT_ATTR_DEAD	0xDEAD0002
191 
192 #define PT_STACKSIZE	(1<<18)
193 #define PT_STACKMASK	(PT_STACKSIZE-1)
194 
195 #define PT_UPCALLSTACKS	16
196 
197 #define PT_ALARMTIMER_MAGIC	0x88880010
198 #define PT_RRTIMER_MAGIC	0x88880020
199 #define NIDLETHREADS	4
200 #define IDLESPINS	1000
201 
202 /* Flag to be used in a ucontext_t's uc_flags indicating that
203  * the saved register state is "user" state only, not full
204  * trap state.
205  */
206 #define _UC_USER_BIT		30
207 #define _UC_USER		(1LU << _UC_USER_BIT)
208 
209 void	pthread_init(void)  __attribute__ ((__constructor__));
210 
211 /* Utility functions */
212 
213 /* Set up/clean up a thread's basic state. */
214 void	pthread__initthread(pthread_t self, pthread_t t);
215 
216 /* Go do something else. Don't go back on the run queue */
217 void	pthread__block(pthread_t self, pthread_spin_t* queuelock);
218 /* Put a thread back on the run queue */
219 void	pthread__sched(pthread_t self, pthread_t thread);
220 void	pthread__sched_sleepers(pthread_t self, struct pthread_queue_t *threadq);
221 void	pthread__sched_idle(pthread_t self, pthread_t thread);
222 void	pthread__sched_idle2(pthread_t self);
223 
224 void	pthread__sched_bulk(pthread_t self, pthread_t qhead);
225 
226 void	pthread__idle(void);
227 
228 /* Get the next thread */
229 pthread_t pthread__next(pthread_t self);
230 
231 int	pthread__stackalloc(pthread_t *t);
232 void	pthread__initmain(pthread_t *t);
233 
234 void	pthread__sa_start(void);
235 void	pthread__sa_recycle(pthread_t old, pthread_t new);
236 
237 /* Alarm code */
238 void	pthread__alarm_init(void);
239 void	pthread__alarm_add(pthread_t, struct pt_alarm_t *,
240     const struct timespec *, void (*)(void *), void *);
241 void	pthread__alarm_del(pthread_t, struct pt_alarm_t *);
242 int	pthread__alarm_fired(struct pt_alarm_t *);
243 void	pthread__alarm_process(pthread_t self, void *arg);
244 
245 /* Internal locking primitives */
246 void	pthread__lockprim_init(void);
247 void	pthread_lockinit(pthread_spin_t *lock);
248 void	pthread_spinlock(pthread_t thread, pthread_spin_t *lock);
249 int	pthread_spintrylock(pthread_t thread, pthread_spin_t *lock);
250 void	pthread_spinunlock(pthread_t thread, pthread_spin_t *lock);
251 
252 extern const struct pthread_lock_ops *pthread__lock_ops;
253 
254 #define	pthread__simple_lock_init(alp)	(*pthread__lock_ops->plo_init)(alp)
255 #define	pthread__simple_lock_try(alp)	(*pthread__lock_ops->plo_try)(alp)
256 #define	pthread__simple_unlock(alp)	(*pthread__lock_ops->plo_unlock)(alp)
257 
258 #ifndef _getcontext_u
259 int	_getcontext_u(ucontext_t *);
260 #endif
261 #ifndef _setcontext_u
262 int	_setcontext_u(const ucontext_t *);
263 #endif
264 #ifndef _swapcontext_u
265 int	_swapcontext_u(ucontext_t *, const ucontext_t *);
266 #endif
267 
268 void	pthread__testcancel(pthread_t self);
269 int	pthread__find(pthread_t self, pthread_t target);
270 
271 #ifndef PTHREAD_MD_INIT
272 #define PTHREAD_MD_INIT
273 #endif
274 
275 #ifndef _INITCONTEXT_U_MD
276 #define _INITCONTEXT_U_MD(ucp)
277 #endif
278 
/*
 * Initialize a ucontext for user-level ("user" register state) use:
 * mark the CPU and stack members valid, then apply any
 * machine-dependent initialization via _INITCONTEXT_U_MD.
 */
279 #define _INITCONTEXT_U(ucp) do {					\
280 	(ucp)->uc_flags = _UC_CPU | _UC_STACK;				\
281 	_INITCONTEXT_U_MD(ucp)						\
282 	} while (/*CONSTCOND*/0)
283 
284 #ifdef __PTHREAD_SIGNAL_PRIVATE
285 
286 /*
287  * Macros for converting from ucontext to sigcontext and vice-versa.
288  * Note that going from sigcontext->ucontext is only safe for a
289  * sigcontext that was first created from a ucontext.
290  *
291  * Arch-specific code can override this, if necessary.  It may also
292  * be necessary for arch-specific code to include extra info along with
293  * the sigcontext.
294  */
295 #ifndef PTHREAD_SIGCONTEXT_EXTRA
296 #define	PTHREAD_SIGCONTEXT_EXTRA
297 #endif
298 
/*
 * A sigcontext plus any extra machine-dependent state an arch needs to
 * carry alongside it (PTHREAD_SIGCONTEXT_EXTRA, empty by default).
 */
299 struct pthread__sigcontext {
300 	struct sigcontext	psc_context;	/* The base sigcontext */
301 	PTHREAD_SIGCONTEXT_EXTRA
302 };
303 
304 #ifndef PTHREAD_UCONTEXT_TO_SIGCONTEXT
/*
 * Stamp the given signal mask into the ucontext, then convert it to a
 * sigcontext using the MD _UCONTEXT_TO_SIGCONTEXT helper.
 */
305 #define	PTHREAD_UCONTEXT_TO_SIGCONTEXT(mask, uc, psc)			\
306 do {									\
307 	(uc)->uc_sigmask = *(mask);					\
308 	/*								\
309 	 * XXX We may want to check for _UC_USER here and do a		\
310 	 * XXX _INITCONTEXT_U_MD() and clearing _UC_USER on such	\
311 	 * XXX contexts before converting to a sigcontext, thus		\
312 	 * XXX allowing signal handlers to modify the non-_UC_USER	\
313 	 * XXX registers.  Hazy territory; ignore it for now.		\
314 	 */								\
315 	_UCONTEXT_TO_SIGCONTEXT((uc), &(psc)->psc_context);		\
316 } while (/*CONSTCOND*/0)
317 
/*
 * Convert a sigcontext (built from a ucontext) back to a ucontext,
 * then clear _UC_SIGMASK -- the signal mask is handled separately,
 * not restored from this context (NOTE(review): confirm rationale).
 */
318 #define	PTHREAD_SIGCONTEXT_TO_UCONTEXT(psc, uc)				\
319 do {									\
320 	_SIGCONTEXT_TO_UCONTEXT(&(psc)->psc_context, (uc));		\
321 	(uc)->uc_flags &= ~_UC_SIGMASK;					\
322 } while (/*CONSTCOND*/0)
323 #else
324 void	pthread__ucontext_to_sigcontext(const sigset_t *, ucontext_t *,
325 	    struct pthread__sigcontext *);
326 void	pthread__sigcontext_to_ucontext(const struct pthread__sigcontext *,
327 	    ucontext_t *);
328 #endif /* PTHREAD_UCONTEXT_TO_SIGCONTEXT */
329 
330 #endif /* __PTHREAD_SIGNAL_PRIVATE */
331 
/*
 * Locating the current thread's pthread_st: either a dedicated
 * machine register holds the pointer directly, or it is recovered by
 * masking the stack pointer down to the PT_STACKSIZE-aligned stack
 * base (which is where the thread structure lives).
 */
332 #ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
333 #define pthread__id(reg) (reg)
334 #else
335 /* Stack location of pointer to a particular thread */
336 #define pthread__id(sp) \
337 	((pthread_t) (((vaddr_t)(sp)) & ~PT_STACKMASK))
338 
339 #define pthread__id_reg() pthread__sp()
340 #endif
341 
342 /* The calling thread's own pthread_t. */
343 #define pthread__self() (pthread__id(pthread__id_reg()))
343 
/* Report an impossible condition via the assertion-failure handler. */
344 #define pthread__abort()						\
345 	pthread__assertfunc(__FILE__, __LINE__, __func__, "unreachable")
346 
/* Internal assert: on failure, report expression and location. */
347 #define pthread__assert(e) do {						\
348 	if (__predict_false(!(e)))					\
349        	       pthread__assertfunc(__FILE__, __LINE__, __func__, #e);	\
350         } while (/*CONSTCOND*/0)
351 
/*
 * Error-check helper: if (e) is false, report msg and make the
 * enclosing function return (err).  Only usable inside functions
 * whose return type matches err.
 */
352 #define pthread__error(err, msg, e) do {				\
353 	if (__predict_false(!(e))) {					\
354        	       pthread__errorfunc(__FILE__, __LINE__, __func__, msg);	\
355 	       return (err);						\
356 	} 								\
357         } while (/*CONSTCOND*/0)
358 
359 
360 
361 /* These three routines are defined in processor-specific code. */
362 void	pthread__upcall_switch(pthread_t self, pthread_t next);
363 void	pthread__switch(pthread_t self, pthread_t next);
364 void	pthread__locked_switch(pthread_t self, pthread_t next,
365     pthread_spin_t *lock);
366 
367 void	pthread__signal_init(void);
368 
369 void	pthread__signal(pthread_t self, pthread_t t, int sig, int code);
370 void	pthread__deliver_signal(pthread_t self, pthread_t t, int sig, int code);
371 void	pthread__signal_deferred(pthread_t self, pthread_t t);
372 
373 void	pthread__destroy_tsd(pthread_t self);
374 void	pthread__assertfunc(char *file, int line, char *function, char *expr);
375 void	pthread__errorfunc(char *file, int line, char *function, char *msg);
376 
377 #endif /* _LIB_PTHREAD_INT_H */
378