xref: /dflybsd-src/lib/libthread_xu/thread/thr_private.h (revision 41871674d0079dec70d55eb824f39d07dc7b3310)
1 /*
2  * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by John Birrell.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * Private thread definitions for the uthread kernel.
33  *
34  * $FreeBSD: src/lib/libpthread/thread/thr_private.h,v 1.120 2004/11/01 10:49:34 davidxu Exp $
35  * $DragonFly: src/lib/libthread_xu/thread/thr_private.h,v 1.14 2006/04/07 14:11:22 davidxu Exp $
36  */
37 
38 #ifndef _THR_PRIVATE_H
39 #define _THR_PRIVATE_H
40 
41 /*
42  * Include files.
43  */
44 #include <sys/types.h>
45 #include <sys/time.h>
46 #include <sys/cdefs.h>
47 #include <sys/queue.h>
48 #include <machine/atomic.h>
49 #include <errno.h>
50 #include <limits.h>
51 #include <signal.h>
52 #include <stdio.h>
53 #include <sched.h>
54 #include <unistd.h>
55 #include <pthread.h>
56 #include <pthread_np.h>
57 
58 #include "pthread_md.h"
59 #include "thr_umtx.h"
60 #include "thread_db.h"
61 
62 /* Signal to do cancellation */
63 #define	SIGCANCEL		32
64 
65 /*
66  * Kernel fatal error handler macro.
67  */
68 #define PANIC(string)		_thread_exit(__FILE__,__LINE__,string)
69 
70 /* Output debug messages like this: */
71 #define stdout_debug(args...)	_thread_printf(STDOUT_FILENO, ##args)
72 #define stderr_debug(args...)	_thread_printf(STDERR_FILENO, ##args)
73 
74 #ifdef _PTHREADS_INVARIANTS
75 #define THR_ASSERT(cond, msg) do {	\
76 	if (__predict_false(!(cond)))	\
77 		PANIC(msg);		\
78 } while (0)
79 #else
80 #define THR_ASSERT(cond, msg)
81 #endif
82 
83 #ifdef PIC
84 #define	STATIC_LIB_REQUIRE(name)
85 #else
86 #define STATIC_LIB_REQUIRE(name)	__asm(".globl " #name)
87 #endif
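
/*
 * In a static (non-PIC) build, the .globl reference above leaves an
 * undefined global symbol in the object file, which forces the static
 * linker to pull in the archive member that defines `name' even if the
 * application never references it directly.  Shared (PIC) builds do not
 * need this, so the macro expands to nothing there.
 */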
88 
89 TAILQ_HEAD(thread_head, pthread)	thread_head;
90 TAILQ_HEAD(atfork_head, pthread_atfork)	atfork_head;
91 
92 #define	TIMESPEC_ADD(dst, src, val)				\
93 	do { 							\
94 		(dst)->tv_sec = (src)->tv_sec + (val)->tv_sec;	\
95 		(dst)->tv_nsec = (src)->tv_nsec + (val)->tv_nsec; \
96 		if ((dst)->tv_nsec >= 1000000000) {		\
97 			(dst)->tv_sec++;			\
98 			(dst)->tv_nsec -= 1000000000;		\
99 		}						\
100 	} while (0)
101 
102 #define	TIMESPEC_SUB(dst, src, val)				\
103 	do { 							\
104 		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;	\
105 		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
106 		if ((dst)->tv_nsec < 0) {			\
107 			(dst)->tv_sec--;			\
108 			(dst)->tv_nsec += 1000000000;		\
109 		}						\
110 	} while (0)
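
/*
 * Usage sketch (illustrative only, not code from this library):
 * composing an absolute wakeup time from the current time plus a
 * relative delay of 500 milliseconds, e.g. before a timed wait.
 *
 *	struct timespec now, delta, abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &now);
 *	delta.tv_sec = 0;
 *	delta.tv_nsec = 500000000;
 *	TIMESPEC_ADD(&abstime, &now, &delta);
 *
 * Both macros evaluate their arguments more than once and assume the
 * tv_nsec inputs are already normalized to [0, 1000000000), so
 * arguments with side effects should be avoided.
 */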
111 
112 struct pthread_mutex {
113 	/*
114 	 * Lock for accesses to this structure.
115 	 */
116 	volatile umtx_t			m_lock;
117 	enum pthread_mutextype		m_type;
118 	int				m_protocol;
119 	TAILQ_HEAD(mutex_head, pthread)	m_queue;
120 	struct pthread			*m_owner;
121 	long				m_flags;
122 	int				m_count;
123 	int				m_refcount;
124 
125 	/*
126 	 * Used for priority inheritance and protection.
127 	 *
128 	 *   m_prio       - For priority inheritance, the highest active
129 	 *                  priority (threads locking the mutex inherit
130 	 *                  this priority).  For priority protection, the
131 	 *                  ceiling priority of this mutex.
132 	 *   m_saved_prio - the mutex owner's inherited priority before
133 	 *                  taking the mutex, restored when the owner
134 	 *                  unlocks the mutex.
135 	 */
136 	int				m_prio;
137 	int				m_saved_prio;
138 
139 	/*
140 	 * Link for list of all mutexes a thread currently owns.
141 	 */
142 	TAILQ_ENTRY(pthread_mutex)	m_qe;
143 };
144 
145 #define TAILQ_INITIALIZER	{ NULL, NULL }
146 
147 #define PTHREAD_MUTEX_STATIC_INITIALIZER   \
148 	{0, PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
149 	NULL, { NULL }, MUTEX_FLAGS_PRIVATE, 0, 0, 0, TAILQ_INITIALIZER }
150 /*
151  * Flags for mutexes.
152  */
153 #define MUTEX_FLAGS_PRIVATE	0x01
154 #define MUTEX_FLAGS_INITED	0x02
155 #define MUTEX_FLAGS_BUSY	0x04
156 
157 struct pthread_mutex_attr {
158 	enum pthread_mutextype	m_type;
159 	int			m_protocol;
160 	int			m_ceiling;
161 	long			m_flags;
162 };
163 
164 #define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
165 	{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
166 
167 struct pthread_cond {
168 	/*
169 	 * Lock for accesses to this structure.
170 	 */
171 	volatile umtx_t	c_lock;
172 	volatile umtx_t	c_seqno;
173 	volatile int	c_waiters;
174 	volatile int	c_wakeups;
175 	int		c_pshared;
176 	int		c_clockid;
177 };
178 
179 struct pthread_cond_attr {
180 	int		c_pshared;
181 	int		c_clockid;
182 };
183 
184 struct pthread_barrier {
185 	volatile umtx_t	b_lock;
186 	volatile umtx_t	b_cycle;
187 	volatile int	b_count;
188 	volatile int	b_waiters;
189 };
190 
191 struct pthread_barrierattr {
192 	int		pshared;
193 };
194 
195 struct pthread_spinlock {
196 	volatile umtx_t	s_lock;
197 };
198 
199 /*
200  * Flags for condition variables.
201  */
202 #define COND_FLAGS_PRIVATE	0x01
203 #define COND_FLAGS_INITED	0x02
204 #define COND_FLAGS_BUSY		0x04
205 
206 /*
207  * Cleanup definitions.
208  */
209 struct pthread_cleanup {
210 	struct pthread_cleanup	*next;
211 	void			(*routine)(void *);
212 	void			*routine_arg;
213 	int			onstack;
214 };
215 
216 #define	THR_CLEANUP_PUSH(td, func, arg) {		\
217 	struct pthread_cleanup __cup;			\
218 							\
219 	__cup.routine = func;				\
220 	__cup.routine_arg = arg;			\
221 	__cup.onstack = 1;				\
222 	__cup.next = (td)->cleanup;			\
223 	(td)->cleanup = &__cup;
224 
225 #define	THR_CLEANUP_POP(td, exec)			\
226 	(td)->cleanup = __cup.next;			\
227 	if ((exec) != 0)				\
228 		__cup.routine(__cup.routine_arg);	\
229 }
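
/*
 * THR_CLEANUP_PUSH opens a block that THR_CLEANUP_POP closes, so the two
 * must always be used as a pair within the same function, mirroring the
 * public pthread_cleanup_push()/pthread_cleanup_pop() pair.  A sketch of
 * the intended pattern (illustrative; `unlock_something' and `some_lock'
 * are placeholders, not names defined here):
 *
 *	THR_CLEANUP_PUSH(curthread, unlock_something, &some_lock);
 *	... code that may call pthread_exit() or be cancelled ...
 *	THR_CLEANUP_POP(curthread, 1);
 *
 * Passing a non-zero second argument to THR_CLEANUP_POP runs the handler
 * as it is popped.
 */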
230 
231 struct pthread_atfork {
232 	TAILQ_ENTRY(pthread_atfork) qe;
233 	void (*prepare)(void);
234 	void (*parent)(void);
235 	void (*child)(void);
236 };
237 
238 struct pthread_attr {
239 	int	sched_policy;
240 	int	sched_inherit;
241 	int	sched_interval;
242 	int	prio;
243 	int	suspend;
244 #define	THR_STACK_USER		0x100	/* 0xFF reserved for <pthread.h> */
245 	int	flags;
246 	void	*arg_attr;
247 	void	*stackaddr_attr;
248 	size_t	stacksize_attr;
249 	size_t	guardsize_attr;
250 };
251 
252 /*
253  * Thread creation state attributes.
254  */
255 #define THR_CREATE_RUNNING		0
256 #define THR_CREATE_SUSPENDED		1
257 
258 /*
259  * Miscellaneous definitions.
260  */
261 #define THR_STACK_DEFAULT		(sizeof(void *) / 4 * 1024 * 1024)
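
/*
 * The default stack size scales with the pointer size: sizeof(void *) / 4
 * is 1 on 32-bit platforms and 2 on 64-bit platforms, giving a default
 * stack of 1MB and 2MB respectively.
 */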
262 
263 /*
264  * Maximum size of initial thread's stack.  This perhaps deserves to be larger
265  * than the stacks of other threads, since many applications are likely to run
266  * almost entirely on this stack.
267  */
268 #define THR_STACK_INITIAL		(THR_STACK_DEFAULT * 2)
269 
270 /*
271  * Define the different priority ranges.  All applications have thread
272  * priorities constrained within 0-31.  The threads library raises the
273  * priority when delivering signals in order to ensure that signal
274  * delivery happens (from the POSIX spec) "as soon as possible".
275  * In the future, the threads library will also be able to map specific
276  * threads into real-time (cooperating) processes or kernel threads.
277  * The RT and SIGNAL priorities will be used internally and added to
278  * thread base priorities so that the scheduling queue can handle both
279  * normal and RT priority threads with and without signal handling.
280  *
281  * The approach taken is that, within each class, signal delivery
282  * always has priority over thread execution.
283  */
284 #define THR_DEFAULT_PRIORITY			15
285 #define THR_MIN_PRIORITY			0
286 #define THR_MAX_PRIORITY			31	/* 0x1F */
287 #define THR_SIGNAL_PRIORITY			32	/* 0x20 */
288 #define THR_RT_PRIORITY				64	/* 0x40 */
289 #define THR_FIRST_PRIORITY			THR_MIN_PRIORITY
290 #define THR_LAST_PRIORITY	\
291 	(THR_MAX_PRIORITY + THR_SIGNAL_PRIORITY + THR_RT_PRIORITY)
292 #define THR_BASE_PRIORITY(prio)	((prio) & THR_MAX_PRIORITY)
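
/*
 * For example, with the values above: a thread at base priority 15 that
 * is raised into the signal range runs at 15 + THR_SIGNAL_PRIORITY = 47,
 * and THR_BASE_PRIORITY(47) recovers 47 & 0x1F = 15.  THR_LAST_PRIORITY
 * works out to 31 + 32 + 64 = 127.
 */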
293 
294 /*
295  * Time slice period in microseconds.
296  */
297 #define TIMESLICE_USEC				20000
298 
299 struct pthread_rwlockattr {
300 	int		pshared;
301 };
302 
303 struct pthread_rwlock {
304 	pthread_mutex_t	lock;	/* monitor lock */
305 	pthread_cond_t	read_signal;
306 	pthread_cond_t	write_signal;
307 	int		state;	/* 0 = idle  >0 = # of readers  -1 = writer */
308 	int		blocked_writers;
309 };
310 
311 /*
312  * Thread states.
313  */
314 enum pthread_state {
315 	PS_RUNNING,
316 	PS_DEAD
317 };
318 
319 union pthread_wait_data {
320 	pthread_mutex_t	mutex;
321 };
322 
323 struct pthread_specific_elem {
324 	const void	*data;
325 	int		seqno;
326 };
327 
328 struct pthread_key {
329 	volatile int	allocated;
330 	volatile int	count;
331 	int		seqno;
332 	void            (*destructor)(void *);
333 };
334 
335 /*
336  * Thread structure.
337  */
338 struct pthread {
339 	/*
340 	 * Magic value to help recognize a valid thread structure
341 	 * from an invalid one:
342 	 */
343 #define	THR_MAGIC		((u_int32_t) 0xd09ba115)
344 	u_int32_t		magic;
345 	char			*name;
346 	u_int64_t		uniqueid; /* for gdb */
347 
348 	/*
349 	 * Lock for accesses to this thread structure.
350 	 */
351 	umtx_t			lock;
352 
353 	/* Thread has terminated in the kernel; written by the kernel. */
354 	long			terminated;
355 
356 	/* Kernel thread id. */
357 	long			tid;
358 
359 	/* Internal condition variable cycle number. */
360 	umtx_t			cycle;
361 
362 	/* Number of low-level locks the thread currently holds. */
363 	int			locklevel;
364 
365 	/*
366 	 * Set to non-zero when this thread has entered a critical
367 	 * region.  We allow for recursive entries into critical regions.
368 	 */
369 	int			critical_count;
370 
371 	/* Signal blocked counter. */
372 	int			sigblock;
373 
374 	/* Queue entry for list of all threads. */
375 	TAILQ_ENTRY(pthread)	tle;	/* link for all threads in process */
376 
377 	/* Queue entry for GC lists. */
378 	TAILQ_ENTRY(pthread)	gcle;
379 
380 	/* Hash queue entry. */
381 	LIST_ENTRY(pthread)	hle;
382 
383 	/* Thread's reference count. */
384 	int			refcount;
385 
386 	/*
387 	 * Thread start routine, argument, stack pointer and thread
388 	 * attributes.
389 	 */
390 	void			*(*start_routine)(void *);
391 	void			*arg;
392 	struct pthread_attr	attr;
393 
394 	/*
395 	 * Cancelability flags
396 	 */
397 #define	THR_CANCEL_DISABLE		0x0001
398 #define	THR_CANCEL_EXITING		0x0002
399 #define THR_CANCEL_AT_POINT		0x0004
400 #define THR_CANCEL_NEEDED		0x0008
401 #define	SHOULD_CANCEL(val)					\
402 	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
403 		 THR_CANCEL_NEEDED)) == THR_CANCEL_NEEDED)
404 
405 #define	SHOULD_ASYNC_CANCEL(val)				\
406 	(((val) & (THR_CANCEL_DISABLE | THR_CANCEL_EXITING |	\
407 		 THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT)) ==	\
408 		 (THR_CANCEL_NEEDED | THR_CANCEL_AT_POINT))
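/*
 * For example: if cancelflags contains only THR_CANCEL_NEEDED (0x0008),
 * SHOULD_CANCEL() is true.  If THR_CANCEL_DISABLE is also set
 * (0x0008 | 0x0001 = 0x0009), the masked value no longer equals
 * THR_CANCEL_NEEDED and SHOULD_CANCEL() is false.  SHOULD_ASYNC_CANCEL()
 * additionally requires THR_CANCEL_AT_POINT to be set.
 */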
409 	int			cancelflags;
410 
411 	/* Thread temporary signal mask. */
412 	sigset_t		sigmask;
413 
414 	/* Thread state: */
415 	umtx_t			state;
416 
417 	/*
418 	 * Error variable used instead of errno for internal library use.
419 	 */
420 	int			error;
421 
422 	/*
423 	 * The joiner is the thread that is joining this thread, i.e. the
424 	 * thread blocked in pthread_join() waiting for this thread to exit.
425 	 */
426 	struct pthread		*joiner;
427 
428 	/*
429 	 * The current thread can belong to a priority mutex queue.
430 	 * This is the synchronization queue link.
431 	 */
432 	TAILQ_ENTRY(pthread)	sqe;
433 
434 	/* Wait data. */
435 	union pthread_wait_data data;
436 
437 	/* Miscellaneous flags; only set with scheduling lock held. */
438 	int			flags;
439 #define THR_FLAGS_PRIVATE	0x0001
440 #define	THR_FLAGS_NEED_SUSPEND	0x0002	/* thread should be suspended */
441 #define	THR_FLAGS_SUSPENDED	0x0004	/* thread is suspended */
442 
443 	/* Thread list flags; only set with thread list lock held. */
444 	int			tlflags;
445 #define	TLFLAGS_GC_SAFE		0x0001	/* thread safe for cleaning */
446 #define	TLFLAGS_IN_TDLIST	0x0002	/* thread in all thread list */
447 #define	TLFLAGS_IN_GCLIST	0x0004	/* thread in gc list */
448 #define	TLFLAGS_DETACHED	0x0008	/* thread is detached */
449 
450 	/*
451 	 * Base priority is the user-settable and retrievable priority
452 	 * of the thread.  It is only affected by explicit calls to
453 	 * set thread priority and upon thread creation via a thread
454 	 * attribute or default priority.
455 	 */
456 	char			base_priority;
457 
458 	/*
459 	 * Inherited priority is the priority a thread inherits by
460 	 * taking a priority inheritance or protection mutex.  It
461 	 * is not affected by base priority changes.  Inherited
462 	 * priority defaults to and remains 0 until a mutex is taken
463 	 * that is being waited on by any other thread whose priority
464 	 * is non-zero.
465 	 */
466 	char			inherited_priority;
467 
468 	/*
469 	 * Active priority is always the maximum of the thread's base
470 	 * priority and inherited priority.  When there is a change
471 	 * in either the base or inherited priority, the active
472 	 * priority must be recalculated.
473 	 */
474 	char			active_priority;
475 
476 	/* Number of priority ceiling or protection mutexes owned. */
477 	int			priority_mutex_count;
478 
479 	/* Queue of currently owned simple type mutexes. */
480 	TAILQ_HEAD(, pthread_mutex)	mutexq;
481 
482 	void				*ret;
483 	struct pthread_specific_elem	*specific;
484 	int				specific_data_count;
485 
486 	/* Number of rwlock read locks held. */
487 	int			rdlock_count;
488 
489 	/* Current locks bitmap for rtld. */
491 	int			rtld_bits;
492 
493 	/* Thread control block */
494 	struct tls_tcb		*tcb;
495 
496 	/* Linked list of cleanup handlers. */
497 	struct pthread_cleanup *cleanup;
498 
499 	/* Enable event reporting */
500 	int			report_events;
501 
502 	/* Event mask */
503 	td_thr_events_t		event_mask;
504 
505 	/* Event */
506 	td_event_msg_t		event_buf;
507 };
508 
509 #define	THR_IN_CRITICAL(thrd)				\
510 	(((thrd)->locklevel > 0) ||			\
511 	((thrd)->critical_count > 0))
512 
513 #define THR_UMTX_TRYLOCK(thrd, lck)			\
514 	_thr_umtx_trylock((lck), (thrd)->tid)
515 
516 #define	THR_UMTX_LOCK(thrd, lck)			\
517 	_thr_umtx_lock((lck), (thrd)->tid)
518 
519 #define	THR_UMTX_TIMEDLOCK(thrd, lck, timo)		\
520 	_thr_umtx_timedlock((lck), (thrd)->tid, (timo))
521 
522 #define	THR_UMTX_UNLOCK(thrd, lck)			\
523 	_thr_umtx_unlock((lck), (thrd)->tid)
524 
525 #define	THR_LOCK_ACQUIRE(thrd, lck)			\
526 do {							\
527 	(thrd)->locklevel++;				\
528 	_thr_umtx_lock((lck), (thrd)->tid);		\
529 } while (0)
530 
531 #ifdef	_PTHREADS_INVARIANTS
532 #define	THR_ASSERT_LOCKLEVEL(thrd)			\
533 do {							\
534 	if (__predict_false((thrd)->locklevel <= 0))	\
535 		_thr_assert_lock_level();		\
536 } while (0)
537 #else
538 #define THR_ASSERT_LOCKLEVEL(thrd)
539 #endif
540 
541 #define	THR_LOCK_RELEASE(thrd, lck)			\
542 do {							\
543 	THR_ASSERT_LOCKLEVEL(thrd);			\
544 	_thr_umtx_unlock((lck), (thrd)->tid);		\
545 	(thrd)->locklevel--;				\
546 	_thr_ast(thrd);					\
547 } while (0)
548 
549 #define	THR_LOCK(curthrd)		THR_LOCK_ACQUIRE(curthrd, &(curthrd)->lock)
550 #define	THR_UNLOCK(curthrd)		THR_LOCK_RELEASE(curthrd, &(curthrd)->lock)
551 #define	THR_THREAD_LOCK(curthrd, thr)	THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
552 #define	THR_THREAD_UNLOCK(curthrd, thr)	THR_LOCK_RELEASE(curthrd, &(thr)->lock)
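
/*
 * Typical pattern (an illustrative sketch, not code from this library;
 * `curthread' stands for the caller's own struct pthread pointer):
 *
 *	THR_LOCK(curthread);
 *	... examine or modify the thread structure ...
 *	THR_UNLOCK(curthread);
 *
 * THR_LOCK_RELEASE() drops the lock and then calls _thr_ast(), giving
 * deferred per-thread work a chance to run once the lock is no longer
 * held.
 */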
553 
554 #define	THREAD_LIST_LOCK(curthrd)				\
555 do {								\
556 	THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock);		\
557 } while (0)
558 
559 #define	THREAD_LIST_UNLOCK(curthrd)				\
560 do {								\
561 	THR_LOCK_RELEASE((curthrd), &_thr_list_lock);		\
562 } while (0)
563 
564 /*
565  * Macros to insert/remove threads to the all thread list and
566  * the gc list.
567  */
568 #define	THR_LIST_ADD(thrd) do {					\
569 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) == 0) {	\
570 		TAILQ_INSERT_HEAD(&_thread_list, thrd, tle);	\
571 		_thr_hash_add(thrd);				\
572 		(thrd)->tlflags |= TLFLAGS_IN_TDLIST;		\
573 	}							\
574 } while (0)
575 #define	THR_LIST_REMOVE(thrd) do {				\
576 	if (((thrd)->tlflags & TLFLAGS_IN_TDLIST) != 0) {	\
577 		TAILQ_REMOVE(&_thread_list, thrd, tle);		\
578 		_thr_hash_remove(thrd);				\
579 		(thrd)->tlflags &= ~TLFLAGS_IN_TDLIST;		\
580 	}							\
581 } while (0)
582 #define	THR_GCLIST_ADD(thrd) do {				\
583 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) {	\
584 		TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
585 		(thrd)->tlflags |= TLFLAGS_IN_GCLIST;		\
586 		_thr_gc_count++;					\
587 	}							\
588 } while (0)
589 #define	THR_GCLIST_REMOVE(thrd) do {				\
590 	if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) {	\
591 		TAILQ_REMOVE(&_thread_gc_list, thrd, gcle);	\
592 		(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST;		\
593 		_thr_gc_count--;					\
594 	}							\
595 } while (0)
596 
597 #define GC_NEEDED()	(_thr_gc_count >= 5)
598 
599 #define	THR_IN_SYNCQ(thrd)	(((thrd)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
600 
601 #define SHOULD_REPORT_EVENT(curthr, e)			\
602 	((curthr)->report_events &&			\
603 	 ((((curthr)->event_mask | _thread_event_mask) & (e)) != 0))
604 
605 extern int __isthreaded;
606 
607 /*
608  * Global variables for the pthread library.
609  */
610 extern char		*_usrstack;
611 extern struct pthread	*_thr_initial;
612 extern int		_thread_scope_system;
613 
614 /* For debugger */
615 extern int		_libthread_xu_debug;
616 extern int		_thread_event_mask;
617 extern struct pthread	*_thread_last_event;
618 
619 /* List of all threads */
620 extern struct thread_head	_thread_list;
621 
622 /* List of threads needing GC */
623 extern struct thread_head	_thread_gc_list;
624 
625 extern int	_thread_active_threads;
626 
627 extern struct	atfork_head	_thr_atfork_list;
628 extern umtx_t	_thr_atfork_lock;
629 
630 /* Default thread attributes */
631 extern struct pthread_attr _pthread_attr_default;
632 
633 /* Default mutex attributes */
634 extern struct pthread_mutex_attr _pthread_mutexattr_default;
635 
636 /* Default condition variable attributes */
637 extern struct pthread_cond_attr _pthread_condattr_default;
638 
639 extern pid_t	_thr_pid;
640 extern size_t	_thr_guard_default;
641 extern size_t	_thr_stack_default;
642 extern size_t	_thr_stack_initial;
643 extern int	_thr_page_size;
644 extern int	_thr_gc_count;
645 
646 extern umtx_t	_mutex_static_lock;
647 extern umtx_t	_cond_static_lock;
648 extern umtx_t	_rwlock_static_lock;
649 extern umtx_t	_keytable_lock;
650 extern umtx_t	_thr_list_lock;
651 extern umtx_t	_thr_event_lock;
652 
653 /*
654  * Function prototype definitions.
655  */
656 __BEGIN_DECLS
657 int	_thr_setthreaded(int);
658 int	_mutex_cv_lock(pthread_mutex_t *, int count);
659 int	_mutex_cv_unlock(pthread_mutex_t *, int *count);
660 void	_mutex_notify_priochange(struct pthread *, struct pthread *, int);
661 int	_mutex_reinit(pthread_mutex_t *);
662 void	_mutex_fork(struct pthread *curthread);
663 void	_mutex_unlock_private(struct pthread *);
664 void	_libpthread_init(struct pthread *);
665 struct pthread *_thr_alloc(struct pthread *);
666 void	_thread_exit(const char *, int, const char *) __dead2;
667 void	_thr_exit_cleanup(void);
668 int	_thr_ref_add(struct pthread *, struct pthread *, int);
669 void	_thr_ref_delete(struct pthread *, struct pthread *);
670 void	_thr_ref_delete_unlocked(struct pthread *, struct pthread *);
671 int	_thr_find_thread(struct pthread *, struct pthread *, int);
672 void	_thr_rtld_init(void);
673 void	_thr_rtld_fini(void);
674 int	_thr_stack_alloc(struct pthread_attr *);
675 void	_thr_stack_free(struct pthread_attr *);
676 void	_thr_free(struct pthread *, struct pthread *);
677 void	_thr_gc(struct pthread *);
678 void    _thread_cleanupspecific(void);
679 void    _thread_dump_info(void);
680 void	_thread_printf(int, const char *, ...);
681 void	_thr_spinlock_init(void);
682 int	_thr_cancel_enter(struct pthread *);
683 void	_thr_cancel_leave(struct pthread *, int);
684 void	_thr_signal_block(struct pthread *);
685 void	_thr_signal_unblock(struct pthread *);
686 void	_thr_signal_init(void);
687 void	_thr_signal_deinit(void);
688 int	_thr_send_sig(struct pthread *, int sig);
689 void	_thr_list_init(void);
690 void	_thr_hash_add(struct pthread *);
691 void	_thr_hash_remove(struct pthread *);
692 struct pthread *_thr_hash_find(struct pthread *);
693 void	_thr_link(struct pthread *curthread, struct pthread *thread);
694 void	_thr_unlink(struct pthread *curthread, struct pthread *thread);
695 void	_thr_suspend_check(struct pthread *curthread);
696 void	_thr_assert_lock_level(void) __dead2;
697 void	_thr_ast(struct pthread *);
698 int	_thr_get_tid(void);
699 void	_thr_report_creation(struct pthread *curthread,
700 			   struct pthread *newthread);
701 void	_thr_report_death(struct pthread *curthread);
702 void	_thread_bp_create(void);
703 void	_thread_bp_death(void);
704 
705 /* #include <sys/aio.h> */
706 #ifdef _SYS_AIO_H_
707 int	__sys_aio_suspend(const struct aiocb * const[], int, const struct timespec *);
708 #endif
709 
710 /* #include <fcntl.h> */
711 #ifdef  _SYS_FCNTL_H_
712 int     __sys_fcntl(int, int, ...);
713 int     __sys_open(const char *, int, ...);
714 #endif
715 
716 /* #include <sys/ioctl.h> */
717 #ifdef _SYS_IOCTL_H_
718 int	__sys_ioctl(int, unsigned long, ...);
719 #endif
720 
721 /* #include <sched.h> */
722 #ifdef	_SCHED_H_
723 int	__sys_sched_yield(void);
724 #endif
725 
726 /* #include <signal.h> */
727 #ifdef _SIGNAL_H_
728 int	__sys_kill(pid_t, int);
729 int     __sys_sigaction(int, const struct sigaction *, struct sigaction *);
730 int     __sys_sigpending(sigset_t *);
731 int     __sys_sigprocmask(int, const sigset_t *, sigset_t *);
732 int     __sys_sigsuspend(const sigset_t *);
733 int     __sys_sigreturn(ucontext_t *);
734 int     __sys_sigaltstack(const struct sigaltstack *, struct sigaltstack *);
735 #endif
736 
737 /* #include <sys/socket.h> */
738 #ifdef _SYS_SOCKET_H_
739 int	__sys_accept(int, struct sockaddr *, socklen_t *);
740 int	__sys_connect(int, const struct sockaddr *, socklen_t);
741 ssize_t __sys_recv(int, void *, size_t, int);
742 ssize_t __sys_recvfrom(int, void *, size_t, int, struct sockaddr *, socklen_t *);
743 ssize_t __sys_recvmsg(int, struct msghdr *, int);
744 int	__sys_sendfile(int, int, off_t, size_t, struct sf_hdtr *,
745 	    off_t *, int);
746 ssize_t __sys_sendmsg(int, const struct msghdr *, int);
747 ssize_t __sys_sendto(int, const void *,size_t, int, const struct sockaddr *, socklen_t);
748 #endif
749 
750 /* #include <sys/uio.h> */
751 #ifdef  _SYS_UIO_H_
752 ssize_t __sys_readv(int, const struct iovec *, int);
753 ssize_t __sys_writev(int, const struct iovec *, int);
754 #endif
755 
756 /* #include <time.h> */
757 #ifdef	_TIME_H_
758 int	__sys_nanosleep(const struct timespec *, struct timespec *);
759 #endif
760 
761 /* #include <unistd.h> */
762 #ifdef  _UNISTD_H_
763 int     __sys_close(int);
764 int     __sys_execve(const char *, char * const *, char * const *);
765 int	__sys_fork(void);
766 int	__sys_fsync(int);
767 pid_t	__sys_getpid(void);
768 int     __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
769 ssize_t __sys_read(int, void *, size_t);
770 ssize_t __sys_write(int, const void *, size_t);
771 void	__sys_exit(int);
772 int	__sys_sigwait(const sigset_t *, int *);
773 int	__sys_sigtimedwait(const sigset_t *, siginfo_t *,
774 		const struct timespec *);
775 int	__sys_sigwaitinfo(const sigset_t *set, siginfo_t *info);
776 #endif
777 
778 /* #include <poll.h> */
779 #ifdef _SYS_POLL_H_
780 int 	__sys_poll(struct pollfd *, unsigned, int);
781 #endif
782 
783 /* #include <sys/mman.h> */
784 #ifdef _SYS_MMAN_H_
785 int	__sys_msync(void *, size_t, int);
786 #endif
787 
788 static inline int
789 _thr_isthreaded(void)
790 {
791 	return (__isthreaded != 0);
792 }
793 
794 static inline int
795 _thr_is_inited(void)
796 {
797 	return (_thr_initial != 0);
798 }
799 
800 static inline void
801 _thr_check_init(void)
802 {
803 	if (_thr_initial == 0)
804 		_libpthread_init(0);
805 }
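
/*
 * A sketch of how these helpers are meant to be used at the top of a
 * public entry point (illustrative; `some_pthread_function' is a
 * placeholder, not a real symbol):
 *
 *	int
 *	some_pthread_function(void)
 *	{
 *		_thr_check_init();
 *		...
 *	}
 *
 * _thr_check_init() lazily runs _libpthread_init() the first time a
 * library entry point is reached before initialization has happened.
 */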
806 
807 __END_DECLS
808 
809 #endif  /* !_THR_PRIVATE_H */
810