/* Threads compatibility routines for libgcc2 and libobjc.  */
/* Compile this one with gcc.  */
/* Copyright (C) 1997, 1999, 2000, 2004, 2005, 2006
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* As a special exception, if you link this library with other files,
   some of which are compiled with GCC, to produce an executable,
   this library does not by itself cause the resulting executable
   to be covered by the GNU General Public License.
   This exception does not however invalidate any other reasons why
   the executable file might be covered by the GNU General Public License.  */

#ifndef GCC_GTHR_SOLARIS_H
#define GCC_GTHR_SOLARIS_H

/* Solaris threads as found in Solaris 2.[456].
   Actually these are Unix International (UI) threads, but I don't
   know if anyone else implements these.  */

#define __GTHREADS 1

#include <thread.h>
#include <errno.h>

#ifdef __cplusplus
#define UNUSED(x)
#else
#define UNUSED(x) x __attribute__((unused))
#endif

typedef thread_key_t __gthread_key_t;
typedef struct {
  mutex_t mutex;
  int once;
} __gthread_once_t;
typedef mutex_t __gthread_mutex_t;

typedef struct {
  long depth;
  thread_t owner;
  mutex_t actual;
} __gthread_recursive_mutex_t;

#define __GTHREAD_ONCE_INIT { DEFAULTMUTEX, 0 }
#define __GTHREAD_MUTEX_INIT DEFAULTMUTEX
#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
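
/* Illustrative sketch only (not part of this header): a client of the
   gthreads interface would typically declare statically initialized
   control objects with the macros above.  The names my_once, my_lock,
   my_rec_lock and my_setup are hypothetical.  */
#if 0
static __gthread_once_t my_once = __GTHREAD_ONCE_INIT;   /* one-shot flag */
static __gthread_mutex_t my_lock = __GTHREAD_MUTEX_INIT; /* plain mutex */

/* Recursive mutexes have no static initializer here, so they are set up
   at run time through the function named by
   __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION.  */
static __gthread_recursive_mutex_t my_rec_lock;

static void
my_setup (void)
{
  __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION (&my_rec_lock);
}
#endif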

#if SUPPORTS_WEAK && GTHREAD_USE_WEAK
# define __gthrw(name) \
  static __typeof(name) __gthrw_ ## name __attribute__ ((__weakref__(#name)));
# define __gthrw_(name) __gthrw_ ## name
#else
# define __gthrw(name)
# define __gthrw_(name) name
#endif
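
/* For reference, a sketch of what the macros above produce when the
   weak-reference branch is taken (the expansion shown is illustrative,
   not normative):

     __gthrw(mutex_lock)
       ==> static __typeof(mutex_lock) __gthrw_mutex_lock
             __attribute__ ((__weakref__("mutex_lock")));

   Calls below are therefore written __gthrw_(mutex_lock) (...), which
   resolves to the weak alias when libthread is linked in and remains an
   unresolved (null-address) weak reference otherwise.  In the non-weak
   branch, __gthrw_(mutex_lock) is simply mutex_lock.  */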

__gthrw(thr_keycreate)
__gthrw(thr_getspecific)
__gthrw(thr_setspecific)
__gthrw(thr_create)
__gthrw(thr_self)

__gthrw(mutex_init)
__gthrw(mutex_destroy)
__gthrw(mutex_lock)
__gthrw(mutex_trylock)
__gthrw(mutex_unlock)

#ifdef _LIBOBJC
__gthrw(thr_exit)
__gthrw(thr_getprio)
__gthrw(thr_setprio)
__gthrw(thr_yield)

__gthrw(cond_init)
__gthrw(cond_destroy)
__gthrw(cond_wait)
__gthrw(cond_broadcast)
__gthrw(cond_signal)

#endif

#if SUPPORTS_WEAK && GTHREAD_USE_WEAK

/* This will not actually work in Solaris 2.5, since libc contains
   dummy symbols of all thr_* routines.  */

static inline int
__gthread_active_p (void)
{
  static void *const __gthread_active_ptr = (void *) &__gthrw_(thr_create);
  return __gthread_active_ptr != 0;
}

#else /* not SUPPORTS_WEAK */

static inline int
__gthread_active_p (void)
{
  return 1;
}

#endif /* SUPPORTS_WEAK */
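
/* Illustrative sketch only: code built against this header guards the
   thr_* and mutex_* calls on __gthread_active_p (), falling back to a
   trivial single-threaded path when libthread is not linked in.  The
   helper below is hypothetical and simply mirrors the pattern used by
   the functions further down.  */
#if 0
static int
example_locked_increment (__gthread_mutex_t *lock, int *counter)
{
  if (__gthread_active_p ())
    __gthrw_(mutex_lock) (lock);
  ++*counter;
  if (__gthread_active_p ())
    __gthrw_(mutex_unlock) (lock);
  return *counter;
}
#endif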

#ifdef _LIBOBJC

/* Key structure for maintaining thread specific storage */
static thread_key_t _objc_thread_storage;

/* Thread local storage for a single thread */
static void *thread_local_storage = NULL;

/* Backend initialization functions */

/* Initialize the threads subsystem.  */
static inline int
__gthread_objc_init_thread_system (void)
{
  /* Initialize the thread storage key.  */
  if (__gthread_active_p ()
      && __gthrw_(thr_keycreate) (&_objc_thread_storage, NULL) == 0)
    return 0;

  return -1;
}

/* Close the threads subsystem.  */
static inline int
__gthread_objc_close_thread_system (void)
{
  if (__gthread_active_p ())
    return 0;
  else
    return -1;
}

/* Backend thread functions */

/* Create a new thread of execution.  */
static inline objc_thread_t
__gthread_objc_thread_detach (void (*func)(void *), void *arg)
{
  objc_thread_t thread_id;
  thread_t new_thread_id = 0;

  if (!__gthread_active_p ())
    return NULL;

  if (__gthrw_(thr_create) (NULL, 0, (void *) func, arg,
                            THR_DETACHED | THR_NEW_LWP,
                            &new_thread_id) == 0)
    thread_id = *(objc_thread_t *) &new_thread_id;
  else
    thread_id = NULL;

  return thread_id;
}
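
/* Illustrative sketch only: these backend routines are normally invoked
   by the GNU Objective-C runtime rather than by user code, but a direct
   call would look like the following (worker and spawn_worker are
   hypothetical).  */
#if 0
static void
worker (void *arg)
{
  /* Thread body; ARG is the pointer passed to the detach call.  */
}

static objc_thread_t
spawn_worker (void *arg)
{
  /* Returns NULL when threads are inactive or thr_create fails.  */
  return __gthread_objc_thread_detach (worker, arg);
}
#endif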

/* Set the current thread's priority.  */
static inline int
__gthread_objc_thread_set_priority (int priority)
{
  int sys_priority = 0;

  if (!__gthread_active_p ())
    return -1;

  switch (priority)
    {
    case OBJC_THREAD_INTERACTIVE_PRIORITY:
      sys_priority = 300;
      break;
    default:
    case OBJC_THREAD_BACKGROUND_PRIORITY:
      sys_priority = 200;
      break;
    case OBJC_THREAD_LOW_PRIORITY:
      sys_priority = 1000;
      break;
    }

  /* Change priority */
  if (__gthrw_(thr_setprio) (__gthrw_(thr_self) (), sys_priority) == 0)
    return 0;
  else
    return -1;
}

/* Return the current thread's priority.  */
static inline int
__gthread_objc_thread_get_priority (void)
{
  int sys_priority;

  if (!__gthread_active_p ())
    return OBJC_THREAD_INTERACTIVE_PRIORITY;

  if (__gthrw_(thr_getprio) (__gthrw_(thr_self) (), &sys_priority) == 0)
    {
      if (sys_priority >= 250)
        return OBJC_THREAD_INTERACTIVE_PRIORITY;
      else if (sys_priority >= 150)
        return OBJC_THREAD_BACKGROUND_PRIORITY;
      return OBJC_THREAD_LOW_PRIORITY;
    }

  /* Couldn't get priority.  */
  return -1;
}

/* Yield our process time to another thread.  */
static inline void
__gthread_objc_thread_yield (void)
{
  if (__gthread_active_p ())
    __gthrw_(thr_yield) ();
}

/* Terminate the current thread.  */
static inline int
__gthread_objc_thread_exit (void)
{
  if (__gthread_active_p ())
    /* exit the thread */
    __gthrw_(thr_exit) (&__objc_thread_exit_status);

  /* Failed if we reached here */
  return -1;
}

/* Returns an integer value which uniquely describes a thread.  */
static inline objc_thread_t
__gthread_objc_thread_id (void)
{
  if (__gthread_active_p ())
    return (objc_thread_t) __gthrw_(thr_self) ();
  else
    return (objc_thread_t) 1;
}

/* Sets the thread's local storage pointer.  */
static inline int
__gthread_objc_thread_set_data (void *value)
{
  if (__gthread_active_p ())
    {
      if (__gthrw_(thr_setspecific) (_objc_thread_storage, value) == 0)
        return 0;
      else
        return -1;
    }
  else
    {
      thread_local_storage = value;
      return 0;
    }
}

/* Returns the thread's local storage pointer.  */
static inline void *
__gthread_objc_thread_get_data (void)
{
  void *value = NULL;

  if (__gthread_active_p ())
    {
      if (__gthrw_(thr_getspecific) (_objc_thread_storage, &value) == 0)
        return value;
      else
        return NULL;
    }
  else
    return thread_local_storage;
}
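
/* Illustrative sketch only: the runtime uses the two routines above to
   attach a per-thread pointer.  The struct and helpers below are
   hypothetical.  */
#if 0
struct example_state { int id; };

static int
attach_state (struct example_state *state)
{
  /* Stored under _objc_thread_storage when threads are active,
     otherwise in the single static slot above.  */
  return __gthread_objc_thread_set_data (state);
}

static struct example_state *
current_state (void)
{
  return (struct example_state *) __gthread_objc_thread_get_data ();
}
#endif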

/* Backend mutex functions */

/* Allocate a mutex.  */
static inline int
__gthread_objc_mutex_allocate (objc_mutex_t mutex)
{
  if (__gthread_active_p ()
      && __gthrw_(mutex_init) ((mutex_t *) (&(mutex->backend)), USYNC_THREAD, 0))
    return -1;

  return 0;
}

/* Deallocate a mutex.  */
static inline int
__gthread_objc_mutex_deallocate (objc_mutex_t mutex)
{
  if (__gthread_active_p ())
    __gthrw_(mutex_destroy) ((mutex_t *) (&(mutex->backend)));

  return 0;
}

/* Grab a lock on a mutex.  */
static inline int
__gthread_objc_mutex_lock (objc_mutex_t mutex)
{
  if (__gthread_active_p ()
      && __gthrw_(mutex_lock) ((mutex_t *) (&(mutex->backend))) != 0)
    return -1;

  return 0;
}

/* Try to grab a lock on a mutex.  */
static inline int
__gthread_objc_mutex_trylock (objc_mutex_t mutex)
{
  if (__gthread_active_p ()
      && __gthrw_(mutex_trylock) ((mutex_t *) (&(mutex->backend))) != 0)
    return -1;

  return 0;
}

/* Unlock the mutex */
static inline int
__gthread_objc_mutex_unlock (objc_mutex_t mutex)
{
  if (__gthread_active_p ()
      && __gthrw_(mutex_unlock) ((mutex_t *) (&(mutex->backend))) != 0)
    return -1;

  return 0;
}
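
/* Illustrative sketch only: within libobjc the objc_mutex_* wrappers
   call these backend routines roughly as follows (error handling
   trimmed; example_critical_section is hypothetical).  */
#if 0
static int
example_critical_section (objc_mutex_t mutex)
{
  if (__gthread_objc_mutex_lock (mutex) != 0)
    return -1;
  /* ... protected work ... */
  return __gthread_objc_mutex_unlock (mutex);
}
#endif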

/* Backend condition mutex functions */

/* Allocate a condition.  */
static inline int
__gthread_objc_condition_allocate (objc_condition_t condition)
{
  if (__gthread_active_p ())
    return __gthrw_(cond_init) ((cond_t *) (&(condition->backend)), USYNC_THREAD,
                                NULL);
  else
    return 0;
}

/* Deallocate a condition.  */
static inline int
__gthread_objc_condition_deallocate (objc_condition_t condition)
{
  if (__gthread_active_p ())
    return __gthrw_(cond_destroy) ((cond_t *) (&(condition->backend)));
  else
    return 0;
}

/* Wait on the condition */
static inline int
__gthread_objc_condition_wait (objc_condition_t condition, objc_mutex_t mutex)
{
  if (__gthread_active_p ())
    return __gthrw_(cond_wait) ((cond_t *) (&(condition->backend)),
                                (mutex_t *) (&(mutex->backend)));
  else
    return 0;
}

/* Wake up all threads waiting on this condition.  */
static inline int
__gthread_objc_condition_broadcast (objc_condition_t condition)
{
  if (__gthread_active_p ())
    return __gthrw_(cond_broadcast) ((cond_t *) (&(condition->backend)));
  else
    return 0;
}

/* Wake up one thread waiting on this condition.  */
static inline int
__gthread_objc_condition_signal (objc_condition_t condition)
{
  if (__gthread_active_p ())
    return __gthrw_(cond_signal) ((cond_t *) (&(condition->backend)));
  else
    return 0;
}
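
/* Illustrative sketch only: the canonical use of the condition backend
   is a wait in a loop around a predicate, with the associated mutex
   held.  The predicate and helpers below are hypothetical.  */
#if 0
static int ready;  /* protected by MUTEX */

static void
wait_until_ready (objc_condition_t condition, objc_mutex_t mutex)
{
  __gthread_objc_mutex_lock (mutex);
  while (!ready)
    __gthread_objc_condition_wait (condition, mutex);
  __gthread_objc_mutex_unlock (mutex);
}

static void
mark_ready (objc_condition_t condition, objc_mutex_t mutex)
{
  __gthread_objc_mutex_lock (mutex);
  ready = 1;
  __gthread_objc_condition_broadcast (condition);
  __gthread_objc_mutex_unlock (mutex);
}
#endif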

#else /* _LIBOBJC */

static inline int
__gthread_once (__gthread_once_t *once, void (*func) (void))
{
  if (! __gthread_active_p ())
    return -1;

  if (once == 0 || func == 0)
    return EINVAL;

  if (once->once == 0)
    {
      int status = __gthrw_(mutex_lock) (&once->mutex);
      if (status != 0)
        return status;
      if (once->once == 0)
        {
          (*func) ();
          once->once++;
        }
      __gthrw_(mutex_unlock) (&once->mutex);
    }
  return 0;
}
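
/* Illustrative sketch only: the usual client of __gthread_once is a
   lazily initialized facility.  init_facility and use_facility are
   hypothetical; note that when threads are not active this function
   returns -1 and callers fall back to their own single-threaded
   initialization.  */
#if 0
static __gthread_once_t facility_once = __GTHREAD_ONCE_INIT;

static void
init_facility (void)
{
  /* Runs at most once across all threads.  */
}

static void
use_facility (void)
{
  __gthread_once (&facility_once, init_facility);
  /* ... the facility is now initialized ... */
}
#endif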

static inline int
__gthread_key_create (__gthread_key_t *key, void (*dtor) (void *))
{
  /* Solaris 2.5's libc provides no-op stubs for the thr_* routines, so
     check that we actually got a reasonable key value back, and fail if
     we did not.  */
  *key = (__gthread_key_t) -1;
  if (__gthrw_(thr_keycreate) (key, dtor) != 0 || *key == (__gthread_key_t) -1)
    return -1;
  else
    return 0;
}

static inline int
__gthread_key_delete (__gthread_key_t UNUSED (key))
{
  /* Not possible.  */
  return -1;
}

static inline void *
__gthread_getspecific (__gthread_key_t key)
{
  void *ptr;
  if (__gthrw_(thr_getspecific) (key, &ptr) == 0)
    return ptr;
  else
    return 0;
}

static inline int
__gthread_setspecific (__gthread_key_t key, const void *ptr)
{
  return __gthrw_(thr_setspecific) (key, (void *) ptr);
}
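
/* Illustrative sketch only: thread-specific data is used through the
   three routines above.  The key, destructor and helpers below are
   hypothetical.  */
#if 0
static __gthread_key_t log_key;

static void
destroy_log (void *p)
{
  /* Per-thread cleanup, run by libthread at thread exit for non-null
     values.  */
}

static int
setup_log_key (void)
{
  return __gthread_key_create (&log_key, destroy_log);
}

static void
set_thread_log (void *log)
{
  __gthread_setspecific (log_key, log);
}

static void *
get_thread_log (void)
{
  return __gthread_getspecific (log_key);
}
#endif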

static inline int
__gthread_mutex_lock (__gthread_mutex_t *mutex)
{
  if (__gthread_active_p ())
    return __gthrw_(mutex_lock) (mutex);
  else
    return 0;
}

static inline int
__gthread_mutex_trylock (__gthread_mutex_t *mutex)
{
  if (__gthread_active_p ())
    return __gthrw_(mutex_trylock) (mutex);
  else
    return 0;
}

static inline int
__gthread_mutex_unlock (__gthread_mutex_t *mutex)
{
  if (__gthread_active_p ())
    return __gthrw_(mutex_unlock) (mutex);
  else
    return 0;
}
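
/* Illustrative sketch only: a non-blocking attempt on a mutex using the
   try-lock wrapper above (mutex_trylock reports EBUSY when the lock is
   already held).  try_do_work and do_work are hypothetical.  */
#if 0
static void
do_work (void)
{
  /* Work that must be done under the lock.  */
}

static int
try_do_work (__gthread_mutex_t *lock)
{
  if (__gthread_mutex_trylock (lock) != 0)
    return 0;  /* somebody else holds the lock */
  do_work ();
  __gthread_mutex_unlock (lock);
  return 1;
}
#endif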

static inline int
__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
{
  mutex->depth = 0;
  mutex->owner = (thread_t) 0;
  return __gthrw_(mutex_init) (&mutex->actual, USYNC_THREAD, 0);
}

static inline int
__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      thread_t me = __gthrw_(thr_self) ();

      if (mutex->owner != me)
        {
          __gthrw_(mutex_lock) (&mutex->actual);
          mutex->owner = me;
        }

      mutex->depth++;
    }
  return 0;
}

static inline int
__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      thread_t me = __gthrw_(thr_self) ();

      if (mutex->owner != me)
        {
          if (__gthrw_(mutex_trylock) (&mutex->actual))
            return 1;
          mutex->owner = me;
        }

      mutex->depth++;
    }
  return 0;
}

static inline int
__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
{
  if (__gthread_active_p ())
    {
      if (--mutex->depth == 0)
        {
          mutex->owner = (thread_t) 0;
          __gthrw_(mutex_unlock) (&mutex->actual);
        }
    }
  return 0;
}
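
/* Illustrative sketch only: the recursive mutex may be re-acquired by
   the thread that already owns it, which is what makes re-entrant code
   like the hypothetical pair below safe.  */
#if 0
/* Initialized elsewhere with __gthread_recursive_mutex_init_function.  */
static __gthread_recursive_mutex_t big_lock;

static void
inner (void)
{
  __gthread_recursive_mutex_lock (&big_lock);   /* depth 2: no deadlock */
  /* ... */
  __gthread_recursive_mutex_unlock (&big_lock);
}

static void
outer (void)
{
  __gthread_recursive_mutex_lock (&big_lock);   /* depth 1 */
  inner ();
  __gthread_recursive_mutex_unlock (&big_lock); /* released at depth 0 */
}
#endif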

#endif /* _LIBOBJC */

#undef UNUSED

#endif /* ! GCC_GTHR_SOLARIS_H */