1*38fd1498Szrj // Copyright (C) 2002-2018 Free Software Foundation, Inc.
2*38fd1498Szrj //
3*38fd1498Szrj // This file is part of GCC.
4*38fd1498Szrj //
5*38fd1498Szrj // GCC is free software; you can redistribute it and/or modify
6*38fd1498Szrj // it under the terms of the GNU General Public License as published by
7*38fd1498Szrj // the Free Software Foundation; either version 3, or (at your option)
8*38fd1498Szrj // any later version.
9*38fd1498Szrj
10*38fd1498Szrj // GCC is distributed in the hope that it will be useful,
11*38fd1498Szrj // but WITHOUT ANY WARRANTY; without even the implied warranty of
12*38fd1498Szrj // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13*38fd1498Szrj // GNU General Public License for more details.
14*38fd1498Szrj
15*38fd1498Szrj // Under Section 7 of GPL version 3, you are granted additional
16*38fd1498Szrj // permissions described in the GCC Runtime Library Exception, version
17*38fd1498Szrj // 3.1, as published by the Free Software Foundation.
18*38fd1498Szrj
19*38fd1498Szrj // You should have received a copy of the GNU General Public License and
20*38fd1498Szrj // a copy of the GCC Runtime Library Exception along with this program;
21*38fd1498Szrj // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
22*38fd1498Szrj // <http://www.gnu.org/licenses/>.
23*38fd1498Szrj
24*38fd1498Szrj // Written by Mark Mitchell, CodeSourcery LLC, <mark@codesourcery.com>
25*38fd1498Szrj // Thread support written by Jason Merrill, Red Hat Inc. <jason@redhat.com>
26*38fd1498Szrj
27*38fd1498Szrj #include <bits/c++config.h>
28*38fd1498Szrj #include <cxxabi.h>
29*38fd1498Szrj #include <exception>
30*38fd1498Szrj #include <new>
31*38fd1498Szrj #include <ext/atomicity.h>
32*38fd1498Szrj #include <ext/concurrence.h>
33*38fd1498Szrj #include <bits/atomic_lockfree_defines.h>
34*38fd1498Szrj #if defined(__GTHREADS) && defined(__GTHREAD_HAS_COND) \
35*38fd1498Szrj && (ATOMIC_INT_LOCK_FREE > 1) && defined(_GLIBCXX_HAVE_LINUX_FUTEX)
36*38fd1498Szrj # include <climits>
37*38fd1498Szrj # include <syscall.h>
38*38fd1498Szrj # include <unistd.h>
39*38fd1498Szrj # define _GLIBCXX_USE_FUTEX
40*38fd1498Szrj # define _GLIBCXX_FUTEX_WAIT 0
41*38fd1498Szrj # define _GLIBCXX_FUTEX_WAKE 1
42*38fd1498Szrj #endif
43*38fd1498Szrj
// The IA64/generic ABI uses the first byte of the guard variable.
// The ARM EABI uses the least significant bit.

// Thread-safe static local initialization support.
48*38fd1498Szrj #ifdef __GTHREADS
49*38fd1498Szrj # ifndef _GLIBCXX_USE_FUTEX
namespace
{
  // A single recursive mutex serializing every guarded static
  // initialization that goes through this (non-futex) code path.
  static __gnu_cxx::__recursive_mutex* static_mutex;

  // Raw, correctly aligned storage for the mutex.  Using a plain char
  // buffer avoids running a static constructor for the mutex object
  // itself; it is placement-new'ed into the buffer on first use.
  typedef char mutex_storage_t[sizeof(__gnu_cxx::__recursive_mutex)]
  __attribute__ ((aligned(__alignof__(__gnu_cxx::__recursive_mutex))));
  mutex_storage_t fake_mutex;

  // One-shot constructor for the mutex, run through __gthread_once.
  static void init()
  { static_mutex = new (&fake_mutex) __gnu_cxx::__recursive_mutex(); }

  // Lazily construct the global mutex (exactly once, thread-safely)
  // and return a reference to it.
  __gnu_cxx::__recursive_mutex&
  get_static_mutex()
  {
    static __gthread_once_t init_flag = __GTHREAD_ONCE_INIT;
    __gthread_once(&init_flag, init);
    return *static_mutex;
  }

  // RAII wrapper for exception safety: locks the global mutex on
  // construction and releases it on destruction unless the owner
  // cleared 'unlock' (done when the lock must outlive the scope).
  struct mutex_wrapper
  {
    bool unlock;

    mutex_wrapper() : unlock(true)
    { get_static_mutex().lock(); }

    ~mutex_wrapper()
    {
      if (!unlock)
	return;
      static_mutex->unlock();
    }
  };
}
84*38fd1498Szrj # endif
85*38fd1498Szrj
86*38fd1498Szrj # if defined(__GTHREAD_HAS_COND) && !defined(_GLIBCXX_USE_FUTEX)
namespace
{
  // A single condition variable on which all threads waiting for any
  // in-progress guarded static initialization block.
  static __gnu_cxx::__cond* static_cond;

  // Raw aligned storage: avoids a static constructor for the condition
  // variable; the object is placement-new'ed into it on first use.
  typedef char cond_storage_t[sizeof(__gnu_cxx::__cond)]
  __attribute__ ((aligned(__alignof__(__gnu_cxx::__cond))));
  cond_storage_t fake_cond;

  // One-shot constructor for the condition variable.
  static void init_static_cond()
  { static_cond = new (&fake_cond) __gnu_cxx::__cond(); }

  // Lazily construct the global condition variable (exactly once,
  // thread-safely) and return a reference to it.
  __gnu_cxx::__cond&
  get_static_cond()
  {
    static __gthread_once_t init_flag = __GTHREAD_ONCE_INIT;
    __gthread_once(&init_flag, init_static_cond);
    return *static_cond;
  }
}
108*38fd1498Szrj # endif
109*38fd1498Szrj
110*38fd1498Szrj # ifndef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
111*38fd1498Szrj
112*38fd1498Szrj // Test the guard variable with a memory load with
113*38fd1498Szrj // acquire semantics.
114*38fd1498Szrj
// Atomically read the guard's first byte with acquire memory ordering
// and report whether the "initialized" flag is already set.  The
// acquire load orders subsequent accesses to the guarded variable
// after this test.
inline bool
__test_and_acquire (__cxxabiv1::__guard *g)
{
  unsigned char __b;
  __atomic_load (reinterpret_cast<unsigned char *>(g), &__b,
		 __ATOMIC_ACQUIRE);
  return _GLIBCXX_GUARD_TEST(&__b);
}
124*38fd1498Szrj # define _GLIBCXX_GUARD_TEST_AND_ACQUIRE(G) __test_and_acquire (G)
125*38fd1498Szrj # endif
126*38fd1498Szrj
127*38fd1498Szrj # ifndef _GLIBCXX_GUARD_SET_AND_RELEASE
128*38fd1498Szrj
129*38fd1498Szrj // Set the guard variable to 1 with memory order release semantics.
130*38fd1498Szrj
// Atomically store 1 into the guard's first byte with release memory
// ordering, publishing the guarded variable as fully initialized.
inline void
__set_and_release (__cxxabiv1::__guard *g)
{
  unsigned char __done = 1;
  __atomic_store (reinterpret_cast<unsigned char *>(g), &__done,
		  __ATOMIC_RELEASE);
}
139*38fd1498Szrj # define _GLIBCXX_GUARD_SET_AND_RELEASE(G) __set_and_release (G)
140*38fd1498Szrj # endif
141*38fd1498Szrj
142*38fd1498Szrj #else /* !__GTHREADS */
143*38fd1498Szrj
144*38fd1498Szrj # undef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
145*38fd1498Szrj # undef _GLIBCXX_GUARD_SET_AND_RELEASE
146*38fd1498Szrj # define _GLIBCXX_GUARD_SET_AND_RELEASE(G) _GLIBCXX_GUARD_SET (G)
147*38fd1498Szrj
148*38fd1498Szrj #endif /* __GTHREADS */
149*38fd1498Szrj
//
// Here are C++ run-time routines for guarded initialization of static
// variables. There are 4 scenarios under which these routines are called:
//
// 1. Threads not supported (__GTHREADS not defined)
// 2. Threads are supported but not enabled at run-time.
// 3. Threads enabled at run-time but __gthreads_* are not fully POSIX.
// 4. Threads enabled at run-time and __gthreads_* support all POSIX threads
//    primitives we need here.
//
// The old code supported scenarios 1-3 but was broken since it used a global
// mutex for all threads and had the mutex locked during the whole duration of
// initialization of a guarded static variable. The following created a
// deadlock with the old code.
//
// Thread 1 acquires the global mutex.
// Thread 1 starts initializing static variable.
// Thread 1 creates thread 2 during initialization.
// Thread 2 attempts to acquire mutex to initialize another variable.
// Thread 2 blocks since thread 1 is locking the mutex.
// Thread 1 waits for result from thread 2 and also blocks. A deadlock.
//
// The new code here can handle this situation and thus is more robust. However,
// we need to use the POSIX thread condition variable, which is not supported
// in all platforms, notably older versions of Microsoft Windows. The gthr*.h
// headers define a symbol __GTHREAD_HAS_COND for platforms that support POSIX
// like condition variables. For platforms that do not support condition
// variables, we need to fall back to the old code.

// If _GLIBCXX_USE_FUTEX, no global mutex or condition variable is used,
// only atomic operations are used together with futex syscall.
// Valid values of the first integer in guard are:
// 0                               No thread encountered the guarded init
//                                 yet or it has been aborted.
// _GLIBCXX_GUARD_BIT              The guarded static var has been successfully
//                                 initialized.
// _GLIBCXX_GUARD_PENDING_BIT      The guarded static var is being initialized
//                                 and no other thread is waiting for its
//                                 initialization.
// (_GLIBCXX_GUARD_PENDING_BIT    The guarded static var is being initialized
//  | _GLIBCXX_GUARD_WAITING_BIT) and some other threads are waiting until
//                                 it is initialized.
193*38fd1498Szrj namespace __cxxabiv1
194*38fd1498Szrj {
195*38fd1498Szrj #ifdef _GLIBCXX_USE_FUTEX
196*38fd1498Szrj namespace
197*38fd1498Szrj {
__guard_test_bit(const int __byte,const int __val)198*38fd1498Szrj static inline int __guard_test_bit (const int __byte, const int __val)
199*38fd1498Szrj {
200*38fd1498Szrj union { int __i; char __c[sizeof (int)]; } __u = { 0 };
201*38fd1498Szrj __u.__c[__byte] = __val;
202*38fd1498Szrj return __u.__i;
203*38fd1498Szrj }
204*38fd1498Szrj }
205*38fd1498Szrj #endif
206*38fd1498Szrj
207*38fd1498Szrj static inline int
init_in_progress_flag(__guard * g)208*38fd1498Szrj init_in_progress_flag(__guard* g)
209*38fd1498Szrj { return ((char *)g)[1]; }
210*38fd1498Szrj
211*38fd1498Szrj static inline void
set_init_in_progress_flag(__guard * g,int v)212*38fd1498Szrj set_init_in_progress_flag(__guard* g, int v)
213*38fd1498Szrj { ((char *)g)[1] = v; }
214*38fd1498Szrj
  // Report that a guarded variable's initializer re-entered its own
  // initialization (detected via the in-progress flag).  Throws
  // __gnu_cxx::recursive_init_error when exceptions are enabled,
  // otherwise traps; it never returns normally.
  static inline void
  throw_recursive_init_exception()
  {
#if __cpp_exceptions
    throw __gnu_cxx::recursive_init_error();
#else
    // Use __builtin_trap so we don't require abort().
    __builtin_trap();
#endif
  }
225*38fd1498Szrj
226*38fd1498Szrj // acquire() is a helper function used to acquire guard if thread support is
227*38fd1498Szrj // not compiled in or is compiled in but not enabled at run-time.
228*38fd1498Szrj static int
acquire(__guard * g)229*38fd1498Szrj acquire(__guard *g)
230*38fd1498Szrj {
231*38fd1498Szrj // Quit if the object is already initialized.
232*38fd1498Szrj if (_GLIBCXX_GUARD_TEST(g))
233*38fd1498Szrj return 0;
234*38fd1498Szrj
235*38fd1498Szrj if (init_in_progress_flag(g))
236*38fd1498Szrj throw_recursive_init_exception();
237*38fd1498Szrj
238*38fd1498Szrj set_init_in_progress_flag(g, 1);
239*38fd1498Szrj return 1;
240*38fd1498Szrj }
241*38fd1498Szrj
  // ABI entry point called before a guarded static variable's dynamic
  // initialization.  Returns 1 if the calling thread must run the
  // initializer (and later call __cxa_guard_release or, on exception,
  // __cxa_guard_abort), or 0 if initialization is already complete.
  extern "C"
  int __cxa_guard_acquire (__guard *g)
  {
#ifdef __GTHREADS
    // If the target can reorder loads, we need to insert a read memory
    // barrier so that accesses to the guarded variable happen after the
    // guard test.
    if (_GLIBCXX_GUARD_TEST_AND_ACQUIRE (g))
      return 0;

# ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and futex syscall are supported, don't use any global
    // mutex.  The guard's first int moves through the states documented
    // above: 0 -> pending_bit [-> pending_bit|waiting_bit] -> guard_bit
    // (or back to 0 on abort).
    if (__gthread_active_p ())
      {
	int *gi = (int *) (void *) g;
	const int guard_bit = _GLIBCXX_GUARD_BIT;
	const int pending_bit = _GLIBCXX_GUARD_PENDING_BIT;
	const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;

	while (1)
	  {
	    // Try to claim the guard: 0 -> pending_bit.
	    int expected(0);
	    if (__atomic_compare_exchange_n(gi, &expected, pending_bit, false,
					    __ATOMIC_ACQ_REL,
					    __ATOMIC_ACQUIRE))
	      {
		// This thread should do the initialization.
		return 1;
	      }

	    if (expected == guard_bit)
	      {
		// Already initialized.
		return 0;
	      }

	    if (expected == pending_bit)
	      {
		// Another thread is initializing; advertise that we are
		// going to sleep by setting the waiting bit, so that the
		// releasing/aborting thread knows to futex-wake us.
		// Use acquire here.
		int newv = expected | waiting_bit;
		if (!__atomic_compare_exchange_n(gi, &expected, newv, false,
						 __ATOMIC_ACQ_REL,
						 __ATOMIC_ACQUIRE))
		  {
		    if (expected == guard_bit)
		      {
			// Make a thread that failed to set the
			// waiting bit exit the function earlier,
			// if it detects that another thread has
			// successfully finished initialising.
			return 0;
		      }
		    if (expected == 0)
		      continue;
		  }

		expected = newv;
	      }

	    // Sleep until woken by __cxa_guard_release/abort; the wait
	    // returns immediately if *gi no longer equals 'expected'.
	    // Loop to re-examine the guard state after every wakeup.
	    syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAIT, expected, 0);
	  }
      }
# else
    if (__gthread_active_p ())
      {
	// Condition-variable (or legacy mutex-only) path: all guard
	// state changes happen under the single global recursive mutex.
	mutex_wrapper mw;

	while (1) // When this loop is executing, mutex is locked.
	  {
# ifdef __GTHREAD_HAS_COND
	    // The static is already initialized.
	    if (_GLIBCXX_GUARD_TEST(g))
	      return 0; // The mutex will be unlocked via wrapper

	    if (init_in_progress_flag(g))
	      {
		// The guarded static is currently being initialized by
		// another thread, so we release mutex and wait for the
		// condition variable. We will lock the mutex again after
		// this.
		get_static_cond().wait_recursive(&get_static_mutex());
	      }
	    else
	      {
		set_init_in_progress_flag(g, 1);
		return 1; // The mutex will be unlocked via wrapper.
	      }
# else
	    // This provides compatibility with older systems not supporting
	    // POSIX like condition variables.  Note: the mutex stays locked
	    // across the whole initialization (released in release/abort).
	    if (acquire(g))
	      {
		mw.unlock = false;
		return 1; // The mutex still locked.
	      }
	    return 0; // The mutex will be unlocked via wrapper.
# endif
	  }
      }
# endif
#endif

    // Single-threaded fallback (no threads, or threads inactive).
    return acquire (g);
  }
347*38fd1498Szrj
  // ABI entry point called when a guarded static variable's initializer
  // exits by throwing.  Resets the guard to its initial state so a later
  // attempt can retry the initialization, and releases any threads
  // blocked in __cxa_guard_acquire.
  extern "C"
  void __cxa_guard_abort (__guard *g) throw ()
  {
#ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and futex syscall are supported, don't use any global
    // mutex.
    if (__gthread_active_p ())
      {
	int *gi = (int *) (void *) g;
	const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;
	// Reset the guard word to 0: "not initialized / aborted".
	int old = __atomic_exchange_n (gi, 0, __ATOMIC_ACQ_REL);

	// Only issue the (relatively expensive) futex wake if some
	// thread actually recorded itself as waiting.
	if ((old & waiting_bit) != 0)
	  syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAKE, INT_MAX);
	return;
      }
#elif defined(__GTHREAD_HAS_COND)
    if (__gthread_active_p())
      {
	mutex_wrapper mw;

	set_init_in_progress_flag(g, 0);

	// If we abort, we still need to wake up all other threads waiting for
	// the condition variable.  They will re-test the guard and one of
	// them will take over the initialization.
	get_static_cond().broadcast();
	return;
      }
#endif

    set_init_in_progress_flag(g, 0);
#if defined(__GTHREADS) && !defined(__GTHREAD_HAS_COND)
    // This provides compatibility with older systems not supporting POSIX like
    // condition variables.  __cxa_guard_acquire left the global mutex locked
    // (it cleared mw.unlock), so release it here.
    if (__gthread_active_p ())
      static_mutex->unlock();
#endif
  }
386*38fd1498Szrj
  // ABI entry point called after a guarded static variable's initializer
  // completes successfully.  Marks the guard as initialized with release
  // semantics (pairing with the acquire load in the guard test) and
  // wakes any threads waiting in __cxa_guard_acquire.
  extern "C"
  void __cxa_guard_release (__guard *g) throw ()
  {
#ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and futex syscall are supported, don't use any global
    // mutex.
    if (__gthread_active_p ())
      {
	int *gi = (int *) (void *) g;
	const int guard_bit = _GLIBCXX_GUARD_BIT;
	const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;
	// Publish "initialized"; the previous value tells us whether any
	// thread set the waiting bit and is sleeping on the futex.
	int old = __atomic_exchange_n (gi, guard_bit, __ATOMIC_ACQ_REL);

	if ((old & waiting_bit) != 0)
	  syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAKE, INT_MAX);
	return;
      }
#elif defined(__GTHREAD_HAS_COND)
    if (__gthread_active_p())
      {
	mutex_wrapper mw;

	set_init_in_progress_flag(g, 0);
	_GLIBCXX_GUARD_SET_AND_RELEASE(g);

	// Wake all threads blocked on the condition variable in
	// __cxa_guard_acquire; they will re-test the guard and return 0.
	get_static_cond().broadcast();
	return;
      }
#endif

    set_init_in_progress_flag(g, 0);
    _GLIBCXX_GUARD_SET_AND_RELEASE (g);

#if defined(__GTHREADS) && !defined(__GTHREAD_HAS_COND)
    // This provides compatibility with older systems not supporting POSIX like
    // condition variables.  The global mutex was left locked by
    // __cxa_guard_acquire, so unlock it now.
    if (__gthread_active_p())
      static_mutex->unlock();
#endif
  }
427*38fd1498Szrj }
428