/* Copyright (C) 2005, 2009, 2011 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the CRITICAL construct.  */

#include "libgomp.h"
#include <stdlib.h>


static gomp_mutex_t default_lock;

void
GOMP_critical_start (void)
{
  /* There is an implicit flush on entry to a critical region.  */
  __atomic_thread_fence (MEMMODEL_RELEASE);
  gomp_mutex_lock (&default_lock);
}

void
GOMP_critical_end (void)
{
  gomp_mutex_unlock (&default_lock);
}
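
/* Illustrative sketch, not part of the library: an unnamed critical
   construct in user code,

     #pragma omp critical
     counter++;

   is lowered by the compiler to calls bracketing the protected statement,
   roughly:

     GOMP_critical_start ();
     counter++;
     GOMP_critical_end ();

   All unnamed critical regions in a program therefore serialize on the
   single default_lock above.  The variable name "counter" is only an
   example.  */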

#ifndef HAVE_SYNC_BUILTINS
static gomp_mutex_t create_lock_lock;
#endif

void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;

  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
          gomp_mutex_init (nlock);

          plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
          if (plock != NULL)
            {
              /* Another thread installed its lock first; discard ours.  */
              gomp_mutex_destroy (nlock);
              free (nlock);
            }
          else
            plock = nlock;
#else
          gomp_mutex_lock (&create_lock_lock);
          plock = *pptr;
          if (plock == NULL)
            {
              plock = gomp_malloc (sizeof (gomp_mutex_t));
              gomp_mutex_init (plock);
              __sync_synchronize ();
              *pptr = plock;
            }
          gomp_mutex_unlock (&create_lock_lock);
#endif
        }
    }

  gomp_mutex_lock (plock);
}
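
/* Illustrative sketch, not part of the library: on a configuration where
   GOMP_MUTEX_INIT_0 is nonzero and gomp_mutex_t is no larger than a pointer
   (e.g. the Linux futex implementation, where it is an int), the compiler's
   zero-initialized pointer slot doubles as the mutex itself:

     void *slot;                                  // static, zero initialized
     gomp_mutex_t *plock = (gomp_mutex_t *) &slot;
     gomp_mutex_lock (plock);
     ...
     gomp_mutex_unlock (plock);

   Otherwise the slot holds a lazily allocated gomp_mutex_t, installed above
   with a compare-and-swap (or under create_lock_lock) so that exactly one
   thread's allocation wins.  */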

void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}
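
/* Illustrative sketch, not part of the library: a named critical construct,

     #pragma omp critical (update)
     counter++;

   is lowered to calls that pass the address of a pointer-sized slot the
   compiler emits once per critical name, roughly:

     static void *gomp_critical_user_update;   // hypothetical slot name

     GOMP_critical_name_start (&gomp_critical_user_update);
     counter++;
     GOMP_critical_name_end (&gomp_critical_user_update);

   Regions with different names therefore use different locks and do not
   serialize against each other.  The slot and variable names here are only
   examples.  */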

/* This mutex is used when atomic operations don't exist for the target
   in the mode requested.  The result is not globally atomic, but works so
   long as all parallel references are within #pragma omp atomic directives.
   According to responses received from omp@openmp.org, this appears to be
   within spec.  That makes sense, since it is how several other compilers
   handle this situation as well.  */

static gomp_mutex_t atomic_lock;

void
GOMP_atomic_start (void)
{
  gomp_mutex_lock (&atomic_lock);
}

void
GOMP_atomic_end (void)
{
  gomp_mutex_unlock (&atomic_lock);
}
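
/* Illustrative sketch, not part of the library: when the target lacks an
   atomic instruction for the operand type, the compiler may fall back to
   bracketing the update with these calls.  For example,

     #pragma omp atomic
     x += 1.0;                  // e.g. a type with no native atomic support

   becomes approximately:

     GOMP_atomic_start ();
     x += 1.0;
     GOMP_atomic_end ();

   Since every such fallback uses the one atomic_lock above, this is only
   correct if all concurrent accesses to x go through atomic directives, as
   the comment preceding atomic_lock notes.  The variable x is only an
   example.  */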

#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_critical (void)
{
  gomp_mutex_init (&default_lock);
  gomp_mutex_init (&atomic_lock);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&create_lock_lock);
#endif
}
#endif