/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the CRITICAL construct.  */

#include "libgomp.h"
#include <stdlib.h>

static gomp_mutex_t default_lock;

void
GOMP_critical_start (void)
{
  /* There is an implicit flush on entry to a critical region.  */
  __atomic_thread_fence (MEMMODEL_RELEASE);
  gomp_mutex_lock (&default_lock);
}

void
GOMP_critical_end (void)
{
  gomp_mutex_unlock (&default_lock);
}
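
/* Illustrative sketch (not part of libgomp): an unnamed critical region
   such as

     #pragma omp critical
     x++;

   is lowered by the compiler into calls bracketing the protected statement:

     GOMP_critical_start ();
     x++;
     GOMP_critical_end ();

   so every unnamed critical region in the program serializes on the single
   default_lock above.  */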

#ifndef HAVE_SYNC_BUILTINS
static gomp_mutex_t create_lock_lock;
#endif

void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *) pptr;

  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
          gomp_mutex_init (nlock);

          plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
          if (plock != NULL)
            {
              gomp_mutex_destroy (nlock);
              free (nlock);
            }
          else
            plock = nlock;
#else
          gomp_mutex_lock (&create_lock_lock);
          plock = *pptr;
          if (plock == NULL)
            {
              plock = gomp_malloc (sizeof (gomp_mutex_t));
              gomp_mutex_init (plock);
              __sync_synchronize ();
              *pptr = plock;
            }
          gomp_mutex_unlock (&create_lock_lock);
#endif
        }
    }

  gomp_mutex_lock (plock);
}
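
/* Illustrative sketch (not part of libgomp): for a named critical region
   such as

     #pragma omp critical (foo)
     x++;

   the compiler emits one zero-initialized pointer-sized slot per name
   (in GCC the slot is named along the lines of .gomp_critical_user_foo;
   the exact name is an implementation detail) and passes its address to
   the routines above.  Using a hypothetical name for the slot:

     static void *foo_lock_slot;
     ...
     GOMP_critical_name_start (&foo_lock_slot);
     x++;
     GOMP_critical_name_end (&foo_lock_slot);

   All regions sharing the same name therefore serialize on the same lock,
   which is either the slot itself or a lazily allocated gomp_mutex_t
   installed by the slow path above.  */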

void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *) pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}
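
/* A minimal sketch of the pointer-slot fast path used by the two routines
   above, assuming a configuration where GOMP_MUTEX_INIT_0 holds and
   gomp_mutex_t is a zero-initialized integer no larger than a pointer
   (for example the futex-based Linux implementation):

     void *slot = NULL;                             // compiler-emitted slot
     gomp_mutex_t *plock = (gomp_mutex_t *) &slot;  // the slot doubles as the mutex
     gomp_mutex_lock (plock);
     ...critical region...
     gomp_mutex_unlock (plock);

   No allocation is needed on this path; otherwise a mutex is malloc'ed once
   per name and installed with a compare-and-swap (or under create_lock_lock
   when sync builtins are unavailable).  */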

#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_critical (void)
{
  gomp_mutex_init (&default_lock);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&create_lock_lock);
#endif
}
#endif