/* Copyright (C) 2015-2020 Free Software Foundation, Inc.
   Contributed by Mentor Embedded.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This is an AMD GCN specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  The
   implementation uses atomic instructions plus the hardware s_barrier
   instruction.  It uses MEMMODEL_RELAXED here because barriers are confined
   to a single workgroup and therefore don't need to flush caches.  (A usage
   sketch follows the #include lines below.)  */

#include <limits.h>
#include "libgomp.h"

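/* A rough sketch of how these barriers are driven, for orientation only;
   gomp_barrier_init and gomp_barrier_wait_start are defined in this port's
   bar.h, and NTHREADS below is just an illustrative placeholder:

     gomp_barrier_t bar;
     gomp_barrier_init (&bar, nthreads);
     ...
     gomp_barrier_wait (&bar);	   (executed by every thread in the group)

   gomp_barrier_wait_start atomically decrements bar->awaited, and the
   thread that brings it to zero sees BAR_WAS_LAST set in the returned
   state.  */
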
/* Exit phase of the simple barrier: the last arriving thread (flagged by
   BAR_WAS_LAST from the entry phase) resets the arrival counter and
   publishes the next generation; every thread then rendezvouses on the
   hardware s_barrier.  */

void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      bar->awaited = bar->total;
      __atomic_store_n (&bar->generation, bar->generation + BAR_INCR,
			MEMMODEL_RELAXED);
    }
  asm ("s_barrier" ::: "memory");
}

void
gomp_barrier_wait (gomp_barrier_t *bar)
{
  gomp_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

/* Like gomp_barrier_wait, except that if the encountering thread
   is not the last one to hit the barrier, it returns immediately.
   The intended usage is that a thread which intends to gomp_barrier_destroy
   this barrier calls gomp_barrier_wait, while all other threads
   call gomp_barrier_wait_last.  When gomp_barrier_wait returns,
   the barrier can be safely destroyed.  */

void
gomp_barrier_wait_last (gomp_barrier_t *bar)
{
  /* Deferring to gomp_barrier_wait does not exploit the optimization that
     the interface contract permits for all-but-last participants.  The
     original implementation in config/linux/bar.c handles this better; it
     is sketched in the comment after this function.  */
  gomp_barrier_wait (bar);
}
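
/* For reference, config/linux/bar.c implements gomp_barrier_wait_last
   roughly as follows, so that non-last threads return without blocking:

     gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
     if (state & BAR_WAS_LAST)
       gomp_barrier_wait_end (bar, state);

   That shortcut cannot be copied verbatim here: a thread that skipped
   gomp_barrier_wait_end would also skip s_barrier, and s_barrier completes
   only once every wavefront in the workgroup has executed it.  */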

/* The COUNT argument is ignored on GCN: s_barrier always rendezvouses all
   wavefronts in the workgroup, so there is no way to wake only a subset of
   waiters the way the futex-based ports do.  */
void
gomp_team_barrier_wake (gomp_barrier_t *bar, int count)
{
  asm ("s_barrier" ::: "memory");
}

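/* Team barriers additionally cooperate with the tasking runtime: the low
   bits of bar->generation carry the BAR_TASK_PENDING, BAR_WAITING_FOR_TASK
   and BAR_CANCELLED flags (see the BAR_* definitions in bar.h), while the
   generation count proper advances in steps of BAR_INCR.  A waiter
   therefore spins until it observes generation == state + BAR_INCR,
   servicing pending tasks along the way.  */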
void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  state &= ~BAR_WAS_LAST;
	}
      else
	{
	  state &= ~BAR_CANCELLED;
	  state += BAR_INCR - BAR_WAS_LAST;
	  __atomic_store_n (&bar->generation, state, MEMMODEL_RELAXED);
	  asm ("s_barrier" ::: "memory");
	  return;
	}
    }

  generation = state;
  state &= ~BAR_CANCELLED;
  int retry = 100;
  do
    {
      if (retry-- == 0)
	{
	  /* It really shouldn't happen that barriers get out of sync, but
	     if they do then this will loop until they realign, so we need
	     to avoid an infinite loop where the thread just isn't there.  */
	  const char msg[] = ("Barrier sync failed (another thread died?);"
			      " aborting.");
	  write (2, msg, sizeof (msg) - 1);
	  abort ();
	}

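      /* Rendezvous with the rest of the workgroup, then check whether the
	 last arrival has published the next generation; service any tasks
	 that became pending in the meantime.  */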
      asm ("s_barrier" ::: "memory");
      gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
	}
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);
}

void
gomp_team_barrier_wait (gomp_barrier_t *bar)
{
  gomp_team_barrier_wait_end (bar, gomp_barrier_wait_start (bar));
}

/* Variant used for a team's final barrier: the last arriving thread also
   resets the separate AWAITED_FINAL counter before taking the normal
   team-barrier exit path.  */

void
gomp_team_barrier_wait_final (gomp_barrier_t *bar)
{
  gomp_barrier_state_t state = gomp_barrier_wait_final_start (bar);
  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    bar->awaited_final = bar->total;
  gomp_team_barrier_wait_end (bar, state);
}

/* Cancellable version of the team barrier: returns true if the barrier was
   cancelled, either on entry or while waiting, and false on a normal
   release to the next generation.  */

bool
gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar,
				   gomp_barrier_state_t state)
{
  unsigned int generation, gen;

  if (__builtin_expect (state & BAR_WAS_LAST, 0))
    {
      /* Next time we'll be awaiting TOTAL threads again.  */
      /* BAR_CANCELLED should never be set in state here, because
	 cancellation means that at least one of the threads has been
	 cancelled, so on a cancellable barrier we should never see
	 all threads arrive.  */
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;

      bar->awaited = bar->total;
      team->work_share_cancelled = 0;
      if (__builtin_expect (team->task_count, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  state &= ~BAR_WAS_LAST;
	}
      else
	{
	  state += BAR_INCR - BAR_WAS_LAST;
	  __atomic_store_n (&bar->generation, state, MEMMODEL_RELAXED);
	  asm ("s_barrier" ::: "memory");
	  return false;
	}
    }

  if (__builtin_expect (state & BAR_CANCELLED, 0))
    return true;

  generation = state;
  int retry = 100;
  do
    {
      if (retry-- == 0)
	{
	  /* It really shouldn't happen that barriers get out of sync, but
	     if they do then this will loop until they realign, so we need
	     to avoid an infinite loop where the thread just isn't there.  */
	  const char msg[] = ("Barrier sync failed (another thread died?);"
			      " aborting.");
	  write (2, msg, sizeof (msg) - 1);
	  abort ();
	}

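      /* As in gomp_team_barrier_wait_end above, but also drop out as soon
	 as any thread has marked the barrier cancelled.  */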
      asm ("s_barrier" ::: "memory");
      gen = __atomic_load_n (&bar->generation, MEMMODEL_RELAXED);
      if (__builtin_expect (gen & BAR_CANCELLED, 0))
	return true;
      if (__builtin_expect (gen & BAR_TASK_PENDING, 0))
	{
	  gomp_barrier_handle_tasks (state);
	  gen = __atomic_load_n (&bar->generation, MEMMODEL_RELAXED);
	}
      generation |= gen & BAR_WAITING_FOR_TASK;
    }
  while (gen != state + BAR_INCR);

  return false;
}

bool
gomp_team_barrier_wait_cancel (gomp_barrier_t *bar)
{
  return gomp_team_barrier_wait_cancel_end (bar, gomp_barrier_wait_start (bar));
}

/* Mark the team barrier cancelled under the task lock, then wake all
   waiters so that they can observe BAR_CANCELLED and return.  */

void
gomp_team_barrier_cancel (struct gomp_team *team)
{
  gomp_mutex_lock (&team->task_lock);
  if (team->barrier.generation & BAR_CANCELLED)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  team->barrier.generation |= BAR_CANCELLED;
  gomp_mutex_unlock (&team->task_lock);
  gomp_team_barrier_wake (&team->barrier, INT_MAX);
}
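
/* Illustrative only: a cancelling thread and the waiting threads interact
   roughly as

     one thread:   gomp_team_barrier_cancel (team);
     each waiter:  if (gomp_team_barrier_wait_cancel (&team->barrier))
		     ... abandon the region ...

   with the return value reporting whether the barrier was cancelled.  */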