//===-- Synchronization.cpp - OpenMP Device synchronization API -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of all synchronization primitives for the device runtime.
//
//===----------------------------------------------------------------------===//

#include "Synchronization.h"

#include "Debug.h"
#include "DeviceTypes.h"
#include "DeviceUtils.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"

#pragma omp begin declare target device_type(nohost)

using namespace ompx;

namespace impl {

/// Atomics
///
///{
/// NOTE: This function needs to be implemented by every target.
uint32_t atomicInc(uint32_t *Address, uint32_t Val, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope);
///}

// Forward declarations of functions to be defined for AMDGCN and NVPTX.
uint32_t atomicInc(uint32_t *A, uint32_t V, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope);
void namedBarrierInit();
void namedBarrier();
void fenceTeam(atomic::OrderingTy Ordering);
void fenceKernel(atomic::OrderingTy Ordering);
void fenceSystem(atomic::OrderingTy Ordering);
void syncWarp(__kmpc_impl_lanemask_t);
void syncThreads(atomic::OrderingTy Ordering);
void syncThreadsAligned(atomic::OrderingTy Ordering) { syncThreads(Ordering); }
void unsetLock(omp_lock_t *);
int testLock(omp_lock_t *);
void initLock(omp_lock_t *);
void destroyLock(omp_lock_t *);
void setLock(omp_lock_t *);
void unsetCriticalLock(omp_lock_t *);
void setCriticalLock(omp_lock_t *);

/// AMDGCN Implementation
///
///{
#pragma omp begin declare variant match(device = {arch(amdgcn)})

uint32_t atomicInc(uint32_t *A, uint32_t V, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope) {
  // __builtin_amdgcn_atomic_inc32 should expand to this switch when passed a
  // runtime value, but it does not do so yet, so we work around that here.
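  // The Case/ScopeSwitch macros below expand the runtime (Ordering, MemScope)
  // pair into a nested switch so the builtin only ever sees compile-time
  // constant ordering and scope arguments.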

#define ScopeSwitch(ORDER)                                                     \
  switch (MemScope) {                                                          \
  case atomic::MemScopeTy::system:                                             \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "");                     \
  case atomic::MemScopeTy::device:                                             \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "agent");                \
  case atomic::MemScopeTy::workgroup:                                          \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "workgroup");            \
  case atomic::MemScopeTy::wavefront:                                          \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "wavefront");            \
  case atomic::MemScopeTy::single:                                             \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "singlethread");         \
  }

#define Case(ORDER)                                                            \
  case ORDER:                                                                  \
    ScopeSwitch(ORDER)

  switch (Ordering) {
  default:
    __builtin_unreachable();
    Case(atomic::relaxed);
    Case(atomic::aquire);
    Case(atomic::release);
    Case(atomic::acq_rel);
    Case(atomic::seq_cst);
#undef Case
#undef ScopeSwitch
  }
}

uint32_t SHARED(namedBarrierTracker);

void namedBarrierInit() {
  // No global constructors, and shared memory is not zero-initialized.
  atomic::store(&namedBarrierTracker, 0u, atomic::release);
}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(atomic::aquire);

  // Named barrier implementation for AMDGCN.
  // Uses two 16 bit unsigned counters. One for the number of waves to have
  // reached the barrier, and one to count how many times the barrier has been
  // passed. These are packed in a single atomically accessed 32 bit integer.
  // Low bits for the number of waves, assumed zero before this call.
  // High bits to count the number of times the barrier has been passed.

  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;

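  // Worked example (hypothetical NumWaves == 4): the first three arriving
  // wave leaders read low counts 0, 1, and 2 and spin on the generation bits;
  // the fourth reads 3, zeroes the low half and bumps the high half, which
  // releases the spinning waves.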
  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                atomic::relaxed); // commutative

    // Record the number of times the barrier has been passed
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in low bits so this is the last wave.
      // Set low bits to zero and increment high bits
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves
      atomic::store(&namedBarrierTracker, load, atomic::relaxed);
    } else {
      // more waves still to go, spin until generation counter changes
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, atomic::relaxed);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(atomic::release);
}

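// Fences at increasing scope: an OpenMP team maps to an AMDGPU workgroup, the
// kernel fence covers the whole device (agent), and the system fence is meant
// to also cover memory shared with the host.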
void fenceTeam(atomic::OrderingTy Ordering) {
  return __scoped_atomic_thread_fence(Ordering, atomic::workgroup);
}

void fenceKernel(atomic::OrderingTy Ordering) {
  return __scoped_atomic_thread_fence(Ordering, atomic::device);
}

void fenceSystem(atomic::OrderingTy Ordering) {
  return __scoped_atomic_thread_fence(Ordering, atomic::system);
}

void syncWarp(__kmpc_impl_lanemask_t) {
  // This is a no-op on current AMDGPU hardware but it is used by the optimizer
  // to enforce convergent behaviour between control flow graphs.
  __builtin_amdgcn_wave_barrier();
}

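// s_barrier only synchronizes execution; the team-scope fences around it
// provide the requested memory ordering (release before the barrier, acquire
// after it, or seq_cst fences for stronger orderings).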
void syncThreads(atomic::OrderingTy Ordering) {
  if (Ordering != atomic::relaxed)
    fenceTeam(Ordering == atomic::acq_rel ? atomic::release : atomic::seq_cst);

  __builtin_amdgcn_s_barrier();

  if (Ordering != atomic::relaxed)
    fenceTeam(Ordering == atomic::acq_rel ? atomic::aquire : atomic::seq_cst);
}
void syncThreadsAligned(atomic::OrderingTy Ordering) { syncThreads(Ordering); }

// TODO: Don't have wavefront lane locks. Possibly can't have them.
void unsetLock(omp_lock_t *) { __builtin_trap(); }
int testLock(omp_lock_t *) { __builtin_trap(); }
void initLock(omp_lock_t *) { __builtin_trap(); }
void destroyLock(omp_lock_t *) { __builtin_trap(); }
void setLock(omp_lock_t *) { __builtin_trap(); }

constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

void unsetCriticalLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, atomic::acq_rel);
}

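// Only the lowest active lane acquires the lock on behalf of its wavefront;
// intra-wavefront mutual exclusion is expected to be handled by the caller or
// by compiler-generated code. Letting every lane of a lockstep wavefront spin
// on the same lock could deadlock once one lane wins the CAS.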
void setCriticalLock(omp_lock_t *Lock) {
  uint64_t LowestActiveThread = utils::ffs(mapping::activemask()) - 1;
  if (mapping::getThreadIdInWarp() == LowestActiveThread) {
    fenceKernel(atomic::release);
    while (
        !cas((uint32_t *)Lock, UNSET, SET, atomic::relaxed, atomic::relaxed)) {
      __builtin_amdgcn_s_sleep(32);
    }
    fenceKernel(atomic::aquire);
  }
}

#pragma omp end declare variant
///}

/// NVPTX Implementation
///
///{
#pragma omp begin declare variant match(                                       \
        device = {arch(nvptx, nvptx64)},                                       \
            implementation = {extension(match_any)})

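// Note: the NVVM builtin takes no ordering or scope arguments, so Ordering
// and MemScope are accepted but not used here.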
uint32_t atomicInc(uint32_t *Address, uint32_t Val, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}

void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0, nullptr);

  // The named barrier is used by the active parallel threads of a team in an
  // L1 parallel region to synchronize with each other.
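  // PTX exposes 16 named barrier resources; number 7 is used here so it does
  // not alias the default barrier 0 (used by __syncthreads) or barrier 8
  // (used by syncThreads below).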
  constexpr int BarrierNo = 7;
  __nvvm_barrier_sync_cnt(BarrierNo, NumThreads);
}

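// These lower to membar.cta/gl/sys, full memory fences at block, device, and
// system scope respectively; the requested ordering is ignored since membar
// has no weaker forms.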
void fenceTeam(atomic::OrderingTy) { __nvvm_membar_cta(); }

void fenceKernel(atomic::OrderingTy) { __nvvm_membar_gl(); }

void fenceSystem(atomic::OrderingTy) { __nvvm_membar_sys(); }

void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }

void syncThreads(atomic::OrderingTy Ordering) {
  constexpr int BarrierNo = 8;
  __nvvm_barrier_sync(BarrierNo);
}

void syncThreadsAligned(atomic::OrderingTy Ordering) { __syncthreads(); }

constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

// TODO: This seems to hide a bug in the declare variant handling. If it is
// called before it is defined here, the overload won't happen. Investigate
// later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, atomic::seq_cst);
}

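// Atomically adding zero reads the current value, so this returns non-zero
// iff the lock is currently SET.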
int testLock(omp_lock_t *Lock) {
  return atomic::add((uint32_t *)Lock, 0u, atomic::seq_cst);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }

void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }

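// Spin with a coarse backoff: after each failed CAS the thread busy-waits for
// OMP_SPIN * blockId clock cycles, which staggers retries across blocks. The
// cycle computation below accounts for wrap-around of the 32-bit clock
// register.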
void setLock(omp_lock_t *Lock) {
  // TODO: Not sure spinning is a good idea here...
  while (atomic::cas((uint32_t *)Lock, UNSET, SET, atomic::seq_cst,
                     atomic::seq_cst) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockIdInKernel()) {
        break;
      }
    }
  } // Retry until the CAS reads UNSET.
}

void unsetCriticalLock(omp_lock_t *Lock) { unsetLock(Lock); }

void setCriticalLock(omp_lock_t *Lock) { setLock(Lock); }

#pragma omp end declare variant
///}

} // namespace impl

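// Only generic mode (non-SPMD) uses the named barrier, so its shared tracker
// only needs to be initialized in that case.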
void synchronize::init(bool IsSPMD) {
  if (!IsSPMD)
    impl::namedBarrierInit();
}

void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

void synchronize::threads(atomic::OrderingTy Ordering) {
  impl::syncThreads(Ordering);
}

void synchronize::threadsAligned(atomic::OrderingTy Ordering) {
  impl::syncThreadsAligned(Ordering);
}

void fence::team(atomic::OrderingTy Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(atomic::OrderingTy Ordering) { impl::fenceKernel(Ordering); }

void fence::system(atomic::OrderingTy Ordering) { impl::fenceSystem(Ordering); }

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, atomic::OrderingTy Ordering,
                     atomic::MemScopeTy MemScope) {
  return impl::atomicInc(Addr, V, Ordering, MemScope);
}

void unsetCriticalLock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

void setCriticalLock(omp_lock_t *Lock) { impl::setLock(Lock); }

extern "C" {
void __kmpc_ordered(IdentTy *Loc, int32_t TId) {}

void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
  __kmpc_barrier(Loc, TId);
  return 0;
}

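// Pick the cheapest barrier that is correct for the current execution mode:
// the generic-mode main thread is alone in its team and only needs the flush,
// SPMD mode can use the aligned block-wide barrier, and generic-mode team
// threads use the named barrier sized to omp_get_num_threads().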
void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  if (mapping::isMainThreadInGenericMode())
    return __kmpc_flush(Loc);

  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  impl::namedBarrier();
}

[[clang::noinline]] void __kmpc_barrier_simple_spmd(IdentTy *Loc, int32_t TId) {
  synchronize::threadsAligned(atomic::OrderingTy::seq_cst);
}

[[clang::noinline]] void __kmpc_barrier_simple_generic(IdentTy *Loc,
                                                       int32_t TId) {
  synchronize::threads(atomic::OrderingTy::seq_cst);
}

int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_masked(IdentTy *Loc, int32_t TId, int32_t Filter) {
  return omp_get_thread_num() == Filter;
}

void __kmpc_end_masked(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) { fence::kernel(atomic::seq_cst); }

uint64_t __kmpc_warp_active_thread_mask(void) { return mapping::activemask(); }

void __kmpc_syncwarp(uint64_t Mask) { synchronize::warp(Mask); }

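// A `#pragma omp critical` region is lowered to a __kmpc_critical /
// __kmpc_end_critical pair around the region body, with Name identifying the
// lock that backs the (possibly named) critical section.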
void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::setCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::unsetCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}

void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }

void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }

void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }

void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }

void ompx_sync_block(int Ordering) {
  impl::syncThreadsAligned(atomic::OrderingTy(Ordering));
}
void ompx_sync_block_acq_rel() {
  impl::syncThreadsAligned(atomic::OrderingTy::acq_rel);
}
void ompx_sync_block_divergent(int Ordering) {
  impl::syncThreads(atomic::OrderingTy(Ordering));
}
} // extern "C"

#pragma omp end declare target