/* Copyright (C) 2015-2020 Free Software Foundation, Inc.
   Contributed by Alexander Monakov <amonakov@ispras.ru>

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */
/* This is an NVPTX-specific implementation of a barrier synchronization
   mechanism for libgomp.  This type is private to the library.  This
   implementation uses atomic instructions and the bar.sync instruction.  */
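/* A minimal usage sketch (illustrative, not part of the original header):
   a team of COUNT threads initializes the barrier once and then
   rendezvouses on it, where COUNT is a placeholder for the team size:

     gomp_barrier_t bar;
     gomp_barrier_init (&bar, COUNT);
     ...each of the COUNT threads eventually calls...
     gomp_barrier_wait (&bar);
     ...
     gomp_barrier_destroy (&bar);  */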

#ifndef GOMP_BARRIER_H
#define GOMP_BARRIER_H 1

#include "mutex.h"

typedef struct
{
  unsigned total;
  unsigned generation;
  unsigned awaited;
  unsigned awaited_final;
} gomp_barrier_t;

typedef unsigned int gomp_barrier_state_t;

/* The generation field contains a counter in the high bits, with a few
   low bits dedicated to flags.  Note that TASK_PENDING and WAS_LAST can
   share space because WAS_LAST is never stored back to generation.  */
#define BAR_TASK_PENDING	1
#define BAR_WAS_LAST		1
#define BAR_WAITING_FOR_TASK	2
#define BAR_CANCELLED		4
#define BAR_INCR		8
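/* Worked example for the layout above (illustrative, not part of the
   original header): with BAR_INCR == 8 the counter occupies bits 3 and
   up, so a generation value of 0x1c carries counter 0x1c / BAR_INCR == 3
   together with the BAR_CANCELLED (0x4) flag.  The mask
   -BAR_INCR | BAR_CANCELLED used in the wait_start routines below keeps
   exactly those two pieces and drops the task-related flag bits.  */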

static inline void gomp_barrier_init (gomp_barrier_t *bar, unsigned count)
{
  bar->total = count;
  bar->awaited = count;
  bar->awaited_final = count;
  bar->generation = 0;
}

static inline void gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count)
{
  __atomic_add_fetch (&bar->awaited, count - bar->total, MEMMODEL_ACQ_REL);
  bar->total = count;
}
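/* Worked example (illustrative): re-initializing a 4-thread barrier for
   6 threads atomically adds 6 - 4 == 2 to bar->awaited, so arrivals
   already counted against the old team size remain valid and the
   counter does not have to be reset.  */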
67 
gomp_barrier_destroy(gomp_barrier_t * bar)68 static inline void gomp_barrier_destroy (gomp_barrier_t *bar)
69 {
70 }
71 
72 extern void gomp_barrier_wait (gomp_barrier_t *);
73 extern void gomp_barrier_wait_last (gomp_barrier_t *);
74 extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t);
75 extern void gomp_team_barrier_wait (gomp_barrier_t *);
76 extern void gomp_team_barrier_wait_final (gomp_barrier_t *);
77 extern void gomp_team_barrier_wait_end (gomp_barrier_t *,
78 					gomp_barrier_state_t);
79 extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *);
80 extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *,
81 					       gomp_barrier_state_t);
82 extern void gomp_team_barrier_wake (gomp_barrier_t *, int);
83 struct gomp_team;
84 extern void gomp_team_barrier_cancel (struct gomp_team *);
85 
86 static inline gomp_barrier_state_t
gomp_barrier_wait_start(gomp_barrier_t * bar)87 gomp_barrier_wait_start (gomp_barrier_t *bar)
88 {
89   unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
90   ret &= -BAR_INCR | BAR_CANCELLED;
91   /* A memory barrier is needed before exiting from the various forms
92      of gomp_barrier_wait, to satisfy OpenMP API version 3.1 section
93      2.8.6 flush Construct, which says there is an implicit flush during
94      a barrier region.  This is a convenient place to add the barrier,
95      so we use MEMMODEL_ACQ_REL here rather than MEMMODEL_ACQUIRE.  */
96   if (__atomic_add_fetch (&bar->awaited, -1, MEMMODEL_ACQ_REL) == 0)
97     ret |= BAR_WAS_LAST;
98   return ret;
99 }
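
/* A minimal sketch of the start/end protocol (illustrative; the name
   example_barrier_wait_sketch is hypothetical, and the real wait loop
   lives in the NVPTX bar.c): every thread grabs a state token, and the
   thread that sees BAR_WAS_LAST is responsible for releasing the team
   in the _end routine.  */
static inline void
example_barrier_wait_sketch (gomp_barrier_t *bar)
{
  /* Decrement awaited and snapshot the current generation.  */
  gomp_barrier_state_t state = gomp_barrier_wait_start (bar);
  /* The last arriver bumps the generation; everyone synchronizes.  */
  gomp_barrier_wait_end (bar, state);
}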

static inline gomp_barrier_state_t
gomp_barrier_wait_cancel_start (gomp_barrier_t *bar)
{
  return gomp_barrier_wait_start (bar);
}

/* This is like gomp_barrier_wait_start, except it decrements
   bar->awaited_final rather than bar->awaited and should be used
   for the gomp_team_end barrier only.  */
static inline gomp_barrier_state_t
gomp_barrier_wait_final_start (gomp_barrier_t *bar)
{
  unsigned int ret = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE);
  ret &= -BAR_INCR | BAR_CANCELLED;
  /* See above gomp_barrier_wait_start comment.  */
  if (__atomic_add_fetch (&bar->awaited_final, -1, MEMMODEL_ACQ_REL) == 0)
    ret |= BAR_WAS_LAST;
  return ret;
}

static inline bool
gomp_barrier_last_thread (gomp_barrier_state_t state)
{
  return state & BAR_WAS_LAST;
}

/* All the inlines below must be called with team->task_lock
   held.  */
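/* Illustrative calling pattern for the lock requirement above (a sketch;
   gomp_mutex_lock/gomp_mutex_unlock come from mutex.h, and the task_lock
   and barrier members live in struct gomp_team, which is only forward
   declared in this header):

     gomp_mutex_lock (&team->task_lock);
     gomp_team_barrier_set_task_pending (&team->barrier);
     gomp_mutex_unlock (&team->task_lock);  */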

static inline void
gomp_team_barrier_set_task_pending (gomp_barrier_t *bar)
{
  bar->generation |= BAR_TASK_PENDING;
}

static inline void
gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar)
{
  bar->generation &= ~BAR_TASK_PENDING;
}

static inline void
gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar)
{
  bar->generation |= BAR_WAITING_FOR_TASK;
}

static inline bool
gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar)
{
  return (bar->generation & BAR_WAITING_FOR_TASK) != 0;
}

static inline bool
gomp_team_barrier_cancelled (gomp_barrier_t *bar)
{
  return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0);
}

static inline void
gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
  bar->generation = (state & -BAR_INCR) + BAR_INCR;
}
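/* Worked example (illustrative): (state & -BAR_INCR) + BAR_INCR advances
   the generation counter by one while clearing the flag bits, so a state
   of 0x0d (counter 1, BAR_TASK_PENDING | BAR_CANCELLED) stores 0x10
   (counter 2, no flags).  */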

#endif /* GOMP_BARRIER_H */