xref: /netbsd-src/external/apache2/llvm/dist/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp (revision 627f7eb200a4419d89b531d55fccd2ee3ffdcde0)
1 //===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides a class for OpenMP runtime code generation specialized to NVPTX
10 // targets.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGOpenMPRuntimeNVPTX.h"
15 #include "CodeGenFunction.h"
16 #include "clang/AST/DeclOpenMP.h"
17 #include "clang/AST/StmtOpenMP.h"
18 #include "clang/AST/StmtVisitor.h"
19 #include "clang/Basic/Cuda.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 
22 using namespace clang;
23 using namespace CodeGen;
24 
25 namespace {
26 enum OpenMPRTLFunctionNVPTX {
27   /// Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
28   /// int16_t RequiresOMPRuntime);
29   OMPRTL_NVPTX__kmpc_kernel_init,
30   /// Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
31   OMPRTL_NVPTX__kmpc_kernel_deinit,
32   /// Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
33   /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
34   OMPRTL_NVPTX__kmpc_spmd_kernel_init,
35   /// Call to void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
36   OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2,
37   /// Call to void __kmpc_kernel_prepare_parallel(void
38   /// *outlined_function, int16_t
39   /// IsOMPRuntimeInitialized);
40   OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
41   /// Call to bool __kmpc_kernel_parallel(void **outlined_function,
42   /// int16_t IsOMPRuntimeInitialized);
43   OMPRTL_NVPTX__kmpc_kernel_parallel,
44   /// Call to void __kmpc_kernel_end_parallel();
45   OMPRTL_NVPTX__kmpc_kernel_end_parallel,
46   /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
47   /// global_tid);
48   OMPRTL_NVPTX__kmpc_serialized_parallel,
49   /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
50   /// global_tid);
51   OMPRTL_NVPTX__kmpc_end_serialized_parallel,
52   /// Call to int32_t __kmpc_shuffle_int32(int32_t element,
53   /// int16_t lane_offset, int16_t warp_size);
54   OMPRTL_NVPTX__kmpc_shuffle_int32,
55   /// Call to int64_t __kmpc_shuffle_int64(int64_t element,
56   /// int16_t lane_offset, int16_t warp_size);
57   OMPRTL_NVPTX__kmpc_shuffle_int64,
58   /// Call to __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc, kmp_int32
59   /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
60   /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
61   /// lane_offset, int16_t shortCircuit),
62   /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
63   OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2,
64   /// Call to __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
65   /// global_tid, void *global_buffer, int32_t num_of_records, void*
66   /// reduce_data,
67   /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
68   /// lane_offset, int16_t shortCircuit),
69   /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
70   /// (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
71   /// void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
72   /// void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
73   /// int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
74   /// *buffer, int idx, void *reduce_data));
75   OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2,
76   /// Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
77   OMPRTL_NVPTX__kmpc_end_reduce_nowait,
78   /// Call to void __kmpc_data_sharing_init_stack();
79   OMPRTL_NVPTX__kmpc_data_sharing_init_stack,
80   /// Call to void __kmpc_data_sharing_init_stack_spmd();
81   OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd,
82   /// Call to void* __kmpc_data_sharing_coalesced_push_stack(size_t size,
83   /// int16_t UseSharedMemory);
84   OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack,
85   /// Call to void __kmpc_data_sharing_pop_stack(void *a);
86   OMPRTL_NVPTX__kmpc_data_sharing_pop_stack,
87   /// Call to void __kmpc_begin_sharing_variables(void ***args,
88   /// size_t n_args);
89   OMPRTL_NVPTX__kmpc_begin_sharing_variables,
90   /// Call to void __kmpc_end_sharing_variables();
91   OMPRTL_NVPTX__kmpc_end_sharing_variables,
92   /// Call to void __kmpc_get_shared_variables(void ***GlobalArgs)
93   OMPRTL_NVPTX__kmpc_get_shared_variables,
94   /// Call to uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32
95   /// global_tid);
96   OMPRTL_NVPTX__kmpc_parallel_level,
97   /// Call to int8_t __kmpc_is_spmd_exec_mode();
98   OMPRTL_NVPTX__kmpc_is_spmd_exec_mode,
99   /// Call to void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
100   /// const void *buf, size_t size, int16_t is_shared, const void **res);
101   OMPRTL_NVPTX__kmpc_get_team_static_memory,
102   /// Call to void __kmpc_restore_team_static_memory(int16_t
103   /// isSPMDExecutionMode, int16_t is_shared);
104   OMPRTL_NVPTX__kmpc_restore_team_static_memory,
105   /// Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
106   OMPRTL__kmpc_barrier,
107   /// Call to void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
108   /// global_tid);
109   OMPRTL__kmpc_barrier_simple_spmd,
110   /// Call to int32_t __kmpc_warp_active_thread_mask(void);
111   OMPRTL_NVPTX__kmpc_warp_active_thread_mask,
112   /// Call to void __kmpc_syncwarp(int32_t Mask);
113   OMPRTL_NVPTX__kmpc_syncwarp,
114 };
115 
116 /// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
117 class NVPTXActionTy final : public PrePostActionTy {
118   llvm::FunctionCallee EnterCallee = nullptr;
119   ArrayRef<llvm::Value *> EnterArgs;
120   llvm::FunctionCallee ExitCallee = nullptr;
121   ArrayRef<llvm::Value *> ExitArgs;
122   bool Conditional = false;
123   llvm::BasicBlock *ContBlock = nullptr;
124 
125 public:
126   NVPTXActionTy(llvm::FunctionCallee EnterCallee,
127                 ArrayRef<llvm::Value *> EnterArgs,
128                 llvm::FunctionCallee ExitCallee,
129                 ArrayRef<llvm::Value *> ExitArgs, bool Conditional = false)
130       : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
131         ExitArgs(ExitArgs), Conditional(Conditional) {}
132   void Enter(CodeGenFunction &CGF) override {
133     llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
134     if (Conditional) {
135       llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
136       auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
137       ContBlock = CGF.createBasicBlock("omp_if.end");
138       // Generate the branch (If-stmt)
139       CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
140       CGF.EmitBlock(ThenBlock);
141     }
142   }
143   void Done(CodeGenFunction &CGF) {
144     // Emit the rest of blocks/branches
145     CGF.EmitBranch(ContBlock);
146     CGF.EmitBlock(ContBlock, true);
147   }
148   void Exit(CodeGenFunction &CGF) override {
149     CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
150   }
151 };
152 
153 /// A class to track the execution mode when generating code for directives
154 /// within a target region. The appropriate mode (SPMD|NON-SPMD) is set on
155 /// entry to the target region and used by nested directives such as
156 /// 'parallel' to emit optimized code.
157 class ExecutionRuntimeModesRAII {
158 private:
159   CGOpenMPRuntimeNVPTX::ExecutionMode SavedExecMode =
160       CGOpenMPRuntimeNVPTX::EM_Unknown;
161   CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode;
162   bool SavedRuntimeMode = false;
163   bool *RuntimeMode = nullptr;
164 
165 public:
166   /// Constructor for Non-SPMD mode.
167   ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode)
168       : ExecMode(ExecMode) {
169     SavedExecMode = ExecMode;
170     ExecMode = CGOpenMPRuntimeNVPTX::EM_NonSPMD;
171   }
172   /// Constructor for SPMD mode.
173   ExecutionRuntimeModesRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &ExecMode,
174                             bool &RuntimeMode, bool FullRuntimeMode)
175       : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
176     SavedExecMode = ExecMode;
177     SavedRuntimeMode = RuntimeMode;
178     ExecMode = CGOpenMPRuntimeNVPTX::EM_SPMD;
179     RuntimeMode = FullRuntimeMode;
180   }
181   ~ExecutionRuntimeModesRAII() {
182     ExecMode = SavedExecMode;
183     if (RuntimeMode)
184       *RuntimeMode = SavedRuntimeMode;
185   }
186 };
187 
188 /// GPU Configuration:  This information can be derived from CUDA registers;
189 /// however, providing compile-time constants helps generate more efficient
190 /// code.  For all practical purposes this is fine because the configuration
191 /// is the same for all known NVPTX architectures.
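/// For example, a thread's warp and lane within its CTA follow directly from
/// these constants: warp_id = tid >> LaneIDBits and lane_id = tid & LaneIDMask
/// (see getNVPTXWarpID and getNVPTXLaneID below).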
192 enum MachineConfiguration : unsigned {
193   WarpSize = 32,
194   /// Number of bits required to represent a lane identifier, which is
195   /// computed as log_2(WarpSize).
196   LaneIDBits = 5,
197   LaneIDMask = WarpSize - 1,
198 
199   /// Global memory alignment for performance.
200   GlobalMemoryAlignment = 128,
201 
202   /// Maximal size of the shared memory buffer.
203   SharedMemorySize = 128,
204 };
205 
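/// Return the underlying declaration referenced by a private item expression,
/// stripping any enclosing array subscripts or OpenMP array sections.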
206 static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
207   RefExpr = RefExpr->IgnoreParens();
208   if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) {
209     const Expr *Base = ASE->getBase()->IgnoreParenImpCasts();
210     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
211       Base = TempASE->getBase()->IgnoreParenImpCasts();
212     RefExpr = Base;
213   } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(RefExpr)) {
214     const Expr *Base = OASE->getBase()->IgnoreParenImpCasts();
215     while (const auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
216       Base = TempOASE->getBase()->IgnoreParenImpCasts();
217     while (const auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
218       Base = TempASE->getBase()->IgnoreParenImpCasts();
219     RefExpr = Base;
220   }
221   RefExpr = RefExpr->IgnoreParenImpCasts();
222   if (const auto *DE = dyn_cast<DeclRefExpr>(RefExpr))
223     return cast<ValueDecl>(DE->getDecl()->getCanonicalDecl());
224   const auto *ME = cast<MemberExpr>(RefExpr);
225   return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
226 }
227 
228 
229 static RecordDecl *buildRecordForGlobalizedVars(
230     ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
231     ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
232     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
233         &MappedDeclsFields, int BufSize) {
234   using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
235   if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
236     return nullptr;
237   SmallVector<VarsDataTy, 4> GlobalizedVars;
238   for (const ValueDecl *D : EscapedDecls)
239     GlobalizedVars.emplace_back(
240         CharUnits::fromQuantity(std::max(
241             C.getDeclAlign(D).getQuantity(),
242             static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
243         D);
244   for (const ValueDecl *D : EscapedDeclsForTeams)
245     GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
246   llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
247     return L.first > R.first;
248   });
249 
250   // Build struct _globalized_locals_ty {
251   //         /*  globalized vars  */[WarpSize] align (max(decl_align,
252   //         GlobalMemoryAlignment))
253   //         /*  globalized vars  */ for EscapedDeclsForTeams
254   //       };
255   RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
256   GlobalizedRD->startDefinition();
257   llvm::SmallPtrSet<const ValueDecl *, 16> SingleEscaped(
258       EscapedDeclsForTeams.begin(), EscapedDeclsForTeams.end());
259   for (const auto &Pair : GlobalizedVars) {
260     const ValueDecl *VD = Pair.second;
261     QualType Type = VD->getType();
262     if (Type->isLValueReferenceType())
263       Type = C.getPointerType(Type.getNonReferenceType());
264     else
265       Type = Type.getNonReferenceType();
266     SourceLocation Loc = VD->getLocation();
267     FieldDecl *Field;
268     if (SingleEscaped.count(VD)) {
269       Field = FieldDecl::Create(
270           C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
271           C.getTrivialTypeSourceInfo(Type, SourceLocation()),
272           /*BW=*/nullptr, /*Mutable=*/false,
273           /*InitStyle=*/ICIS_NoInit);
274       Field->setAccess(AS_public);
275       if (VD->hasAttrs()) {
276         for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
277              E(VD->getAttrs().end());
278              I != E; ++I)
279           Field->addAttr(*I);
280       }
281     } else {
282       llvm::APInt ArraySize(32, BufSize);
283       Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
284                                     0);
285       Field = FieldDecl::Create(
286           C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
287           C.getTrivialTypeSourceInfo(Type, SourceLocation()),
288           /*BW=*/nullptr, /*Mutable=*/false,
289           /*InitStyle=*/ICIS_NoInit);
290       Field->setAccess(AS_public);
291       llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
292                                      static_cast<CharUnits::QuantityType>(
293                                          GlobalMemoryAlignment)));
294       Field->addAttr(AlignedAttr::CreateImplicit(
295           C, /*IsAlignmentExpr=*/true,
296           IntegerLiteral::Create(C, Align,
297                                  C.getIntTypeForBitwidth(32, /*Signed=*/0),
298                                  SourceLocation()),
299           {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
300     }
301     GlobalizedRD->addDecl(Field);
302     MappedDeclsFields.try_emplace(VD, Field);
303   }
304   GlobalizedRD->completeDefinition();
305   return GlobalizedRD;
306 }
307 
308 /// Get the list of variables that can escape their declaration context.
309 class CheckVarsEscapingDeclContext final
310     : public ConstStmtVisitor<CheckVarsEscapingDeclContext> {
311   CodeGenFunction &CGF;
312   llvm::SetVector<const ValueDecl *> EscapedDecls;
313   llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
314   llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
315   RecordDecl *GlobalizedRD = nullptr;
316   llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
317   bool AllEscaped = false;
318   bool IsForCombinedParallelRegion = false;
319 
320   void markAsEscaped(const ValueDecl *VD) {
321     // Do not globalize declare target variables.
322     if (!isa<VarDecl>(VD) ||
323         OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
324       return;
325     VD = cast<ValueDecl>(VD->getCanonicalDecl());
326     // Use user-specified allocation.
327     if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
328       return;
329     // Variables captured by value must be globalized.
330     if (auto *CSI = CGF.CapturedStmtInfo) {
331       if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
332         // Check if we need to capture the variable that was already captured by
333         // value in the outer region.
334         if (!IsForCombinedParallelRegion) {
335           if (!FD->hasAttrs())
336             return;
337           const auto *Attr = FD->getAttr<OMPCaptureKindAttr>();
338           if (!Attr)
339             return;
340           if (((Attr->getCaptureKind() != OMPC_map) &&
341                !isOpenMPPrivate(
342                    static_cast<OpenMPClauseKind>(Attr->getCaptureKind()))) ||
343               ((Attr->getCaptureKind() == OMPC_map) &&
344                !FD->getType()->isAnyPointerType()))
345             return;
346         }
347         if (!FD->getType()->isReferenceType()) {
348           assert(!VD->getType()->isVariablyModifiedType() &&
349                  "Parameter captured by value with variably modified type");
350           EscapedParameters.insert(VD);
351         } else if (!IsForCombinedParallelRegion) {
352           return;
353         }
354       }
355     }
356     if ((!CGF.CapturedStmtInfo ||
357          (IsForCombinedParallelRegion && CGF.CapturedStmtInfo)) &&
358         VD->getType()->isReferenceType())
359       // Do not globalize variables with reference type.
360       return;
361     if (VD->getType()->isVariablyModifiedType())
362       EscapedVariableLengthDecls.insert(VD);
363     else
364       EscapedDecls.insert(VD);
365   }
366 
367   void VisitValueDecl(const ValueDecl *VD) {
368     if (VD->getType()->isLValueReferenceType())
369       markAsEscaped(VD);
370     if (const auto *VarD = dyn_cast<VarDecl>(VD)) {
371       if (!isa<ParmVarDecl>(VarD) && VarD->hasInit()) {
372         const bool SavedAllEscaped = AllEscaped;
373         AllEscaped = VD->getType()->isLValueReferenceType();
374         Visit(VarD->getInit());
375         AllEscaped = SavedAllEscaped;
376       }
377     }
378   }
379   void VisitOpenMPCapturedStmt(const CapturedStmt *S,
380                                ArrayRef<OMPClause *> Clauses,
381                                bool IsCombinedParallelRegion) {
382     if (!S)
383       return;
384     for (const CapturedStmt::Capture &C : S->captures()) {
385       if (C.capturesVariable() && !C.capturesVariableByCopy()) {
386         const ValueDecl *VD = C.getCapturedVar();
387         bool SavedIsForCombinedParallelRegion = IsForCombinedParallelRegion;
388         if (IsCombinedParallelRegion) {
389           // Check if the variable is privatized in the combined construct, in
390           // which case its private copies must be shared in the inner parallel
391           // directive.
392           IsForCombinedParallelRegion = false;
393           for (const OMPClause *C : Clauses) {
394             if (!isOpenMPPrivate(C->getClauseKind()) ||
395                 C->getClauseKind() == OMPC_reduction ||
396                 C->getClauseKind() == OMPC_linear ||
397                 C->getClauseKind() == OMPC_private)
398               continue;
399             ArrayRef<const Expr *> Vars;
400             if (const auto *PC = dyn_cast<OMPFirstprivateClause>(C))
401               Vars = PC->getVarRefs();
402             else if (const auto *PC = dyn_cast<OMPLastprivateClause>(C))
403               Vars = PC->getVarRefs();
404             else
405               llvm_unreachable("Unexpected clause.");
406             for (const auto *E : Vars) {
407               const Decl *D =
408                   cast<DeclRefExpr>(E)->getDecl()->getCanonicalDecl();
409               if (D == VD->getCanonicalDecl()) {
410                 IsForCombinedParallelRegion = true;
411                 break;
412               }
413             }
414             if (IsForCombinedParallelRegion)
415               break;
416           }
417         }
418         markAsEscaped(VD);
419         if (isa<OMPCapturedExprDecl>(VD))
420           VisitValueDecl(VD);
421         IsForCombinedParallelRegion = SavedIsForCombinedParallelRegion;
422       }
423     }
424   }
425 
426   void buildRecordForGlobalizedVars(bool IsInTTDRegion) {
427     assert(!GlobalizedRD &&
428            "Record for globalized variables is built already.");
429     ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
430     if (IsInTTDRegion)
431       EscapedDeclsForTeams = EscapedDecls.getArrayRef();
432     else
433       EscapedDeclsForParallel = EscapedDecls.getArrayRef();
434     GlobalizedRD = ::buildRecordForGlobalizedVars(
435         CGF.getContext(), EscapedDeclsForParallel, EscapedDeclsForTeams,
436         MappedDeclsFields, WarpSize);
437   }
438 
439 public:
440   CheckVarsEscapingDeclContext(CodeGenFunction &CGF,
441                                ArrayRef<const ValueDecl *> TeamsReductions)
442       : CGF(CGF), EscapedDecls(TeamsReductions.begin(), TeamsReductions.end()) {
443   }
444   virtual ~CheckVarsEscapingDeclContext() = default;
445   void VisitDeclStmt(const DeclStmt *S) {
446     if (!S)
447       return;
448     for (const Decl *D : S->decls())
449       if (const auto *VD = dyn_cast_or_null<ValueDecl>(D))
450         VisitValueDecl(VD);
451   }
452   void VisitOMPExecutableDirective(const OMPExecutableDirective *D) {
453     if (!D)
454       return;
455     if (!D->hasAssociatedStmt())
456       return;
457     if (const auto *S =
458             dyn_cast_or_null<CapturedStmt>(D->getAssociatedStmt())) {
459       // Do not analyze directives that do not actually require capturing,
460       // like `omp for` or `omp simd` directives.
461       llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
462       getOpenMPCaptureRegions(CaptureRegions, D->getDirectiveKind());
463       if (CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown) {
464         VisitStmt(S->getCapturedStmt());
465         return;
466       }
467       VisitOpenMPCapturedStmt(
468           S, D->clauses(),
469           CaptureRegions.back() == OMPD_parallel &&
470               isOpenMPDistributeDirective(D->getDirectiveKind()));
471     }
472   }
473   void VisitCapturedStmt(const CapturedStmt *S) {
474     if (!S)
475       return;
476     for (const CapturedStmt::Capture &C : S->captures()) {
477       if (C.capturesVariable() && !C.capturesVariableByCopy()) {
478         const ValueDecl *VD = C.getCapturedVar();
479         markAsEscaped(VD);
480         if (isa<OMPCapturedExprDecl>(VD))
481           VisitValueDecl(VD);
482       }
483     }
484   }
485   void VisitLambdaExpr(const LambdaExpr *E) {
486     if (!E)
487       return;
488     for (const LambdaCapture &C : E->captures()) {
489       if (C.capturesVariable()) {
490         if (C.getCaptureKind() == LCK_ByRef) {
491           const ValueDecl *VD = C.getCapturedVar();
492           markAsEscaped(VD);
493           if (E->isInitCapture(&C) || isa<OMPCapturedExprDecl>(VD))
494             VisitValueDecl(VD);
495         }
496       }
497     }
498   }
499   void VisitBlockExpr(const BlockExpr *E) {
500     if (!E)
501       return;
502     for (const BlockDecl::Capture &C : E->getBlockDecl()->captures()) {
503       if (C.isByRef()) {
504         const VarDecl *VD = C.getVariable();
505         markAsEscaped(VD);
506         if (isa<OMPCapturedExprDecl>(VD) || VD->isInitCapture())
507           VisitValueDecl(VD);
508       }
509     }
510   }
511   void VisitCallExpr(const CallExpr *E) {
512     if (!E)
513       return;
514     for (const Expr *Arg : E->arguments()) {
515       if (!Arg)
516         continue;
517       if (Arg->isLValue()) {
518         const bool SavedAllEscaped = AllEscaped;
519         AllEscaped = true;
520         Visit(Arg);
521         AllEscaped = SavedAllEscaped;
522       } else {
523         Visit(Arg);
524       }
525     }
526     Visit(E->getCallee());
527   }
528   void VisitDeclRefExpr(const DeclRefExpr *E) {
529     if (!E)
530       return;
531     const ValueDecl *VD = E->getDecl();
532     if (AllEscaped)
533       markAsEscaped(VD);
534     if (isa<OMPCapturedExprDecl>(VD))
535       VisitValueDecl(VD);
536     else if (const auto *VarD = dyn_cast<VarDecl>(VD))
537       if (VarD->isInitCapture())
538         VisitValueDecl(VD);
539   }
540   void VisitUnaryOperator(const UnaryOperator *E) {
541     if (!E)
542       return;
543     if (E->getOpcode() == UO_AddrOf) {
544       const bool SavedAllEscaped = AllEscaped;
545       AllEscaped = true;
546       Visit(E->getSubExpr());
547       AllEscaped = SavedAllEscaped;
548     } else {
549       Visit(E->getSubExpr());
550     }
551   }
552   void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
553     if (!E)
554       return;
555     if (E->getCastKind() == CK_ArrayToPointerDecay) {
556       const bool SavedAllEscaped = AllEscaped;
557       AllEscaped = true;
558       Visit(E->getSubExpr());
559       AllEscaped = SavedAllEscaped;
560     } else {
561       Visit(E->getSubExpr());
562     }
563   }
564   void VisitExpr(const Expr *E) {
565     if (!E)
566       return;
567     bool SavedAllEscaped = AllEscaped;
568     if (!E->isLValue())
569       AllEscaped = false;
570     for (const Stmt *Child : E->children())
571       if (Child)
572         Visit(Child);
573     AllEscaped = SavedAllEscaped;
574   }
575   void VisitStmt(const Stmt *S) {
576     if (!S)
577       return;
578     for (const Stmt *Child : S->children())
579       if (Child)
580         Visit(Child);
581   }
582 
583   /// Returns the record that handles all the escaped local variables and is
584   /// used instead of their original storage.
585   const RecordDecl *getGlobalizedRecord(bool IsInTTDRegion) {
586     if (!GlobalizedRD)
587       buildRecordForGlobalizedVars(IsInTTDRegion);
588     return GlobalizedRD;
589   }
590 
591   /// Returns the field in the globalized record for the escaped variable.
592   const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
593     assert(GlobalizedRD &&
594            "Record for globalized variables must be generated already.");
595     auto I = MappedDeclsFields.find(VD);
596     if (I == MappedDeclsFields.end())
597       return nullptr;
598     return I->getSecond();
599   }
600 
601   /// Returns the list of the escaped local variables/parameters.
602   ArrayRef<const ValueDecl *> getEscapedDecls() const {
603     return EscapedDecls.getArrayRef();
604   }
605 
606   /// Returns the set of escaped local variables that are actually parameters
607   /// passed by value.
608   const llvm::SmallPtrSetImpl<const Decl *> &getEscapedParameters() const {
609     return EscapedParameters;
610   }
611 
612   /// Returns the list of the escaped variables with the variably modified
613   /// types.
614   ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
615     return EscapedVariableLengthDecls.getArrayRef();
616   }
617 };
618 } // anonymous namespace
619 
620 /// Get the GPU warp size.
621 static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
622   return CGF.EmitRuntimeCall(
623       llvm::Intrinsic::getDeclaration(
624           &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
625       "nvptx_warp_size");
626 }
627 
628 /// Get the id of the current thread on the GPU.
629 static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
630   return CGF.EmitRuntimeCall(
631       llvm::Intrinsic::getDeclaration(
632           &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
633       "nvptx_tid");
634 }
635 
636 /// Get the id of the warp in the block.
637 /// To generate more efficient code, we assume that the warp size is 32,
638 /// which is always the case on the NVPTX device.
639 static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
640   CGBuilderTy &Bld = CGF.Builder;
641   return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id");
642 }
643 
644 /// Get the id of the current lane in the Warp.
645 /// To generate more efficient code, we assume that the warp size is 32,
646 /// which is always the case on the NVPTX device.
647 static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
648   CGBuilderTy &Bld = CGF.Builder;
649   return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask),
650                        "nvptx_lane_id");
651 }
652 
653 /// Get the maximum number of threads in a block of the GPU.
654 static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
655   return CGF.EmitRuntimeCall(
656       llvm::Intrinsic::getDeclaration(
657           &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
658       "nvptx_num_threads");
659 }
660 
661 /// Get the value of the thread_limit clause in the teams directive.
662 /// For the 'generic' execution mode, the runtime encodes thread_limit in
663 /// the launch parameters, always starting thread_limit+warpSize threads per
664 /// CTA. The threads in the last warp are reserved for master execution.
665 /// For the 'spmd' execution mode, all threads in a CTA are part of the team.
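/// E.g., in generic mode with 128 threads per CTA, thread_limit is 96 and the
/// last warp (threads 96-127) is reserved for the master.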
666 static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
667                                    bool IsInSPMDExecutionMode = false) {
668   CGBuilderTy &Bld = CGF.Builder;
669   return IsInSPMDExecutionMode
670              ? getNVPTXNumThreads(CGF)
671              : Bld.CreateNUWSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
672                                 "thread_limit");
673 }
674 
675 /// Get the thread id of the OMP master thread.
676 /// The master thread id is the first thread (lane) of the last warp in the
677 /// GPU block.  Warp size is assumed to be some power of 2.
678 /// Thread ids are 0-indexed.
679 /// E.g: If NumThreads is 33, master id is 32.
680 ///      If NumThreads is 64, master id is 32.
681 ///      If NumThreads is 1024, master id is 992.
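/// In closed form: master_tid = (NumThreads - 1) & ~(WarpSize - 1).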
682 static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
683   CGBuilderTy &Bld = CGF.Builder;
684   llvm::Value *NumThreads = getNVPTXNumThreads(CGF);
685 
686   // We assume that the warp size is a power of 2.
687   llvm::Value *Mask = Bld.CreateNUWSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));
688 
689   return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
690                        Bld.CreateNot(Mask), "master_tid");
691 }
692 
693 CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
694     CodeGenModule &CGM, SourceLocation Loc)
695     : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
696       Loc(Loc) {
697   createWorkerFunction(CGM);
698 }
699 
700 void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
701     CodeGenModule &CGM) {
702   // Create a worker function with no arguments.
703 
704   WorkerFn = llvm::Function::Create(
705       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
706       /*placeholder=*/"_worker", &CGM.getModule());
707   CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
708   WorkerFn->setDoesNotRecurse();
709 }
710 
711 CGOpenMPRuntimeNVPTX::ExecutionMode
712 CGOpenMPRuntimeNVPTX::getExecutionMode() const {
713   return CurrentExecutionMode;
714 }
715 
716 static CGOpenMPRuntimeNVPTX::DataSharingMode
717 getDataSharingMode(CodeGenModule &CGM) {
718   return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeNVPTX::CUDA
719                                           : CGOpenMPRuntimeNVPTX::Generic;
720 }
721 
722 /// Check for inner (nested) SPMD construct, if any
723 static bool hasNestedSPMDDirective(ASTContext &Ctx,
724                                    const OMPExecutableDirective &D) {
725   const auto *CS = D.getInnermostCapturedStmt();
726   const auto *Body =
727       CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
728   const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
729 
730   if (const auto *NestedDir =
731           dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
732     OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
733     switch (D.getDirectiveKind()) {
734     case OMPD_target:
735       if (isOpenMPParallelDirective(DKind))
736         return true;
737       if (DKind == OMPD_teams) {
738         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
739             /*IgnoreCaptured=*/true);
740         if (!Body)
741           return false;
742         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
743         if (const auto *NND =
744                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
745           DKind = NND->getDirectiveKind();
746           if (isOpenMPParallelDirective(DKind))
747             return true;
748         }
749       }
750       return false;
751     case OMPD_target_teams:
752       return isOpenMPParallelDirective(DKind);
753     case OMPD_target_simd:
754     case OMPD_target_parallel:
755     case OMPD_target_parallel_for:
756     case OMPD_target_parallel_for_simd:
757     case OMPD_target_teams_distribute:
758     case OMPD_target_teams_distribute_simd:
759     case OMPD_target_teams_distribute_parallel_for:
760     case OMPD_target_teams_distribute_parallel_for_simd:
761     case OMPD_parallel:
762     case OMPD_for:
763     case OMPD_parallel_for:
764     case OMPD_parallel_sections:
765     case OMPD_for_simd:
766     case OMPD_parallel_for_simd:
767     case OMPD_cancel:
768     case OMPD_cancellation_point:
769     case OMPD_ordered:
770     case OMPD_threadprivate:
771     case OMPD_allocate:
772     case OMPD_task:
773     case OMPD_simd:
774     case OMPD_sections:
775     case OMPD_section:
776     case OMPD_single:
777     case OMPD_master:
778     case OMPD_critical:
779     case OMPD_taskyield:
780     case OMPD_barrier:
781     case OMPD_taskwait:
782     case OMPD_taskgroup:
783     case OMPD_atomic:
784     case OMPD_flush:
785     case OMPD_teams:
786     case OMPD_target_data:
787     case OMPD_target_exit_data:
788     case OMPD_target_enter_data:
789     case OMPD_distribute:
790     case OMPD_distribute_simd:
791     case OMPD_distribute_parallel_for:
792     case OMPD_distribute_parallel_for_simd:
793     case OMPD_teams_distribute:
794     case OMPD_teams_distribute_simd:
795     case OMPD_teams_distribute_parallel_for:
796     case OMPD_teams_distribute_parallel_for_simd:
797     case OMPD_target_update:
798     case OMPD_declare_simd:
799     case OMPD_declare_variant:
800     case OMPD_declare_target:
801     case OMPD_end_declare_target:
802     case OMPD_declare_reduction:
803     case OMPD_declare_mapper:
804     case OMPD_taskloop:
805     case OMPD_taskloop_simd:
806     case OMPD_master_taskloop:
807     case OMPD_master_taskloop_simd:
808     case OMPD_parallel_master_taskloop:
809     case OMPD_requires:
810     case OMPD_unknown:
811       llvm_unreachable("Unexpected directive.");
812     }
813   }
814 
815   return false;
816 }
817 
818 static bool supportsSPMDExecutionMode(ASTContext &Ctx,
819                                       const OMPExecutableDirective &D) {
820   OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
821   switch (DirectiveKind) {
822   case OMPD_target:
823   case OMPD_target_teams:
824     return hasNestedSPMDDirective(Ctx, D);
825   case OMPD_target_parallel:
826   case OMPD_target_parallel_for:
827   case OMPD_target_parallel_for_simd:
828   case OMPD_target_teams_distribute_parallel_for:
829   case OMPD_target_teams_distribute_parallel_for_simd:
830   case OMPD_target_simd:
831   case OMPD_target_teams_distribute_simd:
832     return true;
833   case OMPD_target_teams_distribute:
834     return false;
835   case OMPD_parallel:
836   case OMPD_for:
837   case OMPD_parallel_for:
838   case OMPD_parallel_sections:
839   case OMPD_for_simd:
840   case OMPD_parallel_for_simd:
841   case OMPD_cancel:
842   case OMPD_cancellation_point:
843   case OMPD_ordered:
844   case OMPD_threadprivate:
845   case OMPD_allocate:
846   case OMPD_task:
847   case OMPD_simd:
848   case OMPD_sections:
849   case OMPD_section:
850   case OMPD_single:
851   case OMPD_master:
852   case OMPD_critical:
853   case OMPD_taskyield:
854   case OMPD_barrier:
855   case OMPD_taskwait:
856   case OMPD_taskgroup:
857   case OMPD_atomic:
858   case OMPD_flush:
859   case OMPD_teams:
860   case OMPD_target_data:
861   case OMPD_target_exit_data:
862   case OMPD_target_enter_data:
863   case OMPD_distribute:
864   case OMPD_distribute_simd:
865   case OMPD_distribute_parallel_for:
866   case OMPD_distribute_parallel_for_simd:
867   case OMPD_teams_distribute:
868   case OMPD_teams_distribute_simd:
869   case OMPD_teams_distribute_parallel_for:
870   case OMPD_teams_distribute_parallel_for_simd:
871   case OMPD_target_update:
872   case OMPD_declare_simd:
873   case OMPD_declare_variant:
874   case OMPD_declare_target:
875   case OMPD_end_declare_target:
876   case OMPD_declare_reduction:
877   case OMPD_declare_mapper:
878   case OMPD_taskloop:
879   case OMPD_taskloop_simd:
880   case OMPD_master_taskloop:
881   case OMPD_master_taskloop_simd:
882   case OMPD_parallel_master_taskloop:
883   case OMPD_requires:
884   case OMPD_unknown:
885     break;
886   }
887   llvm_unreachable(
888       "Unknown programming model for OpenMP directive on NVPTX target.");
889 }
890 
891 /// Check if the loop-based directive uses static scheduling: it has no ordered
892 /// clause and either no schedule clause at all or a schedule(static) clause.
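/// E.g., 'schedule(static)' or no schedule clause qualifies, while
/// 'schedule(dynamic)' or the presence of an 'ordered' clause does not.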
893 static bool hasStaticScheduling(const OMPExecutableDirective &D) {
894   assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
895          isOpenMPLoopDirective(D.getDirectiveKind()) &&
896          "Expected loop-based directive.");
897   return !D.hasClausesOfKind<OMPOrderedClause>() &&
898          (!D.hasClausesOfKind<OMPScheduleClause>() ||
899           llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
900                        [](const OMPScheduleClause *C) {
901                          return C->getScheduleKind() == OMPC_SCHEDULE_static;
902                        }));
903 }
904 
905 /// Check for inner (nested) lightweight runtime construct, if any
906 static bool hasNestedLightweightDirective(ASTContext &Ctx,
907                                           const OMPExecutableDirective &D) {
908   assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
909   const auto *CS = D.getInnermostCapturedStmt();
910   const auto *Body =
911       CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
912   const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
913 
914   if (const auto *NestedDir =
915           dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
916     OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
917     switch (D.getDirectiveKind()) {
918     case OMPD_target:
919       if (isOpenMPParallelDirective(DKind) &&
920           isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
921           hasStaticScheduling(*NestedDir))
922         return true;
923       if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
924         return true;
925       if (DKind == OMPD_parallel) {
926         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
927             /*IgnoreCaptured=*/true);
928         if (!Body)
929           return false;
930         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
931         if (const auto *NND =
932                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
933           DKind = NND->getDirectiveKind();
934           if (isOpenMPWorksharingDirective(DKind) &&
935               isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
936             return true;
937         }
938       } else if (DKind == OMPD_teams) {
939         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
940             /*IgnoreCaptured=*/true);
941         if (!Body)
942           return false;
943         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
944         if (const auto *NND =
945                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
946           DKind = NND->getDirectiveKind();
947           if (isOpenMPParallelDirective(DKind) &&
948               isOpenMPWorksharingDirective(DKind) &&
949               isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
950             return true;
951           if (DKind == OMPD_parallel) {
952             Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
953                 /*IgnoreCaptured=*/true);
954             if (!Body)
955               return false;
956             ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
957             if (const auto *NND =
958                     dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
959               DKind = NND->getDirectiveKind();
960               if (isOpenMPWorksharingDirective(DKind) &&
961                   isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
962                 return true;
963             }
964           }
965         }
966       }
967       return false;
968     case OMPD_target_teams:
969       if (isOpenMPParallelDirective(DKind) &&
970           isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
971           hasStaticScheduling(*NestedDir))
972         return true;
973       if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
974         return true;
975       if (DKind == OMPD_parallel) {
976         Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
977             /*IgnoreCaptured=*/true);
978         if (!Body)
979           return false;
980         ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
981         if (const auto *NND =
982                 dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
983           DKind = NND->getDirectiveKind();
984           if (isOpenMPWorksharingDirective(DKind) &&
985               isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
986             return true;
987         }
988       }
989       return false;
990     case OMPD_target_parallel:
991       if (DKind == OMPD_simd)
992         return true;
993       return isOpenMPWorksharingDirective(DKind) &&
994              isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
995     case OMPD_target_teams_distribute:
996     case OMPD_target_simd:
997     case OMPD_target_parallel_for:
998     case OMPD_target_parallel_for_simd:
999     case OMPD_target_teams_distribute_simd:
1000     case OMPD_target_teams_distribute_parallel_for:
1001     case OMPD_target_teams_distribute_parallel_for_simd:
1002     case OMPD_parallel:
1003     case OMPD_for:
1004     case OMPD_parallel_for:
1005     case OMPD_parallel_sections:
1006     case OMPD_for_simd:
1007     case OMPD_parallel_for_simd:
1008     case OMPD_cancel:
1009     case OMPD_cancellation_point:
1010     case OMPD_ordered:
1011     case OMPD_threadprivate:
1012     case OMPD_allocate:
1013     case OMPD_task:
1014     case OMPD_simd:
1015     case OMPD_sections:
1016     case OMPD_section:
1017     case OMPD_single:
1018     case OMPD_master:
1019     case OMPD_critical:
1020     case OMPD_taskyield:
1021     case OMPD_barrier:
1022     case OMPD_taskwait:
1023     case OMPD_taskgroup:
1024     case OMPD_atomic:
1025     case OMPD_flush:
1026     case OMPD_teams:
1027     case OMPD_target_data:
1028     case OMPD_target_exit_data:
1029     case OMPD_target_enter_data:
1030     case OMPD_distribute:
1031     case OMPD_distribute_simd:
1032     case OMPD_distribute_parallel_for:
1033     case OMPD_distribute_parallel_for_simd:
1034     case OMPD_teams_distribute:
1035     case OMPD_teams_distribute_simd:
1036     case OMPD_teams_distribute_parallel_for:
1037     case OMPD_teams_distribute_parallel_for_simd:
1038     case OMPD_target_update:
1039     case OMPD_declare_simd:
1040     case OMPD_declare_variant:
1041     case OMPD_declare_target:
1042     case OMPD_end_declare_target:
1043     case OMPD_declare_reduction:
1044     case OMPD_declare_mapper:
1045     case OMPD_taskloop:
1046     case OMPD_taskloop_simd:
1047     case OMPD_master_taskloop:
1048     case OMPD_master_taskloop_simd:
1049     case OMPD_parallel_master_taskloop:
1050     case OMPD_requires:
1051     case OMPD_unknown:
1052       llvm_unreachable("Unexpected directive.");
1053     }
1054   }
1055 
1056   return false;
1057 }
1058 
1059 /// Checks if the construct supports the lightweight runtime. It must be an
1060 /// SPMD construct with an inner loop-based construct that uses static scheduling.
1061 static bool supportsLightweightRuntime(ASTContext &Ctx,
1062                                        const OMPExecutableDirective &D) {
1063   if (!supportsSPMDExecutionMode(Ctx, D))
1064     return false;
1065   OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
1066   switch (DirectiveKind) {
1067   case OMPD_target:
1068   case OMPD_target_teams:
1069   case OMPD_target_parallel:
1070     return hasNestedLightweightDirective(Ctx, D);
1071   case OMPD_target_parallel_for:
1072   case OMPD_target_parallel_for_simd:
1073   case OMPD_target_teams_distribute_parallel_for:
1074   case OMPD_target_teams_distribute_parallel_for_simd:
1075     // (Last|First)-privates must be shared in the parallel region.
1076     return hasStaticScheduling(D);
1077   case OMPD_target_simd:
1078   case OMPD_target_teams_distribute_simd:
1079     return true;
1080   case OMPD_target_teams_distribute:
1081     return false;
1082   case OMPD_parallel:
1083   case OMPD_for:
1084   case OMPD_parallel_for:
1085   case OMPD_parallel_sections:
1086   case OMPD_for_simd:
1087   case OMPD_parallel_for_simd:
1088   case OMPD_cancel:
1089   case OMPD_cancellation_point:
1090   case OMPD_ordered:
1091   case OMPD_threadprivate:
1092   case OMPD_allocate:
1093   case OMPD_task:
1094   case OMPD_simd:
1095   case OMPD_sections:
1096   case OMPD_section:
1097   case OMPD_single:
1098   case OMPD_master:
1099   case OMPD_critical:
1100   case OMPD_taskyield:
1101   case OMPD_barrier:
1102   case OMPD_taskwait:
1103   case OMPD_taskgroup:
1104   case OMPD_atomic:
1105   case OMPD_flush:
1106   case OMPD_teams:
1107   case OMPD_target_data:
1108   case OMPD_target_exit_data:
1109   case OMPD_target_enter_data:
1110   case OMPD_distribute:
1111   case OMPD_distribute_simd:
1112   case OMPD_distribute_parallel_for:
1113   case OMPD_distribute_parallel_for_simd:
1114   case OMPD_teams_distribute:
1115   case OMPD_teams_distribute_simd:
1116   case OMPD_teams_distribute_parallel_for:
1117   case OMPD_teams_distribute_parallel_for_simd:
1118   case OMPD_target_update:
1119   case OMPD_declare_simd:
1120   case OMPD_declare_variant:
1121   case OMPD_declare_target:
1122   case OMPD_end_declare_target:
1123   case OMPD_declare_reduction:
1124   case OMPD_declare_mapper:
1125   case OMPD_taskloop:
1126   case OMPD_taskloop_simd:
1127   case OMPD_master_taskloop:
1128   case OMPD_master_taskloop_simd:
1129   case OMPD_parallel_master_taskloop:
1130   case OMPD_requires:
1131   case OMPD_unknown:
1132     break;
1133   }
1134   llvm_unreachable(
1135       "Unknown programming model for OpenMP directive on NVPTX target.");
1136 }
1137 
1138 void CGOpenMPRuntimeNVPTX::emitNonSPMDKernel(const OMPExecutableDirective &D,
1139                                              StringRef ParentName,
1140                                              llvm::Function *&OutlinedFn,
1141                                              llvm::Constant *&OutlinedFnID,
1142                                              bool IsOffloadEntry,
1143                                              const RegionCodeGenTy &CodeGen) {
1144   ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
1145   EntryFunctionState EST;
1146   WorkerFunctionState WST(CGM, D.getBeginLoc());
1147   Work.clear();
1148   WrapperFunctionsMap.clear();
1149 
1150   // Emit target region as a standalone region.
1151   class NVPTXPrePostActionTy : public PrePostActionTy {
1152     CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
1153     CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
1154 
1155   public:
1156     NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
1157                          CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
1158         : EST(EST), WST(WST) {}
1159     void Enter(CodeGenFunction &CGF) override {
1160       auto &RT =
1161           static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
1162       RT.emitNonSPMDEntryHeader(CGF, EST, WST);
1163       // Skip target region initialization.
1164       RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1165     }
1166     void Exit(CodeGenFunction &CGF) override {
1167       auto &RT =
1168           static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
1169       RT.clearLocThreadIdInsertPt(CGF);
1170       RT.emitNonSPMDEntryFooter(CGF, EST);
1171     }
1172   } Action(EST, WST);
1173   CodeGen.setAction(Action);
1174   IsInTTDRegion = true;
1175   // Reserve a place for the globalized memory.
1176   GlobalizedRecords.emplace_back();
1177   if (!KernelStaticGlobalized) {
1178     KernelStaticGlobalized = new llvm::GlobalVariable(
1179         CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
1180         llvm::GlobalValue::InternalLinkage,
1181         llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
1182         "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
1183         llvm::GlobalValue::NotThreadLocal,
1184         CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
1185   }
1186   emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1187                                    IsOffloadEntry, CodeGen);
1188   IsInTTDRegion = false;
1189 
1190   // Now change the name of the worker function to correspond to this target
1191   // region's entry function.
1192   WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
1193 
1194   // Create the worker function
1195   emitWorkerFunction(WST);
1196 }
1197 
1198 // Set up NVPTX threads for the master-worker OpenMP scheme.
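// The emitted entry header roughly has this shape:
//   if (tid < thread_limit)      -> .worker: run the worker loop, then exit
//   else if (tid == master_tid)  -> .master: init the runtime, run the target region
//   else                         -> .exit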
1199 void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
1200                                                   EntryFunctionState &EST,
1201                                                   WorkerFunctionState &WST) {
1202   CGBuilderTy &Bld = CGF.Builder;
1203 
1204   llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
1205   llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
1206   llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
1207   EST.ExitBB = CGF.createBasicBlock(".exit");
1208 
1209   llvm::Value *IsWorker =
1210       Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
1211   Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
1212 
1213   CGF.EmitBlock(WorkerBB);
1214   emitCall(CGF, WST.Loc, WST.WorkerFn);
1215   CGF.EmitBranch(EST.ExitBB);
1216 
1217   CGF.EmitBlock(MasterCheckBB);
1218   llvm::Value *IsMaster =
1219       Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
1220   Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
1221 
1222   CGF.EmitBlock(MasterBB);
1223   IsInTargetMasterThreadRegion = true;
1224   // SEQUENTIAL (MASTER) REGION START
1225   // First action in sequential region:
1226   // Initialize the state of the OpenMP runtime library on the GPU.
1227   // TODO: Optimize runtime initialization and pass in correct value.
1228   llvm::Value *Args[] = {getThreadLimit(CGF),
1229                          Bld.getInt16(/*RequiresOMPRuntime=*/1)};
1230   CGF.EmitRuntimeCall(
1231       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
1232 
1233   // For data sharing, we need to initialize the stack.
1234   CGF.EmitRuntimeCall(
1235       createNVPTXRuntimeFunction(
1236           OMPRTL_NVPTX__kmpc_data_sharing_init_stack));
1237 
1238   emitGenericVarsProlog(CGF, WST.Loc);
1239 }
1240 
1241 void CGOpenMPRuntimeNVPTX::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
1242                                                   EntryFunctionState &EST) {
1243   IsInTargetMasterThreadRegion = false;
1244   if (!CGF.HaveInsertPoint())
1245     return;
1246 
1247   emitGenericVarsEpilog(CGF);
1248 
1249   if (!EST.ExitBB)
1250     EST.ExitBB = CGF.createBasicBlock(".exit");
1251 
1252   llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
1253   CGF.EmitBranch(TerminateBB);
1254 
1255   CGF.EmitBlock(TerminateBB);
1256   // Signal termination condition.
1257   // TODO: Optimize runtime initialization and pass in correct value.
1258   llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
1259   CGF.EmitRuntimeCall(
1260       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
1261   // Barrier to terminate worker threads.
1262   syncCTAThreads(CGF);
1263   // Master thread jumps to exit point.
1264   CGF.EmitBranch(EST.ExitBB);
1265 
1266   CGF.EmitBlock(EST.ExitBB);
1267   EST.ExitBB = nullptr;
1268 }
1269 
1270 void CGOpenMPRuntimeNVPTX::emitSPMDKernel(const OMPExecutableDirective &D,
1271                                           StringRef ParentName,
1272                                           llvm::Function *&OutlinedFn,
1273                                           llvm::Constant *&OutlinedFnID,
1274                                           bool IsOffloadEntry,
1275                                           const RegionCodeGenTy &CodeGen) {
1276   ExecutionRuntimeModesRAII ModeRAII(
1277       CurrentExecutionMode, RequiresFullRuntime,
1278       CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
1279           !supportsLightweightRuntime(CGM.getContext(), D));
1280   EntryFunctionState EST;
1281 
1282   // Emit target region as a standalone region.
1283   class NVPTXPrePostActionTy : public PrePostActionTy {
1284     CGOpenMPRuntimeNVPTX &RT;
1285     CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
1286     const OMPExecutableDirective &D;
1287 
1288   public:
1289     NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
1290                          CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
1291                          const OMPExecutableDirective &D)
1292         : RT(RT), EST(EST), D(D) {}
1293     void Enter(CodeGenFunction &CGF) override {
1294       RT.emitSPMDEntryHeader(CGF, EST, D);
1295       // Skip target region initialization.
1296       RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1297     }
1298     void Exit(CodeGenFunction &CGF) override {
1299       RT.clearLocThreadIdInsertPt(CGF);
1300       RT.emitSPMDEntryFooter(CGF, EST);
1301     }
1302   } Action(*this, EST, D);
1303   CodeGen.setAction(Action);
1304   IsInTTDRegion = true;
1305   // Reserve place for the globalized memory.
1306   GlobalizedRecords.emplace_back();
1307   if (!KernelStaticGlobalized) {
1308     KernelStaticGlobalized = new llvm::GlobalVariable(
1309         CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
1310         llvm::GlobalValue::InternalLinkage,
1311         llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
1312         "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
1313         llvm::GlobalValue::NotThreadLocal,
1314         CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
1315   }
1316   emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
1317                                    IsOffloadEntry, CodeGen);
1318   IsInTTDRegion = false;
1319 }
1320 
1321 void CGOpenMPRuntimeNVPTX::emitSPMDEntryHeader(
1322     CodeGenFunction &CGF, EntryFunctionState &EST,
1323     const OMPExecutableDirective &D) {
1324   CGBuilderTy &Bld = CGF.Builder;
1325 
1326   // Setup BBs in entry function.
1327   llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
1328   EST.ExitBB = CGF.createBasicBlock(".exit");
1329 
1330   llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
1331                          /*RequiresOMPRuntime=*/
1332                          Bld.getInt16(RequiresFullRuntime ? 1 : 0),
1333                          /*RequiresDataSharing=*/Bld.getInt16(0)};
1334   CGF.EmitRuntimeCall(
1335       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
1336 
1337   if (RequiresFullRuntime) {
1338     // For data sharing, we need to initialize the stack.
1339     CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
1340         OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd));
1341   }
1342 
1343   CGF.EmitBranch(ExecuteBB);
1344 
1345   CGF.EmitBlock(ExecuteBB);
1346 
1347   IsInTargetMasterThreadRegion = true;
1348 }
1349 
1350 void CGOpenMPRuntimeNVPTX::emitSPMDEntryFooter(CodeGenFunction &CGF,
1351                                                EntryFunctionState &EST) {
1352   IsInTargetMasterThreadRegion = false;
1353   if (!CGF.HaveInsertPoint())
1354     return;
1355 
1356   if (!EST.ExitBB)
1357     EST.ExitBB = CGF.createBasicBlock(".exit");
1358 
1359   llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
1360   CGF.EmitBranch(OMPDeInitBB);
1361 
1362   CGF.EmitBlock(OMPDeInitBB);
1363   // Deinitialize the OMP state in the runtime; called by all active threads.
1364   llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
1365                          CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
1366   CGF.EmitRuntimeCall(
1367       createNVPTXRuntimeFunction(
1368           OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2), Args);
1369   CGF.EmitBranch(EST.ExitBB);
1370 
1371   CGF.EmitBlock(EST.ExitBB);
1372   EST.ExitBB = nullptr;
1373 }
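// Taken together, emitSPMDEntryHeader and emitSPMDEntryFooter wrap the target
// region body in roughly the following skeleton (an illustrative sketch, not
// the exact emitted IR):
//
//   __kmpc_spmd_kernel_init(thread_limit, RequiresOMPRuntime, /*DataSharing=*/0);
//   [if the full runtime is required] __kmpc_data_sharing_init_stack_spmd();
//   .execute:
//     ... target region body ...
//   .omp.deinit:
//     __kmpc_spmd_kernel_deinit_v2(RequiresOMPRuntime);
//   .exit: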
1374 
1375 // Create a unique global variable to indicate the execution mode of this target
1376 // region. The execution mode is either 'generic' or 'spmd', depending on the
1377 // target directive. This variable is picked up by the offload library to set up
1378 // the device appropriately before kernel launch. If the execution mode is
1379 // 'generic', the runtime reserves one warp for the master; otherwise, all
1380 // warps participate in parallel work.
1381 static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
1382                                      bool Mode) {
1383   auto *GVMode =
1384       new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
1385                                llvm::GlobalValue::WeakAnyLinkage,
1386                                llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
1387                                Twine(Name, "_exec_mode"));
1388   CGM.addCompilerUsedGlobal(GVMode);
1389 }
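// For example (illustrative only; the mangled kernel name below is
// hypothetical), a generic-mode target region yields roughly
//
//   @__omp_offloading_<...>_foo_l1_exec_mode = weak constant i8 1
//
// while an SPMD-mode region stores 0. Marking the global as compiler-used
// keeps it from being optimized away before the offload library reads it.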
1390 
1391 void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
1392   ASTContext &Ctx = CGM.getContext();
1393 
1394   CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
1395   CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
1396                     WST.Loc, WST.Loc);
1397   emitWorkerLoop(CGF, WST);
1398   CGF.FinishFunction();
1399 }
1400 
1401 void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
1402                                           WorkerFunctionState &WST) {
1403   //
1404   // The workers enter this loop and wait for parallel work from the master.
1405   // When the master encounters a parallel region, it sets up the work function
1406   // and its variable arguments, and wakes up the workers. Each worker first
1407   // checks whether it is required for the parallel region, i.e., whether it is
1408   // within the number of requested parallel threads. The activated workers load
1409   // the variable arguments and execute the parallel work.
1410   //
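  //
  // A rough sketch of the state machine emitted below (illustrative, not the
  // exact IR):
  //
  //   .await.work:         barrier; exec_status = __kmpc_kernel_parallel(&work_fn)
  //                        if (work_fn == null) goto .exit
  //   .select.workers:     if (!exec_status) goto .barrier.parallel
  //   .execute.parallel:   match work_fn against the known outlined wrappers and
  //                        call the match (or call through the pointer for an
  //                        orphaned parallel directive)
  //   .terminate.parallel: __kmpc_kernel_end_parallel()
  //   .barrier.parallel:   barrier; goto .await.work
  //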
1411 
1412   CGBuilderTy &Bld = CGF.Builder;
1413 
1414   llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
1415   llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
1416   llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
1417   llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
1418   llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
1419   llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
1420 
1421   CGF.EmitBranch(AwaitBB);
1422 
1423   // Workers wait for work from the master.
1424   CGF.EmitBlock(AwaitBB);
1425   // Wait for parallel work.
1426   syncCTAThreads(CGF);
1427 
1428   Address WorkFn =
1429       CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
1430   Address ExecStatus =
1431       CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
1432   CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
1433   CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
1434 
1435   // TODO: Optimize runtime initialization and pass in the correct value.
1436   llvm::Value *Args[] = {WorkFn.getPointer(),
1437                          /*RequiresOMPRuntime=*/Bld.getInt16(1)};
1438   llvm::Value *Ret = CGF.EmitRuntimeCall(
1439       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
1440   Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
1441 
1442   // On the termination condition (work function pointer is null), exit the loop.
1443   llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
1444   llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
1445   Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
1446 
1447   // Activate requested workers.
1448   CGF.EmitBlock(SelectWorkersBB);
1449   llvm::Value *IsActive =
1450       Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
1451   Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
1452 
1453   // Signal start of parallel region.
1454   CGF.EmitBlock(ExecuteBB);
1455   // Skip initialization.
1456   setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
1457 
1458   // Process work items: outlined parallel functions.
1459   for (llvm::Function *W : Work) {
1460     // Try to match this outlined function.
1461     llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
1462 
1463     llvm::Value *WorkFnMatch =
1464         Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
1465 
1466     llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
1467     llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
1468     Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
1469 
1470     // Execute this outlined function.
1471     CGF.EmitBlock(ExecuteFNBB);
1472 
1473     // Insert a call to the work function via the shared wrapper. The shared
1474     // wrapper takes two arguments:
1475     //   - the parallelism level;
1476     //   - the thread ID.
1477     emitCall(CGF, WST.Loc, W,
1478              {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1479 
1480     // Go to end of parallel region.
1481     CGF.EmitBranch(TerminateBB);
1482 
1483     CGF.EmitBlock(CheckNextBB);
1484   }
1485   // Default case: call the outlined function through a pointer if the target
1486   // region makes a declare target call that may contain an orphaned parallel
1487   // directive.
1488   auto *ParallelFnTy =
1489       llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
1490                               /*isVarArg=*/false);
1491   llvm::Value *WorkFnCast =
1492       Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
1493   // Insert a call to the work function via the shared wrapper. The shared
1494   // wrapper takes two arguments:
1495   //   - the parallelism level;
1496   //   - the thread ID.
1497   emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
1498            {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
1499   // Go to end of parallel region.
1500   CGF.EmitBranch(TerminateBB);
1501 
1502   // Signal end of parallel region.
1503   CGF.EmitBlock(TerminateBB);
1504   CGF.EmitRuntimeCall(
1505       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
1506       llvm::None);
1507   CGF.EmitBranch(BarrierBB);
1508 
1509   // All active and inactive workers wait at a barrier after the parallel region.
1510   CGF.EmitBlock(BarrierBB);
1511   // Barrier after parallel region.
1512   syncCTAThreads(CGF);
1513   CGF.EmitBranch(AwaitBB);
1514 
1515   // Exit target region.
1516   CGF.EmitBlock(ExitBB);
1517   // Skip initialization.
1518   clearLocThreadIdInsertPt(CGF);
1519 }
1520 
1521 /// Returns the specified OpenMP runtime function for the current OpenMP
1522 /// implementation, specialized for the NVPTX device.
1523 /// \param Function OpenMP runtime function.
1524 /// \return Specified function.
1525 llvm::FunctionCallee
1526 CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
1527   llvm::FunctionCallee RTLFn = nullptr;
1528   switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
1529   case OMPRTL_NVPTX__kmpc_kernel_init: {
1530     // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
1531     // RequiresOMPRuntime);
1532     llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
1533     auto *FnTy =
1534         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1535     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
1536     break;
1537   }
1538   case OMPRTL_NVPTX__kmpc_kernel_deinit: {
1539     // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
1540     llvm::Type *TypeParams[] = {CGM.Int16Ty};
1541     auto *FnTy =
1542         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1543     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
1544     break;
1545   }
1546   case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
1547     // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
1548     // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
1549     llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
1550     auto *FnTy =
1551         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1552     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
1553     break;
1554   }
1555   case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit_v2: {
1556     // Build void __kmpc_spmd_kernel_deinit_v2(int16_t RequiresOMPRuntime);
1557     llvm::Type *TypeParams[] = {CGM.Int16Ty};
1558     auto *FnTy =
1559         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1560     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit_v2");
1561     break;
1562   }
1563   case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
1564     /// Build void __kmpc_kernel_prepare_parallel(
1565     /// void *outlined_function, int16_t IsOMPRuntimeInitialized);
1566     llvm::Type *TypeParams[] = {CGM.Int8PtrTy, CGM.Int16Ty};
1567     auto *FnTy =
1568         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1569     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
1570     break;
1571   }
1572   case OMPRTL_NVPTX__kmpc_kernel_parallel: {
1573     /// Build bool __kmpc_kernel_parallel(void **outlined_function,
1574     /// int16_t IsOMPRuntimeInitialized);
1575     llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy, CGM.Int16Ty};
1576     llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
1577     auto *FnTy =
1578         llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
1579     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
1580     break;
1581   }
1582   case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
1583     /// Build void __kmpc_kernel_end_parallel();
1584     auto *FnTy =
1585         llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1586     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
1587     break;
1588   }
1589   case OMPRTL_NVPTX__kmpc_serialized_parallel: {
1590     // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
1591     // global_tid);
1592     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1593     auto *FnTy =
1594         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1595     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
1596     break;
1597   }
1598   case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
1599     // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
1600     // global_tid);
1601     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1602     auto *FnTy =
1603         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1604     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
1605     break;
1606   }
1607   case OMPRTL_NVPTX__kmpc_shuffle_int32: {
1608     // Build int32_t __kmpc_shuffle_int32(int32_t element,
1609     // int16_t lane_offset, int16_t warp_size);
1610     llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
1611     auto *FnTy =
1612         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
1613     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
1614     break;
1615   }
1616   case OMPRTL_NVPTX__kmpc_shuffle_int64: {
1617     // Build int64_t __kmpc_shuffle_int64(int64_t element,
1618     // int16_t lane_offset, int16_t warp_size);
1619     llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
1620     auto *FnTy =
1621         llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
1622     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
1623     break;
1624   }
1625   case OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2: {
1626     // Build int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(ident_t *loc,
1627     // kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size, void*
1628     // reduce_data, void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t
1629     // lane_id, int16_t lane_offset, int16_t shortCircuit), void
1630     // (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
1631     llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
1632                                              CGM.Int16Ty, CGM.Int16Ty};
1633     auto *ShuffleReduceFnTy =
1634         llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
1635                                 /*isVarArg=*/false);
1636     llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
1637     auto *InterWarpCopyFnTy =
1638         llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
1639                                 /*isVarArg=*/false);
1640     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1641                                 CGM.Int32Ty,
1642                                 CGM.Int32Ty,
1643                                 CGM.SizeTy,
1644                                 CGM.VoidPtrTy,
1645                                 ShuffleReduceFnTy->getPointerTo(),
1646                                 InterWarpCopyFnTy->getPointerTo()};
1647     auto *FnTy =
1648         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1649     RTLFn = CGM.CreateRuntimeFunction(
1650         FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait_v2");
1651     break;
1652   }
1653   case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
1654     // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
1655     llvm::Type *TypeParams[] = {CGM.Int32Ty};
1656     auto *FnTy =
1657         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1658     RTLFn = CGM.CreateRuntimeFunction(
1659         FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
1660     break;
1661   }
1662   case OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2: {
1663     // Build int32_t __kmpc_nvptx_teams_reduce_nowait_v2(ident_t *loc, kmp_int32
1664     // global_tid, void *global_buffer, int32_t num_of_records, void*
1665     // reduce_data,
1666     // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
1667     // lane_offset, int16_t shortCircuit),
1668     // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num), void
1669     // (*kmp_ListToGlobalCpyFctPtr)(void *buffer, int idx, void *reduce_data),
1670     // void (*kmp_GlobalToListCpyFctPtr)(void *buffer, int idx,
1671     // void *reduce_data), void (*kmp_GlobalToListCpyPtrsFctPtr)(void *buffer,
1672     // int idx, void *reduce_data), void (*kmp_GlobalToListRedFctPtr)(void
1673     // *buffer, int idx, void *reduce_data));
1674     llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
1675                                              CGM.Int16Ty, CGM.Int16Ty};
1676     auto *ShuffleReduceFnTy =
1677         llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
1678                                 /*isVarArg=*/false);
1679     llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
1680     auto *InterWarpCopyFnTy =
1681         llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
1682                                 /*isVarArg=*/false);
1683     llvm::Type *GlobalListTypeParams[] = {CGM.VoidPtrTy, CGM.IntTy,
1684                                           CGM.VoidPtrTy};
1685     auto *GlobalListFnTy =
1686         llvm::FunctionType::get(CGM.VoidTy, GlobalListTypeParams,
1687                                 /*isVarArg=*/false);
1688     llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
1689                                 CGM.Int32Ty,
1690                                 CGM.VoidPtrTy,
1691                                 CGM.Int32Ty,
1692                                 CGM.VoidPtrTy,
1693                                 ShuffleReduceFnTy->getPointerTo(),
1694                                 InterWarpCopyFnTy->getPointerTo(),
1695                                 GlobalListFnTy->getPointerTo(),
1696                                 GlobalListFnTy->getPointerTo(),
1697                                 GlobalListFnTy->getPointerTo(),
1698                                 GlobalListFnTy->getPointerTo()};
1699     auto *FnTy =
1700         llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
1701     RTLFn = CGM.CreateRuntimeFunction(
1702         FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait_v2");
1703     break;
1704   }
1705   case OMPRTL_NVPTX__kmpc_data_sharing_init_stack: {
1706     /// Build void __kmpc_data_sharing_init_stack();
1707     auto *FnTy =
1708         llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1709     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack");
1710     break;
1711   }
1712   case OMPRTL_NVPTX__kmpc_data_sharing_init_stack_spmd: {
1713     /// Build void __kmpc_data_sharing_init_stack_spmd();
1714     auto *FnTy =
1715         llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1716     RTLFn =
1717         CGM.CreateRuntimeFunction(FnTy, "__kmpc_data_sharing_init_stack_spmd");
1718     break;
1719   }
1720   case OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack: {
1721     // Build void *__kmpc_data_sharing_coalesced_push_stack(size_t size,
1722     // int16_t UseSharedMemory);
1723     llvm::Type *TypeParams[] = {CGM.SizeTy, CGM.Int16Ty};
1724     auto *FnTy =
1725         llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
1726     RTLFn = CGM.CreateRuntimeFunction(
1727         FnTy, /*Name=*/"__kmpc_data_sharing_coalesced_push_stack");
1728     break;
1729   }
1730   case OMPRTL_NVPTX__kmpc_data_sharing_pop_stack: {
1731     // Build void __kmpc_data_sharing_pop_stack(void *a);
1732     llvm::Type *TypeParams[] = {CGM.VoidPtrTy};
1733     auto *FnTy =
1734         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1735     RTLFn = CGM.CreateRuntimeFunction(FnTy,
1736                                       /*Name=*/"__kmpc_data_sharing_pop_stack");
1737     break;
1738   }
1739   case OMPRTL_NVPTX__kmpc_begin_sharing_variables: {
1740     /// Build void __kmpc_begin_sharing_variables(void ***args,
1741     /// size_t n_args);
1742     llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo(), CGM.SizeTy};
1743     auto *FnTy =
1744         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1745     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_begin_sharing_variables");
1746     break;
1747   }
1748   case OMPRTL_NVPTX__kmpc_end_sharing_variables: {
1749     /// Build void __kmpc_end_sharing_variables();
1750     auto *FnTy =
1751         llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
1752     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_sharing_variables");
1753     break;
1754   }
1755   case OMPRTL_NVPTX__kmpc_get_shared_variables: {
1756     /// Build void __kmpc_get_shared_variables(void ***GlobalArgs);
1757     llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy->getPointerTo()};
1758     auto *FnTy =
1759         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1760     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_shared_variables");
1761     break;
1762   }
1763   case OMPRTL_NVPTX__kmpc_parallel_level: {
1764     // Build uint16_t __kmpc_parallel_level(ident_t *loc, kmp_int32 global_tid);
1765     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1766     auto *FnTy =
1767         llvm::FunctionType::get(CGM.Int16Ty, TypeParams, /*isVarArg*/ false);
1768     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_parallel_level");
1769     break;
1770   }
1771   case OMPRTL_NVPTX__kmpc_is_spmd_exec_mode: {
1772     // Build int8_t __kmpc_is_spmd_exec_mode();
1773     auto *FnTy = llvm::FunctionType::get(CGM.Int8Ty, /*isVarArg=*/false);
1774     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_is_spmd_exec_mode");
1775     break;
1776   }
1777   case OMPRTL_NVPTX__kmpc_get_team_static_memory: {
1778     // Build void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
1779     // const void *buf, size_t size, int16_t is_shared, const void **res);
1780     llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.VoidPtrTy, CGM.SizeTy,
1781                                 CGM.Int16Ty, CGM.VoidPtrPtrTy};
1782     auto *FnTy =
1783         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1784     RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_get_team_static_memory");
1785     break;
1786   }
1787   case OMPRTL_NVPTX__kmpc_restore_team_static_memory: {
1788     // Build void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
1789     // int16_t is_shared);
1790     llvm::Type *TypeParams[] = {CGM.Int16Ty, CGM.Int16Ty};
1791     auto *FnTy =
1792         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
1793     RTLFn =
1794         CGM.CreateRuntimeFunction(FnTy, "__kmpc_restore_team_static_memory");
1795     break;
1796   }
1797   case OMPRTL__kmpc_barrier: {
1798     // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
1799     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1800     auto *FnTy =
1801         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1802     RTLFn =
1803         CGM.CreateConvergentRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
1804     break;
1805   }
1806   case OMPRTL__kmpc_barrier_simple_spmd: {
1807     // Build void __kmpc_barrier_simple_spmd(ident_t *loc, kmp_int32
1808     // global_tid);
1809     llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
1810     auto *FnTy =
1811         llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
1812     RTLFn = CGM.CreateConvergentRuntimeFunction(
1813         FnTy, /*Name*/ "__kmpc_barrier_simple_spmd");
1814     break;
1815   }
1816   case OMPRTL_NVPTX__kmpc_warp_active_thread_mask: {
1817     // Build int32_t __kmpc_warp_active_thread_mask(void);
1818     auto *FnTy =
1819         llvm::FunctionType::get(CGM.Int32Ty, llvm::None, /*isVarArg=*/false);
1820     RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_warp_active_thread_mask");
1821     break;
1822   }
1823   case OMPRTL_NVPTX__kmpc_syncwarp: {
1824     // Build void __kmpc_syncwarp(kmp_int32 Mask);
1825     auto *FnTy =
1826         llvm::FunctionType::get(CGM.VoidTy, CGM.Int32Ty, /*isVarArg=*/false);
1827     RTLFn = CGM.CreateConvergentRuntimeFunction(FnTy, "__kmpc_syncwarp");
1828     break;
1829   }
1830   }
1831   return RTLFn;
1832 }
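// Typical usage pattern elsewhere in this file (illustrative):
//
//   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
//   CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);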
1833 
1834 void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
1835                                               llvm::Constant *Addr,
1836                                               uint64_t Size, int32_t,
1837                                               llvm::GlobalValue::LinkageTypes) {
1838   // TODO: Add support for global variables on the device after declare target
1839   // support.
1840   if (!isa<llvm::Function>(Addr))
1841     return;
1842   llvm::Module &M = CGM.getModule();
1843   llvm::LLVMContext &Ctx = CGM.getLLVMContext();
1844 
1845   // Get "nvvm.annotations" metadata node
1846   llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
1847 
1848   llvm::Metadata *MDVals[] = {
1849       llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
1850       llvm::ConstantAsMetadata::get(
1851           llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
1852   // Append metadata to nvvm.annotations
1853   MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
1854 }
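// The resulting module-level metadata looks roughly like this (illustrative;
// the kernel symbol name is hypothetical):
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @__omp_offloading_<...>_foo_l1, !"kernel", i32 1}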
1855 
1856 void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
1857     const OMPExecutableDirective &D, StringRef ParentName,
1858     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
1859     bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
1860   if (!IsOffloadEntry) // Nothing to do.
1861     return;
1862 
1863   assert(!ParentName.empty() && "Invalid target region parent name!");
1864 
1865   bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
1866   if (Mode)
1867     emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1868                    CodeGen);
1869   else
1870     emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
1871                       CodeGen);
1872 
1873   setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
1874 }
1875 
1876 namespace {
1877 LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
1878 /// Enum for accessing the reserved_2 field of the ident_t struct.
1879 enum ModeFlagsTy : unsigned {
1880   /// Bit set to 1 when in SPMD mode.
1881   KMP_IDENT_SPMD_MODE = 0x01,
1882   /// Bit set to 1 when a simplified runtime is used.
1883   KMP_IDENT_SIMPLE_RT_MODE = 0x02,
1884   LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
1885 };
1886 
1887 /// Special Undefined mode: non-SPMD mode combined with the simple runtime.
1888 static const ModeFlagsTy UndefinedMode =
1889     (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
1890 } // anonymous namespace
1891 
1892 unsigned CGOpenMPRuntimeNVPTX::getDefaultLocationReserved2Flags() const {
1893   switch (getExecutionMode()) {
1894   case EM_SPMD:
1895     if (requiresFullRuntime())
1896       return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
1897     return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
1898   case EM_NonSPMD:
1899     assert(requiresFullRuntime() && "Expected full runtime.");
1900     return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
1901   case EM_Unknown:
1902     return UndefinedMode;
1903   }
1904   llvm_unreachable("Unknown flags are requested.");
1905 }
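// For reference, the values produced by the switch above are:
//   SPMD + full runtime     -> KMP_IDENT_SPMD_MODE                            (0x01)
//   SPMD + simple runtime   -> KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE (0x03)
//   non-SPMD (full runtime) -> 0                                              (0x00)
//   unknown                 -> UndefinedMode                                  (0x02)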
1906 
1907 bool CGOpenMPRuntimeNVPTX::tryEmitDeclareVariant(const GlobalDecl &NewGD,
1908                                                  const GlobalDecl &OldGD,
1909                                                  llvm::GlobalValue *OrigAddr,
1910                                                  bool IsForDefinition) {
1911   // Emit the function in OldGD with the body from NewGD, if NewGD is defined.
1912   auto *NewFD = cast<FunctionDecl>(NewGD.getDecl());
1913   if (NewFD->isDefined()) {
1914     CGM.emitOpenMPDeviceFunctionRedefinition(OldGD, NewGD, OrigAddr);
1915     return true;
1916   }
1917   return false;
1918 }
1919 
1920 CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
1921     : CGOpenMPRuntime(CGM, "_", "$") {
1922   if (!CGM.getLangOpts().OpenMPIsDevice)
1923     llvm_unreachable("OpenMP NVPTX can only handle device code.");
1924 }
1925 
1926 void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
1927                                               OpenMPProcBindClauseKind ProcBind,
1928                                               SourceLocation Loc) {
1929   // Do nothing in SPMD mode for the level-0 parallel region.
1930   if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
1931     return;
1932 
1933   CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
1934 }
1935 
1936 void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
1937                                                 llvm::Value *NumThreads,
1938                                                 SourceLocation Loc) {
1939   // Do nothing in SPMD mode for the level-0 parallel region.
1940   if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
1941     return;
1942 
1943   CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
1944 }
1945 
1946 void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
1947                                               const Expr *NumTeams,
1948                                               const Expr *ThreadLimit,
1949                                               SourceLocation Loc) {}
1950 
1951 llvm::Function *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
1952     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
1953     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
1954   // Emit target region as a standalone region.
1955   class NVPTXPrePostActionTy : public PrePostActionTy {
1956     bool &IsInParallelRegion;
1957     bool PrevIsInParallelRegion;
1958 
1959   public:
1960     NVPTXPrePostActionTy(bool &IsInParallelRegion)
1961         : IsInParallelRegion(IsInParallelRegion) {}
1962     void Enter(CodeGenFunction &CGF) override {
1963       PrevIsInParallelRegion = IsInParallelRegion;
1964       IsInParallelRegion = true;
1965     }
1966     void Exit(CodeGenFunction &CGF) override {
1967       IsInParallelRegion = PrevIsInParallelRegion;
1968     }
1969   } Action(IsInParallelRegion);
1970   CodeGen.setAction(Action);
1971   bool PrevIsInTTDRegion = IsInTTDRegion;
1972   IsInTTDRegion = false;
1973   bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
1974   IsInTargetMasterThreadRegion = false;
1975   auto *OutlinedFun =
1976       cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
1977           D, ThreadIDVar, InnermostKind, CodeGen));
1978   if (CGM.getLangOpts().Optimize) {
1979     OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
1980     OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
1981     OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
1982   }
1983   IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
1984   IsInTTDRegion = PrevIsInTTDRegion;
1985   if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD &&
1986       !IsInParallelRegion) {
1987     llvm::Function *WrapperFun =
1988         createParallelDataSharingWrapper(OutlinedFun, D);
1989     WrapperFunctionsMap[OutlinedFun] = WrapperFun;
1990   }
1991 
1992   return OutlinedFun;
1993 }
1994 
1995 /// Get list of lastprivate variables from the teams distribute ... or
1996 /// teams {distribute ...} directives.
1997 static void
1998 getDistributeLastprivateVars(ASTContext &Ctx, const OMPExecutableDirective &D,
1999                              llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
2000   assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
2001          "expected teams directive.");
2002   const OMPExecutableDirective *Dir = &D;
2003   if (!isOpenMPDistributeDirective(D.getDirectiveKind())) {
2004     if (const Stmt *S = CGOpenMPRuntime::getSingleCompoundChild(
2005             Ctx,
2006             D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(
2007                 /*IgnoreCaptured=*/true))) {
2008       Dir = dyn_cast_or_null<OMPExecutableDirective>(S);
2009       if (Dir && !isOpenMPDistributeDirective(Dir->getDirectiveKind()))
2010         Dir = nullptr;
2011     }
2012   }
2013   if (!Dir)
2014     return;
2015   for (const auto *C : Dir->getClausesOfKind<OMPLastprivateClause>()) {
2016     for (const Expr *E : C->getVarRefs())
2017       Vars.push_back(getPrivateItem(E));
2018   }
2019 }
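// Illustrative example (hypothetical source): both the combined form
//
//   #pragma omp target teams distribute lastprivate(x)
//   for (int i = 0; i < n; ++i) { ... }
//
// and the nested form
//
//   #pragma omp target teams
//   {
//     #pragma omp distribute lastprivate(x)
//     for (int i = 0; i < n; ++i) { ... }
//   }
//
// collect 'x' into Vars; if the innermost captured statement is not a
// distribute directive, nothing is collected.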
2020 
2021 /// Get list of reduction variables from the teams ... directives.
2022 static void
2023 getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
2024                       llvm::SmallVectorImpl<const ValueDecl *> &Vars) {
2025   assert(isOpenMPTeamsDirective(D.getDirectiveKind()) &&
2026          "expected teams directive.");
2027   for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
2028     for (const Expr *E : C->privates())
2029       Vars.push_back(getPrivateItem(E));
2030   }
2031 }
2032 
2033 llvm::Function *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
2034     const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
2035     OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
2036   SourceLocation Loc = D.getBeginLoc();
2037 
2038   const RecordDecl *GlobalizedRD = nullptr;
2039   llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
2040   llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
2041   // Globalize teams reduction variables unconditionally in all modes.
2042   if (getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
2043     getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
2044   if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
2045     getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
2046     if (!LastPrivatesReductions.empty()) {
2047       GlobalizedRD = ::buildRecordForGlobalizedVars(
2048           CGM.getContext(), llvm::None, LastPrivatesReductions,
2049           MappedDeclsFields, WarpSize);
2050     }
2051   } else if (!LastPrivatesReductions.empty()) {
2052     assert(!TeamAndReductions.first &&
2053            "Previous team declaration is not expected.");
2054     TeamAndReductions.first = D.getCapturedStmt(OMPD_teams)->getCapturedDecl();
2055     std::swap(TeamAndReductions.second, LastPrivatesReductions);
2056   }
2057 
2058   // Emit target region as a standalone region.
2059   class NVPTXPrePostActionTy : public PrePostActionTy {
2060     SourceLocation &Loc;
2061     const RecordDecl *GlobalizedRD;
2062     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2063         &MappedDeclsFields;
2064 
2065   public:
2066     NVPTXPrePostActionTy(
2067         SourceLocation &Loc, const RecordDecl *GlobalizedRD,
2068         llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
2069             &MappedDeclsFields)
2070         : Loc(Loc), GlobalizedRD(GlobalizedRD),
2071           MappedDeclsFields(MappedDeclsFields) {}
2072     void Enter(CodeGenFunction &CGF) override {
2073       auto &Rt =
2074           static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime());
2075       if (GlobalizedRD) {
2076         auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
2077         I->getSecond().GlobalRecord = GlobalizedRD;
2078         I->getSecond().MappedParams =
2079             std::make_unique<CodeGenFunction::OMPMapVars>();
2080         DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
2081         for (const auto &Pair : MappedDeclsFields) {
2082           assert(Pair.getFirst()->isCanonicalDecl() &&
2083                  "Expected canonical declaration");
2084           Data.insert(std::make_pair(Pair.getFirst(),
2085                                      MappedVarData(Pair.getSecond(),
2086                                                    /*IsOnePerTeam=*/true)));
2087         }
2088       }
2089       Rt.emitGenericVarsProlog(CGF, Loc);
2090     }
2091     void Exit(CodeGenFunction &CGF) override {
2092       static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
2093           .emitGenericVarsEpilog(CGF);
2094     }
2095   } Action(Loc, GlobalizedRD, MappedDeclsFields);
2096   CodeGen.setAction(Action);
2097   llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
2098       D, ThreadIDVar, InnermostKind, CodeGen);
2099   if (CGM.getLangOpts().Optimize) {
2100     OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
2101     OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
2102     OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
2103   }
2104 
2105   return OutlinedFun;
2106 }
2107 
2108 void CGOpenMPRuntimeNVPTX::emitGenericVarsProlog(CodeGenFunction &CGF,
2109                                                  SourceLocation Loc,
2110                                                  bool WithSPMDCheck) {
2111   if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
2112       getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
2113     return;
2114 
2115   CGBuilderTy &Bld = CGF.Builder;
2116 
2117   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2118   if (I == FunctionGlobalizedDecls.end())
2119     return;
2120   if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
2121     QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
2122     QualType SecGlobalRecTy;
2123 
2124     // Recover pointer to this function's global record. The runtime will
2125     // handle the specifics of the allocation of the memory.
2126     // Use actual memory size of the record including the padding
2127     // for alignment purposes.
2128     unsigned Alignment =
2129         CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
2130     unsigned GlobalRecordSize =
2131         CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
2132     GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
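    // E.g. (illustrative), a 12-byte record with 8-byte alignment is padded to
    // 16 bytes, so that consecutive allocations on the data-sharing stack stay
    // aligned.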
2133 
2134     llvm::PointerType *GlobalRecPtrTy =
2135         CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
2136     llvm::Value *GlobalRecCastAddr;
2137     llvm::Value *IsTTD = nullptr;
2138     if (!IsInTTDRegion &&
2139         (WithSPMDCheck ||
2140          getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
2141       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2142       llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
2143       llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2144       if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
2145         llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2146         llvm::Value *ThreadID = getThreadID(CGF, Loc);
2147         llvm::Value *PL = CGF.EmitRuntimeCall(
2148             createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
2149             {RTLoc, ThreadID});
2150         IsTTD = Bld.CreateIsNull(PL);
2151       }
2152       llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
2153           createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
2154       Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
2155       // There is no need to emit line number for unconditional branch.
2156       (void)ApplyDebugLocation::CreateEmpty(CGF);
2157       CGF.EmitBlock(SPMDBB);
2158       Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
2159                                CharUnits::fromQuantity(Alignment));
2160       CGF.EmitBranch(ExitBB);
2161       // There is no need to emit line number for unconditional branch.
2162       (void)ApplyDebugLocation::CreateEmpty(CGF);
2163       CGF.EmitBlock(NonSPMDBB);
2164       llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
2165       if (const RecordDecl *SecGlobalizedVarsRecord =
2166               I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
2167         SecGlobalRecTy =
2168             CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
2169 
2170         // Recover pointer to this function's global record. The runtime will
2171         // handle the specifics of the allocation of the memory.
2172         // Use actual memory size of the record including the padding
2173         // for alignment purposes.
2174         unsigned Alignment =
2175             CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
2176         unsigned GlobalRecordSize =
2177             CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
2178         GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
2179         Size = Bld.CreateSelect(
2180             IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
2181       }
2182       // TODO: allow the usage of shared memory to be controlled by
2183       // the user; for now, default to global memory.
2184       llvm::Value *GlobalRecordSizeArg[] = {
2185           Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
2186       llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2187           createNVPTXRuntimeFunction(
2188               OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2189           GlobalRecordSizeArg);
2190       GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2191           GlobalRecValue, GlobalRecPtrTy);
2192       CGF.EmitBlock(ExitBB);
2193       auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
2194                                 /*NumReservedValues=*/2, "_select_stack");
2195       Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
2196       Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
2197       GlobalRecCastAddr = Phi;
2198       I->getSecond().GlobalRecordAddr = Phi;
2199       I->getSecond().IsInSPMDModeFlag = IsSPMD;
2200     } else if (IsInTTDRegion) {
2201       assert(GlobalizedRecords.back().Records.size() < 2 &&
2202              "Expected less than 2 globalized records: one for target and one "
2203              "for teams.");
2204       unsigned Offset = 0;
2205       for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
2206         QualType RDTy = CGM.getContext().getRecordType(RD);
2207         unsigned Alignment =
2208             CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
2209         unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
2210         Offset =
2211             llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
2212       }
2213       unsigned Alignment =
2214           CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
2215       Offset = llvm::alignTo(Offset, Alignment);
2216       GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
2217       ++GlobalizedRecords.back().RegionCounter;
2218       if (GlobalizedRecords.back().Records.size() == 1) {
2219         assert(KernelStaticGlobalized &&
2220                "Kernel static pointer must be initialized already.");
2221         auto *UseSharedMemory = new llvm::GlobalVariable(
2222             CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
2223             llvm::GlobalValue::InternalLinkage, nullptr,
2224             "_openmp_static_kernel$is_shared");
2225         UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2226         QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2227             /*DestWidth=*/16, /*Signed=*/0);
2228         llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2229             Address(UseSharedMemory,
2230                     CGM.getContext().getTypeAlignInChars(Int16Ty)),
2231             /*Volatile=*/false, Int16Ty, Loc);
2232         auto *StaticGlobalized = new llvm::GlobalVariable(
2233             CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
2234             llvm::GlobalValue::CommonLinkage, nullptr);
2235         auto *RecSize = new llvm::GlobalVariable(
2236             CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
2237             llvm::GlobalValue::InternalLinkage, nullptr,
2238             "_openmp_static_kernel$size");
2239         RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2240         llvm::Value *Ld = CGF.EmitLoadOfScalar(
2241             Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
2242             CGM.getContext().getSizeType(), Loc);
2243         llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2244             KernelStaticGlobalized, CGM.VoidPtrPtrTy);
2245         llvm::Value *GlobalRecordSizeArg[] = {
2246             llvm::ConstantInt::get(
2247                 CGM.Int16Ty,
2248                 getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
2249             StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
2250         CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2251                                 OMPRTL_NVPTX__kmpc_get_team_static_memory),
2252                             GlobalRecordSizeArg);
2253         GlobalizedRecords.back().Buffer = StaticGlobalized;
2254         GlobalizedRecords.back().RecSize = RecSize;
2255         GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
2256         GlobalizedRecords.back().Loc = Loc;
2257       }
2258       assert(KernelStaticGlobalized && "Global address must be set already.");
2259       Address FrameAddr = CGF.EmitLoadOfPointer(
2260           Address(KernelStaticGlobalized, CGM.getPointerAlign()),
2261           CGM.getContext()
2262               .getPointerType(CGM.getContext().VoidPtrTy)
2263               .castAs<PointerType>());
2264       llvm::Value *GlobalRecValue =
2265           Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
2266       I->getSecond().GlobalRecordAddr = GlobalRecValue;
2267       I->getSecond().IsInSPMDModeFlag = nullptr;
2268       GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2269           GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
2270     } else {
2271       // TODO: allow the usage of shared memory to be controlled by
2272       // the user; for now, default to global memory.
2273       llvm::Value *GlobalRecordSizeArg[] = {
2274           llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
2275           CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
2276       llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2277           createNVPTXRuntimeFunction(
2278               OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2279           GlobalRecordSizeArg);
2280       GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2281           GlobalRecValue, GlobalRecPtrTy);
2282       I->getSecond().GlobalRecordAddr = GlobalRecValue;
2283       I->getSecond().IsInSPMDModeFlag = nullptr;
2284     }
2285     LValue Base =
2286         CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
2287 
2288     // Emit the "global alloca" which is a GEP from the global declaration
2289     // record using the pointer returned by the runtime.
2290     LValue SecBase;
2291     decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
2292     if (IsTTD) {
2293       SecIt = I->getSecond().SecondaryLocalVarData->begin();
2294       llvm::PointerType *SecGlobalRecPtrTy =
2295           CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
2296       SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
2297           Bld.CreatePointerBitCastOrAddrSpaceCast(
2298               I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
2299           SecGlobalRecTy);
2300     }
2301     for (auto &Rec : I->getSecond().LocalVarData) {
2302       bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
2303       llvm::Value *ParValue;
2304       if (EscapedParam) {
2305         const auto *VD = cast<VarDecl>(Rec.first);
2306         LValue ParLVal =
2307             CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
2308         ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
2309       }
2310       LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
2311       // Emit VarAddr based on the lane ID if required.
2312       QualType VarTy;
2313       if (Rec.second.IsOnePerTeam) {
2314         VarTy = Rec.second.FD->getType();
2315       } else {
2316         llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
2317             VarAddr.getAddress().getPointer(),
2318             {Bld.getInt32(0), getNVPTXLaneID(CGF)});
2319         VarTy =
2320             Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
2321         VarAddr = CGF.MakeAddrLValue(
2322             Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
2323             AlignmentSource::Decl);
2324       }
2325       Rec.second.PrivateAddr = VarAddr.getAddress();
2326       if (!IsInTTDRegion &&
2327           (WithSPMDCheck ||
2328            getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
2329         assert(I->getSecond().IsInSPMDModeFlag &&
2330                "Expected unknown execution mode or required SPMD check.");
2331         if (IsTTD) {
2332           assert(SecIt->second.IsOnePerTeam &&
2333                  "Secondary glob data must be one per team.");
2334           LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
2335           VarAddr.setAddress(
2336               Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(),
2337                                        VarAddr.getPointer()),
2338                       VarAddr.getAlignment()));
2339           Rec.second.PrivateAddr = VarAddr.getAddress();
2340         }
2341         Address GlobalPtr = Rec.second.PrivateAddr;
2342         Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
2343         Rec.second.PrivateAddr = Address(
2344             Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
2345                              LocalAddr.getPointer(), GlobalPtr.getPointer()),
2346             LocalAddr.getAlignment());
2347       }
2348       if (EscapedParam) {
2349         const auto *VD = cast<VarDecl>(Rec.first);
2350         CGF.EmitStoreOfScalar(ParValue, VarAddr);
2351         I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
2352       }
2353       if (IsTTD)
2354         ++SecIt;
2355     }
2356   }
2357   for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
2358     // Recover pointer to this function's global record. The runtime will
2359     // handle the specifics of the allocation of the memory.
2360     // Use actual memory size of the record including the padding
2361     // for alignment purposes.
2362     CGBuilderTy &Bld = CGF.Builder;
2363     llvm::Value *Size = CGF.getTypeSize(VD->getType());
2364     CharUnits Align = CGM.getContext().getDeclAlign(VD);
2365     Size = Bld.CreateNUWAdd(
2366         Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
2367     llvm::Value *AlignVal =
2368         llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
2369     Size = Bld.CreateUDiv(Size, AlignVal);
2370     Size = Bld.CreateNUWMul(Size, AlignVal);
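    // The three statements above compute
    //   Size = ((Size + Align - 1) / Align) * Align,
    // i.e. the size rounded up to the declaration's alignment; e.g. Size = 10
    // with Align = 8 yields 16.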
2371     // TODO: allow the usage of shared memory to be controlled by
2372     // the user; for now, default to global memory.
2373     llvm::Value *GlobalRecordSizeArg[] = {
2374         Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
2375     llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
2376         createNVPTXRuntimeFunction(
2377             OMPRTL_NVPTX__kmpc_data_sharing_coalesced_push_stack),
2378         GlobalRecordSizeArg);
2379     llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
2380         GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
2381     LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
2382                                      CGM.getContext().getDeclAlign(VD),
2383                                      AlignmentSource::Decl);
2384     I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
2385                                             Base.getAddress());
2386     I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
2387   }
2388   I->getSecond().MappedParams->apply(CGF);
2389 }
2390 
2391 void CGOpenMPRuntimeNVPTX::emitGenericVarsEpilog(CodeGenFunction &CGF,
2392                                                  bool WithSPMDCheck) {
2393   if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic &&
2394       getExecutionMode() != CGOpenMPRuntimeNVPTX::EM_SPMD)
2395     return;
2396 
2397   const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
2398   if (I != FunctionGlobalizedDecls.end()) {
2399     I->getSecond().MappedParams->restore(CGF);
2400     if (!CGF.HaveInsertPoint())
2401       return;
2402     for (llvm::Value *Addr :
2403          llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
2404       CGF.EmitRuntimeCall(
2405           createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2406           Addr);
2407     }
2408     if (I->getSecond().GlobalRecordAddr) {
2409       if (!IsInTTDRegion &&
2410           (WithSPMDCheck ||
2411            getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_Unknown)) {
2412         CGBuilderTy &Bld = CGF.Builder;
2413         llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2414         llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
2415         Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
2416         // There is no need to emit a line number for the unconditional branch.
2417         (void)ApplyDebugLocation::CreateEmpty(CGF);
2418         CGF.EmitBlock(NonSPMDBB);
2419         CGF.EmitRuntimeCall(
2420             createNVPTXRuntimeFunction(
2421                 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2422             CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
2423         CGF.EmitBlock(ExitBB);
2424       } else if (IsInTTDRegion) {
2425         assert(GlobalizedRecords.back().RegionCounter > 0 &&
2426                "region counter must be > 0.");
2427         --GlobalizedRecords.back().RegionCounter;
2428         // Emit the restore function only in the target region.
2429         if (GlobalizedRecords.back().RegionCounter == 0) {
2430           QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
2431               /*DestWidth=*/16, /*Signed=*/0);
2432           llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
2433               Address(GlobalizedRecords.back().UseSharedMemory,
2434                       CGM.getContext().getTypeAlignInChars(Int16Ty)),
2435               /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
2436           llvm::Value *Args[] = {
2437               llvm::ConstantInt::get(
2438                   CGM.Int16Ty,
2439                   getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD ? 1 : 0),
2440               IsInSharedMemory};
2441           CGF.EmitRuntimeCall(
2442               createNVPTXRuntimeFunction(
2443                   OMPRTL_NVPTX__kmpc_restore_team_static_memory),
2444               Args);
2445         }
2446       } else {
2447         CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2448                                 OMPRTL_NVPTX__kmpc_data_sharing_pop_stack),
2449                             I->getSecond().GlobalRecordAddr);
2450       }
2451     }
2452   }
2453 }
2454 
2455 void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
2456                                          const OMPExecutableDirective &D,
2457                                          SourceLocation Loc,
2458                                          llvm::Function *OutlinedFn,
2459                                          ArrayRef<llvm::Value *> CapturedVars) {
2460   if (!CGF.HaveInsertPoint())
2461     return;
2462 
2463   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2464                                                       /*Name=*/".zero.addr");
2465   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2466   llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2467   OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
2468   OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2469   OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2470   emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2471 }
2472 
2473 void CGOpenMPRuntimeNVPTX::emitParallelCall(
2474     CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2475     ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2476   if (!CGF.HaveInsertPoint())
2477     return;
2478 
2479   if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
2480     emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2481   else
2482     emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
2483 }
2484 
2485 void CGOpenMPRuntimeNVPTX::emitNonSPMDParallelCall(
2486     CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
2487     ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2488   llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
2489 
2490   // Force inline this outlined function at its call site.
2491   Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
2492 
2493   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2494                                                       /*Name=*/".zero.addr");
2495   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2496   // The thread ID for serialized parallel regions is 0.
2497   Address ThreadIDAddr = ZeroAddr;
2498   auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
2499                        CodeGenFunction &CGF, PrePostActionTy &Action) {
2500     Action.Enter(CGF);
2501 
2502     Address ZeroAddr =
2503         CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2504                                          /*Name=*/".bound.zero.addr");
2505     CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2506     llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2507     OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2508     OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2509     OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2510     emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
2511   };
2512   auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2513                                         PrePostActionTy &) {
2514 
2515     RegionCodeGenTy RCG(CodeGen);
2516     llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2517     llvm::Value *ThreadID = getThreadID(CGF, Loc);
2518     llvm::Value *Args[] = {RTLoc, ThreadID};
2519 
2520     NVPTXActionTy Action(
2521         createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
2522         Args,
2523         createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
2524         Args);
2525     RCG.setAction(Action);
2526     RCG(CGF);
2527   };
2528 
2529   auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
2530                                                   PrePostActionTy &Action) {
2531     CGBuilderTy &Bld = CGF.Builder;
2532     llvm::Function *WFn = WrapperFunctionsMap[Fn];
2533     assert(WFn && "Wrapper function does not exist!");
2534     llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
2535 
2536     // Prepare for parallel region. Indicate the outlined function.
2537     llvm::Value *Args[] = {ID, /*RequiresOMPRuntime=*/Bld.getInt16(1)};
2538     CGF.EmitRuntimeCall(
2539         createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
2540         Args);
2541 
2542     // Create a private scope that will globalize the arguments
2543     // passed from the outside of the target region.
2544     CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
2545 
2546     // There's something to share.
2547     if (!CapturedVars.empty()) {
2548       // Allocate the list of references used to share the captured variables.
2549       Address SharedArgs =
2550           CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
2551       llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
2552 
2553       llvm::Value *DataSharingArgs[] = {
2554           SharedArgsPtr,
2555           llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
2556       CGF.EmitRuntimeCall(createNVPTXRuntimeFunction(
2557                               OMPRTL_NVPTX__kmpc_begin_sharing_variables),
2558                           DataSharingArgs);
2559 
2560       // Store variable address in a list of references to pass to workers.
2561       unsigned Idx = 0;
2562       ASTContext &Ctx = CGF.getContext();
2563       Address SharedArgListAddress = CGF.EmitLoadOfPointer(
2564           SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
2565                           .castAs<PointerType>());
2566       for (llvm::Value *V : CapturedVars) {
2567         Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
2568         llvm::Value *PtrV;
2569         if (V->getType()->isIntegerTy())
2570           PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
2571         else
2572           PtrV = Bld.CreatePointerBitCastOrAddrSpaceCast(V, CGF.VoidPtrTy);
2573         CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
2574                               Ctx.getPointerType(Ctx.VoidPtrTy));
2575         ++Idx;
2576       }
2577     }
2578 
2579     // Activate workers. This barrier is used by the master to signal
2580     // work for the workers.
2581     syncCTAThreads(CGF);
2582 
2583     // OpenMP [2.5, Parallel Construct, p.49]
2584     // There is an implied barrier at the end of a parallel region. After the
2585     // end of a parallel region, only the master thread of the team resumes
2586     // execution of the enclosing task region.
2587     //
2588     // The master waits at this barrier until all workers are done.
2589     syncCTAThreads(CGF);
2590 
2591     if (!CapturedVars.empty())
2592       CGF.EmitRuntimeCall(
2593           createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_sharing_variables));
2594 
2595     // Remember for post-processing in worker loop.
2596     Work.emplace_back(WFn);
2597   };
2598 
2599   auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
2600                              CodeGenFunction &CGF, PrePostActionTy &Action) {
2601     if (IsInParallelRegion) {
2602       SeqGen(CGF, Action);
2603     } else if (IsInTargetMasterThreadRegion) {
2604       L0ParallelGen(CGF, Action);
2605     } else {
2606       // Check for master and then parallelism:
2607       // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
2608       //   Serialized execution.
2609       // } else {
2610       //   Worker call.
2611       // }
2612       CGBuilderTy &Bld = CGF.Builder;
2613       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
2614       llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
2615       llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
2616       llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
2617       llvm::Value *IsSPMD = Bld.CreateIsNotNull(CGF.EmitNounwindRuntimeCall(
2618           createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_is_spmd_exec_mode)));
2619       Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
2620       // There is no need to emit line number for unconditional branch.
2621       (void)ApplyDebugLocation::CreateEmpty(CGF);
2622       CGF.EmitBlock(ParallelCheckBB);
2623       llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2624       llvm::Value *ThreadID = getThreadID(CGF, Loc);
2625       llvm::Value *PL = CGF.EmitRuntimeCall(
2626           createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_level),
2627           {RTLoc, ThreadID});
2628       llvm::Value *Res = Bld.CreateIsNotNull(PL);
2629       Bld.CreateCondBr(Res, SeqBB, MasterBB);
2630       CGF.EmitBlock(SeqBB);
2631       SeqGen(CGF, Action);
2632       CGF.EmitBranch(ExitBB);
2633       // There is no need to emit line number for unconditional branch.
2634       (void)ApplyDebugLocation::CreateEmpty(CGF);
2635       CGF.EmitBlock(MasterBB);
2636       L0ParallelGen(CGF, Action);
2637       CGF.EmitBranch(ExitBB);
2638       // There is no need to emit line number for unconditional branch.
2639       (void)ApplyDebugLocation::CreateEmpty(CGF);
2640       // Emit the continuation block for code after the if.
2641       CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2642     }
2643   };
2644 
2645   if (IfCond) {
2646     emitOMPIfClause(CGF, IfCond, LNParallelGen, SeqGen);
2647   } else {
2648     CodeGenFunction::RunCleanupsScope Scope(CGF);
2649     RegionCodeGenTy ThenRCG(LNParallelGen);
2650     ThenRCG(CGF);
2651   }
2652 }
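
// A rough sketch of what the L0ParallelGen path above amounts to on the
// master thread for a level-0 parallel region with N captured variables
// (pseudo-code only; not emitted verbatim):
//
//   __kmpc_kernel_prepare_parallel((void *)wrapper_fn, /*RequiresOMPRuntime=*/1);
//   if (N > 0) {
//     __kmpc_begin_sharing_variables(&shared_args, N);
//     shared_args[i] = address (or int-to-ptr cast) of captured variable i;
//   }
//   __kmpc_barrier_simple_spmd(...);   // release the workers
//   __kmpc_barrier_simple_spmd(...);   // wait until the workers are done
//   if (N > 0)
//     __kmpc_end_sharing_variables();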
2653 
2654 void CGOpenMPRuntimeNVPTX::emitSPMDParallelCall(
2655     CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
2656     ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
2657   // Just call the outlined function to execute the parallel region.
2658   // OutlinedFn(&GTid, &zero, CapturedStruct);
2659   //
2660   llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2661 
2662   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2663                                                       /*Name=*/".zero.addr");
2664   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2665   // ThreadId for serialized parallels is 0.
2666   Address ThreadIDAddr = ZeroAddr;
2667   auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
2668                        CodeGenFunction &CGF, PrePostActionTy &Action) {
2669     Action.Enter(CGF);
2670 
2671     Address ZeroAddr =
2672         CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
2673                                          /*Name=*/".bound.zero.addr");
2674     CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
2675     llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
2676     OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
2677     OutlinedFnArgs.push_back(ZeroAddr.getPointer());
2678     OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
2679     emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
2680   };
2681   auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
2682                                         PrePostActionTy &) {
2683 
2684     RegionCodeGenTy RCG(CodeGen);
2685     llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
2686     llvm::Value *ThreadID = getThreadID(CGF, Loc);
2687     llvm::Value *Args[] = {RTLoc, ThreadID};
2688 
2689     NVPTXActionTy Action(
2690         createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
2691         Args,
2692         createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
2693         Args);
2694     RCG.setAction(Action);
2695     RCG(CGF);
2696   };
2697 
2698   if (IsInTargetMasterThreadRegion) {
2699     // In the worker we need to use the real thread id.
2700     ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
2701     RegionCodeGenTy RCG(CodeGen);
2702     RCG(CGF);
2703   } else {
2704     // If we are not in the target region, it is definitely L2 parallelism or
2705     // more, because for SPMD mode we always has L1 parallel level, sowe don't
2706     // need to check for orphaned directives.
2707     RegionCodeGenTy RCG(SeqGen);
2708     RCG(CGF);
2709   }
2710 }
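
// In SPMD mode the two shapes produced above are, roughly (pseudo-code,
// using the outlined function's (gtid*, bound_tid*, captured...) signature):
//
//   // Inside the target master-thread region (L1 parallel):
//   OutlinedFn(&global_tid, &bound_zero, <captured vars>...);
//
//   // Otherwise (L2+ parallelism): the region is serialized.
//   __kmpc_serialized_parallel(&loc, global_tid);
//   OutlinedFn(&zero, &bound_zero, <captured vars>...);
//   __kmpc_end_serialized_parallel(&loc, global_tid);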
2711 
2712 void CGOpenMPRuntimeNVPTX::syncCTAThreads(CodeGenFunction &CGF) {
2713   // Always emit simple barriers!
2714   if (!CGF.HaveInsertPoint())
2715     return;
2716   // Build call __kmpc_barrier_simple_spmd(nullptr, 0);
2717   // This function does not use parameters, so we can emit just default values.
2718   llvm::Value *Args[] = {
2719       llvm::ConstantPointerNull::get(
2720           cast<llvm::PointerType>(getIdentTyPointerTy())),
2721       llvm::ConstantInt::get(CGF.Int32Ty, /*V=*/0, /*isSigned=*/true)};
2722   llvm::CallInst *Call = CGF.EmitRuntimeCall(
2723       createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier_simple_spmd), Args);
2724   Call->setConvergent();
2725 }
2726 
2727 void CGOpenMPRuntimeNVPTX::emitBarrierCall(CodeGenFunction &CGF,
2728                                            SourceLocation Loc,
2729                                            OpenMPDirectiveKind Kind, bool,
2730                                            bool) {
2731   // Always emit simple barriers!
2732   if (!CGF.HaveInsertPoint())
2733     return;
2734   // Build call __kmpc_barrier(loc, thread_id);
2735   unsigned Flags = getDefaultFlagsForBarriers(Kind);
2736   llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags),
2737                          getThreadID(CGF, Loc)};
2738   llvm::CallInst *Call = CGF.EmitRuntimeCall(
2739       createNVPTXRuntimeFunction(OMPRTL__kmpc_barrier), Args);
2740   Call->setConvergent();
2741 }
2742 
2743 void CGOpenMPRuntimeNVPTX::emitCriticalRegion(
2744     CodeGenFunction &CGF, StringRef CriticalName,
2745     const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
2746     const Expr *Hint) {
2747   llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.critical.loop");
2748   llvm::BasicBlock *TestBB = CGF.createBasicBlock("omp.critical.test");
2749   llvm::BasicBlock *SyncBB = CGF.createBasicBlock("omp.critical.sync");
2750   llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.critical.body");
2751   llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.critical.exit");
2752 
2753   // Get the mask of active threads in the warp.
2754   llvm::Value *Mask = CGF.EmitRuntimeCall(
2755       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_warp_active_thread_mask));
2756   // Fetch team-local id of the thread.
2757   llvm::Value *ThreadID = getNVPTXThreadID(CGF);
2758 
2759   // Get the width of the team.
2760   llvm::Value *TeamWidth = getNVPTXNumThreads(CGF);
2761 
2762   // Initialize the counter variable for the loop.
2763   QualType Int32Ty =
2764       CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/0);
2765   Address Counter = CGF.CreateMemTemp(Int32Ty, "critical_counter");
2766   LValue CounterLVal = CGF.MakeAddrLValue(Counter, Int32Ty);
2767   CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.Int32Ty), CounterLVal,
2768                         /*isInit=*/true);
2769 
2770   // Block checks if loop counter exceeds upper bound.
2771   CGF.EmitBlock(LoopBB);
2772   llvm::Value *CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2773   llvm::Value *CmpLoopBound = CGF.Builder.CreateICmpSLT(CounterVal, TeamWidth);
2774   CGF.Builder.CreateCondBr(CmpLoopBound, TestBB, ExitBB);
2775 
2776   // Block tests which single thread should execute the region, and which
2777   // threads should go straight to the synchronisation point.
2778   CGF.EmitBlock(TestBB);
2779   CounterVal = CGF.EmitLoadOfScalar(CounterLVal, Loc);
2780   llvm::Value *CmpThreadToCounter =
2781       CGF.Builder.CreateICmpEQ(ThreadID, CounterVal);
2782   CGF.Builder.CreateCondBr(CmpThreadToCounter, BodyBB, SyncBB);
2783 
2784   // Block emits the body of the critical region.
2785   CGF.EmitBlock(BodyBB);
2786 
2787   // Output the critical statement.
2788   CGOpenMPRuntime::emitCriticalRegion(CGF, CriticalName, CriticalOpGen, Loc,
2789                                       Hint);
2790 
2791   // After the body of the critical region, the single executing thread will
2792   // jump to the synchronisation point.
2793   // Block waits for all threads in the current team to finish, then increments
2794   // the counter variable and returns to the loop.
2795   CGF.EmitBlock(SyncBB);
2796   // Reconverge active threads in the warp.
2797   (void)CGF.EmitRuntimeCall(
2798       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_syncwarp), Mask);
2799 
2800   llvm::Value *IncCounterVal =
2801       CGF.Builder.CreateNSWAdd(CounterVal, CGF.Builder.getInt32(1));
2802   CGF.EmitStoreOfScalar(IncCounterVal, CounterLVal);
2803   CGF.EmitBranch(LoopBB);
2804 
2805   // Block that is reached when all threads in the team complete the region.
2806   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2807 }
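
// The emitted critical region is, in effect, the following per-team loop
// (a simplified sketch of the blocks created above, not literal output):
//
//   mask    = __kmpc_warp_active_thread_mask();
//   counter = 0;
//   while (counter < team_width) {          // omp.critical.loop
//     if (thread_id == counter)             // omp.critical.test
//       <critical region body>;             // omp.critical.body
//     __kmpc_syncwarp(mask);                // omp.critical.sync
//     ++counter;
//   }                                       // omp.critical.exit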
2808 
2809 /// Cast value to the specified type.
2810 static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
2811                                     QualType ValTy, QualType CastTy,
2812                                     SourceLocation Loc) {
2813   assert(!CGF.getContext().getTypeSizeInChars(CastTy).isZero() &&
2814          "Cast type must be sized.");
2815   assert(!CGF.getContext().getTypeSizeInChars(ValTy).isZero() &&
2816          "Val type must be sized.");
2817   llvm::Type *LLVMCastTy = CGF.ConvertTypeForMem(CastTy);
2818   if (ValTy == CastTy)
2819     return Val;
2820   if (CGF.getContext().getTypeSizeInChars(ValTy) ==
2821       CGF.getContext().getTypeSizeInChars(CastTy))
2822     return CGF.Builder.CreateBitCast(Val, LLVMCastTy);
2823   if (CastTy->isIntegerType() && ValTy->isIntegerType())
2824     return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
2825                                      CastTy->hasSignedIntegerRepresentation());
2826   Address CastItem = CGF.CreateMemTemp(CastTy);
2827   Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2828       CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
2829   CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy);
2830   return CGF.EmitLoadOfScalar(CastItem, /*Volatile=*/false, CastTy, Loc);
2831 }
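
// Informally: a 16-bit integer cast to the signed 32-bit shuffle type takes
// the integer path above and is sign-extended; a value whose size already
// matches the cast type is simply bitcast; only values that differ in size
// and are not both integers are funneled through the small memory temporary.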
2832 
2833 /// This function creates calls to one of two shuffle functions to copy
2834 /// variables between lanes in a warp.
2835 static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
2836                                                  llvm::Value *Elem,
2837                                                  QualType ElemType,
2838                                                  llvm::Value *Offset,
2839                                                  SourceLocation Loc) {
2840   CodeGenModule &CGM = CGF.CGM;
2841   CGBuilderTy &Bld = CGF.Builder;
2842   CGOpenMPRuntimeNVPTX &RT =
2843       *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));
2844 
2845   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2846   assert(Size.getQuantity() <= 8 &&
2847          "Unsupported bitwidth in shuffle instruction.");
2848 
2849   OpenMPRTLFunctionNVPTX ShuffleFn = Size.getQuantity() <= 4
2850                                          ? OMPRTL_NVPTX__kmpc_shuffle_int32
2851                                          : OMPRTL_NVPTX__kmpc_shuffle_int64;
2852 
2853   // Cast all types to 32- or 64-bit values before calling shuffle routines.
2854   QualType CastTy = CGF.getContext().getIntTypeForBitwidth(
2855       Size.getQuantity() <= 4 ? 32 : 64, /*Signed=*/1);
2856   llvm::Value *ElemCast = castValueToType(CGF, Elem, ElemType, CastTy, Loc);
2857   llvm::Value *WarpSize =
2858       Bld.CreateIntCast(getNVPTXWarpSize(CGF), CGM.Int16Ty, /*isSigned=*/true);
2859 
2860   llvm::Value *ShuffledVal = CGF.EmitRuntimeCall(
2861       RT.createNVPTXRuntimeFunction(ShuffleFn), {ElemCast, Offset, WarpSize});
2862 
2863   return castValueToType(CGF, ShuffledVal, CastTy, ElemType, Loc);
2864 }
2865 
2866 static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
2867                             Address DestAddr, QualType ElemType,
2868                             llvm::Value *Offset, SourceLocation Loc) {
2869   CGBuilderTy &Bld = CGF.Builder;
2870 
2871   CharUnits Size = CGF.getContext().getTypeSizeInChars(ElemType);
2872   // Create the loop over the big-sized data.
2873   // ptr = (void*)Elem;
2874   // ptrEnd = (void*) Elem + 1;
2875   // Step = 8;
2876   // while (ptr + Step < ptrEnd)
2877   //   shuffle((int64_t)*ptr);
2878   // Step = 4;
2879   // while (ptr + Step < ptrEnd)
2880   //   shuffle((int32_t)*ptr);
2881   // ...
2882   Address ElemPtr = DestAddr;
2883   Address Ptr = SrcAddr;
2884   Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
2885       Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
2886   for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
2887     if (Size < CharUnits::fromQuantity(IntSize))
2888       continue;
2889     QualType IntType = CGF.getContext().getIntTypeForBitwidth(
2890         CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
2891         /*Signed=*/1);
2892     llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
2893     Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
2894     ElemPtr =
2895         Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
2896     if (Size.getQuantity() / IntSize > 1) {
2897       llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
2898       llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
2899       llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".shuffle.exit");
2900       llvm::BasicBlock *CurrentBB = Bld.GetInsertBlock();
2901       CGF.EmitBlock(PreCondBB);
2902       llvm::PHINode *PhiSrc =
2903           Bld.CreatePHI(Ptr.getType(), /*NumReservedValues=*/2);
2904       PhiSrc->addIncoming(Ptr.getPointer(), CurrentBB);
2905       llvm::PHINode *PhiDest =
2906           Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
2907       PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
2908       Ptr = Address(PhiSrc, Ptr.getAlignment());
2909       ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
2910       llvm::Value *PtrDiff = Bld.CreatePtrDiff(
2911           PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
2912                                    Ptr.getPointer(), CGF.VoidPtrTy));
2913       Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
2914                        ThenBB, ExitBB);
2915       CGF.EmitBlock(ThenBB);
2916       llvm::Value *Res = createRuntimeShuffleFunction(
2917           CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
2918           IntType, Offset, Loc);
2919       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
2920       Address LocalPtr = Bld.CreateConstGEP(Ptr, 1);
2921       Address LocalElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2922       PhiSrc->addIncoming(LocalPtr.getPointer(), ThenBB);
2923       PhiDest->addIncoming(LocalElemPtr.getPointer(), ThenBB);
2924       CGF.EmitBranch(PreCondBB);
2925       CGF.EmitBlock(ExitBB);
2926     } else {
2927       llvm::Value *Res = createRuntimeShuffleFunction(
2928           CGF, CGF.EmitLoadOfScalar(Ptr, /*Volatile=*/false, IntType, Loc),
2929           IntType, Offset, Loc);
2930       CGF.EmitStoreOfScalar(Res, ElemPtr, /*Volatile=*/false, IntType);
2931       Ptr = Bld.CreateConstGEP(Ptr, 1);
2932       ElemPtr = Bld.CreateConstGEP(ElemPtr, 1);
2933     }
2934     Size = Size % IntSize;
2935   }
2936 }
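
// Worked example (informal): for a 13-byte reduction element the loop above
// emits one 8-byte shuffle (13 / 8 == 1, so no inner loop), leaving 5 bytes,
// then one 4-byte shuffle, leaving 1 byte, then one 1-byte shuffle.  For a
// 16-byte element the 8-byte step has 16 / 8 == 2 chunks, so the pre-cond /
// then / exit blocks form a small loop that shuffles two 8-byte chunks and
// nothing remains for the smaller sizes.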
2937 
2938 namespace {
2939 enum CopyAction : unsigned {
2940   // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
2941   // the warp using shuffle instructions.
2942   RemoteLaneToThread,
2943   // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
2944   ThreadCopy,
2945   // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
2946   ThreadToScratchpad,
2947   // ScratchpadToThread: Copy from a scratchpad array in global memory
2948   // containing team-reduced data to a thread's stack.
2949   ScratchpadToThread,
2950 };
2951 } // namespace
2952 
2953 struct CopyOptionsTy {
2954   llvm::Value *RemoteLaneOffset;
2955   llvm::Value *ScratchpadIndex;
2956   llvm::Value *ScratchpadWidth;
2957 };
2958 
2959 /// Emit instructions to copy a Reduce list, which contains partially
2960 /// aggregated values, in the specified direction.
2961 static void emitReductionListCopy(
2962     CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
2963     ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
2964     CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {
2965 
2966   CodeGenModule &CGM = CGF.CGM;
2967   ASTContext &C = CGM.getContext();
2968   CGBuilderTy &Bld = CGF.Builder;
2969 
2970   llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
2971   llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
2972   llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
2973 
2974   // Iterate, element by element, through the source Reduce list and
2975   // make a copy.
2976   unsigned Idx = 0;
2977   unsigned Size = Privates.size();
2978   for (const Expr *Private : Privates) {
2979     Address SrcElementAddr = Address::invalid();
2980     Address DestElementAddr = Address::invalid();
2981     Address DestElementPtrAddr = Address::invalid();
2982     // Should we shuffle in an element from a remote lane?
2983     bool ShuffleInElement = false;
2984     // Set to true to update the pointer in the dest Reduce list to a
2985     // newly created element.
2986     bool UpdateDestListPtr = false;
2987     // Increment the src or dest pointer to the scratchpad, for each
2988     // new element.
2989     bool IncrScratchpadSrc = false;
2990     bool IncrScratchpadDest = false;
2991 
2992     switch (Action) {
2993     case RemoteLaneToThread: {
2994       // Step 1.1: Get the address for the src element in the Reduce list.
2995       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
2996       SrcElementAddr = CGF.EmitLoadOfPointer(
2997           SrcElementPtrAddr,
2998           C.getPointerType(Private->getType())->castAs<PointerType>());
2999 
3000       // Step 1.2: Create a temporary to store the element in the destination
3001       // Reduce list.
3002       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3003       DestElementAddr =
3004           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3005       ShuffleInElement = true;
3006       UpdateDestListPtr = true;
3007       break;
3008     }
3009     case ThreadCopy: {
3010       // Step 1.1: Get the address for the src element in the Reduce list.
3011       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3012       SrcElementAddr = CGF.EmitLoadOfPointer(
3013           SrcElementPtrAddr,
3014           C.getPointerType(Private->getType())->castAs<PointerType>());
3015 
3016       // Step 1.2: Get the address for dest element.  The destination
3017       // element has already been created on the thread's stack.
3018       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3019       DestElementAddr = CGF.EmitLoadOfPointer(
3020           DestElementPtrAddr,
3021           C.getPointerType(Private->getType())->castAs<PointerType>());
3022       break;
3023     }
3024     case ThreadToScratchpad: {
3025       // Step 1.1: Get the address for the src element in the Reduce list.
3026       Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
3027       SrcElementAddr = CGF.EmitLoadOfPointer(
3028           SrcElementPtrAddr,
3029           C.getPointerType(Private->getType())->castAs<PointerType>());
3030 
3031       // Step 1.2: Get the address for dest element:
3032       // address = base + index * ElementSizeInChars.
3033       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3034       llvm::Value *CurrentOffset =
3035           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3036       llvm::Value *ScratchPadElemAbsolutePtrVal =
3037           Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
3038       ScratchPadElemAbsolutePtrVal =
3039           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3040       DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3041                                 C.getTypeAlignInChars(Private->getType()));
3042       IncrScratchpadDest = true;
3043       break;
3044     }
3045     case ScratchpadToThread: {
3046       // Step 1.1: Get the address for the src element in the scratchpad.
3047       // address = base + index * ElementSizeInChars.
3048       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3049       llvm::Value *CurrentOffset =
3050           Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
3051       llvm::Value *ScratchPadElemAbsolutePtrVal =
3052           Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
3053       ScratchPadElemAbsolutePtrVal =
3054           Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
3055       SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
3056                                C.getTypeAlignInChars(Private->getType()));
3057       IncrScratchpadSrc = true;
3058 
3059       // Step 1.2: Create a temporary to store the element in the destination
3060       // Reduce list.
3061       DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
3062       DestElementAddr =
3063           CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
3064       UpdateDestListPtr = true;
3065       break;
3066     }
3067     }
3068 
3069     // Regardless of the direction of the copy, we emit the load of the src
3070     // element, as this is required in all directions.
3071     SrcElementAddr = Bld.CreateElementBitCast(
3072         SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
3073     DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
3074                                                SrcElementAddr.getElementType());
3075 
3076     // Now that all active lanes have read the element in the
3077     // Reduce list, shuffle over the value from the remote lane.
3078     if (ShuffleInElement) {
3079       shuffleAndStore(CGF, SrcElementAddr, DestElementAddr, Private->getType(),
3080                       RemoteLaneOffset, Private->getExprLoc());
3081     } else {
3082       switch (CGF.getEvaluationKind(Private->getType())) {
3083       case TEK_Scalar: {
3084         llvm::Value *Elem =
3085             CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
3086                                  Private->getType(), Private->getExprLoc());
3087         // Store the source element value to the dest element address.
3088         CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
3089                               Private->getType());
3090         break;
3091       }
3092       case TEK_Complex: {
3093         CodeGenFunction::ComplexPairTy Elem = CGF.EmitLoadOfComplex(
3094             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3095             Private->getExprLoc());
3096         CGF.EmitStoreOfComplex(
3097             Elem, CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3098             /*isInit=*/false);
3099         break;
3100       }
3101       case TEK_Aggregate:
3102         CGF.EmitAggregateCopy(
3103             CGF.MakeAddrLValue(DestElementAddr, Private->getType()),
3104             CGF.MakeAddrLValue(SrcElementAddr, Private->getType()),
3105             Private->getType(), AggValueSlot::DoesNotOverlap);
3106         break;
3107       }
3108     }
3109 
3110     // Step 3.1: Modify reference in dest Reduce list as needed.
3111     // Modifying the reference in Reduce list to point to the newly
3112     // created element.  The element is live in the current function
3113     // scope and that of functions it invokes (i.e., reduce_function).
3114     // RemoteReduceData[i] = (void*)&RemoteElem
3115     if (UpdateDestListPtr) {
3116       CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
3117                                 DestElementAddr.getPointer(), CGF.VoidPtrTy),
3118                             DestElementPtrAddr, /*Volatile=*/false,
3119                             C.VoidPtrTy);
3120     }
3121 
3122     // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
3123     // address of the next element in scratchpad memory, unless we're currently
3124     // processing the last one.  Memory alignment is also taken care of here.
3125     if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
3126       llvm::Value *ScratchpadBasePtr =
3127           IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
3128       llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
3129       ScratchpadBasePtr = Bld.CreateNUWAdd(
3130           ScratchpadBasePtr,
3131           Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
3132 
3133       // Take care of global memory alignment for performance
3134       ScratchpadBasePtr = Bld.CreateNUWSub(
3135           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3136       ScratchpadBasePtr = Bld.CreateUDiv(
3137           ScratchpadBasePtr,
3138           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3139       ScratchpadBasePtr = Bld.CreateNUWAdd(
3140           ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
3141       ScratchpadBasePtr = Bld.CreateNUWMul(
3142           ScratchpadBasePtr,
3143           llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
3144 
3145       if (IncrScratchpadDest)
3146         DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3147       else /* IncrScratchpadSrc = true */
3148         SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
3149     }
3150 
3151     ++Idx;
3152   }
3153 }
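
// Note on the scratchpad increment above: for each element the base pointer
// advances by ScratchpadWidth * sizeof(element) and is then rounded up to the
// next multiple of GlobalMemoryAlignment, i.e. roughly
//   base = RoundUp(base + ScratchpadWidth * ElementSize, GlobalMemoryAlignment)
// so, for instance, with an alignment of 256 a base that lands on 1032 is
// bumped to 1280 before the next element is processed.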
3154 
3155 /// This function emits a helper that gathers Reduce lists from the first
3156 /// lane of every active warp to lanes in the first warp.
3157 ///
3158 /// void inter_warp_copy_func(void* reduce_data, num_warps)
3159 ///   shared smem[warp_size];
3160 ///   For all data entries D in reduce_data:
3161 ///     sync
3162 ///     If (I am the first lane in each warp)
3163 ///       Copy my local D to smem[warp_id]
3164 ///     sync
3165 ///     if (I am the first warp)
3166 ///       Copy smem[thread_id] to my local D
3167 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
3168                                               ArrayRef<const Expr *> Privates,
3169                                               QualType ReductionArrayTy,
3170                                               SourceLocation Loc) {
3171   ASTContext &C = CGM.getContext();
3172   llvm::Module &M = CGM.getModule();
3173 
3174   // ReduceList: thread local Reduce list.
3175   // At the stage of the computation when this function is called, partially
3176   // aggregated values reside in the first lane of every active warp.
3177   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3178                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3179   // NumWarps: number of warps active in the parallel region.  This could
3180   // be smaller than 32 (max warps in a CTA) for partial block reduction.
3181   ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3182                                 C.getIntTypeForBitwidth(32, /* Signed */ true),
3183                                 ImplicitParamDecl::Other);
3184   FunctionArgList Args;
3185   Args.push_back(&ReduceListArg);
3186   Args.push_back(&NumWarpsArg);
3187 
3188   const CGFunctionInfo &CGFI =
3189       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3190   auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
3191                                     llvm::GlobalValue::InternalLinkage,
3192                                     "_omp_reduction_inter_warp_copy_func", &M);
3193   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3194   Fn->setDoesNotRecurse();
3195   CodeGenFunction CGF(CGM);
3196   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3197 
3198   CGBuilderTy &Bld = CGF.Builder;
3199 
3200   // This array is used as a medium to transfer, one reduce element at a time,
3201   // the data from the first lane of every warp to lanes in the first warp
3202   // in order to perform the final step of a reduction in a parallel region
3203   // (reduction across warps).  The array is placed in NVPTX __shared__ memory
3204   // for reduced latency, as well as to have a distinct copy for concurrently
3205   // executing target regions.  The array is declared with common linkage so
3206   // as to be shared across compilation units.
3207   StringRef TransferMediumName =
3208       "__openmp_nvptx_data_transfer_temporary_storage";
3209   llvm::GlobalVariable *TransferMedium =
3210       M.getGlobalVariable(TransferMediumName);
3211   if (!TransferMedium) {
3212     auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
3213     unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
3214     TransferMedium = new llvm::GlobalVariable(
3215         M, Ty, /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
3216         llvm::Constant::getNullValue(Ty), TransferMediumName,
3217         /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
3218         SharedAddressSpace);
3219     CGM.addCompilerUsedGlobal(TransferMedium);
3220   }
3221 
3222   // Get the CUDA thread id of the current OpenMP thread on the GPU.
3223   llvm::Value *ThreadID = getNVPTXThreadID(CGF);
3224   // nvptx_lane_id = nvptx_id % warpsize
3225   llvm::Value *LaneID = getNVPTXLaneID(CGF);
3226   // nvptx_warp_id = nvptx_id / warpsize
3227   llvm::Value *WarpID = getNVPTXWarpID(CGF);
3228 
3229   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3230   Address LocalReduceList(
3231       Bld.CreatePointerBitCastOrAddrSpaceCast(
3232           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3233                                C.VoidPtrTy, Loc),
3234           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3235       CGF.getPointerAlign());
3236 
3237   unsigned Idx = 0;
3238   for (const Expr *Private : Privates) {
3239     //
3240     // Warp master copies reduce element to transfer medium in __shared__
3241     // memory.
3242     //
3243     unsigned RealTySize =
3244         C.getTypeSizeInChars(Private->getType())
3245             .alignTo(C.getTypeAlignInChars(Private->getType()))
3246             .getQuantity();
3247     for (unsigned TySize = 4; TySize > 0 && RealTySize > 0; TySize /= 2) {
3248       unsigned NumIters = RealTySize / TySize;
3249       if (NumIters == 0)
3250         continue;
3251       QualType CType = C.getIntTypeForBitwidth(
3252           C.toBits(CharUnits::fromQuantity(TySize)), /*Signed=*/1);
3253       llvm::Type *CopyType = CGF.ConvertTypeForMem(CType);
3254       CharUnits Align = CharUnits::fromQuantity(TySize);
3255       llvm::Value *Cnt = nullptr;
3256       Address CntAddr = Address::invalid();
3257       llvm::BasicBlock *PrecondBB = nullptr;
3258       llvm::BasicBlock *ExitBB = nullptr;
3259       if (NumIters > 1) {
3260         CntAddr = CGF.CreateMemTemp(C.IntTy, ".cnt.addr");
3261         CGF.EmitStoreOfScalar(llvm::Constant::getNullValue(CGM.IntTy), CntAddr,
3262                               /*Volatile=*/false, C.IntTy);
3263         PrecondBB = CGF.createBasicBlock("precond");
3264         ExitBB = CGF.createBasicBlock("exit");
3265         llvm::BasicBlock *BodyBB = CGF.createBasicBlock("body");
3266         // There is no need to emit line number for unconditional branch.
3267         (void)ApplyDebugLocation::CreateEmpty(CGF);
3268         CGF.EmitBlock(PrecondBB);
3269         Cnt = CGF.EmitLoadOfScalar(CntAddr, /*Volatile=*/false, C.IntTy, Loc);
3270         llvm::Value *Cmp =
3271             Bld.CreateICmpULT(Cnt, llvm::ConstantInt::get(CGM.IntTy, NumIters));
3272         Bld.CreateCondBr(Cmp, BodyBB, ExitBB);
3273         CGF.EmitBlock(BodyBB);
3274       }
3275       // kmpc_barrier.
3276       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3277                                              /*EmitChecks=*/false,
3278                                              /*ForceSimpleCall=*/true);
3279       llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3280       llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3281       llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3282 
3283       // if (lane_id == 0)
3284       llvm::Value *IsWarpMaster = Bld.CreateIsNull(LaneID, "warp_master");
3285       Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
3286       CGF.EmitBlock(ThenBB);
3287 
3288       // Reduce element = LocalReduceList[i]
3289       Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3290       llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3291           ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3292       // elemptr = ((CopyType*)(elemptrptr)) + I
3293       Address ElemPtr = Address(ElemPtrPtr, Align);
3294       ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
3295       if (NumIters > 1) {
3296         ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
3297                           ElemPtr.getAlignment());
3298       }
3299 
3300       // Get pointer to location in transfer medium.
3301       // MediumPtr = &medium[warp_id]
3302       llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
3303           TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
3304       Address MediumPtr(MediumPtrVal, Align);
3305       // Casting to actual data type.
3306       // MediumPtr = (CopyType*)MediumPtrAddr;
3307       MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
3308 
3309       // elem = *elemptr
3310       // *MediumPtr = elem
3311       llvm::Value *Elem =
3312           CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false, CType, Loc);
3313       // Store the source element value to the dest element address.
3314       CGF.EmitStoreOfScalar(Elem, MediumPtr, /*Volatile=*/true, CType);
3315 
3316       Bld.CreateBr(MergeBB);
3317 
3318       CGF.EmitBlock(ElseBB);
3319       Bld.CreateBr(MergeBB);
3320 
3321       CGF.EmitBlock(MergeBB);
3322 
3323       // kmpc_barrier.
3324       CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown,
3325                                              /*EmitChecks=*/false,
3326                                              /*ForceSimpleCall=*/true);
3327 
3328       //
3329       // Warp 0 copies reduce element from transfer medium.
3330       //
3331       llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
3332       llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
3333       llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
3334 
3335       Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
3336       llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
3337           AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc);
3338 
3339       // Up to 32 threads in warp 0 are active.
3340       llvm::Value *IsActiveThread =
3341           Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
3342       Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
3343 
3344       CGF.EmitBlock(W0ThenBB);
3345 
3346       // SrcMediumPtr = &medium[tid]
3347       llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
3348           TransferMedium,
3349           {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
3350       Address SrcMediumPtr(SrcMediumPtrVal, Align);
3351       // SrcMediumVal = *SrcMediumPtr;
3352       SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
3353 
3354       // TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
3355       Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3356       llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
3357           TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
3358       Address TargetElemPtr = Address(TargetElemPtrVal, Align);
3359       TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
3360       if (NumIters > 1) {
3361         TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
3362                                 TargetElemPtr.getAlignment());
3363       }
3364 
3365       // *TargetElemPtr = SrcMediumVal;
3366       llvm::Value *SrcMediumValue =
3367           CGF.EmitLoadOfScalar(SrcMediumPtr, /*Volatile=*/true, CType, Loc);
3368       CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
3369                             CType);
3370       Bld.CreateBr(W0MergeBB);
3371 
3372       CGF.EmitBlock(W0ElseBB);
3373       Bld.CreateBr(W0MergeBB);
3374 
3375       CGF.EmitBlock(W0MergeBB);
3376 
3377       if (NumIters > 1) {
3378         Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1));
3379         CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy);
3380         CGF.EmitBranch(PrecondBB);
3381         (void)ApplyDebugLocation::CreateEmpty(CGF);
3382         CGF.EmitBlock(ExitBB);
3383       }
3384       RealTySize %= TySize;
3385     }
3386     ++Idx;
3387   }
3388 
3389   CGF.FinishFunction();
3390   return Fn;
3391 }
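
// As a concrete illustration of the helper built above: with 64 OpenMP
// threads (2 warps, so NumWarps == 2) and a 4-byte reduce element, each
// element is transferred as
//   barrier;
//   if (lane_id == 0)  smem[warp_id] = my element;     // lanes 0 and 32
//   barrier;
//   if (thread_id < 2) my element = smem[thread_id];   // threads 0 and 1
// Larger elements are processed in 4-, 2- and 1-byte chunks, with the
// counter loop emitted when a chunk size divides the element more than once.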
3392 
3393 /// Emit a helper that reduces data across two OpenMP threads (lanes)
3394 /// in the same warp.  It uses shuffle instructions to copy over data from
3395 /// a remote lane's stack.  The reduction algorithm performed is specified
3396 /// by the fourth parameter.
3397 ///
3398 /// Algorithm Versions.
3399 /// Full Warp Reduce (argument value 0):
3400 ///   This algorithm assumes that all 32 lanes are active and gathers
3401 ///   data from these 32 lanes, producing a single resultant value.
3402 /// Contiguous Partial Warp Reduce (argument value 1):
3403 ///   This algorithm assumes that only a *contiguous* subset of lanes
3404 ///   are active.  This happens for the last warp in a parallel region
3405 ///   when the user specified num_threads is not an integer multiple of
3406 ///   32.  This contiguous subset always starts with the zeroth lane.
3407 /// Partial Warp Reduce (argument value 2):
3408 ///   This algorithm gathers data from any number of lanes at any position.
3409 /// All reduced values are stored in the lowest possible lane.  The set
3410 /// of problems every algorithm addresses is a superset of those
3411 /// addressable by algorithms with a lower version number.  Overhead
3412 /// increases as algorithm version increases.
3413 ///
3414 /// Terminology
3415 /// Reduce element:
3416 ///   Reduce element refers to the individual data field with primitive
3417 ///   data types to be combined and reduced across threads.
3418 /// Reduce list:
3419 ///   Reduce list refers to a collection of local, thread-private
3420 ///   reduce elements.
3421 /// Remote Reduce list:
3422 ///   Remote Reduce list refers to a collection of remote (relative to
3423 ///   the current thread) reduce elements.
3424 ///
3425 /// We distinguish between three states of threads that are important to
3426 /// the implementation of this function.
3427 /// Alive threads:
3428 ///   Threads in a warp executing the SIMT instruction, as distinguished from
3429 ///   threads that are inactive due to divergent control flow.
3430 /// Active threads:
3431 ///   The minimal set of threads that has to be alive upon entry to this
3432 ///   function.  The computation is correct iff active threads are alive.
3433 ///   Some threads are alive but they are not active because they do not
3434 ///   contribute to the computation in any useful manner.  Turning them off
3435 ///   may introduce control flow overheads without any tangible benefits.
3436 /// Effective threads:
3437 ///   In order to comply with the argument requirements of the shuffle
3438 ///   function, we must keep all lanes holding data alive.  But at most
3439 ///   half of them perform value aggregation; we refer to this half of
3440 ///   threads as effective. The other half is simply handing off their
3441 ///   data.
3442 ///
3443 /// Procedure
3444 /// Value shuffle:
3445 ///   In this step active threads transfer data from higher lane positions
3446 ///   in the warp to lower lane positions, creating Remote Reduce list.
3447 /// Value aggregation:
3448 ///   In this step, effective threads combine their thread local Reduce list
3449 ///   with Remote Reduce list and store the result in the thread local
3450 ///   Reduce list.
3451 /// Value copy:
3452 ///   In this step, we deal with the assumption made by algorithm 2
3453 ///   (i.e. contiguity assumption).  When we have an odd number of lanes
3454 ///   active, say 2k+1, only k threads will be effective and therefore k
3455 ///   new values will be produced.  However, the Reduce list owned by the
3456 ///   (2k+1)th thread is ignored in the value aggregation.  Therefore
3457 ///   we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
3458 ///   that the contiguity assumption still holds.
3459 static llvm::Function *emitShuffleAndReduceFunction(
3460     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3461     QualType ReductionArrayTy, llvm::Function *ReduceFn, SourceLocation Loc) {
3462   ASTContext &C = CGM.getContext();
3463 
3464   // Thread local Reduce list used to host the values of data to be reduced.
3465   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3466                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3467   // Current lane id; could be logical.
3468   ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
3469                               ImplicitParamDecl::Other);
3470   // Offset of the remote source lane relative to the current lane.
3471   ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3472                                         C.ShortTy, ImplicitParamDecl::Other);
3473   // Algorithm version.  This is expected to be known at compile time.
3474   ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3475                                C.ShortTy, ImplicitParamDecl::Other);
3476   FunctionArgList Args;
3477   Args.push_back(&ReduceListArg);
3478   Args.push_back(&LaneIDArg);
3479   Args.push_back(&RemoteLaneOffsetArg);
3480   Args.push_back(&AlgoVerArg);
3481 
3482   const CGFunctionInfo &CGFI =
3483       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3484   auto *Fn = llvm::Function::Create(
3485       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3486       "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
3487   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3488   Fn->setDoesNotRecurse();
3489   if (CGM.getLangOpts().Optimize) {
3490     Fn->removeFnAttr(llvm::Attribute::NoInline);
3491     Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
3492     Fn->addFnAttr(llvm::Attribute::AlwaysInline);
3493   }
3494 
3495   CodeGenFunction CGF(CGM);
3496   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3497 
3498   CGBuilderTy &Bld = CGF.Builder;
3499 
3500   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3501   Address LocalReduceList(
3502       Bld.CreatePointerBitCastOrAddrSpaceCast(
3503           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3504                                C.VoidPtrTy, SourceLocation()),
3505           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3506       CGF.getPointerAlign());
3507 
3508   Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
3509   llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
3510       AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3511 
3512   Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
3513   llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
3514       AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3515 
3516   Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
3517   llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
3518       AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
3519 
3520   // Create a local thread-private variable to host the Reduce list
3521   // from a remote lane.
3522   Address RemoteReduceList =
3523       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
3524 
3525   // This loop iterates through the list of reduce elements and copies,
3526   // element by element, from a remote lane in the warp to RemoteReduceList,
3527   // hosted on the thread's stack.
3528   emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
3529                         LocalReduceList, RemoteReduceList,
3530                         {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
3531                          /*ScratchpadIndex=*/nullptr,
3532                          /*ScratchpadWidth=*/nullptr});
3533 
3534   // The actions to be performed on the Remote Reduce list depend
3535   // on the algorithm version.
3536   //
3537   //  if ((AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
3538   //  LaneId % 2 == 0 && Offset > 0)):
3539   //    do the reduction value aggregation
3540   //
3541   //  The thread local variable Reduce list is mutated in place to host the
3542   //  reduced data, which is the aggregated value produced from local and
3543   //  remote lanes.
3544   //
3545   //  Note that AlgoVer is expected to be a constant integer known at compile
3546   //  time.
3547   //  When AlgoVer==0, the first conjunction evaluates to true, making
3548   //    the entire predicate true during compile time.
3549   //  When AlgoVer==1, the second conjunction has only the second part to be
3550   //    evaluated during runtime.  Other conjunctions evaluate to false
3551   //    during compile time.
3552   //  When AlgoVer==2, the third conjunction has only the second part to be
3553   //    evaluated during runtime.  Other conjunctions evaluate to false
3554   //    during compile time.
3555   llvm::Value *CondAlgo0 = Bld.CreateIsNull(AlgoVerArgVal);
3556 
3557   llvm::Value *Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3558   llvm::Value *CondAlgo1 = Bld.CreateAnd(
3559       Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
3560 
3561   llvm::Value *Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
3562   llvm::Value *CondAlgo2 = Bld.CreateAnd(
3563       Algo2, Bld.CreateIsNull(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1))));
3564   CondAlgo2 = Bld.CreateAnd(
3565       CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
3566 
3567   llvm::Value *CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
3568   CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
3569 
3570   llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
3571   llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
3572   llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
3573   Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
3574 
3575   CGF.EmitBlock(ThenBB);
3576   // reduce_function(LocalReduceList, RemoteReduceList)
3577   llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3578       LocalReduceList.getPointer(), CGF.VoidPtrTy);
3579   llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3580       RemoteReduceList.getPointer(), CGF.VoidPtrTy);
3581   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3582       CGF, Loc, ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
3583   Bld.CreateBr(MergeBB);
3584 
3585   CGF.EmitBlock(ElseBB);
3586   Bld.CreateBr(MergeBB);
3587 
3588   CGF.EmitBlock(MergeBB);
3589 
3590   // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
3591   // Reduce list.
3592   Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
3593   llvm::Value *CondCopy = Bld.CreateAnd(
3594       Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
3595 
3596   llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
3597   llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
3598   llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
3599   Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
3600 
3601   CGF.EmitBlock(CpyThenBB);
3602   emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
3603                         RemoteReduceList, LocalReduceList);
3604   Bld.CreateBr(CpyMergeBB);
3605 
3606   CGF.EmitBlock(CpyElseBB);
3607   Bld.CreateBr(CpyMergeBB);
3608 
3609   CGF.EmitBlock(CpyMergeBB);
3610 
3611   CGF.FinishFunction();
3612   return Fn;
3613 }
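
// For instance (informally): with AlgoVer == 0 the reduce predicate above is
// compile-time true, so a single invocation with Offset == 4 makes every lane
// i combine its Reduce list with the list shuffled over from lane i + 4; the
// runtime caller is expected to repeat this with decreasing offsets until
// lane 0 holds the full-warp result.  With AlgoVer == 1 the lanes with
// LaneId >= Offset skip the reduction and instead copy the remote list into
// their own, which is what keeps the contiguity assumption described above
// intact.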
3614 
3615 /// This function emits a helper that copies all the reduction variables from
3616 /// the team into the provided global buffer for the reduction variables.
3617 ///
3618 /// void list_to_global_copy_func(void *buffer, int Idx, void *reduce_data)
3619 ///   For all data entries D in reduce_data:
3620 ///     Copy local D to buffer.D[Idx]
3621 static llvm::Value *emitListToGlobalCopyFunction(
3622     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3623     QualType ReductionArrayTy, SourceLocation Loc,
3624     const RecordDecl *TeamReductionRec,
3625     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3626         &VarFieldMap) {
3627   ASTContext &C = CGM.getContext();
3628 
3629   // Buffer: global reduction buffer.
3630   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3631                               C.VoidPtrTy, ImplicitParamDecl::Other);
3632   // Idx: index of the buffer.
3633   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3634                            ImplicitParamDecl::Other);
3635   // ReduceList: thread local Reduce list.
3636   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3637                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3638   FunctionArgList Args;
3639   Args.push_back(&BufferArg);
3640   Args.push_back(&IdxArg);
3641   Args.push_back(&ReduceListArg);
3642 
3643   const CGFunctionInfo &CGFI =
3644       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3645   auto *Fn = llvm::Function::Create(
3646       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3647       "_omp_reduction_list_to_global_copy_func", &CGM.getModule());
3648   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3649   Fn->setDoesNotRecurse();
3650   CodeGenFunction CGF(CGM);
3651   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3652 
3653   CGBuilderTy &Bld = CGF.Builder;
3654 
3655   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3656   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3657   Address LocalReduceList(
3658       Bld.CreatePointerBitCastOrAddrSpaceCast(
3659           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3660                                C.VoidPtrTy, Loc),
3661           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3662       CGF.getPointerAlign());
3663   QualType StaticTy = C.getRecordType(TeamReductionRec);
3664   llvm::Type *LLVMReductionsBufferTy =
3665       CGM.getTypes().ConvertTypeForMem(StaticTy);
3666   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3667       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3668       LLVMReductionsBufferTy->getPointerTo());
3669   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3670                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3671                                               /*Volatile=*/false, C.IntTy,
3672                                               Loc)};
3673   unsigned Idx = 0;
3674   for (const Expr *Private : Privates) {
3675     // Reduce element = LocalReduceList[i]
3676     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3677     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3678         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3679     // elemptr = ((CopyType*)(elemptrptr)) + I
3680     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3681         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3682     Address ElemPtr =
3683         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3684     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3685     // Global = Buffer.VD[Idx];
3686     const FieldDecl *FD = VarFieldMap.lookup(VD);
3687     LValue GlobLVal = CGF.EmitLValueForField(
3688         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3689     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
3690     GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3691     switch (CGF.getEvaluationKind(Private->getType())) {
3692     case TEK_Scalar: {
3693       llvm::Value *V = CGF.EmitLoadOfScalar(ElemPtr, /*Volatile=*/false,
3694                                             Private->getType(), Loc);
3695       CGF.EmitStoreOfScalar(V, GlobLVal);
3696       break;
3697     }
3698     case TEK_Complex: {
3699       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(
3700           CGF.MakeAddrLValue(ElemPtr, Private->getType()), Loc);
3701       CGF.EmitStoreOfComplex(V, GlobLVal, /*isInit=*/false);
3702       break;
3703     }
3704     case TEK_Aggregate:
3705       CGF.EmitAggregateCopy(GlobLVal,
3706                             CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3707                             Private->getType(), AggValueSlot::DoesNotOverlap);
3708       break;
3709     }
3710     ++Idx;
3711   }
3712 
3713   CGF.FinishFunction();
3714   return Fn;
3715 }
3716 
3717 /// This function emits a helper that reduces all the reduction variables from
3718 /// the team into the provided global buffer for the reduction variables.
3719 ///
3720 /// void list_to_global_reduce_func(void *buffer, int Idx, void *reduce_data)
3721 ///  void *GlobPtrs[];
3722 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
3723 ///  ...
3724 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
3725 ///  reduce_function(GlobPtrs, reduce_data);
3726 static llvm::Value *emitListToGlobalReduceFunction(
3727     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3728     QualType ReductionArrayTy, SourceLocation Loc,
3729     const RecordDecl *TeamReductionRec,
3730     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3731         &VarFieldMap,
3732     llvm::Function *ReduceFn) {
3733   ASTContext &C = CGM.getContext();
3734 
3735   // Buffer: global reduction buffer.
3736   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3737                               C.VoidPtrTy, ImplicitParamDecl::Other);
3738   // Idx: index of the buffer.
3739   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3740                            ImplicitParamDecl::Other);
3741   // ReduceList: thread local Reduce list.
3742   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3743                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3744   FunctionArgList Args;
3745   Args.push_back(&BufferArg);
3746   Args.push_back(&IdxArg);
3747   Args.push_back(&ReduceListArg);
3748 
3749   const CGFunctionInfo &CGFI =
3750       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3751   auto *Fn = llvm::Function::Create(
3752       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3753       "_omp_reduction_list_to_global_reduce_func", &CGM.getModule());
3754   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3755   Fn->setDoesNotRecurse();
3756   CodeGenFunction CGF(CGM);
3757   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3758 
3759   CGBuilderTy &Bld = CGF.Builder;
3760 
3761   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3762   QualType StaticTy = C.getRecordType(TeamReductionRec);
3763   llvm::Type *LLVMReductionsBufferTy =
3764       CGM.getTypes().ConvertTypeForMem(StaticTy);
3765   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3766       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3767       LLVMReductionsBufferTy->getPointerTo());
3768 
3769   // 1. Build a list of reduction variables.
3770   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3771   Address ReductionList =
3772       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3773   auto IPriv = Privates.begin();
3774   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3775                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3776                                               /*Volatile=*/false, C.IntTy,
3777                                               Loc)};
3778   unsigned Idx = 0;
3779   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3780     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3781     // Global = Buffer.VD[Idx];
3782     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3783     const FieldDecl *FD = VarFieldMap.lookup(VD);
3784     LValue GlobLVal = CGF.EmitLValueForField(
3785         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3786     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
3787     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3788     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3789     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3790       // Store array size.
3791       ++Idx;
3792       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3793       llvm::Value *Size = CGF.Builder.CreateIntCast(
3794           CGF.getVLASize(
3795                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3796               .NumElts,
3797           CGF.SizeTy, /*isSigned=*/false);
3798       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3799                               Elem);
3800     }
3801   }
3802 
3803   // Call reduce_function(GlobalReduceList, ReduceList)
3804   llvm::Value *GlobalReduceList =
3805       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
3806   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3807   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
3808       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
3809   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
3810       CGF, Loc, ReduceFn, {GlobalReduceList, ReducedPtr});
3811   CGF.FinishFunction();
3812   return Fn;
3813 }
3814 
3815 /// This function emits a helper that copies all the reduction variables from
3816 /// the provided global buffer into the team's reduce_data list.
3817 ///
3818 /// void global_to_list_copy_func(void *buffer, int Idx, void *reduce_data)
3819 ///   For all data entries D in reduce_data:
3820 ///     Copy buffer.D[Idx] to local D;
3821 static llvm::Value *emitGlobalToListCopyFunction(
3822     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3823     QualType ReductionArrayTy, SourceLocation Loc,
3824     const RecordDecl *TeamReductionRec,
3825     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3826         &VarFieldMap) {
3827   ASTContext &C = CGM.getContext();
3828 
3829   // Buffer: global reduction buffer.
3830   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3831                               C.VoidPtrTy, ImplicitParamDecl::Other);
3832   // Idx: index of the buffer.
3833   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3834                            ImplicitParamDecl::Other);
3835   // ReduceList: thread local Reduce list.
3836   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3837                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3838   FunctionArgList Args;
3839   Args.push_back(&BufferArg);
3840   Args.push_back(&IdxArg);
3841   Args.push_back(&ReduceListArg);
3842 
3843   const CGFunctionInfo &CGFI =
3844       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3845   auto *Fn = llvm::Function::Create(
3846       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3847       "_omp_reduction_global_to_list_copy_func", &CGM.getModule());
3848   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3849   Fn->setDoesNotRecurse();
3850   CodeGenFunction CGF(CGM);
3851   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3852 
3853   CGBuilderTy &Bld = CGF.Builder;
3854 
3855   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
3856   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3857   Address LocalReduceList(
3858       Bld.CreatePointerBitCastOrAddrSpaceCast(
3859           CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
3860                                C.VoidPtrTy, Loc),
3861           CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
3862       CGF.getPointerAlign());
3863   QualType StaticTy = C.getRecordType(TeamReductionRec);
3864   llvm::Type *LLVMReductionsBufferTy =
3865       CGM.getTypes().ConvertTypeForMem(StaticTy);
3866   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3867       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3868       LLVMReductionsBufferTy->getPointerTo());
3869 
3870   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3871                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3872                                               /*Volatile=*/false, C.IntTy,
3873                                               Loc)};
3874   unsigned Idx = 0;
3875   for (const Expr *Private : Privates) {
3876     // Reduce element = LocalReduceList[i]
3877     Address ElemPtrPtrAddr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
3878     llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
3879         ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
3880     // elemptr = ((CopyType*)(elemptrptr)) + I
3881     ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3882         ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
3883     Address ElemPtr =
3884         Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
3885     const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
3886     // Global = Buffer.VD[Idx];
3887     const FieldDecl *FD = VarFieldMap.lookup(VD);
3888     LValue GlobLVal = CGF.EmitLValueForField(
3889         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3890     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
3891     GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
3892     switch (CGF.getEvaluationKind(Private->getType())) {
3893     case TEK_Scalar: {
3894       llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
3895       CGF.EmitStoreOfScalar(V, ElemPtr, /*Volatile=*/false, Private->getType());
3896       break;
3897     }
3898     case TEK_Complex: {
3899       CodeGenFunction::ComplexPairTy V = CGF.EmitLoadOfComplex(GlobLVal, Loc);
3900       CGF.EmitStoreOfComplex(V, CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3901                              /*isInit=*/false);
3902       break;
3903     }
3904     case TEK_Aggregate:
3905       CGF.EmitAggregateCopy(CGF.MakeAddrLValue(ElemPtr, Private->getType()),
3906                             GlobLVal, Private->getType(),
3907                             AggValueSlot::DoesNotOverlap);
3908       break;
3909     }
3910     ++Idx;
3911   }
3912 
3913   CGF.FinishFunction();
3914   return Fn;
3915 }
3916 
3917 /// This function emits a helper that reduces all the reduction variables in
3918 /// the provided global buffer into the team's reduce_data list.
3919 ///
3920 /// void global_to_list_reduce_func(void *buffer, int Idx, void *reduce_data)
3921 ///  void *GlobPtrs[];
3922 ///  GlobPtrs[0] = (void*)&buffer.D0[Idx];
3923 ///  ...
3924 ///  GlobPtrs[N] = (void*)&buffer.DN[Idx];
3925 ///  reduce_function(reduce_data, GlobPtrs);
3926 static llvm::Value *emitGlobalToListReduceFunction(
3927     CodeGenModule &CGM, ArrayRef<const Expr *> Privates,
3928     QualType ReductionArrayTy, SourceLocation Loc,
3929     const RecordDecl *TeamReductionRec,
3930     const llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
3931         &VarFieldMap,
3932     llvm::Function *ReduceFn) {
3933   ASTContext &C = CGM.getContext();
3934 
3935   // Buffer: global reduction buffer.
3936   ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3937                               C.VoidPtrTy, ImplicitParamDecl::Other);
3938   // Idx: index of the buffer.
3939   ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
3940                            ImplicitParamDecl::Other);
3941   // ReduceList: thread local Reduce list.
3942   ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
3943                                   C.VoidPtrTy, ImplicitParamDecl::Other);
3944   FunctionArgList Args;
3945   Args.push_back(&BufferArg);
3946   Args.push_back(&IdxArg);
3947   Args.push_back(&ReduceListArg);
3948 
3949   const CGFunctionInfo &CGFI =
3950       CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
3951   auto *Fn = llvm::Function::Create(
3952       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
3953       "_omp_reduction_global_to_list_reduce_func", &CGM.getModule());
3954   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
3955   Fn->setDoesNotRecurse();
3956   CodeGenFunction CGF(CGM);
3957   CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc);
3958 
3959   CGBuilderTy &Bld = CGF.Builder;
3960 
3961   Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
3962   QualType StaticTy = C.getRecordType(TeamReductionRec);
3963   llvm::Type *LLVMReductionsBufferTy =
3964       CGM.getTypes().ConvertTypeForMem(StaticTy);
3965   llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
3966       CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
3967       LLVMReductionsBufferTy->getPointerTo());
3968 
3969   // 1. Build a list of reduction variables.
3970   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
3971   Address ReductionList =
3972       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
3973   auto IPriv = Privates.begin();
3974   llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
3975                          CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
3976                                               /*Volatile=*/false, C.IntTy,
3977                                               Loc)};
3978   unsigned Idx = 0;
3979   for (unsigned I = 0, E = Privates.size(); I < E; ++I, ++IPriv, ++Idx) {
3980     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3981     // Global = Buffer.VD[Idx];
3982     const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
3983     const FieldDecl *FD = VarFieldMap.lookup(VD);
3984     LValue GlobLVal = CGF.EmitLValueForField(
3985         CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
3986     llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobLVal.getPointer(), Idxs);
3987     llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
3988     CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
3989     if ((*IPriv)->getType()->isVariablyModifiedType()) {
3990       // Store array size.
3991       ++Idx;
3992       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
3993       llvm::Value *Size = CGF.Builder.CreateIntCast(
3994           CGF.getVLASize(
3995                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
3996               .NumElts,
3997           CGF.SizeTy, /*isSigned=*/false);
3998       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
3999                               Elem);
4000     }
4001   }
4002 
4003   // Call reduce_function(ReduceList, GlobalReduceList)
4004   llvm::Value *GlobalReduceList =
4005       CGF.EmitCastToVoidPtr(ReductionList.getPointer());
4006   Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
4007   llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
4008       AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
4009   CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
4010       CGF, Loc, ReduceFn, {ReducedPtr, GlobalReduceList});
4011   CGF.FinishFunction();
4012   return Fn;
4013 }
4014 
4015 ///
4016 /// Design of OpenMP reductions on the GPU
4017 ///
4018 /// Consider a typical OpenMP program with one or more reduction
4019 /// clauses:
4020 ///
4021 /// float foo;
4022 /// double bar;
4023 /// #pragma omp target teams distribute parallel for \
4024 ///             reduction(+:foo) reduction(*:bar)
4025 /// for (int i = 0; i < N; i++) {
4026 ///   foo += A[i]; bar *= B[i];
4027 /// }
4028 ///
4029 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
4030 /// all teams.  In our OpenMP implementation on the NVPTX device an
4031 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
4032 /// within a team are mapped to CUDA threads within a threadblock.
4033 /// Our goal is to efficiently aggregate values across all OpenMP
4034 /// threads such that:
4035 ///
4036 ///   - the compiler and runtime are logically concise, and
4037 ///   - the reduction is performed efficiently in a hierarchical
4038 ///     manner as follows: within OpenMP threads in the same warp,
4039 ///     across warps in a threadblock, and finally across teams on
4040 ///     the NVPTX device.
4041 ///
4042 /// Introduction to Decoupling
4043 ///
4044 /// We would like to decouple the compiler and the runtime so that the
4045 /// latter is ignorant of the reduction variables (number, data types)
4046 /// and the reduction operators.  This allows a simpler interface
4047 /// and implementation while still attaining good performance.
4048 ///
4049 /// Pseudocode for the aforementioned OpenMP program generated by the
4050 /// compiler is as follows:
4051 ///
4052 /// 1. Create private copies of reduction variables on each OpenMP
4053 ///    thread: 'foo_private', 'bar_private'
4054 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
4055 ///    to it and writes the result in 'foo_private' and 'bar_private'
4056 ///    respectively.
4057 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
4058 ///    and store the result on the team master:
4059 ///
4060 ///     __kmpc_nvptx_parallel_reduce_nowait_v2(...,
4061 ///        reduceData, shuffleReduceFn, interWarpCpyFn)
4062 ///
4063 ///     where:
4064 ///       struct ReduceData {
4065 ///         float *foo;
4066 ///         double *bar;
4067 ///       } reduceData
4068 ///       reduceData.foo = &foo_private
4069 ///       reduceData.bar = &bar_private
4070 ///
4071 ///     'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
4072 ///     auxiliary functions generated by the compiler that operate on
4073 ///     variables of type 'ReduceData'.  They help the runtime perform
4074 ///     algorithmic steps in a data-agnostic manner.
4075 ///
4076 ///     'shuffleReduceFn' is a pointer to a function that reduces data
4077 ///     of type 'ReduceData' across two OpenMP threads (lanes) in the
4078 ///     same warp.  It takes the following arguments as input:
4079 ///
4080 ///     a. variable of type 'ReduceData' on the calling lane,
4081 ///     b. its lane_id,
4082 ///     c. an offset relative to the current lane_id to generate a
4083 ///        remote_lane_id.  The remote lane contains the second
4084 ///        variable of type 'ReduceData' that is to be reduced.
4085 ///     d. an algorithm version parameter determining which reduction
4086 ///        algorithm to use.
4087 ///
4088 ///     'shuffleReduceFn' retrieves data from the remote lane using
4089 ///     efficient GPU shuffle intrinsics and reduces, using the
4090 ///     algorithm specified by the 4th parameter, the two operands
4091 ///     element-wise.  The result is written to the first operand.
4092 ///
4093 ///     Different reduction algorithms are implemented in different
4094 ///     runtime functions, all calling 'shuffleReduceFn' to perform
4095 ///     the essential reduction step.  Therefore, based on the 4th
4096 ///     parameter, this function behaves slightly differently to
4097 ///     cooperate with the runtime to ensure correctness under
4098 ///     different circumstances.
4099 ///
4100 ///     'InterWarpCpyFn' is a pointer to a function that transfers
4101 ///     reduced variables across warps.  It tunnels, through CUDA
4102 ///     shared memory, the thread-private data of type 'ReduceData'
4103 ///     from lane 0 of each warp to a lane in the first warp.
4104 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
4105 ///    The last team writes the global reduced value to memory.
4106 ///
4107 ///     ret = __kmpc_nvptx_teams_reduce_nowait(...,
4108 ///             reduceData, shuffleReduceFn, interWarpCpyFn,
4109 ///             scratchpadCopyFn, loadAndReduceFn)
4110 ///
4111 ///     'scratchpadCopyFn' is a helper that stores reduced
4112 ///     data from the team master to a scratchpad array in
4113 ///     global memory.
4114 ///
4115 ///     'loadAndReduceFn' is a helper that loads data from
4116 ///     the scratchpad array and reduces it with the input
4117 ///     operand.
4118 ///
4119 ///     These compiler generated functions hide address
4120 ///     calculation and alignment information from the runtime.
4121 /// 5. if ret == 1:
4122 ///     The team master of the last team stores the reduced
4123 ///     result to the globals in memory.
4124 ///     foo += reduceData.foo; bar *= reduceData.bar
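///
/// Putting steps 1-5 together, the code generated for the example above
/// behaves roughly like the following sketch.  It simply restates the steps
/// above in C-like form; the real calls carry additional arguments (source
/// location, thread id, reduction sizes) and the loop bounds come from the
/// distribute/for schedule:
///
///   float foo_p = 0; double bar_p = 1;                        // step 1
///   for (int i : my chunk) { foo_p += A[i]; bar_p *= B[i]; }  // step 2
///   reduceData.foo = &foo_p; reduceData.bar = &bar_p;
///   __kmpc_nvptx_parallel_reduce_nowait_v2(..., &reduceData,  // step 3
///       shuffleReduceFn, interWarpCpyFn);
///   ret = __kmpc_nvptx_teams_reduce_nowait(..., &reduceData,  // step 4
///       shuffleReduceFn, interWarpCpyFn, scratchpadCopyFn, loadAndReduceFn);
///   if (ret == 1) {                                            // step 5
///     foo += *reduceData.foo; bar *= *reduceData.bar;
///   }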
4125 ///
4126 ///
4127 /// Warp Reduction Algorithms
4128 ///
4129 /// On the warp level, we have three algorithms implemented in the
4130 /// OpenMP runtime depending on the number of active lanes:
4131 ///
4132 /// Full Warp Reduction
4133 ///
4134 /// The reduce algorithm within a warp where all lanes are active
4135 /// is implemented in the runtime as follows:
4136 ///
4137 /// full_warp_reduce(void *reduce_data,
4138 ///                  kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4139 ///   for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
4140 ///     ShuffleReduceFn(reduce_data, 0, offset, 0);
4141 /// }
4142 ///
4143 /// The algorithm completes in log(2, WARPSIZE) steps.
4144 ///
4145 /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
4146 /// not used; we therefore save instructions by not retrieving lane_id
4147 /// from the corresponding special registers.  The 4th parameter, which
4148 /// represents the version of the algorithm being used, is set to 0 to
4149 /// signify full warp reduction.
4150 ///
4151 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4152 ///
4153 /// #reduce_elem refers to an element in the local lane's data structure
4154 /// #remote_elem is retrieved from a remote lane
4155 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4156 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
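///
/// For instance, a full-warp sum of one 32-bit float per lane can be written
/// directly in CUDA as follows.  This is only an illustration of what the
/// runtime loop and 'ShuffleReduceFn' achieve together for a single scalar;
/// it is not the code that is emitted:
///
///   float v = foo_private;
///   for (int offset = WARPSIZE / 2; offset > 0; offset /= 2)
///     v += __shfl_down_sync(0xffffffff, v, offset);
///   // lane 0 now holds the warp-wide sum after log2(WARPSIZE) steps.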
4157 ///
4158 /// Contiguous Partial Warp Reduction
4159 ///
4160 /// This reduce algorithm is used within a warp where only the first
4161 /// 'n' (n <= WARPSIZE) lanes are active.  It is typically used when the
4162 /// number of OpenMP threads in a parallel region is not a multiple of
4163 /// WARPSIZE.  The algorithm is implemented in the runtime as follows:
4164 ///
4165 /// void
4166 /// contiguous_partial_reduce(void *reduce_data,
4167 ///                           kmp_ShuffleReductFctPtr ShuffleReduceFn,
4168 ///                           int size, int lane_id) {
4169 ///   int curr_size;
4170 ///   int offset;
4171 ///   curr_size = size;
4172 ///   offset = curr_size/2;
4173 ///   while (offset>0) {
4174 ///     ShuffleReduceFn(reduce_data, lane_id, offset, 1);
4175 ///     curr_size = (curr_size+1)/2;
4176 ///     offset = curr_size/2;
4177 ///   }
4178 /// }
4179 ///
4180 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4181 ///
4182 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4183 /// if (lane_id < offset)
4184 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
4185 /// else
4186 ///     reduce_elem = remote_elem
4187 ///
4188 /// This algorithm assumes that the data to be reduced are located in a
4189 /// contiguous subset of lanes starting from the first.  When there is
4190 /// an odd number of active lanes, the data in the last lane is not
4191 /// aggregated with any other lane's data but is instead copied over.
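///
/// As a worked example, with size == 5 the loop runs with offsets 2, 1, 1
/// (curr_size shrinks 5 -> 3 -> 2 -> 1):
///
///   offset 2: lanes 0 and 1 reduce with lanes 2 and 3; lane 2 copies lane 4.
///   offset 1: lane 0 reduces with lane 1; lane 1 copies lane 2.
///   offset 1: lane 0 reduces with lane 1, leaving the result in lane 0.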
4192 ///
4193 /// Dispersed Partial Warp Reduction
4194 ///
4195 /// This algorithm is used within a warp when any discontiguous subset of
4196 /// lanes is active.  It is used to implement the reduction operation
4197 /// across lanes in an OpenMP simd region or in a nested parallel region.
4198 ///
4199 /// void
4200 /// dispersed_partial_reduce(void *reduce_data,
4201 ///                          kmp_ShuffleReductFctPtr ShuffleReduceFn) {
4202 ///   int size, remote_id;
4203 ///   int logical_lane_id = number_of_active_lanes_before_me() * 2;
4204 ///   do {
4205 ///       remote_id = next_active_lane_id_right_after_me();
4206 ///       # the above function returns 0 if no active lane
4207 ///       # is present right after the current lane.
4208 ///       size = number_of_active_lanes_in_this_warp();
4209 ///       logical_lane_id /= 2;
4210 ///       ShuffleReduceFn(reduce_data, logical_lane_id,
4211 ///                       remote_id-1-threadIdx.x, 2);
4212 ///   } while (logical_lane_id % 2 == 0 && size > 1);
4213 /// }
4214 ///
4215 /// There is no assumption made about the initial state of the reduction.
4216 /// Any number of lanes (>=1) could be active at any position.  The reduction
4217 /// result is returned in the first active lane.
4218 ///
4219 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
4220 ///
4221 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
4222 /// if (lane_id % 2 == 0 && offset > 0)
4223 ///     reduce_elem = reduce_elem REDUCE_OP remote_elem
4224 /// else
4225 ///     reduce_elem = remote_elem
4226 ///
4227 ///
4228 /// Intra-Team Reduction
4229 ///
4230 /// This function, as implemented in the runtime call
4231 /// '__kmpc_nvptx_parallel_reduce_nowait_v2', aggregates data across OpenMP
4232 /// threads in a team.  It first reduces within a warp using the
4233 /// aforementioned algorithms.  We then proceed to gather all such
4234 /// reduced values at the first warp.
4235 ///
4236 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
4237 /// data from each of the "warp masters" (zeroth lane of each warp, where
4238 /// warp-reduced data is held) to the zeroth warp.  This step reduces (in
4239 /// a mathematical sense) the problem of reduction across warp masters in
4240 /// a block to the problem of warp reduction.
4241 ///
4242 ///
4243 /// Inter-Team Reduction
4244 ///
4245 /// Once a team has reduced its data to a single value, it is stored in
4246 /// a global scratchpad array.  Since each team has a distinct slot, this
4247 /// can be done without locking.
4248 ///
4249 /// The last team to write to the scratchpad array proceeds to reduce the
4250 /// scratchpad array.  One or more workers in the last team use the helper
4251 /// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
4252 /// the k'th worker reduces every k'th element.
4253 ///
4254 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait_v2' to
4255 /// reduce across workers and compute a globally reduced value.
4256 ///
4257 void CGOpenMPRuntimeNVPTX::emitReduction(
4258     CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
4259     ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
4260     ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
4261   if (!CGF.HaveInsertPoint())
4262     return;
4263 
4264   bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
4265 #ifndef NDEBUG
4266   bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
4267 #endif
4268 
4269   if (Options.SimpleReduction) {
4270     assert(!TeamsReduction && !ParallelReduction &&
4271            "Invalid reduction selection in emitReduction.");
4272     CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
4273                                    ReductionOps, Options);
4274     return;
4275   }
4276 
4277   assert((TeamsReduction || ParallelReduction) &&
4278          "Invalid reduction selection in emitReduction.");
4279 
4280   // Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
4281   // RedList, shuffle_reduce_func, interwarp_copy_func);
4282   // or
4283   // Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
4284   llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
4285   llvm::Value *ThreadId = getThreadID(CGF, Loc);
4286 
4287   llvm::Value *Res;
4288   ASTContext &C = CGM.getContext();
4289   // 1. Build a list of reduction variables.
4290   // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
4291   auto Size = RHSExprs.size();
4292   for (const Expr *E : Privates) {
4293     if (E->getType()->isVariablyModifiedType())
4294       // Reserve place for array size.
4295       ++Size;
4296   }
4297   llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
4298   QualType ReductionArrayTy =
4299       C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
4300                              /*IndexTypeQuals=*/0);
4301   Address ReductionList =
4302       CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
4303   auto IPriv = Privates.begin();
4304   unsigned Idx = 0;
4305   for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
4306     Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4307     CGF.Builder.CreateStore(
4308         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4309             CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
4310         Elem);
4311     if ((*IPriv)->getType()->isVariablyModifiedType()) {
4312       // Store array size.
4313       ++Idx;
4314       Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
4315       llvm::Value *Size = CGF.Builder.CreateIntCast(
4316           CGF.getVLASize(
4317                  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
4318               .NumElts,
4319           CGF.SizeTy, /*isSigned=*/false);
4320       CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
4321                               Elem);
4322     }
4323   }
4324 
4325   llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4326       ReductionList.getPointer(), CGF.VoidPtrTy);
4327   llvm::Function *ReductionFn = emitReductionFunction(
4328       Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
4329       LHSExprs, RHSExprs, ReductionOps);
4330   llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
4331   llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
4332       CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
4333   llvm::Value *InterWarpCopyFn =
4334       emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
4335 
4336   if (ParallelReduction) {
4337     llvm::Value *Args[] = {RTLoc,
4338                            ThreadId,
4339                            CGF.Builder.getInt32(RHSExprs.size()),
4340                            ReductionArrayTySize,
4341                            RL,
4342                            ShuffleAndReduceFn,
4343                            InterWarpCopyFn};
4344 
4345     Res = CGF.EmitRuntimeCall(
4346         createNVPTXRuntimeFunction(
4347             OMPRTL_NVPTX__kmpc_nvptx_parallel_reduce_nowait_v2),
4348         Args);
4349   } else {
4350     assert(TeamsReduction && "expected teams reduction.");
4351     llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
4352     llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
4353     int Cnt = 0;
4354     for (const Expr *DRE : Privates) {
4355       PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
4356       ++Cnt;
4357     }
4358     const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
4359         CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
4360         C.getLangOpts().OpenMPCUDAReductionBufNum);
4361     TeamsReductions.push_back(TeamReductionRec);
4362     if (!KernelTeamsReductionPtr) {
4363       KernelTeamsReductionPtr = new llvm::GlobalVariable(
4364           CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
4365           llvm::GlobalValue::InternalLinkage, nullptr,
4366           "_openmp_teams_reductions_buffer_$_$ptr");
4367     }
4368     llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
4369         Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
4370         /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
4371     llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
4372         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4373     llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
4374         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4375         ReductionFn);
4376     llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
4377         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
4378     llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
4379         CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
4380         ReductionFn);
4381 
4382     llvm::Value *Args[] = {
4383         RTLoc,
4384         ThreadId,
4385         GlobalBufferPtr,
4386         CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
4387         RL,
4388         ShuffleAndReduceFn,
4389         InterWarpCopyFn,
4390         GlobalToBufferCpyFn,
4391         GlobalToBufferRedFn,
4392         BufferToGlobalCpyFn,
4393         BufferToGlobalRedFn};
4394 
4395     Res = CGF.EmitRuntimeCall(
4396         createNVPTXRuntimeFunction(
4397             OMPRTL_NVPTX__kmpc_nvptx_teams_reduce_nowait_v2),
4398         Args);
4399   }
4400 
4401   // 5. Build if (res == 1)
4402   llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.reduction.done");
4403   llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.then");
4404   llvm::Value *Cond = CGF.Builder.CreateICmpEQ(
4405       Res, llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1));
4406   CGF.Builder.CreateCondBr(Cond, ThenBB, ExitBB);
4407 
4408   // 6. Build then branch: where we have reduced values in the master
4409   //    thread in each team.
4410   //    __kmpc_end_reduce{_nowait}(<gtid>);
4411   //    break;
4412   CGF.EmitBlock(ThenBB);
4413 
4414   // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
4415   auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps,
4416                     this](CodeGenFunction &CGF, PrePostActionTy &Action) {
4417     auto IPriv = Privates.begin();
4418     auto ILHS = LHSExprs.begin();
4419     auto IRHS = RHSExprs.begin();
4420     for (const Expr *E : ReductionOps) {
4421       emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
4422                                   cast<DeclRefExpr>(*IRHS));
4423       ++IPriv;
4424       ++ILHS;
4425       ++IRHS;
4426     }
4427   };
4428   llvm::Value *EndArgs[] = {ThreadId};
4429   RegionCodeGenTy RCG(CodeGen);
4430   NVPTXActionTy Action(
4431       nullptr, llvm::None,
4432       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
4433       EndArgs);
4434   RCG.setAction(Action);
4435   RCG(CGF);
4436   // There is no need to emit line number for unconditional branch.
4437   (void)ApplyDebugLocation::CreateEmpty(CGF);
4438   CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4439 }
4440 
4441 const VarDecl *
4442 CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
4443                                          const VarDecl *NativeParam) const {
4444   if (!NativeParam->getType()->isReferenceType())
4445     return NativeParam;
4446   QualType ArgType = NativeParam->getType();
4447   QualifierCollector QC;
4448   const Type *NonQualTy = QC.strip(ArgType);
4449   QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4450   if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
4451     if (Attr->getCaptureKind() == OMPC_map) {
4452       PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4453                                                         LangAS::opencl_global);
4454     } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
4455                PointeeTy.isConstant(CGM.getContext())) {
4456       PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
4457                                                         LangAS::opencl_generic);
4458     }
4459   }
4460   ArgType = CGM.getContext().getPointerType(PointeeTy);
4461   QC.addRestrict();
4462   enum { NVPTX_local_addr = 5 };
4463   QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
4464   ArgType = QC.apply(CGM.getContext(), ArgType);
4465   if (isa<ImplicitParamDecl>(NativeParam))
4466     return ImplicitParamDecl::Create(
4467         CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
4468         NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
4469   return ParmVarDecl::Create(
4470       CGM.getContext(),
4471       const_cast<DeclContext *>(NativeParam->getDeclContext()),
4472       NativeParam->getBeginLoc(), NativeParam->getLocation(),
4473       NativeParam->getIdentifier(), ArgType,
4474       /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
4475 }
4476 
4477 Address
4478 CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
4479                                           const VarDecl *NativeParam,
4480                                           const VarDecl *TargetParam) const {
4481   assert(NativeParam != TargetParam &&
4482          NativeParam->getType()->isReferenceType() &&
4483          "Native arg must not be the same as target arg.");
4484   Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
4485   QualType NativeParamType = NativeParam->getType();
4486   QualifierCollector QC;
4487   const Type *NonQualTy = QC.strip(NativeParamType);
4488   QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
4489   unsigned NativePointeeAddrSpace =
4490       CGF.getContext().getTargetAddressSpace(NativePointeeTy);
4491   QualType TargetTy = TargetParam->getType();
4492   llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
4493       LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
4494   // First cast to generic.
4495   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4496       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4497                       /*AddrSpace=*/0));
4498   // Cast from generic to native address space.
4499   TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4500       TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
4501                       NativePointeeAddrSpace));
4502   Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
4503   CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
4504                         NativeParamType);
4505   return NativeParamAddr;
4506 }
4507 
4508 void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
4509     CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
4510     ArrayRef<llvm::Value *> Args) const {
4511   SmallVector<llvm::Value *, 4> TargetArgs;
4512   TargetArgs.reserve(Args.size());
4513   auto *FnType = OutlinedFn.getFunctionType();
4514   for (unsigned I = 0, E = Args.size(); I < E; ++I) {
4515     if (FnType->isVarArg() && FnType->getNumParams() <= I) {
4516       TargetArgs.append(std::next(Args.begin(), I), Args.end());
4517       break;
4518     }
4519     llvm::Type *TargetType = FnType->getParamType(I);
4520     llvm::Value *NativeArg = Args[I];
4521     if (!TargetType->isPointerTy()) {
4522       TargetArgs.emplace_back(NativeArg);
4523       continue;
4524     }
4525     llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
4526         NativeArg,
4527         NativeArg->getType()->getPointerElementType()->getPointerTo());
4528     TargetArgs.emplace_back(
4529         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
4530   }
4531   CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
4532 }
4533 
4534 /// Emit function which wraps the outline parallel region
4535 /// and controls the arguments which are passed to this function.
4536 /// The wrapper ensures that the outlined function is called
4537 /// with the correct arguments when data is shared.
4538 llvm::Function *CGOpenMPRuntimeNVPTX::createParallelDataSharingWrapper(
4539     llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
4540   ASTContext &Ctx = CGM.getContext();
4541   const auto &CS = *D.getCapturedStmt(OMPD_parallel);
4542 
4543   // Create a function that takes as argument the source thread.
4544   FunctionArgList WrapperArgs;
4545   QualType Int16QTy =
4546       Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
4547   QualType Int32QTy =
4548       Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
4549   ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4550                                      /*Id=*/nullptr, Int16QTy,
4551                                      ImplicitParamDecl::Other);
4552   ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
4553                                /*Id=*/nullptr, Int32QTy,
4554                                ImplicitParamDecl::Other);
4555   WrapperArgs.emplace_back(&ParallelLevelArg);
4556   WrapperArgs.emplace_back(&WrapperArg);
4557 
4558   const CGFunctionInfo &CGFI =
4559       CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
4560 
4561   auto *Fn = llvm::Function::Create(
4562       CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
4563       Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
4564   CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
4565   Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
4566   Fn->setDoesNotRecurse();
4567 
4568   CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
4569   CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs,
4570                     D.getBeginLoc(), D.getBeginLoc());
4571 
4572   const auto *RD = CS.getCapturedRecordDecl();
4573   auto CurField = RD->field_begin();
4574 
4575   Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
4576                                                       /*Name=*/".zero.addr");
4577   CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
4578   // Get the array of arguments.
4579   SmallVector<llvm::Value *, 8> Args;
4580 
4581   Args.emplace_back(CGF.GetAddrOfLocalVar(&WrapperArg).getPointer());
4582   Args.emplace_back(ZeroAddr.getPointer());
4583 
4584   CGBuilderTy &Bld = CGF.Builder;
4585   auto CI = CS.capture_begin();
4586 
4587   // Use global memory for data sharing.
4588   // Handle passing of global args to workers.
4589   Address GlobalArgs =
4590       CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "global_args");
4591   llvm::Value *GlobalArgsPtr = GlobalArgs.getPointer();
4592   llvm::Value *DataSharingArgs[] = {GlobalArgsPtr};
4593   CGF.EmitRuntimeCall(
4594       createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_get_shared_variables),
4595       DataSharingArgs);
4596 
4597   // Retrieve the shared variables from the list of references returned
4598   // by the runtime. Pass the variables to the outlined function.
4599   Address SharedArgListAddress = Address::invalid();
4600   if (CS.capture_size() > 0 ||
4601       isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4602     SharedArgListAddress = CGF.EmitLoadOfPointer(
4603         GlobalArgs, CGF.getContext()
4604                         .getPointerType(CGF.getContext().getPointerType(
4605                             CGF.getContext().VoidPtrTy))
4606                         .castAs<PointerType>());
4607   }
4608   unsigned Idx = 0;
4609   if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
4610     Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4611     Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4612         Src, CGF.SizeTy->getPointerTo());
4613     llvm::Value *LB = CGF.EmitLoadOfScalar(
4614         TypedAddress,
4615         /*Volatile=*/false,
4616         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4617         cast<OMPLoopDirective>(D).getLowerBoundVariable()->getExprLoc());
4618     Args.emplace_back(LB);
4619     ++Idx;
4620     Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
4621     TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4622         Src, CGF.SizeTy->getPointerTo());
4623     llvm::Value *UB = CGF.EmitLoadOfScalar(
4624         TypedAddress,
4625         /*Volatile=*/false,
4626         CGF.getContext().getPointerType(CGF.getContext().getSizeType()),
4627         cast<OMPLoopDirective>(D).getUpperBoundVariable()->getExprLoc());
4628     Args.emplace_back(UB);
4629     ++Idx;
4630   }
4631   if (CS.capture_size() > 0) {
4632     ASTContext &CGFContext = CGF.getContext();
4633     for (unsigned I = 0, E = CS.capture_size(); I < E; ++I, ++CI, ++CurField) {
4634       QualType ElemTy = CurField->getType();
4635       Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
4636       Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
4637           Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
4638       llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
4639                                               /*Volatile=*/false,
4640                                               CGFContext.getPointerType(ElemTy),
4641                                               CI->getLocation());
4642       if (CI->capturesVariableByCopy() &&
4643           !CI->getCapturedVar()->getType()->isAnyPointerType()) {
4644         Arg = castValueToType(CGF, Arg, ElemTy, CGFContext.getUIntPtrType(),
4645                               CI->getLocation());
4646       }
4647       Args.emplace_back(Arg);
4648     }
4649   }
4650 
4651   emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedParallelFn, Args);
4652   CGF.FinishFunction();
4653   return Fn;
4654 }
4655 
4656 void CGOpenMPRuntimeNVPTX::emitFunctionProlog(CodeGenFunction &CGF,
4657                                               const Decl *D) {
4658   if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
4659     return;
4660 
4661   assert(D && "Expected function or captured|block decl.");
4662   assert(FunctionGlobalizedDecls.count(CGF.CurFn) == 0 &&
4663          "Function is registered already.");
4664   assert((!TeamAndReductions.first || TeamAndReductions.first == D) &&
4665          "Team is set but not processed.");
4666   const Stmt *Body = nullptr;
4667   bool NeedToDelayGlobalization = false;
4668   if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
4669     Body = FD->getBody();
4670   } else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
4671     Body = BD->getBody();
4672   } else if (const auto *CD = dyn_cast<CapturedDecl>(D)) {
4673     Body = CD->getBody();
4674     NeedToDelayGlobalization = CGF.CapturedStmtInfo->getKind() == CR_OpenMP;
4675     if (NeedToDelayGlobalization &&
4676         getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD)
4677       return;
4678   }
4679   if (!Body)
4680     return;
4681   CheckVarsEscapingDeclContext VarChecker(CGF, TeamAndReductions.second);
4682   VarChecker.Visit(Body);
4683   const RecordDecl *GlobalizedVarsRecord =
4684       VarChecker.getGlobalizedRecord(IsInTTDRegion);
4685   TeamAndReductions.first = nullptr;
4686   TeamAndReductions.second.clear();
4687   ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
4688       VarChecker.getEscapedVariableLengthDecls();
4689   if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
4690     return;
4691   auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
4692   I->getSecond().MappedParams =
4693       std::make_unique<CodeGenFunction::OMPMapVars>();
4694   I->getSecond().GlobalRecord = GlobalizedVarsRecord;
4695   I->getSecond().EscapedParameters.insert(
4696       VarChecker.getEscapedParameters().begin(),
4697       VarChecker.getEscapedParameters().end());
4698   I->getSecond().EscapedVariableLengthDecls.append(
4699       EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
4700   DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
4701   for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4702     assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4703     const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4704     Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
4705   }
4706   if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
4707     CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
4708     VarChecker.Visit(Body);
4709     I->getSecond().SecondaryGlobalRecord =
4710         VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
4711     I->getSecond().SecondaryLocalVarData.emplace();
4712     DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
4713     for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
4714       assert(VD->isCanonicalDecl() && "Expected canonical declaration");
4715       const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
4716       Data.insert(
4717           std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
4718     }
4719   }
4720   if (!NeedToDelayGlobalization) {
4721     emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
4722     struct GlobalizationScope final : EHScopeStack::Cleanup {
4723       GlobalizationScope() = default;
4724 
4725       void Emit(CodeGenFunction &CGF, Flags flags) override {
4726         static_cast<CGOpenMPRuntimeNVPTX &>(CGF.CGM.getOpenMPRuntime())
4727             .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
4728       }
4729     };
4730     CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
4731   }
4732 }
4733 
4734 Address CGOpenMPRuntimeNVPTX::getAddressOfLocalVariable(CodeGenFunction &CGF,
4735                                                         const VarDecl *VD) {
4736   if (VD && VD->hasAttr<OMPAllocateDeclAttr>()) {
4737     const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4738     switch (A->getAllocatorType()) {
4739       // Use the default allocator here as by default local vars are
4740       // threadlocal.
4741     case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4742     case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4743     case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4744     case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4745       // Follow the user decision - use default allocation.
4746       return Address::invalid();
4747     case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4748       // TODO: implement support for user-defined allocators.
4749       return Address::invalid();
4750     case OMPAllocateDeclAttr::OMPConstMemAlloc: {
4751       llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4752       auto *GV = new llvm::GlobalVariable(
4753           CGM.getModule(), VarTy, /*isConstant=*/false,
4754           llvm::GlobalValue::InternalLinkage,
4755           llvm::Constant::getNullValue(VarTy), VD->getName(),
4756           /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4757           CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant));
4758       CharUnits Align = CGM.getContext().getDeclAlign(VD);
4759       GV->setAlignment(Align.getAsAlign());
4760       return Address(GV, Align);
4761     }
4762     case OMPAllocateDeclAttr::OMPPTeamMemAlloc: {
4763       llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4764       auto *GV = new llvm::GlobalVariable(
4765           CGM.getModule(), VarTy, /*isConstant=*/false,
4766           llvm::GlobalValue::InternalLinkage,
4767           llvm::Constant::getNullValue(VarTy), VD->getName(),
4768           /*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
4769           CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
4770       CharUnits Align = CGM.getContext().getDeclAlign(VD);
4771       GV->setAlignment(Align.getAsAlign());
4772       return Address(GV, Align);
4773     }
4774     case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4775     case OMPAllocateDeclAttr::OMPCGroupMemAlloc: {
4776       llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
4777       auto *GV = new llvm::GlobalVariable(
4778           CGM.getModule(), VarTy, /*isConstant=*/false,
4779           llvm::GlobalValue::InternalLinkage,
4780           llvm::Constant::getNullValue(VarTy), VD->getName());
4781       CharUnits Align = CGM.getContext().getDeclAlign(VD);
4782       GV->setAlignment(Align.getAsAlign());
4783       return Address(GV, Align);
4784     }
4785     }
4786   }
4787 
4788   if (getDataSharingMode(CGM) != CGOpenMPRuntimeNVPTX::Generic)
4789     return Address::invalid();
4790 
4791   VD = VD->getCanonicalDecl();
4792   auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
4793   if (I == FunctionGlobalizedDecls.end())
4794     return Address::invalid();
4795   auto VDI = I->getSecond().LocalVarData.find(VD);
4796   if (VDI != I->getSecond().LocalVarData.end())
4797     return VDI->second.PrivateAddr;
4798   if (VD->hasAttrs()) {
4799     for (specific_attr_iterator<OMPReferencedVarAttr> IT(VD->attr_begin()),
4800          E(VD->attr_end());
4801          IT != E; ++IT) {
4802       auto VDI = I->getSecond().LocalVarData.find(
4803           cast<VarDecl>(cast<DeclRefExpr>(IT->getRef())->getDecl())
4804               ->getCanonicalDecl());
4805       if (VDI != I->getSecond().LocalVarData.end())
4806         return VDI->second.PrivateAddr;
4807     }
4808   }
4809 
4810   return Address::invalid();
4811 }
4812 
4813 void CGOpenMPRuntimeNVPTX::functionFinished(CodeGenFunction &CGF) {
4814   FunctionGlobalizedDecls.erase(CGF.CurFn);
4815   CGOpenMPRuntime::functionFinished(CGF);
4816 }
4817 
4818 void CGOpenMPRuntimeNVPTX::getDefaultDistScheduleAndChunk(
4819     CodeGenFunction &CGF, const OMPLoopDirective &S,
4820     OpenMPDistScheduleClauseKind &ScheduleKind,
4821     llvm::Value *&Chunk) const {
4822   if (getExecutionMode() == CGOpenMPRuntimeNVPTX::EM_SPMD) {
4823     ScheduleKind = OMPC_DIST_SCHEDULE_static;
4824     Chunk = CGF.EmitScalarConversion(getNVPTXNumThreads(CGF),
4825         CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4826         S.getIterationVariable()->getType(), S.getBeginLoc());
4827     return;
4828   }
4829   CGOpenMPRuntime::getDefaultDistScheduleAndChunk(
4830       CGF, S, ScheduleKind, Chunk);
4831 }
4832 
4833 void CGOpenMPRuntimeNVPTX::getDefaultScheduleAndChunk(
4834     CodeGenFunction &CGF, const OMPLoopDirective &S,
4835     OpenMPScheduleClauseKind &ScheduleKind,
4836     const Expr *&ChunkExpr) const {
4837   ScheduleKind = OMPC_SCHEDULE_static;
4838   // Chunk size is 1 in this case.
4839   llvm::APInt ChunkSize(32, 1);
4840   ChunkExpr = IntegerLiteral::Create(CGF.getContext(), ChunkSize,
4841       CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
4842       SourceLocation());
4843 }
4844 
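// Illustrative sketch (assumed user code): a worksharing loop in a target
// region written without a schedule clause receives the schedule(static, 1)
// default built above, so iterations are dealt out cyclically, one per thread.
//
//   #pragma omp parallel for
//   for (int i = 0; i < n; ++i)
//     out[i] = in[i] * 2;   // thread t runs i = t, t + nthreads, ...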
4845 void CGOpenMPRuntimeNVPTX::adjustTargetSpecificDataForLambdas(
4846     CodeGenFunction &CGF, const OMPExecutableDirective &D) const {
4847   assert(isOpenMPTargetExecutionDirective(D.getDirectiveKind()) &&
4848          "Expected target-based directive.");
4849   const CapturedStmt *CS = D.getCapturedStmt(OMPD_target);
4850   for (const CapturedStmt::Capture &C : CS->captures()) {
4851     // For target-based directives, fix up variables that lambdas capture
4852     // by reference.
4853     if (!C.capturesVariable())
4854       continue;
4855     const VarDecl *VD = C.getCapturedVar();
4856     const auto *RD = VD->getType()
4857                          .getCanonicalType()
4858                          .getNonReferenceType()
4859                          ->getAsCXXRecordDecl();
4860     if (!RD || !RD->isLambda())
4861       continue;
4862     Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4863     LValue VDLVal;
4864     if (VD->getType().getCanonicalType()->isReferenceType())
4865       VDLVal = CGF.EmitLoadOfReferenceLValue(VDAddr, VD->getType());
4866     else
4867       VDLVal = CGF.MakeAddrLValue(
4868           VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
4869     llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
4870     FieldDecl *ThisCapture = nullptr;
4871     RD->getCaptureFields(Captures, ThisCapture);
4872     if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
4873       LValue ThisLVal =
4874           CGF.EmitLValueForFieldInitialization(VDLVal, ThisCapture);
4875       llvm::Value *CXXThis = CGF.LoadCXXThis();
4876       CGF.EmitStoreOfScalar(CXXThis, ThisLVal);
4877     }
4878     for (const LambdaCapture &LC : RD->captures()) {
4879       if (LC.getCaptureKind() != LCK_ByRef)
4880         continue;
4881       const VarDecl *VD = LC.getCapturedVar();
4882       if (!CS->capturesVariable(VD))
4883         continue;
4884       auto It = Captures.find(VD);
4885       assert(It != Captures.end() && "Found lambda capture without field.");
4886       LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
4887       Address VDAddr = CGF.GetAddrOfLocalVar(VD);
4888       if (VD->getType().getCanonicalType()->isReferenceType())
4889         VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
4890                                                VD->getType().getCanonicalType())
4891                      .getAddress();
4892       CGF.EmitStoreOfScalar(VDAddr.getPointer(), VarLVal);
4893     }
4894   }
4895 }
4896 
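// Illustrative sketch (assumed user code, hypothetical names): a lambda with
// by-reference captures used inside a target region. The fix-up above rewrites
// the lambda's capture fields so that 'sum' and 'this' refer to the device
// copies rather than to stale host addresses.
//
//   struct Accumulator {
//     int scale = 2;
//     void run(int n) {
//       int sum = 0;
//       auto body = [&sum, this](int i) { sum += i * scale; };
//       #pragma omp target map(tofrom : sum)
//       body(n);
//     }
//   };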
4897 unsigned CGOpenMPRuntimeNVPTX::getDefaultFirstprivateAddressSpace() const {
4898   return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
4899 }
4900 
4901 bool CGOpenMPRuntimeNVPTX::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
4902                                                             LangAS &AS) {
4903   if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
4904     return false;
4905   const auto *A = VD->getAttr<OMPAllocateDeclAttr>();
4906   switch (A->getAllocatorType()) {
4907   // Not supported, fall back to the default mem space.
4908   case OMPAllocateDeclAttr::OMPDefaultMemAlloc:
4909   case OMPAllocateDeclAttr::OMPThreadMemAlloc:
4910   case OMPAllocateDeclAttr::OMPLargeCapMemAlloc:
4911   case OMPAllocateDeclAttr::OMPCGroupMemAlloc:
4912   case OMPAllocateDeclAttr::OMPHighBWMemAlloc:
4913   case OMPAllocateDeclAttr::OMPLowLatMemAlloc:
4914     AS = LangAS::Default;
4915     return true;
4916   case OMPAllocateDeclAttr::OMPConstMemAlloc:
4917     AS = LangAS::cuda_constant;
4918     return true;
4919   case OMPAllocateDeclAttr::OMPPTeamMemAlloc:
4920     AS = LangAS::cuda_shared;
4921     return true;
4922   case OMPAllocateDeclAttr::OMPUserDefinedMemAlloc:
4923     llvm_unreachable("Expected predefined allocator for variables with "
4924                      "static storage.");
4925   }
4926   return false;
4927 }
4928 
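// Illustrative sketch (assumed user code): allocate directives on variables
// with static storage duration are what the classification above applies to.
// With the hypothetical declarations below, 'table' would be placed in the
// cuda_constant address space and 'team_tmp' in cuda_shared.
//
//   int table[16];
//   #pragma omp allocate(table) allocator(omp_const_mem_alloc)
//   static int team_tmp;
//   #pragma omp allocate(team_tmp) allocator(omp_pteam_mem_alloc)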
4929 // Get the current CudaArch and ignore any unknown values.
4930 static CudaArch getCudaArch(CodeGenModule &CGM) {
4931   if (!CGM.getTarget().hasFeature("ptx"))
4932     return CudaArch::UNKNOWN;
4933   llvm::StringMap<bool> Features;
4934   CGM.getTarget().initFeatureMap(Features, CGM.getDiags(),
4935                                  CGM.getTarget().getTargetOpts().CPU,
4936                                  CGM.getTarget().getTargetOpts().Features);
4937   for (const auto &Feature : Features) {
4938     if (Feature.getValue()) {
4939       CudaArch Arch = StringToCudaArch(Feature.getKey());
4940       if (Arch != CudaArch::UNKNOWN)
4941         return Arch;
4942     }
4943   }
4944   return CudaArch::UNKNOWN;
4945 }
4946 
4947 /// Check whether the target architecture supports unified addressing, which
4948 /// is required by the OpenMP requires clause "unified_shared_memory".
4949 void CGOpenMPRuntimeNVPTX::checkArchForUnifiedAddressing(
4950     const OMPRequiresDecl *D) {
4951   for (const OMPClause *Clause : D->clauselists()) {
4952     if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
4953       switch (getCudaArch(CGM)) {
4954       case CudaArch::SM_20:
4955       case CudaArch::SM_21:
4956       case CudaArch::SM_30:
4957       case CudaArch::SM_32:
4958       case CudaArch::SM_35:
4959       case CudaArch::SM_37:
4960       case CudaArch::SM_50:
4961       case CudaArch::SM_52:
4962       case CudaArch::SM_53:
4963       case CudaArch::SM_60:
4964       case CudaArch::SM_61:
4965       case CudaArch::SM_62:
4966         CGM.Error(Clause->getBeginLoc(),
4967                   "Target architecture does not support unified addressing");
4968         return;
4969       case CudaArch::SM_70:
4970       case CudaArch::SM_72:
4971       case CudaArch::SM_75:
4972       case CudaArch::GFX600:
4973       case CudaArch::GFX601:
4974       case CudaArch::GFX700:
4975       case CudaArch::GFX701:
4976       case CudaArch::GFX702:
4977       case CudaArch::GFX703:
4978       case CudaArch::GFX704:
4979       case CudaArch::GFX801:
4980       case CudaArch::GFX802:
4981       case CudaArch::GFX803:
4982       case CudaArch::GFX810:
4983       case CudaArch::GFX900:
4984       case CudaArch::GFX902:
4985       case CudaArch::GFX904:
4986       case CudaArch::GFX906:
4987       case CudaArch::GFX908:
4988       case CudaArch::GFX909:
4989       case CudaArch::GFX1010:
4990       case CudaArch::GFX1011:
4991       case CudaArch::GFX1012:
4992       case CudaArch::UNKNOWN:
4993         break;
4994       case CudaArch::LAST:
4995         llvm_unreachable("Unexpected Cuda arch.");
4996       }
4997     }
4998   }
4999   CGOpenMPRuntime::checkArchForUnifiedAddressing(D);
5000 }
5001 
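// Illustrative sketch (assumed user code): the check above rejects a
// translation unit like the one below when it is compiled for an architecture
// older than sm_70, since the switch only accepts unified addressing from
// Volta-class devices onwards.
//
//   #pragma omp requires unified_shared_memory
//
//   void zero(int *p, int n) {
//     #pragma omp target teams distribute parallel for
//     for (int i = 0; i < n; ++i)
//       p[i] = 0;
//   }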
5002 /// Get number of SMs and number of blocks per SM.
5003 static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
5004   std::pair<unsigned, unsigned> Data;
5005   if (CGM.getLangOpts().OpenMPCUDANumSMs)
5006     Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
5007   if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
5008     Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
5009   if (Data.first && Data.second)
5010     return Data;
5011   switch (getCudaArch(CGM)) {
5012   case CudaArch::SM_20:
5013   case CudaArch::SM_21:
5014   case CudaArch::SM_30:
5015   case CudaArch::SM_32:
5016   case CudaArch::SM_35:
5017   case CudaArch::SM_37:
5018   case CudaArch::SM_50:
5019   case CudaArch::SM_52:
5020   case CudaArch::SM_53:
5021     return {16, 16};
5022   case CudaArch::SM_60:
5023   case CudaArch::SM_61:
5024   case CudaArch::SM_62:
5025     return {56, 32};
5026   case CudaArch::SM_70:
5027   case CudaArch::SM_72:
5028   case CudaArch::SM_75:
5029     return {84, 32};
5030   case CudaArch::GFX600:
5031   case CudaArch::GFX601:
5032   case CudaArch::GFX700:
5033   case CudaArch::GFX701:
5034   case CudaArch::GFX702:
5035   case CudaArch::GFX703:
5036   case CudaArch::GFX704:
5037   case CudaArch::GFX801:
5038   case CudaArch::GFX802:
5039   case CudaArch::GFX803:
5040   case CudaArch::GFX810:
5041   case CudaArch::GFX900:
5042   case CudaArch::GFX902:
5043   case CudaArch::GFX904:
5044   case CudaArch::GFX906:
5045   case CudaArch::GFX908:
5046   case CudaArch::GFX909:
5047   case CudaArch::GFX1010:
5048   case CudaArch::GFX1011:
5049   case CudaArch::GFX1012:
5050   case CudaArch::UNKNOWN:
5051     break;
5052   case CudaArch::LAST:
5053     llvm_unreachable("Unexpected Cuda arch.");
5054   }
5055   llvm_unreachable("Unexpected NVPTX target without ptx feature.");
5056 }
5057 
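// Worked example (assumed numbers, matching the table above): for sm_70 the
// pair is {84, 32}, so clear() below sizes the statically globalized buffer as
// StaticTy[84][32], one record slot per (SM, block-per-SM) pair. If both the
// OpenMPCUDANumSMs and OpenMPCUDABlocksPerSM language options are set, those
// values override the per-architecture defaults.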
5058 void CGOpenMPRuntimeNVPTX::clear() {
5059   if (!GlobalizedRecords.empty()) {
5060     ASTContext &C = CGM.getContext();
5061     llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
5062     llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
5063     RecordDecl *StaticRD = C.buildImplicitRecord(
5064         "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
5065     StaticRD->startDefinition();
5066     RecordDecl *SharedStaticRD = C.buildImplicitRecord(
5067         "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
5068     SharedStaticRD->startDefinition();
5069     for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
5070       if (Records.Records.empty())
5071         continue;
5072       unsigned Size = 0;
5073       unsigned RecAlignment = 0;
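      // The loop below packs each record at the next boundary of its own
      // alignment and re-aligns the running size afterwards. Worked example
      // (assumed sizes): records of {size 12, align 4} and {size 24, align 8}
      // give alignTo(alignTo(0, 4) + 12, 4) = 12, then
      // alignTo(alignTo(12, 8) + 24, 8) = 40 bytes in total.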
5074       for (const RecordDecl *RD : Records.Records) {
5075         QualType RDTy = C.getRecordType(RD);
5076         unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
5077         RecAlignment = std::max(RecAlignment, Alignment);
5078         unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
5079         Size =
5080             llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
5081       }
5082       Size = llvm::alignTo(Size, RecAlignment);
5083       llvm::APInt ArySize(/*numBits=*/64, Size);
5084       QualType SubTy = C.getConstantArrayType(
5085           C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
5086       const bool UseSharedMemory = Size <= SharedMemorySize;
5087       auto *Field =
5088           FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
5089                             SourceLocation(), SourceLocation(), nullptr, SubTy,
5090                             C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
5091                             /*BW=*/nullptr, /*Mutable=*/false,
5092                             /*InitStyle=*/ICIS_NoInit);
5093       Field->setAccess(AS_public);
5094       if (UseSharedMemory) {
5095         SharedStaticRD->addDecl(Field);
5096         SharedRecs.push_back(&Records);
5097       } else {
5098         StaticRD->addDecl(Field);
5099         GlobalRecs.push_back(&Records);
5100       }
5101       Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
5102       Records.UseSharedMemory->setInitializer(
5103           llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
5104     }
5105     // Allocate a SharedMemorySize-byte buffer in shared memory.
5106     // FIXME: nvlink does not handle weak linkage correctly (objects with
5107     // different sizes are reported as erroneous).
5108     // Restore this code as soon as nvlink is fixed.
5109     if (!SharedStaticRD->field_empty()) {
5110       llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
5111       QualType SubTy = C.getConstantArrayType(
5112           C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
5113       auto *Field = FieldDecl::Create(
5114           C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
5115           C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
5116           /*BW=*/nullptr, /*Mutable=*/false,
5117           /*InitStyle=*/ICIS_NoInit);
5118       Field->setAccess(AS_public);
5119       SharedStaticRD->addDecl(Field);
5120     }
5121     SharedStaticRD->completeDefinition();
5122     if (!SharedStaticRD->field_empty()) {
5123       QualType StaticTy = C.getRecordType(SharedStaticRD);
5124       llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
5125       auto *GV = new llvm::GlobalVariable(
5126           CGM.getModule(), LLVMStaticTy,
5127           /*isConstant=*/false, llvm::GlobalValue::CommonLinkage,
5128           llvm::Constant::getNullValue(LLVMStaticTy),
5129           "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
5130           llvm::GlobalValue::NotThreadLocal,
5131           C.getTargetAddressSpace(LangAS::cuda_shared));
5132       auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
5133           GV, CGM.VoidPtrTy);
5134       for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
5135         Rec->Buffer->replaceAllUsesWith(Replacement);
5136         Rec->Buffer->eraseFromParent();
5137       }
5138     }
5139     StaticRD->completeDefinition();
5140     if (!StaticRD->field_empty()) {
5141       QualType StaticTy = C.getRecordType(StaticRD);
5142       std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
5143       llvm::APInt Size1(32, SMsBlockPerSM.second);
5144       QualType Arr1Ty =
5145           C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
5146                                  /*IndexTypeQuals=*/0);
5147       llvm::APInt Size2(32, SMsBlockPerSM.first);
5148       QualType Arr2Ty =
5149           C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
5150                                  /*IndexTypeQuals=*/0);
5151       llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
5152       // FIXME: nvlink does not handle weak linkage correctly (objects with
5153       // different sizes are reported as erroneous).
5154       // Restore CommonLinkage as soon as nvlink is fixed.
5155       auto *GV = new llvm::GlobalVariable(
5156           CGM.getModule(), LLVMArr2Ty,
5157           /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
5158           llvm::Constant::getNullValue(LLVMArr2Ty),
5159           "_openmp_static_glob_rd_$_");
5160       auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
5161           GV, CGM.VoidPtrTy);
5162       for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
5163         Rec->Buffer->replaceAllUsesWith(Replacement);
5164         Rec->Buffer->eraseFromParent();
5165       }
5166     }
5167   }
5168   if (!TeamsReductions.empty()) {
5169     ASTContext &C = CGM.getContext();
5170     RecordDecl *StaticRD = C.buildImplicitRecord(
5171         "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
5172     StaticRD->startDefinition();
5173     for (const RecordDecl *TeamReductionRec : TeamsReductions) {
5174       QualType RecTy = C.getRecordType(TeamReductionRec);
5175       auto *Field = FieldDecl::Create(
5176           C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
5177           C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
5178           /*BW=*/nullptr, /*Mutable=*/false,
5179           /*InitStyle=*/ICIS_NoInit);
5180       Field->setAccess(AS_public);
5181       StaticRD->addDecl(Field);
5182     }
5183     StaticRD->completeDefinition();
5184     QualType StaticTy = C.getRecordType(StaticRD);
5185     llvm::Type *LLVMReductionsBufferTy =
5186         CGM.getTypes().ConvertTypeForMem(StaticTy);
5187     // FIXME: nvlink does not handle weak linkage correctly (objects with
5188     // different sizes are reported as erroneous).
5189     // Restore CommonLinkage as soon as nvlink is fixed.
5190     auto *GV = new llvm::GlobalVariable(
5191         CGM.getModule(), LLVMReductionsBufferTy,
5192         /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
5193         llvm::Constant::getNullValue(LLVMReductionsBufferTy),
5194         "_openmp_teams_reductions_buffer_$_");
5195     KernelTeamsReductionPtr->setInitializer(
5196         llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
5197                                                              CGM.VoidPtrTy));
5198   }
5199   CGOpenMPRuntime::clear();
5200 }
5201