//===- AMDGPULDSUtils.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AMDGPU LDS related helper utility functions.
//
//===----------------------------------------------------------------------===//

#include "AMDGPULDSUtils.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace llvm {

namespace AMDGPU {

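// Returns true if \p Func has a calling convention that is treated as a
// module entry point (e.g. a kernel).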
bool isKernelCC(Function *Func) {
  return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
}

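// Returns the effective alignment of \p GV, falling back to the ABI
// alignment of its value type from \p DL when no explicit alignment is
// given.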
Align getAlign(DataLayout const &DL, const GlobalVariable *GV) {
  return DL.getValueOrABITypeAlignment(GV->getPointerAlignment(DL),
                                       GV->getValueType());
}

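// Returns true if the LDS variable used by \p InitialUser must be lowered.
// Users reached through constant expressions are walked transitively; a user
// is considered safe if it resolves to a global in \p UsedList or is an
// instruction inside a kernel. Any other user forces lowering.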
bool userRequiresLowering(const SmallPtrSetImpl<GlobalValue *> &UsedList,
                          User *InitialUser) {
  // Any LDS variable can be lowered by moving it into the created struct.
  // Each variable so lowered is allocated in every kernel, so variables
  // whose users are all known to be safe to lower without the transform
  // are left unchanged.
  SmallPtrSet<User *, 8> Visited;
  SmallVector<User *, 16> Stack;
  Stack.push_back(InitialUser);

  while (!Stack.empty()) {
    User *V = Stack.pop_back_val();
    Visited.insert(V);

    if (auto *G = dyn_cast<GlobalValue>(V->stripPointerCasts())) {
      if (UsedList.contains(G)) {
        continue;
      }
    }

    if (auto *I = dyn_cast<Instruction>(V)) {
      if (isKernelCC(I->getFunction())) {
        continue;
      }
    }

    if (auto *E = dyn_cast<ConstantExpr>(V)) {
      // A constant expression is not itself a safe or unsafe use; visit its
      // users instead.
      for (User *EU : E->users()) {
        if (Visited.insert(EU).second) {
          Stack.push_back(EU);
        }
      }
      continue;
    }

    // Unknown user, conservatively lower the variable.
    return true;
  }

  return false;
}

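// Collects the LDS (addrspace(3)) globals in \p M that are eligible for
// lowering: those with an undef initializer and at least one user that
// requires the transform.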
std::vector<GlobalVariable *>
findVariablesToLower(Module &M,
                     const SmallPtrSetImpl<GlobalValue *> &UsedList) {
  std::vector<GlobalVariable *> LocalVars;
  for (auto &GV : M.globals()) {
    if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
      continue;
    }
    if (!GV.hasInitializer()) {
      // An addrspace(3) variable without an initializer implies CUDA/HIP
      // extern __shared__. The semantics of such a variable appear to be
      // that all extern __shared__ variables alias one another, in which
      // case this transform is not required.
      continue;
    }
    if (!isa<UndefValue>(GV.getInitializer())) {
      // Initializers are unimplemented for the local address space.
      // Leave such variables in place for consistent error reporting.
      continue;
    }
    if (GV.isConstant()) {
      // A constant undef variable can't be written to, and any load is
      // undef, so it should be eliminated by the optimizer. It could be
      // dropped by the back end if not. This pass skips over it.
      continue;
    }
    if (std::none_of(GV.user_begin(), GV.user_end(), [&](User *U) {
          return userRequiresLowering(UsedList, U);
        })) {
      // All users are safe; leave the variable where it is.
      continue;
    }
    LocalVars.push_back(&GV);
  }
  return LocalVars;
}

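// Builds the set of globals named by the llvm.used and llvm.compiler.used
// module-level arrays of \p M.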
SmallPtrSet<GlobalValue *, 32> getUsedList(Module &M) {
  SmallPtrSet<GlobalValue *, 32> UsedList;

  SmallVector<GlobalValue *, 32> TmpVec;
  collectUsedGlobalVariables(M, TmpVec, /*CompilerUsed=*/true);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  TmpVec.clear();
  collectUsedGlobalVariables(M, TmpVec, /*CompilerUsed=*/false);
  UsedList.insert(TmpVec.begin(), TmpVec.end());

  return UsedList;
}

} // end namespace AMDGPU

} // end namespace llvm