//===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds amdgpu.uniform metadata to IR values so this information
/// can be used during instruction selection.
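///
/// For example, a uniform global load in a kernel whose pointer comes
/// straight from a kernel argument ends up with a trivial GEP that carries
/// the annotations (illustrative IR only; the names and types below are not
/// taken from this pass):
///
///   %ptr.anno = getelementptr i32, i32 addrspace(1)* %arg, i64 0,
///                 !amdgpu.uniform !0, !amdgpu.noclobber !0
///   %val = load i32, i32 addrspace(1)* %ptr.anno
///   !0 = !{}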
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-annotate-uniform"

using namespace llvm;

namespace {

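// Legacy pass that visits every branch and load in a function and attaches
// "amdgpu.uniform" (and, for loads proven not to be clobbered, also
// "amdgpu.noclobber") metadata for the benefit of instruction selection.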
class AMDGPUAnnotateUniformValues : public FunctionPass,
                       public InstVisitor<AMDGPUAnnotateUniformValues> {
  LegacyDivergenceAnalysis *DA;
  MemoryDependenceResults *MDR;
  LoopInfo *LI;
  DenseMap<Value*, GetElementPtrInst*> noClobberClones;
  bool isEntryFunc;

public:
  static char ID;
  AMDGPUAnnotateUniformValues() :
    FunctionPass(ID) { }
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  bool isClobberedInFunction(LoadInst *Load);
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

char AMDGPUAnnotateUniformValues::ID = 0;

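// The annotations are empty metadata nodes: it is the presence of the
// metadata kind, not its contents, that later stages check for.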
static void setUniformMetadata(Instruction *I) {
  I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
}
static void setNoClobberMetadata(Instruction *I) {
  I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
}

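// Collect into Set every block that can reach Root, i.e. all of Root's
// transitive predecessors.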
static void DFS(BasicBlock *Root, SetVector<BasicBlock *> &Set) {
  for (auto I : predecessors(Root))
    if (Set.insert(I))
      DFS(I, Set);
}

bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst *Load) {
  // 1. Get the loop containing Load->getParent().
  // 2. If there is one, add all blocks of the outermost enclosing loop to the
  //    checklist and check them for writes; otherwise the DFS over all
  //    predecessors starts directly from the load's block.
  // 3. Run a DFS over all predecessors, starting from the outermost loop
  //    header.
  SetVector<BasicBlock *> Checklist;
  BasicBlock *Start = Load->getParent();
  Checklist.insert(Start);
  const Value *Ptr = Load->getPointerOperand();
  const Loop *L = LI->getLoopFor(Start);
  if (L) {
    const Loop *P = L;
    do {
      L = P;
      P = P->getParentLoop();
    } while (P);
    Checklist.insert(L->block_begin(), L->block_end());
    Start = L->getHeader();
  }

  DFS(Start, Checklist);
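  // Query MemoryDependenceAnalysis in every collected block; a clobbering or
  // unknown dependency on the loaded pointer means the load may be clobbered.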
  for (auto &BB : Checklist) {
    BasicBlock::iterator StartIt = (!L && (BB == Load->getParent())) ?
      BasicBlock::iterator(Load) : BB->end();
    auto Q = MDR->getPointerDependencyFrom(MemoryLocation(Ptr), true,
                                           StartIt, BB, Load);
    if (Q.isClobber() || Q.isUnknown())
      return true;
  }
  return false;
}

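// If the divergence analysis proves the branch uniform, tag the block's
// terminator (the branch itself) so instruction selection can make use of
// the uniform control flow.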
void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
  if (DA->isUniform(&I))
    setUniformMetadata(I.getParent()->getTerminator());
}

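// Annotate loads whose pointer is uniform. If the pointer is not itself an
// instruction (e.g. a kernel argument or a global) and the load is an
// unclobbered global-memory load, a trivial GEP clone is created at the
// function entry so there is an instruction to carry the metadata.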
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
  Value *Ptr = I.getPointerOperand();
  if (!DA->isUniform(Ptr))
    return;
  auto isGlobalLoad = [&](LoadInst &Load)->bool {
    return Load.getPointerAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
  };
  // We only track clobbers up to the function boundary; a FunctionPass cannot
  // look beyond it. Hence we can only guarantee that memory is not clobbered
  // for memory operations that are live into entry points.
  bool NotClobbered = isEntryFunc && !isClobberedInFunction(&I);
  Instruction *PtrI = dyn_cast<Instruction>(Ptr);
  if (!PtrI && NotClobbered && isGlobalLoad(I)) {
    if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
      // Look up an existing GEP clone for this pointer.
      if (noClobberClones.count(Ptr)) {
        PtrI = noClobberClones[Ptr];
      } else {
        // Create a trivial GEP of the pointer value. The zero-index GEP
        // computes the same address as Ptr; it exists only to give the
        // metadata an Instruction to attach to.
        Function *F = I.getParent()->getParent();
        Value *Idx = Constant::getIntegerValue(
          Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
        // Insert the GEP at the function entry so it dominates all uses.
        PtrI = GetElementPtrInst::Create(
          Ptr->getType()->getPointerElementType(), Ptr,
          ArrayRef<Value*>(Idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
        // Remember the clone so further loads of the same pointer reuse it.
        noClobberClones[Ptr] = cast<GetElementPtrInst>(PtrI);
      }
      I.replaceUsesOfWith(Ptr, PtrI);
    }
  }

  if (PtrI) {
    setUniformMetadata(PtrI);
    if (NotClobbered)
      setNoClobberMetadata(PtrI);
  }
}

bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  return false;
}

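// Fetch the required analyses, visit every instruction of the function, and
// drop the per-function cache of pointer-GEP clones afterwards.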
bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DA  = &getAnalysis<LegacyDivergenceAnalysis>();
  MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  LI  = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  isEntryFunc = AMDGPU::isEntryFunctionCC(F.getCallingConv());

  visit(F);
  noClobberClones.clear();
  return true;
}

FunctionPass *
llvm::createAMDGPUAnnotateUniformValues() {
  return new AMDGPUAnnotateUniformValues();
}