//===-- InferAddressSpaces.cpp - Infer address spaces --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared` which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero, so-called specific, address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use in %1 is
// redirected through %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
// codegen is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to handle cycles. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %entry ], [ %y2, %loop ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression down
// to the generic address space. The analysis completes once it reaches a fixed
// point.
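//
// For instance, a sketch of the join using the NVPTX numbering, where
// addrspace(3) is shared and addrspace(1) is global:
//   join(uninitialized, 3) = 3
//   join(3, 3)             = 3
//   join(3, 1)             = generic
//   join(generic, any)     = generic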
//
// Second, IR rewriting in Step 2 also needs to handle cycles. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, %entry ], [ undef, %loop ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %entry ], [ %y2', %loop ]
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
93 #include "llvm/ADT/DenseSet.h"
94 #include "llvm/ADT/Optional.h"
95 #include "llvm/ADT/SetVector.h"
96 #include "llvm/Analysis/TargetTransformInfo.h"
97 #include "llvm/IR/Function.h"
98 #include "llvm/IR/InstIterator.h"
99 #include "llvm/IR/Instructions.h"
100 #include "llvm/IR/Operator.h"
101 #include "llvm/Support/Debug.h"
102 #include "llvm/Support/raw_ostream.h"
103 #include "llvm/Transforms/Utils/Local.h"
104 #include "llvm/Transforms/Utils/ValueMapper.h"
105 
106 #define DEBUG_TYPE "infer-address-spaces"
107 
108 using namespace llvm;
109 
110 namespace {
111 static const unsigned UninitializedAddressSpace = ~0u;
112 
113 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
114 
115 /// \brief InferAddressSpaces
116 class InferAddressSpaces: public FunctionPass {
117   /// Target specific address space which uses of should be replaced if
118   /// possible.
119   unsigned FlatAddrSpace;
120 
121 public:
122   static char ID;
123 
124   InferAddressSpaces() : FunctionPass(ID) {}
125 
126   void getAnalysisUsage(AnalysisUsage &AU) const override {
127     AU.setPreservesCFG();
128     AU.addRequired<TargetTransformInfoWrapperPass>();
129   }
130 
131   bool runOnFunction(Function &F) override;
132 
133 private:
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace) const;

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(const std::vector<Value *> &Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;

  bool handleComplexPtrUse(User &U, Value *OldV, Value *NewV) const;
  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool
  rewriteWithNewAddressSpaces(const std::vector<Value *> &Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II,
                                Value *OldV, Value *NewV) const;
  void collectRewritableIntrinsicOperands(
    IntrinsicInst *II,
    std::vector<std::pair<Value *, bool>> *PostorderStack,
    DenseSet<Value *> *Visited) const;

  std::vector<Value *> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
};
} // end anonymous namespace

char InferAddressSpaces::ID = 0;

namespace llvm {
void initializeInferAddressSpacesPass(PassRegistry &);
}

INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                false, false)

// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
static bool isAddressExpression(const Value &V) {
  if (!isa<Operator>(V))
    return false;

  switch (cast<Operator>(V).getOpcode()) {
  case Instruction::PHI:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
  assert(isAddressExpression(V));
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

// TODO: Move logic to TTI?
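// Rewrites the pointer argument of the few intrinsics handled below (currently
// objectsize and the amdgcn atomic inc/dec intrinsics): the call is re-declared
// on the new pointer type and its pointer operand is replaced with NewV.
// Returns true if the call was rewritten.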
bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                  Value *OldV,
                                                  Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl
      = Intrinsic::getDeclaration(M, II->getIntrinsicID(), { DestTy, SrcTy });
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  default:
    return false;
  }
}

// TODO: Move logic to TTI?
void InferAddressSpaces::collectRewritableIntrinsicOperands(
  IntrinsicInst *II,
  std::vector<std::pair<Value *, bool>> *PostorderStack,
  DenseSet<Value *> *Visited) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
    appendsFlatAddressExpressionToPostorderStack(
      II->getArgOperand(0), PostorderStack, Visited);
    break;
  default:
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
  Value *V, std::vector<std::pair<Value *, bool>> *PostorderStack,
  DenseSet<Value *> *Visited) const {
  assert(V->getType()->isPointerTy());
  if (isAddressExpression(*V) &&
      V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
    if (Visited->insert(V).second)
      PostorderStack->push_back(std::make_pair(V, false));
  }
}

// Returns all flat address expressions in function F. The elements are ordered
// in postorder.
std::vector<Value *>
InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  std::vector<std::pair<Value*, bool>> PostorderStack;
  // The set of visited expressions.
  DenseSet<Value*> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(
      Ptr, &PostorderStack, &Visited);
  };

  // We only explore address expressions that are reachable from loads and
  // stores for now because we aim at generating faster loads and stores.
  for (Instruction &I : instructions(F)) {
    if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, &PostorderStack, &Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    }
  }

  std::vector<Value *> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().second) {
      Postorder.push_back(PostorderStack.back().first);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().second = true;
    for (Value *PtrOperand : getPointerOperands(*PostorderStack.back().first)) {
      appendsFlatAddressExpressionToPostorderStack(
        PtrOperand, &PostorderStack, &Visited);
    }
  }
  return Postorder;
}

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
  const Use &OperandUse, unsigned NewAddrSpace,
  const ValueToValueMapTy &ValueWithNewAddrSpace,
  SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();
  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(
    Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace));
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
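//
// For example (a sketch): when cloning
//   %p = addrspacecast float addrspace(3)* %src to float*
// into addrspace(3), no new instruction is needed; the function simply returns
// %src, or a bitcast of %src if the pointee types differ.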
static Value *cloneInstructionWithNewAddressSpace(
  Instruction *I, unsigned NewAddrSpace,
  const ValueToValueMapTy &ValueWithNewAddrSpace,
  SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Type *NewPtrType =
    I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
      GEP->getSourceElementType(), NewPointerOperands[0],
      SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
  ConstantExpr *CE, unsigned NewAddrSpace,
  const ValueToValueMapTy &ValueWithNewAddrSpace) {
  Type *TargetType =
    CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  // Computes the operands of the new constant expression.
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      NewOperands.push_back(cast<Constant>(NewOperand));
    } else {
      // Otherwise, reuses the old operand.
      NewOperands.push_back(Operand);
    }
  }

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
      NewOperands, TargetType, /*OnlyIfReduced=*/false,
      NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpaces::cloneValueWithNewAddressSpace(
  Value *V, unsigned NewAddrSpace,
  const ValueToValueMapTy &ValueWithNewAddrSpace,
  SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(isAddressExpression(*V) &&
         V->getType()->getPointerAddressSpace() == FlatAddrSpace);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
      I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
    cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1,
                                               unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}

bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  const TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  FlatAddrSpace = TTI.getFlatAddressSpace();
  if (FlatAddrSpace == UninitializedAddressSpace)
    return false;

  // Collects all flat address expressions in postorder.
  std::vector<Value *> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, &F);
}

void InferAddressSpaces::inferAddressSpaces(
  const std::vector<Value *> &Postorder,
  ValueToAddrSpaceMapTy *InferredAddrSpace) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of V according to the address spaces
    // of its operands.
    DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If V is updated, adds its users to the worklist because their address
    // spaces may also need to be updated.
    DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, there is nothing to do if User is already inferred as
      // flat (the bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}

Optional<unsigned> InferAddressSpaces::updateAddressSpace(
  const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;
  for (Value *PtrOperand : getPointerOperands(V)) {
    unsigned OperandAS;
    if (InferredAddrSpace.count(PtrOperand))
      OperandAS = InferredAddrSpace.lookup(PtrOperand);
    else
      OperandAS = PtrOperand->getType()->getPointerAddressSpace();
    NewAS = joinAddressSpaces(NewAS, OperandAS);

    // join(flat, *) = flat. So we can break if NewAS is already flat.
    if (NewAS == FlatAddrSpace)
      break;
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}

/// \returns true if \p U is the pointer operand of a memory instruction with
/// a single pointer operand that can have its address space changed by simply
/// mutating the use to a new value.
static bool isSimplePointerUseValidToReplace(Use &U) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() && !LI->isVolatile();

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() && !SI->isVolatile();

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() && !RMW->isVolatile();

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst)) {
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           !CmpX->isVolatile();
  }

  return false;
}

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
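///
/// For example (a sketch): a memcpy whose flat destination pointer has been
/// inferred to point to addrspace(3) is rebuilt with IRBuilder, which
/// re-mangles the intrinsic on the new pointer types, e.g.
///   call void @llvm.memcpy.p3i8.p0i8.i64(i8 addrspace(3)* %dst, i8* %src, ...)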
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI,
                                     Value *OldV, Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(),
                   MSI->getLength(), MSI->getAlignment(),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, Src, MTI->getLength(),
                     MTI->getAlignment(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, Src, MTI->getLength(),
                      MTI->getAlignment(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}

// Returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpaces::isSafeToCastConstAddrSpace(Constant *C,
                                                    unsigned NewAS) const {
  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast it
    // off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)), NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}

static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpaces::rewriteWithNewAddressSpaces(
  const std::vector<Value *> &Postorder,
  const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
        V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  // Replaces the uses of the old address expressions with the new ones.
  for (Value *V : Postorder) {
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    DEBUG(dbgs() << "Replacing the uses of " << *V
                 << "\n  with\n  " << *NewV << '\n');

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E; ) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
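      // (For instance, both operands of `icmp eq float* %p, %p` are uses of %p
      // by the same icmp, so both are skipped together.)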
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(U)) {
        // If V is used as the pointer operand of a compatible memory operation,
        // sets the pointer operand to NewV. This replacement does not change
        // the element type, so the resultant load/store is still valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Handle more complex cases like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx,
                ConstantExpr::getAddrSpaceCast(KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *I = dyn_cast<Instruction>(V)) {
          BasicBlock::iterator InsertPos = std::next(I->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty())
      RecursivelyDeleteTriviallyDeadInstructions(V);
  }

  return true;
}

FunctionPass *llvm::createInferAddressSpacesPass() {
  return new InferAddressSpaces();
}