//===- InferAddressSpaces.cpp - Infer address spaces ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to prove, for as many generic pointers as possible, that each
// points into a single specific address space. In the above example, it can
// prove that %1 only points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation considers phi, bitcast,
// addrspacecast, getelementptr, and select) start with the uninitialized
// address space. The monotone transfer function moves the address space of a
// pointer down a lattice path from uninitialized to specific and then to
// generic. A join operation of two different specific address spaces pushes
// the expression down to the generic address space. The analysis completes
// once it reaches a fixed point.
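//
// For example, on the loop above the analysis proceeds roughly as follows
// (a sketch; the exact visit order depends on the worklist):
//   %generic.input: uninitialized -> addrspace(3)  (source of addrspacecast)
//   %y:             uninitialized -> addrspace(3)  (join of %generic.input's
//                                      addrspace(3) with uninitialized %y2)
//   %y2:            uninitialized -> addrspace(3)  (propagated through gep)
// Revisiting %y then computes join(addrspace(3), addrspace(3)) =
// addrspace(3), so a fixed point is reached with all three values inferred
// as addrspace(3).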
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

static const unsigned UninitializedAddressSpace =
    std::numeric_limits<unsigned>::max();

namespace {

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;

/// InferAddressSpaces
class InferAddressSpaces : public FunctionPass {
  /// The target's generic (flat) address space; pointer uses in this address
  /// space are rewritten to specific address spaces where possible.
  unsigned FlatAddrSpace;

public:
  static char ID;

  InferAddressSpaces() :
    FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {}
  InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;

private:
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace) const;

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;

  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool rewriteWithNewAddressSpaces(
      const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
      const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> &PostorderStack,
    DenseSet<Value *> &Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II,
                                Value *OldV, Value *NewV) const;
  void collectRewritableIntrinsicOperands(
    IntrinsicInst *II,
    std::vector<std::pair<Value *, bool>> &PostorderStack,
    DenseSet<Value *> &Visited) const;

  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;
};

} // end anonymous namespace

char InferAddressSpaces::ID = 0;

namespace llvm {

void initializeInferAddressSpacesPass(PassRegistry &);

} // end namespace llvm

INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                false, false)

// Returns true if V is an address expression.
// TODO: Currently, we only consider phi, bitcast, addrspacecast, select, and
// getelementptr operators.
static bool isAddressExpression(const Value &V) {
  if (!isa<Operator>(V))
    return false;

  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI:
    assert(Op.getType()->isPointerTy());
    return true;
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  case Instruction::Select:
    return Op.getType()->isPointerTy();
  default:
    return false;
  }
}
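
// For illustration, each of
//   %p = getelementptr float, float* %q, i64 1
//   %s = select i1 %c, float* %p, float* %q
// is an address expression under the definition above, whereas a load or an
// icmp of two pointers is not, even though it has pointer operands.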

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2> getPointerOperands(const Value &V) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

// TODO: Move logic to TTI?
bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                  Value *OldV,
                                                  Value *NewV) const {
  Module *M = II->getModule();

  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax: {
    const ConstantInt *IsVolatile = dyn_cast<ConstantInt>(II->getArgOperand(4));
    if (!IsVolatile || !IsVolatile->isZero())
      return false;

    LLVM_FALLTHROUGH;
  }
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  default:
    return false;
  }
}
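
// For illustration (sketch IR; the exact @llvm.objectsize argument list
// varies across LLVM versions), once %p has been rewritten to addrspace(3),
//   %n = call i64 @llvm.objectsize.i64.p0i8(i8* %p, ...)
// is remangled to
//   %n = call i64 @llvm.objectsize.i64.p3i8(i8 addrspace(3)* %p.3, ...)
// by swapping the argument and re-selecting the intrinsic declaration for the
// new pointer type (%p.3 is a hypothetical name for the rewritten pointer).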

// TODO: Move logic to TTI?
void InferAddressSpaces::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, std::vector<std::pair<Value *, bool>> &PostorderStack,
    DenseSet<Value *> &Visited) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize:
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
  case Intrinsic::amdgcn_ds_fadd:
  case Intrinsic::amdgcn_ds_fmin:
  case Intrinsic::amdgcn_ds_fmax:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpaces::appendsFlatAddressExpressionToPostorderStack(
    Value *V, std::vector<std::pair<Value *, bool>> &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPointerTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE) && Visited.insert(CE).second)
      PostorderStack.push_back(std::make_pair(CE, false));

    return;
  }

  if (isAddressExpression(*V) &&
      V->getType()->getPointerAddressSpace() == FlatAddrSpace) {
    if (Visited.insert(V).second) {
      PostorderStack.push_back(std::make_pair(V, false));

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}
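
// For illustration (a sketch reusing the global @a from the file header), for
//   %v = load float, float* getelementptr ([10 x float], [10 x float]*
//          addrspacecast ([10 x float] addrspace(3)* @a to [10 x float]*),
//          i64 0, i64 0)
// the outer getelementptr constant expression is pushed immediately, and the
// nested addrspacecast is pushed when the getelementptr is explored, so both
// end up in the resulting postorder.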

// Returns all flat address expressions in function F. The elements are
// ordered in postorder.
std::vector<WeakTrackingVH>
InferAddressSpaces::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  std::vector<std::pair<Value *, bool>> PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack,
                                                 Visited);
  };

  // Look at operations that may be worth accelerating by moving them to a
  // known address space. We aim primarily at loads and stores, but pure
  // addressing calculations may also be faster.
  for (Instruction &I : instructions(F)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      if (!GEP->getType()->isVectorTy())
        PushPtrOperand(GEP->getPointerOperand());
    } else if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
      if (!ASC->getType()->isVectorTy())
        PushPtrOperand(ASC->getPointerOperand());
    }
  }

  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().first;
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().second) {
      if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
        Postorder.push_back(TopVal);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().second = true;
    for (Value *PtrOperand : getPointerOperands(*TopVal)) {
      appendsFlatAddressExpressionToPostorderStack(PtrOperand, PostorderStack,
                                                   Visited);
    }
  }
  return Postorder;
}
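
// For illustration, for the IR in the file header comment,
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
// the returned postorder is [%0, %1]: operands precede their users, so later
// passes over the list always see an expression's operands first.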

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy =
      Operand->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(NewPtrTy);
}

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
static Value *cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Type *NewPtrType =
      I->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->idx_begin(), GEP->idx_end()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select:
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
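
// For illustration, if the inferred address space of
//   %x = addrspacecast float addrspace(3)* %p to float*
// is addrspace(3), the "clone" of %x is simply the existing value %p (or a
// bitcast of %p if the pointee types differ), which is why the function above
// returns a Value* rather than an Instruction*.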

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
  ConstantExpr *CE, unsigned NewAddrSpace,
  const ValueToValueMapTy &ValueWithNewAddrSpace) {
  Type *TargetType =
    CE->getType()->getPointerElementType()->getPointerTo(NewAddrSpace);

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::Select) {
    Constant *Src0 = CE->getOperand(1);
    Constant *Src1 = CE->getOperand(2);
    if (Src0->getType()->getPointerAddressSpace() ==
        Src1->getType()->getPointerAddressSpace()) {

      return ConstantExpr::getSelect(
          CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
          ConstantExpr::getAddrSpaceCast(Src1, TargetType));
    }
  }

  // Computes the operands of the new constant expression.
  bool IsNew = false;
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      IsNew = true;
      NewOperands.push_back(cast<Constant>(NewOperand));
      continue;
    }
    if (auto CExpr = dyn_cast<ConstantExpr>(Operand))
      if (Value *NewOperand = cloneConstantExprWithNewAddressSpace(
              CExpr, NewAddrSpace, ValueWithNewAddrSpace)) {
        IsNew = true;
        NewOperands.push_back(cast<Constant>(NewOperand));
        continue;
      }
    // Otherwise, reuses the old operand.
    NewOperands.push_back(Operand);
  }

  // If !IsNew, we would be replacing the value with itself. Replaced values
  // are assumed to be wrapped in an addrspacecast later, so drop the mapping
  // now by returning nullptr.
  if (!IsNew)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
      NewOperands, TargetType, /*OnlyIfReduced=*/false,
      NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}
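
// For illustration (a sketch, assuming @g is a global in addrspace(3) being
// rewritten to addrspace(3)): the constant expression
//   getelementptr (float, float* addrspacecast (float addrspace(3)* @g
//                                               to float*), i64 1)
// becomes
//   getelementptr (float, float addrspace(3)* @g, i64 1)
// because the nested addrspacecast is cloned first (in postorder), so its
// addrspace(3) replacement is already available when the getelementptr's
// operands are remapped.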

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpaces::cloneValueWithNewAddressSpace(
  Value *V, unsigned NewAddrSpace,
  const ValueToValueMapTy &ValueWithNewAddrSpace,
  SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(isAddressExpression(*V) &&
         V->getType()->getPointerAddressSpace() == FlatAddrSpace);

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
      I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
    cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpaces::joinAddressSpaces(unsigned AS1,
                                               unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}
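
// For illustration, with AMDGPU-style numbering where 0 is flat, 3 is local,
// and 5 is private (the numbers are target-specific), and U denoting
// UninitializedAddressSpace:
//   joinAddressSpaces(U, 3) == 3  // first specific operand wins
//   joinAddressSpaces(3, 3) == 3  // agreement is preserved
//   joinAddressSpaces(3, 5) == 0  // disagreement falls to flat
//   joinAddressSpaces(0, 3) == 0  // flat absorbs everything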

bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  const TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  if (FlatAddrSpace == UninitializedAddressSpace) {
    FlatAddrSpace = TTI.getFlatAddressSpace();
    if (FlatAddrSpace == UninitializedAddressSpace)
      return false;
  }

  // Collects all flat address expressions in postorder.
  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(TTI, Postorder, InferredAddrSpace, &F);
}

// Constants need to be tracked through RAUW to handle cases with nested
// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpaces::inferAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of V according to the address spaces
    // of its operands.
    LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If any updates are made, adds the users of V to the worklist because
    // their address spaces may be updated as well.
    LLVM_DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, nothing to do if User is already inferred as flat (the
      // bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}

Optional<unsigned> InferAddressSpaces::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end()) ?
      I->second : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end()) ?
      J->second : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return None;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    for (Value *PtrOperand : getPointerOperands(V)) {
      auto I = InferredAddrSpace.find(PtrOperand);
      unsigned OperandAS = I != InferredAddrSpace.end() ?
        I->second : PtrOperand->getType()->getPointerAddressSpace();

      // join(flat, *) = flat. So we can break if NewAS is already flat.
      NewAS = joinAddressSpaces(NewAS, OperandAS);
      if (NewAS == FlatAddrSpace)
        break;
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return None;
  return NewAS;
}
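
// For illustration (a sketch), given
//   %s = select i1 %c, float* %p, float* null
// with %p still uninitialized, updateAddressSpace returns None so the decision
// is deferred. Once %p is inferred to, say, addrspace(3), null can be safely
// addrspacecast to addrspace(3), so %s is inferred as addrspace(3) as well.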

/// Returns true if \p U is the pointer operand of a memory instruction with a
/// single pointer operand that can have its address space changed by simply
/// mutating the use to a new value. If the memory instruction is volatile,
/// return true only if the target allows the memory instruction to be volatile
/// in the new address space.
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
                                             Use &U, unsigned AddrSpace) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();
  bool VolatileIsAllowed = false;
  if (auto *I = dyn_cast<Instruction>(Inst))
    VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !LI->isVolatile());

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !SI->isVolatile());

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !RMW->isVolatile());

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !CmpX->isVolatile());

  return false;
}

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(),
                   MSI->getLength(), MSI->getDestAlignment(),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, MTI->getDestAlignment(),
                     Src, MTI->getSourceAlignment(),
                     MTI->getLength(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, MTI->getDestAlignment(),
                      Src, MTI->getSourceAlignment(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}
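
// For illustration (sketch IR; intrinsic signatures vary by LLVM version),
// once %dst has been rewritten to addrspace(3),
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
// is rebuilt through IRBuilder as
//   call void @llvm.memcpy.p3i8.p0i8.i64(i8 addrspace(3)* %dst.3, i8* %src,
//                                        i64 %n, i1 false)
// with the tbaa, alias.scope, and noalias metadata carried over to the new
// call (%dst.3 is a hypothetical name for the rewritten pointer).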

// Returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpaces::isSafeToCastConstAddrSpace(Constant *C,
                                                    unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast it
    // off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}
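
// For illustration (with hypothetical globals @g3 in addrspace(3) and @g5 in
// addrspace(5), and 0 as the flat address space):
//   isSafeToCastConstAddrSpace(null, 3)                     -> true
//   isSafeToCastConstAddrSpace(undef, 3)                    -> true
//   isSafeToCastConstAddrSpace(addrspacecast @g3 to i8*, 3) -> true
//   isSafeToCastConstAddrSpace(@g5, 3)                      -> false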

static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpaces::rewriteWithNewAddressSpaces(
    const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);
    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      ValueWithNewAddrSpace[V] = cloneValueWithNewAddressSpace(
        V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast<User>(ValueWithNewAddrSpace.lookup(V));
    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;

  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  with\n  "
                      << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                                         C->getType());
      if (C != Replace) {
        LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
                          << ": " << *Replace << '\n');
        C->replaceAllUsesWith(Replace);
        V = Replace;
      }
    }

    Value::use_iterator I, E;
    for (I = V->use_begin(), E = V->use_end(); I != E; ) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(
              TTI, U, V->getType()->getPointerAddressSpace())) {
        // If V is used as the pointer operand of a compatible memory operation,
        // sets the pointer operand to NewV. This replacement does not change
        // the element type, so the resultant load/store is still valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Handle more complex cases, like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx,
                ConstantExpr::getAddrSpaceCast(KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }

        if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          if (ASC->getDestAddressSpace() == NewAS) {
            if (ASC->getType()->getPointerElementType() !=
                NewV->getType()->getPointerElementType()) {
              NewV = CastInst::Create(Instruction::BitCast, NewV,
                                      ASC->getType(), "", ASC);
            }
            ASC->replaceAllUsesWith(NewV);
            DeadInstructions.push_back(ASC);
            continue;
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *Inst = dyn_cast<Instruction>(V)) {
          // Don't create a copy of the original addrspacecast.
          if (U == V && isa<AddrSpaceCastInst>(V))
            continue;

          BasicBlock::iterator InsertPos = std::next(Inst->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty()) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        DeadInstructions.push_back(I);
    }
  }

  for (Instruction *I : DeadInstructions)
    RecursivelyDeleteTriviallyDeadInstructions(I);

  return true;
}

FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) {
  return new InferAddressSpaces(AddressSpace);
}