//===- InferAddressSpaces.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __device__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero, so-called specific address spaces to represent memory
// spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX
// codegen is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer, for as many generic pointers as possible, that they point
// to only one specific address space. In the above example, it can prove that
// %1 only points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (phi, bitcast, addrspacecast, getelementptr, select,
// and the other forms recognized by isAddressExpression below) start with the
// uninitialized address space. The monotone transfer function moves the
// address space of a pointer down a lattice path from uninitialized to
// specific and then to generic. A join operation of two different specific
// address spaces pushes the expression down to the generic address space. The
// analysis completes once it reaches a fixed point.
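//
// For illustration, a hypothetical join during Step 1: given
//   %p = phi float* [ %a, ... ], [ %b, ... ]
// where %a was inferred to be in addrspace(3) and %b in addrspace(1), the
// join of two different specific address spaces is generic, so %p stays a
// generic pointer and cannot be refined.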
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
    "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
    cl::desc("The default address space is assumed as the flat address space. "
             "This is mainly for testing purposes."));

static const unsigned UninitializedAddressSpace =
    std::numeric_limits<unsigned>::max();

namespace {

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
// Different from ValueToAddrSpaceMapTy, where a new addrspace is inferred on
// the *def* of a value, PredicatedAddrSpaceMapTy is a map where a new
// addrspace is inferred on the *use* of a pointer. This map is introduced to
// infer addrspace from the addrspace predicate assumptions built from the
// llvm.assume intrinsic. In that scenario, only specific uses (under a valid
// assumption context) can be inferred to have a new addrspace.
using PredicatedAddrSpaceMapTy =
    DenseMap<std::pair<const Value *, const Value *>, unsigned>;
using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>;

class InferAddressSpaces : public FunctionPass {
  unsigned FlatAddrSpace = 0;

public:
  static char ID;

  InferAddressSpaces() :
    FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {}
  InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;
};

class InferAddressSpacesImpl {
  AssumptionCache &AC;
  const DominatorTree *DT = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  const DataLayout *DL = nullptr;

  /// Target-specific address space whose uses should be replaced if
  /// possible.
  unsigned FlatAddrSpace = 0;

  // Try to update the address space of V. Returns true if V is updated and
  // false otherwise.
  bool updateAddressSpace(const Value &V,
                          ValueToAddrSpaceMapTy &InferredAddrSpace,
                          PredicatedAddrSpaceMapTy &PredicatedAS) const;

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                          ValueToAddrSpaceMapTy &InferredAddrSpace,
                          PredicatedAddrSpaceMapTy &PredicatedAS) const;

  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  Value *cloneInstructionWithNewAddressSpace(
      Instruction *I, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      const PredicatedAddrSpaceMapTy &PredicatedAS,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;

  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool
  rewriteWithNewAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                              const ValueToAddrSpaceMapTy &InferredAddrSpace,
                              const PredicatedAddrSpaceMapTy &PredicatedAS,
                              Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
      Value *V, PostorderStackTy &PostorderStack,
      DenseSet<Value *> &Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II,
                                Value *OldV, Value *NewV) const;
  void collectRewritableIntrinsicOperands(IntrinsicInst *II,
                                          PostorderStackTy &PostorderStack,
                                          DenseSet<Value *> &Visited) const;

  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
      Value *V, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      const PredicatedAddrSpaceMapTy &PredicatedAS,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;

  unsigned getPredicatedAddrSpace(const Value &V, Value *Opnd) const;

public:
  InferAddressSpacesImpl(AssumptionCache &AC, const DominatorTree *DT,
                         const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
      : AC(AC), DT(DT), TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
  bool run(Function &F);
};

} // end anonymous namespace

char InferAddressSpaces::ID = 0;

INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                    false, false)

// Check whether that's a no-op pointer bitcast using a pair of
// `ptrtoint`/`inttoptr` due to the missing no-op pointer bitcast over
// different address spaces.
static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
                                 const TargetTransformInfo *TTI) {
  assert(I2P->getOpcode() == Instruction::IntToPtr);
  auto *P2I = dyn_cast<Operator>(I2P->getOperand(0));
  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
    return false;
  // Check that it's really safe to treat that pair of `ptrtoint`/`inttoptr` as
  // a no-op cast. Besides checking that both of them are no-op casts, as the
  // reinterpreted pointer may be used in other pointer arithmetic, we also
  // need to double-check that through the target-specific hook. That ensures
  // the underlying target also agrees that's a no-op address space cast and
  // pointer bits are preserved.
  // The current IR spec doesn't have clear rules on address space casts,
  // especially a clear definition for pointer bits in non-default address
  // spaces. It would be undefined if that pointer is dereferenced after an
  // invalid reinterpret cast. Also, due to the unclearness of the meaning of
  // bits in non-default address spaces in the current spec, the pointer
  // arithmetic may also be undefined after an invalid pointer reinterpret
  // cast. However, as we confirm through the target hooks that it's a no-op
  // addrspacecast, it doesn't matter since the bits should be the same.
  unsigned P2IOp0AS = P2I->getOperand(0)->getType()->getPointerAddressSpace();
  unsigned I2PAS = I2P->getType()->getPointerAddressSpace();
  return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()),
                              I2P->getOperand(0)->getType(), I2P->getType(),
                              DL) &&
         CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()),
                              P2I->getOperand(0)->getType(), P2I->getType(),
                              DL) &&
         (P2IOp0AS == I2PAS || TTI->isNoopAddrSpaceCast(P2IOp0AS, I2PAS));
}
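
// For illustration, a hypothetical pair this recognizes, assuming both
// pointer types are 64 bits wide and the target reports the
// addrspace(1)-to-addrspace(0) cast as a no-op:
//   %i = ptrtoint i8 addrspace(1)* %p to i64
//   %q = inttoptr i64 %i to i8*
// Here %q is treated as if it were `addrspacecast i8 addrspace(1)* %p`.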

// Returns true if V is an address expression.
// TODO: Currently, we only consider phi, bitcast, addrspacecast,
// getelementptr, select, and the ptrmask intrinsic, plus no-op
// ptrtoint/inttoptr pairs; more operators could be handled.
static bool isAddressExpression(const Value &V, const DataLayout &DL,
                                const TargetTransformInfo *TTI) {
  const Operator *Op = dyn_cast<Operator>(&V);
  if (!Op)
    return false;

  switch (Op->getOpcode()) {
  case Instruction::PHI:
    assert(Op->getType()->isPointerTy());
    return true;
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  case Instruction::Select:
    return Op->getType()->isPointerTy();
  case Instruction::Call: {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
    return II && II->getIntrinsicID() == Intrinsic::ptrmask;
  }
  case Instruction::IntToPtr:
    return isNoopPtrIntCastPair(Op, DL, TTI);
  default:
    // That value is an address expression if it has an assumed address space.
    return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
  }
}
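
// For illustration, each of the following is an address expression over the
// flat pointers %p, %q, and %r (all names here are hypothetical):
//   %g = getelementptr float, float* %p, i64 4
//   %c = bitcast float* %p to i32*
//   %s = select i1 %cond, float* %p, float* %q
//   %m = call i8* @llvm.ptrmask.p0i8.i64(i8* %r, i64 -16)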

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2>
getPointerOperands(const Value &V, const DataLayout &DL,
                   const TargetTransformInfo *TTI) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return {IncomingValues.begin(), IncomingValues.end()};
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  case Instruction::Call: {
    const IntrinsicInst &II = cast<IntrinsicInst>(Op);
    assert(II.getIntrinsicID() == Intrinsic::ptrmask &&
           "unexpected intrinsic call");
    return {II.getArgOperand(0)};
  }
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(&Op, DL, TTI));
    auto *P2I = cast<Operator>(Op.getOperand(0));
    return {P2I->getOperand(0)};
  }
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                      Value *OldV,
                                                      Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::ptrmask:
    // This is handled as an address expression, not as a use memory operation.
    return false;
  default: {
    Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
    if (!Rewrite)
      return false;
    if (Rewrite != II)
      II->replaceAllUsesWith(Rewrite);
    return true;
  }
  }
}

void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  auto IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::ptrmask:
  case Intrinsic::objectsize:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    SmallVector<int, 2> OpIndexes;
    if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
      for (int Idx : OpIndexes) {
        appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
                                                     PostorderStack, Visited);
      }
    }
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpacesImpl::appendsFlatAddressExpressionToPostorderStack(
    Value *V, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPointerTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
      PostorderStack.emplace_back(CE, false);

    return;
  }

  if (V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
      isAddressExpression(*V, *DL, TTI)) {
    if (Visited.insert(V).second) {
      PostorderStack.emplace_back(V, false);

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}

// Returns all flat address expressions in function F. The elements are
// ordered in postorder.
std::vector<WeakTrackingVH>
InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  PostorderStackTy PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack,
                                                 Visited);
  };

  // Look at operations that may be interesting to accelerate by moving to a
  // known address space. We aim at improving the code generated for loads and
  // stores, but pure addressing calculations may also benefit.
  for (Instruction &I : instructions(F)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      if (!GEP->getType()->isVectorTy())
        PushPtrOperand(GEP->getPointerOperand());
    } else if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
      if (!ASC->getType()->isVectorTy())
        PushPtrOperand(ASC->getPointerOperand());
    } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
      if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
        PushPtrOperand(cast<Operator>(I2P->getOperand(0))->getOperand(0));
    }
  }

  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().getPointer();
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().getInt()) {
      if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
        Postorder.push_back(TopVal);
      PostorderStack.pop_back();
      continue;
    }
    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().setInt(true);
    // Skip values with an assumed address space.
    if (TTI->getAssumedAddrSpace(TopVal) == UninitializedAddressSpace) {
      for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) {
        appendsFlatAddressExpressionToPostorderStack(PtrOperand,
                                                     PostorderStack, Visited);
      }
    }
  }
  return Postorder;
}
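
// For illustration, given the hypothetical input
//   %a = addrspacecast float addrspace(3)* @x to float*
//   %g = getelementptr float, float* %a, i64 1
//   %v = load float, float* %g
// the returned vector is [%a, %g]: operands precede their users, so both the
// data-flow analysis and the rewriter visit defs before uses.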

// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy = PointerType::getWithSamePointeeType(
      cast<PointerType>(Operand->getType()), NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  Instruction *Inst = cast<Instruction>(OperandUse.getUser());
  auto I = PredicatedAS.find(std::make_pair(Inst, Operand));
  if (I != PredicatedAS.end()) {
    // Insert an addrspacecast on that operand before the user.
    unsigned NewAS = I->second;
    Type *NewPtrTy = PointerType::getWithSamePointeeType(
        cast<PointerType>(Operand->getType()), NewAS);
    auto *NewI = new AddrSpaceCastInst(Operand, NewPtrTy);
    NewI->insertBefore(Inst);
    NewI->setDebugLoc(Inst->getDebugLoc());
    return NewI;
  }

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(NewPtrTy);
}
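
// For illustration: if PredicatedAS records that %p is addrspace(3) at this
// particular use (deduced from a dominating assume), the helper materializes
//   %p.cast = addrspacecast float* %p to float addrspace(3)*
// right before the user and returns it, instead of falling back to undef
// (the name %p.cast is hypothetical).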

// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns
// a Value* instead of an Instruction*.
//
// This may also return nullptr in the case where the instruction could not be
// rewritten.
Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  Type *NewPtrType = PointerType::getWithSamePointeeType(
      cast<PointerType>(I->getType()), NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space, according
    // to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Technically the called intrinsic is itself a pointer-typed operand, so
    // handle calls specially here, before the generic operand loop below.
    assert(II->getIntrinsicID() == Intrinsic::ptrmask);
    Value *NewPtr = operandWithNewAddressSpaceOrCreateUndef(
        II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
        PredicatedAS, UndefUsesToFix);
    Value *Rewrite =
        TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
    if (Rewrite) {
      assert(Rewrite != II && "cannot modify this pointer operation in place");
      return Rewrite;
    }

    return nullptr;
  }

  unsigned AS = TTI->getAssumedAddrSpace(I);
  if (AS != UninitializedAddressSpace) {
    // For the assumed address space, insert an `addrspacecast` to make that
    // explicit.
    Type *NewPtrTy = PointerType::getWithSamePointeeType(
        cast<PointerType>(I->getType()), AS);
    auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
    NewI->insertAfter(I);
    return NewI;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS,
          UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->indices()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select:
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
    Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
    if (Src->getType() == NewPtrType)
      return Src;

    // Even if we had a no-op inttoptr/ptrtoint pair, we may still have
    // inferred a source address space from a generic pointer source, so we
    // need to insert a cast back.
    return CastInst::CreatePointerBitCastOrAddrSpaceCast(Src, NewPtrType);
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
    const TargetTransformInfo *TTI) {
  Type *TargetType = CE->getType()->isPointerTy()
                         ? PointerType::getWithSamePointeeType(
                               cast<PointerType>(CE->getType()), NewAddrSpace)
                         : CE->getType();

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::Select) {
    Constant *Src0 = CE->getOperand(1);
    Constant *Src1 = CE->getOperand(2);
    if (Src0->getType()->getPointerAddressSpace() ==
        Src1->getType()->getPointerAddressSpace()) {
      return ConstantExpr::getSelect(
          CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
          ConstantExpr::getAddrSpaceCast(Src1, TargetType));
    }
  }

  if (CE->getOpcode() == Instruction::IntToPtr) {
    assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI));
    Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    return ConstantExpr::getBitCast(Src, TargetType);
  }

  // Computes the operands of the new constant expression.
  bool IsNew = false;
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      IsNew = true;
      NewOperands.push_back(cast<Constant>(NewOperand));
      continue;
    }
    if (auto *CExpr = dyn_cast<ConstantExpr>(Operand))
      if (Value *NewOperand = cloneConstantExprWithNewAddressSpace(
              CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) {
        IsNew = true;
        NewOperands.push_back(cast<Constant>(NewOperand));
        continue;
      }
    // Otherwise, reuses the old operand.
    NewOperands.push_back(Operand);
  }

  // If !IsNew, we will replace the Value with itself. However, replaced values
  // are assumed to be wrapped in an addrspacecast later, so drop it now.
  if (!IsNew)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(NewOperands, TargetType, /*OnlyIfReduced=*/false,
                               cast<GEPOperator>(CE)->getSourceElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
         isAddressExpression(*V, *DL, TTI));

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
        NewI->setDebugLoc(I->getDebugLoc());
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI);
}

// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1,
                                                   unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}
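
// For illustration, with a flat address space of 0:
//   joinAddressSpaces(uninitialized, 3) == 3
//   joinAddressSpaces(3, 3)             == 3
//   joinAddressSpaces(3, 1)             == 0  (two different specific spaces)
//   joinAddressSpaces(0, anything)      == 0  (flat absorbs everything)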

bool InferAddressSpacesImpl::run(Function &F) {
  DL = &F.getParent()->getDataLayout();

  if (AssumeDefaultIsFlatAddressSpace)
    FlatAddrSpace = 0;

  if (FlatAddrSpace == UninitializedAddressSpace) {
    FlatAddrSpace = TTI->getFlatAddressSpace();
    if (FlatAddrSpace == UninitializedAddressSpace)
      return false;
  }

  // Collects all flat address expressions in postorder.
  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  PredicatedAddrSpaceMapTy PredicatedAS;
  inferAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS,
                                     &F);
}

// Constants need to be tracked through RAUW to handle cases with nested
// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpacesImpl::inferAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy &InferredAddrSpace,
    PredicatedAddrSpaceMapTy &PredicatedAS) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    InferredAddrSpace[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Try to update the address space of V according to the
    // address spaces of its operands.
    if (!updateAddressSpace(*V, InferredAddrSpace, PredicatedAS))
      continue;

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace.find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace.end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, there is nothing to do if User is already inferred
      // as flat (the bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}

unsigned InferAddressSpacesImpl::getPredicatedAddrSpace(const Value &V,
                                                        Value *Opnd) const {
  const Instruction *I = dyn_cast<Instruction>(&V);
  if (!I)
    return UninitializedAddressSpace;

  Opnd = Opnd->stripInBoundsOffsets();
  for (auto &AssumeVH : AC.assumptionsFor(Opnd)) {
    if (!AssumeVH)
      continue;
    CallInst *CI = cast<CallInst>(AssumeVH);
    if (!isValidAssumeForContext(CI, I, DT))
      continue;

    const Value *Ptr;
    unsigned AS;
    std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(CI->getArgOperand(0));
    if (Ptr)
      return AS;
  }

  return UninitializedAddressSpace;
}
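
// For illustration: on AMDGPU (one in-tree implementation of this hook), a
// dominating assumption such as
//   %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %p)
//   call void @llvm.assume(i1 %is.shared)
// lets TTI->getPredicatedAddrSpace() report that %p points to the shared
// (local) address space for uses dominated by the assume.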

bool InferAddressSpacesImpl::updateAddressSpace(
    const Value &V, ValueToAddrSpaceMapTy &InferredAddrSpace,
    PredicatedAddrSpaceMapTy &PredicatedAS) const {
  assert(InferredAddrSpace.count(&V));

  LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << V << '\n');

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end()) ?
      I->second : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end()) ?
      J->second : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return false;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    unsigned AS = TTI->getAssumedAddrSpace(&V);
    if (AS != UninitializedAddressSpace) {
      // Use the assumed address space directly.
      NewAS = AS;
    } else {
      // Otherwise, infer the address space from its pointer operands.
      for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) {
        auto I = InferredAddrSpace.find(PtrOperand);
        unsigned OperandAS;
        if (I == InferredAddrSpace.end()) {
          OperandAS = PtrOperand->getType()->getPointerAddressSpace();
          if (OperandAS == FlatAddrSpace) {
            // Check AC for an assumption dominating V.
            unsigned AS = getPredicatedAddrSpace(V, PtrOperand);
            if (AS != UninitializedAddressSpace) {
              LLVM_DEBUG(dbgs()
                         << "  deduce operand AS from the predicate addrspace "
                         << AS << '\n');
              OperandAS = AS;
              // Record this use with the predicated AS.
              PredicatedAS[std::make_pair(&V, PtrOperand)] = OperandAS;
            }
          }
        } else
          OperandAS = I->second;

        // join(flat, *) = flat. So we can break if NewAS is already flat.
        NewAS = joinAddressSpaces(NewAS, OperandAS);
        if (NewAS == FlatAddrSpace)
          break;
      }
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return false;

  // If any updates are made, its users are added to the worklist because
  // their address spaces may also need to be updated.
  LLVM_DEBUG(dbgs() << "  to " << NewAS << '\n');
  InferredAddrSpace[&V] = NewAS;
  return true;
}

/// \returns true if \p U is the pointer operand of a memory instruction with
/// a single pointer operand that can have its address space changed by simply
/// mutating the use to a new value. If the memory instruction is volatile,
/// return true only if the target allows the memory instruction to be volatile
/// in the new address space.
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
                                             Use &U, unsigned AddrSpace) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();
  bool VolatileIsAllowed = false;
  if (auto *I = dyn_cast<Instruction>(Inst))
    VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !LI->isVolatile());

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !SI->isVolatile());

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !RMW->isVolatile());

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !CmpX->isVolatile());

  return false;
}
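
// For illustration: in `store float* %p, float** %q`, the use of %q (the
// pointer operand) can simply be re-pointed at its specific-address-space
// clone, while the use of %p (the stored value) cannot, since changing the
// type of the stored value would change the semantics of the store.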

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(), MSI->getDestAlign(),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInlineInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
                           MTI->getSourceAlign(), MTI->getLength(),
                           false, // isVolatile
                           TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                     MTI->getLength(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}
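
// For illustration, a hypothetical rewrite performed by the helper above:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i1 false)
// becomes, once %d is known to be in addrspace(3),
//   call void @llvm.memcpy.p3i8.p0i8.i64(i8 addrspace(3)* %d3, i8* %s,
//                                        i64 %n, i1 false)
// The intrinsic encodes its pointer types in the mangled name, so the call
// must be re-created rather than having the operand swapped in place.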

// \returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpacesImpl::isSafeToCastConstAddrSpace(Constant *C,
                                                        unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast
    // it off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}

static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);

    // In some degenerate cases (e.g. invalid IR in unreachable code), we may
    // not even infer the value to have its original address space.
    if (NewAddrSpace == UninitializedAddressSpace)
      continue;

    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      Value *New =
          cloneValueWithNewAddressSpace(V, NewAddrSpace, ValueWithNewAddrSpace,
                                        PredicatedAS, &UndefUsesToFix);
      if (New)
        ValueWithNewAddrSpace[V] = New;
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V));
    if (!NewV)
      continue;

    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;

  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  with\n  "
                      << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                                         C->getType());
      if (C != Replace) {
        LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
                          << ": " << *Replace << '\n');
        C->replaceAllUsesWith(Replace);
        V = Replace;
      }
    }

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E; ) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(
              *TTI, U, V->getType()->getPointerAddressSpace())) {
        // If V is used as the pointer operand of a compatible memory operation,
        // sets the pointer operand to NewV. This replacement does not change
        // the element type, so the resultant load/store is still valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Skip if the current user is the new value itself.
      if (CurUser == NewV)
        continue;
      // Handle more complex cases like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx,
                              ConstantExpr::getAddrSpaceCast(KOtherSrc,
                                                             NewV->getType()));
              continue;
            }
          }
        }

        if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          if (ASC->getDestAddressSpace() == NewAS) {
            if (!cast<PointerType>(ASC->getType())
                     ->hasSameElementTypeAs(
                         cast<PointerType>(NewV->getType()))) {
              BasicBlock::iterator InsertPos;
              if (Instruction *NewVInst = dyn_cast<Instruction>(NewV))
                InsertPos = std::next(NewVInst->getIterator());
              else if (Instruction *VInst = dyn_cast<Instruction>(V))
                InsertPos = std::next(VInst->getIterator());
              else
                InsertPos = ASC->getIterator();

              NewV = CastInst::Create(Instruction::BitCast, NewV,
                                      ASC->getType(), "", &*InsertPos);
            }
            ASC->replaceAllUsesWith(NewV);
            DeadInstructions.push_back(ASC);
            continue;
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *VInst = dyn_cast<Instruction>(V)) {
          // Don't create a copy of the original addrspacecast.
          if (U == V && isa<AddrSpaceCastInst>(V))
            continue;

          // Insert the addrspacecast after NewV.
          BasicBlock::iterator InsertPos;
          if (Instruction *NewVInst = dyn_cast<Instruction>(NewV))
            InsertPos = std::next(NewVInst->getIterator());
          else
            InsertPos = std::next(VInst->getIterator());

          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty()) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        DeadInstructions.push_back(I);
    }
  }

  for (Instruction *I : DeadInstructions)
    RecursivelyDeleteTriviallyDeadInstructions(I);

  return true;
}

bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  return InferAddressSpacesImpl(
             getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), DT,
             &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
             FlatAddrSpace)
      .run(F);
}

FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) {
  return new InferAddressSpaces(AddressSpace);
}

InferAddressSpacesPass::InferAddressSpacesPass()
    : FlatAddrSpace(UninitializedAddressSpace) {}
InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace)
    : FlatAddrSpace(AddressSpace) {}

PreservedAnalyses InferAddressSpacesPass::run(Function &F,
                                              FunctionAnalysisManager &AM) {
  bool Changed =
      InferAddressSpacesImpl(AM.getResult<AssumptionAnalysis>(F),
                             AM.getCachedResult<DominatorTreeAnalysis>(F),
                             &AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
          .run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    PA.preserve<DominatorTreeAnalysis>();
    return PA;
  }
  return PreservedAnalyses::all();
}