//===- InferAddressSpaces.cpp - -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent memory
// spaces (e.g. addrspace(3) means shared memory). The Clang frontend places
// only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y.
// To address this complication, the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression
// down to the generic address space. The analysis completes once it reaches a
// fixed point.
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;

static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
    "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
    cl::desc("The default address space is assumed as the flat address space. "
" 141 "This is mainly for test purpose.")); 142 143 static const unsigned UninitializedAddressSpace = 144 std::numeric_limits<unsigned>::max(); 145 146 namespace { 147 148 using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>; 149 using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>; 150 151 class InferAddressSpaces : public FunctionPass { 152 unsigned FlatAddrSpace = 0; 153 154 public: 155 static char ID; 156 157 InferAddressSpaces() : 158 FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {} 159 InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {} 160 161 void getAnalysisUsage(AnalysisUsage &AU) const override { 162 AU.setPreservesCFG(); 163 AU.addRequired<TargetTransformInfoWrapperPass>(); 164 } 165 166 bool runOnFunction(Function &F) override; 167 }; 168 169 class InferAddressSpacesImpl { 170 const TargetTransformInfo *TTI = nullptr; 171 const DataLayout *DL = nullptr; 172 173 /// Target specific address space which uses of should be replaced if 174 /// possible. 175 unsigned FlatAddrSpace = 0; 176 177 // Returns the new address space of V if updated; otherwise, returns None. 178 Optional<unsigned> 179 updateAddressSpace(const Value &V, 180 const ValueToAddrSpaceMapTy &InferredAddrSpace) const; 181 182 // Tries to infer the specific address space of each address expression in 183 // Postorder. 184 void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder, 185 ValueToAddrSpaceMapTy *InferredAddrSpace) const; 186 187 bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const; 188 189 Value *cloneInstructionWithNewAddressSpace( 190 Instruction *I, unsigned NewAddrSpace, 191 const ValueToValueMapTy &ValueWithNewAddrSpace, 192 SmallVectorImpl<const Use *> *UndefUsesToFix) const; 193 194 // Changes the flat address expressions in function F to point to specific 195 // address spaces if InferredAddrSpace says so. Postorder is the postorder of 196 // all flat expressions in the use-def graph of function F. 
  bool rewriteWithNewAddressSpaces(
      const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
      const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
      Value *V, PostorderStackTy &PostorderStack,
      DenseSet<Value *> &Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II, Value *OldV,
                                Value *NewV) const;
  void collectRewritableIntrinsicOperands(IntrinsicInst *II,
                                          PostorderStackTy &PostorderStack,
                                          DenseSet<Value *> &Visited) const;

  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
      Value *V, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;

public:
  InferAddressSpacesImpl(const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
      : TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
  bool run(Function &F);
};

} // end anonymous namespace

char InferAddressSpaces::ID = 0;

namespace llvm {

void initializeInferAddressSpacesPass(PassRegistry &);

} // end namespace llvm

INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces", false,
                false)

// Checks whether this is a no-op pointer bitcast expressed as a pair of
// `ptrtoint`/`inttoptr` casts. This pattern arises because there is no no-op
// pointer bitcast between different address spaces.
static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
                                 const TargetTransformInfo *TTI) {
  assert(I2P->getOpcode() == Instruction::IntToPtr);
  auto *P2I = dyn_cast<Operator>(I2P->getOperand(0));
  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
    return false;
  // Check that it's really safe to treat that pair of `ptrtoint`/`inttoptr` as
  // a no-op cast. Besides checking that both of them are no-op casts, as the
  // reinterpreted pointer may be used in other pointer arithmetic, we also
  // need to double-check that through the target-specific hook. That ensures
  // the underlying target also agrees that it's a no-op address space cast and
  // pointer bits are preserved.
  // The current IR spec doesn't have clear rules on address space casts,
  // especially a clear definition for pointer bits in non-default address
  // spaces. It would be undefined if that pointer is dereferenced after an
  // invalid reinterpret cast. Also, due to the unclear meaning of bits in
  // non-default address spaces in the current spec, the pointer arithmetic may
  // also be undefined after an invalid pointer reinterpret cast.
  // However, as we confirm through the target hooks that it's a no-op
  // addrspacecast, it doesn't matter since the bits should be the same.
  return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()),
                              I2P->getOperand(0)->getType(), I2P->getType(),
                              DL) &&
         CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()),
                              P2I->getOperand(0)->getType(), P2I->getType(),
                              DL) &&
         TTI->isNoopAddrSpaceCast(
             P2I->getOperand(0)->getType()->getPointerAddressSpace(),
             I2P->getType()->getPointerAddressSpace());
}

// Returns true if V is an address expression.
// TODO: Currently, we only consider phi, bitcast, addrspacecast,
// getelementptr, select, llvm.ptrmask calls, and no-op `inttoptr(ptrtoint)`
// pairs.
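//
// For instance (an illustrative sketch, not taken from a real test case), in
//   %a = addrspacecast i8 addrspace(3)* @buf to i8*
//   %p = getelementptr i8, i8* %a, i64 4
//   %q = select i1 %c, i8* %p, i8* %a
// %a, %p, and %q are all address expressions rooted at @buf.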
static bool isAddressExpression(const Value &V, const DataLayout &DL,
                                const TargetTransformInfo *TTI) {
  const Operator *Op = dyn_cast<Operator>(&V);
  if (!Op)
    return false;

  switch (Op->getOpcode()) {
  case Instruction::PHI:
    assert(Op->getType()->isPointerTy());
    return true;
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  case Instruction::Select:
    return Op->getType()->isPointerTy();
  case Instruction::Call: {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
    return II && II->getIntrinsicID() == Intrinsic::ptrmask;
  }
  case Instruction::IntToPtr:
    return isNoopPtrIntCastPair(Op, DL, TTI);
  default:
    // The value is an address expression if it has an assumed address space.
    return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
  }
}

// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2>
getPointerOperands(const Value &V, const DataLayout &DL,
                   const TargetTransformInfo *TTI) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  case Instruction::Call: {
    const IntrinsicInst &II = cast<IntrinsicInst>(Op);
    assert(II.getIntrinsicID() == Intrinsic::ptrmask &&
           "unexpected intrinsic call");
    return {II.getArgOperand(0)};
  }
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(&Op, DL, TTI));
    auto *P2I = cast<Operator>(Op.getOperand(0));
    return {P2I->getOperand(0)};
  }
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}

bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                      Value *OldV,
                                                      Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::ptrmask:
    // This is handled as an address expression, not as a memory operation use.
    return false;
  default: {
    Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
    if (!Rewrite)
      return false;
    if (Rewrite != II)
      II->replaceAllUsesWith(Rewrite);
    return true;
  }
  }
}

void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  auto IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::ptrmask:
  case Intrinsic::objectsize:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    SmallVector<int, 2> OpIndexes;
    if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
      for (int Idx : OpIndexes) {
        appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
                                                     PostorderStack, Visited);
      }
    }
    break;
  }
}

// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpacesImpl::appendsFlatAddressExpressionToPostorderStack(
    Value *V, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPointerTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
      PostorderStack.emplace_back(CE, false);

    return;
  }

  if (V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
      isAddressExpression(*V, *DL, TTI)) {
    if (Visited.insert(V).second) {
      PostorderStack.emplace_back(V, false);

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}

// Returns all flat address expressions in function F. The elements are ordered
// in postorder.
std::vector<WeakTrackingVH>
InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  PostorderStackTy PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack, Visited);
  };

  // Look at operations that may be worth accelerating by moving them to a
  // known address space. We primarily aim at loads and stores, but pure
  // addressing calculations may also be faster.
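  //
  // Concretely, the roots pushed below are the pointer operands of loads,
  // stores, atomics, memset/memcpy/memmove, pointer compares, addrspacecasts,
  // no-op inttoptr(ptrtoint) pairs, and target-rewritable intrinsics; the
  // stack-based traversal afterwards walks their flat defining expressions.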
441 for (Instruction &I : instructions(F)) { 442 if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) { 443 if (!GEP->getType()->isVectorTy()) 444 PushPtrOperand(GEP->getPointerOperand()); 445 } else if (auto *LI = dyn_cast<LoadInst>(&I)) 446 PushPtrOperand(LI->getPointerOperand()); 447 else if (auto *SI = dyn_cast<StoreInst>(&I)) 448 PushPtrOperand(SI->getPointerOperand()); 449 else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I)) 450 PushPtrOperand(RMW->getPointerOperand()); 451 else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I)) 452 PushPtrOperand(CmpX->getPointerOperand()); 453 else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) { 454 // For memset/memcpy/memmove, any pointer operand can be replaced. 455 PushPtrOperand(MI->getRawDest()); 456 457 // Handle 2nd operand for memcpy/memmove. 458 if (auto *MTI = dyn_cast<MemTransferInst>(MI)) 459 PushPtrOperand(MTI->getRawSource()); 460 } else if (auto *II = dyn_cast<IntrinsicInst>(&I)) 461 collectRewritableIntrinsicOperands(II, PostorderStack, Visited); 462 else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) { 463 // FIXME: Handle vectors of pointers 464 if (Cmp->getOperand(0)->getType()->isPointerTy()) { 465 PushPtrOperand(Cmp->getOperand(0)); 466 PushPtrOperand(Cmp->getOperand(1)); 467 } 468 } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) { 469 if (!ASC->getType()->isVectorTy()) 470 PushPtrOperand(ASC->getPointerOperand()); 471 } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) { 472 if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI)) 473 PushPtrOperand( 474 cast<Operator>(I2P->getOperand(0))->getOperand(0)); 475 } 476 } 477 478 std::vector<WeakTrackingVH> Postorder; // The resultant postorder. 479 while (!PostorderStack.empty()) { 480 Value *TopVal = PostorderStack.back().getPointer(); 481 // If the operands of the expression on the top are already explored, 482 // adds that expression to the resultant postorder. 483 if (PostorderStack.back().getInt()) { 484 if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace) 485 Postorder.push_back(TopVal); 486 PostorderStack.pop_back(); 487 continue; 488 } 489 // Otherwise, adds its operands to the stack and explores them. 490 PostorderStack.back().setInt(true); 491 // Skip values with an assumed address space. 492 if (TTI->getAssumedAddrSpace(TopVal) == UninitializedAddressSpace) { 493 for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) { 494 appendsFlatAddressExpressionToPostorderStack(PtrOperand, PostorderStack, 495 Visited); 496 } 497 } 498 } 499 return Postorder; 500 } 501 502 // A helper function for cloneInstructionWithNewAddressSpace. Returns the clone 503 // of OperandUse.get() in the new address space. If the clone is not ready yet, 504 // returns an undef in the new address space as a placeholder. 505 static Value *operandWithNewAddressSpaceOrCreateUndef( 506 const Use &OperandUse, unsigned NewAddrSpace, 507 const ValueToValueMapTy &ValueWithNewAddrSpace, 508 SmallVectorImpl<const Use *> *UndefUsesToFix) { 509 Value *Operand = OperandUse.get(); 510 511 Type *NewPtrTy = PointerType::getWithSamePointeeType( 512 cast<PointerType>(Operand->getType()), NewAddrSpace); 513 514 if (Constant *C = dyn_cast<Constant>(Operand)) 515 return ConstantExpr::getAddrSpaceCast(C, NewPtrTy); 516 517 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) 518 return NewOperand; 519 520 UndefUsesToFix->push_back(&OperandUse); 521 return UndefValue::get(NewPtrTy); 522 } 523 524 // Returns a clone of `I` with its operands converted to those specified in 525 // ValueWithNewAddrSpace. 
// Due to potential cycles in the data flow graph, an operand whose address
// space needs to be modified might not exist in ValueWithNewAddrSpace. In that
// case, uses undef as a placeholder operand and adds that operand use to
// UndefUsesToFix so that the caller can fix them later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns
// a Value* instead of an Instruction*.
//
// This may also return nullptr if the instruction could not be rewritten.
Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  Type *NewPtrType = PointerType::getWithSamePointeeType(
      cast<PointerType>(I->getType()), NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space,
    // according to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // The only intrinsic handled here is ptrmask, whose first argument is the
    // pointer-typed operand, so specially handle calls early.
    assert(II->getIntrinsicID() == Intrinsic::ptrmask);
    Value *NewPtr = operandWithNewAddressSpaceOrCreateUndef(
        II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
        UndefUsesToFix);
    Value *Rewrite =
        TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
    if (Rewrite) {
      assert(Rewrite != II && "cannot modify this pointer operation in place");
      return Rewrite;
    }

    return nullptr;
  }

  unsigned AS = TTI->getAssumedAddrSpace(I);
  if (AS != UninitializedAddressSpace) {
    // For the assumed address space, insert an `addrspacecast` to make that
    // explicit.
    Type *NewPtrTy = PointerType::getWithSamePointeeType(
        cast<PointerType>(I->getType()), AS);
    auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
    NewI->insertAfter(I);
    return NewI;
  }

  // Computes the converted pointer operands.
583 SmallVector<Value *, 4> NewPointerOperands; 584 for (const Use &OperandUse : I->operands()) { 585 if (!OperandUse.get()->getType()->isPointerTy()) 586 NewPointerOperands.push_back(nullptr); 587 else 588 NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef( 589 OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix)); 590 } 591 592 switch (I->getOpcode()) { 593 case Instruction::BitCast: 594 return new BitCastInst(NewPointerOperands[0], NewPtrType); 595 case Instruction::PHI: { 596 assert(I->getType()->isPointerTy()); 597 PHINode *PHI = cast<PHINode>(I); 598 PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues()); 599 for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) { 600 unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index); 601 NewPHI->addIncoming(NewPointerOperands[OperandNo], 602 PHI->getIncomingBlock(Index)); 603 } 604 return NewPHI; 605 } 606 case Instruction::GetElementPtr: { 607 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I); 608 GetElementPtrInst *NewGEP = GetElementPtrInst::Create( 609 GEP->getSourceElementType(), NewPointerOperands[0], 610 SmallVector<Value *, 4>(GEP->indices())); 611 NewGEP->setIsInBounds(GEP->isInBounds()); 612 return NewGEP; 613 } 614 case Instruction::Select: 615 assert(I->getType()->isPointerTy()); 616 return SelectInst::Create(I->getOperand(0), NewPointerOperands[1], 617 NewPointerOperands[2], "", nullptr, I); 618 case Instruction::IntToPtr: { 619 assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI)); 620 Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0); 621 assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace); 622 if (Src->getType() != NewPtrType) 623 return new BitCastInst(Src, NewPtrType); 624 return Src; 625 } 626 default: 627 llvm_unreachable("Unexpected opcode"); 628 } 629 } 630 631 // Similar to cloneInstructionWithNewAddressSpace, returns a clone of the 632 // constant expression `CE` with its operands replaced as specified in 633 // ValueWithNewAddrSpace. 634 static Value *cloneConstantExprWithNewAddressSpace( 635 ConstantExpr *CE, unsigned NewAddrSpace, 636 const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL, 637 const TargetTransformInfo *TTI) { 638 Type *TargetType = PointerType::getWithSamePointeeType( 639 cast<PointerType>(CE->getType()), NewAddrSpace); 640 641 if (CE->getOpcode() == Instruction::AddrSpaceCast) { 642 // Because CE is flat, the source address space must be specific. 643 // Therefore, the inferred address space must be the source space according 644 // to our algorithm. 
645 assert(CE->getOperand(0)->getType()->getPointerAddressSpace() == 646 NewAddrSpace); 647 return ConstantExpr::getBitCast(CE->getOperand(0), TargetType); 648 } 649 650 if (CE->getOpcode() == Instruction::BitCast) { 651 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0))) 652 return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType); 653 return ConstantExpr::getAddrSpaceCast(CE, TargetType); 654 } 655 656 if (CE->getOpcode() == Instruction::Select) { 657 Constant *Src0 = CE->getOperand(1); 658 Constant *Src1 = CE->getOperand(2); 659 if (Src0->getType()->getPointerAddressSpace() == 660 Src1->getType()->getPointerAddressSpace()) { 661 662 return ConstantExpr::getSelect( 663 CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType), 664 ConstantExpr::getAddrSpaceCast(Src1, TargetType)); 665 } 666 } 667 668 if (CE->getOpcode() == Instruction::IntToPtr) { 669 assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI)); 670 Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0); 671 assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace); 672 return ConstantExpr::getBitCast(Src, TargetType); 673 } 674 675 // Computes the operands of the new constant expression. 676 bool IsNew = false; 677 SmallVector<Constant *, 4> NewOperands; 678 for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) { 679 Constant *Operand = CE->getOperand(Index); 680 // If the address space of `Operand` needs to be modified, the new operand 681 // with the new address space should already be in ValueWithNewAddrSpace 682 // because (1) the constant expressions we consider (i.e. addrspacecast, 683 // bitcast, and getelementptr) do not incur cycles in the data flow graph 684 // and (2) this function is called on constant expressions in postorder. 685 if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) { 686 IsNew = true; 687 NewOperands.push_back(cast<Constant>(NewOperand)); 688 continue; 689 } 690 if (auto CExpr = dyn_cast<ConstantExpr>(Operand)) 691 if (Value *NewOperand = cloneConstantExprWithNewAddressSpace( 692 CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) { 693 IsNew = true; 694 NewOperands.push_back(cast<Constant>(NewOperand)); 695 continue; 696 } 697 // Otherwise, reuses the old operand. 698 NewOperands.push_back(Operand); 699 } 700 701 // If !IsNew, we will replace the Value with itself. However, replaced values 702 // are assumed to wrapped in a addrspace cast later so drop it now. 703 if (!IsNew) 704 return nullptr; 705 706 if (CE->getOpcode() == Instruction::GetElementPtr) { 707 // Needs to specify the source type while constructing a getelementptr 708 // constant expression. 709 return CE->getWithOperands( 710 NewOperands, TargetType, /*OnlyIfReduced=*/false, 711 NewOperands[0]->getType()->getPointerElementType()); 712 } 713 714 return CE->getWithOperands(NewOperands, TargetType); 715 } 716 717 // Returns a clone of the value `V`, with its operands replaced as specified in 718 // ValueWithNewAddrSpace. This function is called on every flat address 719 // expression whose address space needs to be modified, in postorder. 720 // 721 // See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix. 722 Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace( 723 Value *V, unsigned NewAddrSpace, 724 const ValueToValueMapTy &ValueWithNewAddrSpace, 725 SmallVectorImpl<const Use *> *UndefUsesToFix) const { 726 // All values in Postorder are flat address expressions. 
727 assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace && 728 isAddressExpression(*V, *DL, TTI)); 729 730 if (Instruction *I = dyn_cast<Instruction>(V)) { 731 Value *NewV = cloneInstructionWithNewAddressSpace( 732 I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix); 733 if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) { 734 if (NewI->getParent() == nullptr) { 735 NewI->insertBefore(I); 736 NewI->takeName(I); 737 } 738 } 739 return NewV; 740 } 741 742 return cloneConstantExprWithNewAddressSpace( 743 cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI); 744 } 745 746 // Defines the join operation on the address space lattice (see the file header 747 // comments). 748 unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1, 749 unsigned AS2) const { 750 if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace) 751 return FlatAddrSpace; 752 753 if (AS1 == UninitializedAddressSpace) 754 return AS2; 755 if (AS2 == UninitializedAddressSpace) 756 return AS1; 757 758 // The join of two different specific address spaces is flat. 759 return (AS1 == AS2) ? AS1 : FlatAddrSpace; 760 } 761 762 bool InferAddressSpacesImpl::run(Function &F) { 763 DL = &F.getParent()->getDataLayout(); 764 765 if (AssumeDefaultIsFlatAddressSpace) 766 FlatAddrSpace = 0; 767 768 if (FlatAddrSpace == UninitializedAddressSpace) { 769 FlatAddrSpace = TTI->getFlatAddressSpace(); 770 if (FlatAddrSpace == UninitializedAddressSpace) 771 return false; 772 } 773 774 // Collects all flat address expressions in postorder. 775 std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F); 776 777 // Runs a data-flow analysis to refine the address spaces of every expression 778 // in Postorder. 779 ValueToAddrSpaceMapTy InferredAddrSpace; 780 inferAddressSpaces(Postorder, &InferredAddrSpace); 781 782 // Changes the address spaces of the flat address expressions who are inferred 783 // to point to a specific address space. 784 return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace, &F); 785 } 786 787 // Constants need to be tracked through RAUW to handle cases with nested 788 // constant expressions, so wrap values in WeakTrackingVH. 789 void InferAddressSpacesImpl::inferAddressSpaces( 790 ArrayRef<WeakTrackingVH> Postorder, 791 ValueToAddrSpaceMapTy *InferredAddrSpace) const { 792 SetVector<Value *> Worklist(Postorder.begin(), Postorder.end()); 793 // Initially, all expressions are in the uninitialized address space. 794 for (Value *V : Postorder) 795 (*InferredAddrSpace)[V] = UninitializedAddressSpace; 796 797 while (!Worklist.empty()) { 798 Value *V = Worklist.pop_back_val(); 799 800 // Tries to update the address space of the stack top according to the 801 // address spaces of its operands. 802 LLVM_DEBUG(dbgs() << "Updating the address space of\n " << *V << '\n'); 803 Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace); 804 if (!NewAS.hasValue()) 805 continue; 806 // If any updates are made, grabs its users to the worklist because 807 // their address spaces can also be possibly updated. 808 LLVM_DEBUG(dbgs() << " to " << NewAS.getValue() << '\n'); 809 (*InferredAddrSpace)[V] = NewAS.getValue(); 810 811 for (Value *User : V->users()) { 812 // Skip if User is already in the worklist. 813 if (Worklist.count(User)) 814 continue; 815 816 auto Pos = InferredAddrSpace->find(User); 817 // Our algorithm only updates the address spaces of flat address 818 // expressions, which are those in InferredAddrSpace. 
819 if (Pos == InferredAddrSpace->end()) 820 continue; 821 822 // Function updateAddressSpace moves the address space down a lattice 823 // path. Therefore, nothing to do if User is already inferred as flat (the 824 // bottom element in the lattice). 825 if (Pos->second == FlatAddrSpace) 826 continue; 827 828 Worklist.insert(User); 829 } 830 } 831 } 832 833 Optional<unsigned> InferAddressSpacesImpl::updateAddressSpace( 834 const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const { 835 assert(InferredAddrSpace.count(&V)); 836 837 // The new inferred address space equals the join of the address spaces 838 // of all its pointer operands. 839 unsigned NewAS = UninitializedAddressSpace; 840 841 const Operator &Op = cast<Operator>(V); 842 if (Op.getOpcode() == Instruction::Select) { 843 Value *Src0 = Op.getOperand(1); 844 Value *Src1 = Op.getOperand(2); 845 846 auto I = InferredAddrSpace.find(Src0); 847 unsigned Src0AS = (I != InferredAddrSpace.end()) ? 848 I->second : Src0->getType()->getPointerAddressSpace(); 849 850 auto J = InferredAddrSpace.find(Src1); 851 unsigned Src1AS = (J != InferredAddrSpace.end()) ? 852 J->second : Src1->getType()->getPointerAddressSpace(); 853 854 auto *C0 = dyn_cast<Constant>(Src0); 855 auto *C1 = dyn_cast<Constant>(Src1); 856 857 // If one of the inputs is a constant, we may be able to do a constant 858 // addrspacecast of it. Defer inferring the address space until the input 859 // address space is known. 860 if ((C1 && Src0AS == UninitializedAddressSpace) || 861 (C0 && Src1AS == UninitializedAddressSpace)) 862 return None; 863 864 if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS)) 865 NewAS = Src1AS; 866 else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS)) 867 NewAS = Src0AS; 868 else 869 NewAS = joinAddressSpaces(Src0AS, Src1AS); 870 } else { 871 unsigned AS = TTI->getAssumedAddrSpace(&V); 872 if (AS != UninitializedAddressSpace) { 873 // Use the assumed address space directly. 874 NewAS = AS; 875 } else { 876 // Otherwise, infer the address space from its pointer operands. 877 for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) { 878 auto I = InferredAddrSpace.find(PtrOperand); 879 unsigned OperandAS = 880 I != InferredAddrSpace.end() 881 ? I->second 882 : PtrOperand->getType()->getPointerAddressSpace(); 883 884 // join(flat, *) = flat. So we can break if NewAS is already flat. 885 NewAS = joinAddressSpaces(NewAS, OperandAS); 886 if (NewAS == FlatAddrSpace) 887 break; 888 } 889 } 890 } 891 892 unsigned OldAS = InferredAddrSpace.lookup(&V); 893 assert(OldAS != FlatAddrSpace); 894 if (OldAS == NewAS) 895 return None; 896 return NewAS; 897 } 898 899 /// \p returns true if \p U is the pointer operand of a memory instruction with 900 /// a single pointer operand that can have its address space changed by simply 901 /// mutating the use to a new value. If the memory instruction is volatile, 902 /// return true only if the target allows the memory instruction to be volatile 903 /// in the new address space. 
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
                                             Use &U, unsigned AddrSpace) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();
  bool VolatileIsAllowed = false;
  if (auto *I = dyn_cast<Instruction>(Inst))
    VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !LI->isVolatile());

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !SI->isVolatile());

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !RMW->isVolatile());

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !CmpX->isVolatile());

  return false;
}

/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(),
                   MaybeAlign(MSI->getDestAlignment()),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInlineInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
                           MTI->getSourceAlign(), MTI->getLength(),
                           false, // isVolatile
                           TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                     MTI->getLength(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}

// \returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpacesImpl::isSafeToCastConstAddrSpace(Constant *C,
                                                        unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
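  // For example (illustrative), a constant addrspace(3) pointer is not cast
  // directly to addrspace(1) here; only casts to or from the flat address
  // space are considered safe.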
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast
    // it off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}

static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}

bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
    const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);

    // In some degenerate cases (e.g. invalid IR in unreachable code), we may
    // not even infer the value to have its original address space.
    if (NewAddrSpace == UninitializedAddressSpace)
      continue;

    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      Value *New = cloneValueWithNewAddressSpace(
          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
      if (New)
        ValueWithNewAddrSpace[V] = New;
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V));
    if (!NewV)
      continue;

    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;

  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  with\n  "
                      << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace =
          ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), C->getType());
      if (C != Replace) {
        LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
                          << ": " << *Replace << '\n');
        C->replaceAllUsesWith(Replace);
        V = Replace;
      }
    }

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E;) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands.
      // Skip to the next user.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(
              TTI, U, V->getType()->getPointerAddressSpace())) {
        // If V is used as the pointer operand of a compatible memory
        // operation, sets the pointer operand to NewV. This replacement does
        // not change the element type, so the resultant load/store is still
        // valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Skip if the current user is the new value itself.
      if (CurUser == NewV)
        continue;
      // Handle more complex cases like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx, ConstantExpr::getAddrSpaceCast(
                                            KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }

        if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          if (ASC->getDestAddressSpace() == NewAS) {
            if (ASC->getType()->getPointerElementType() !=
                NewV->getType()->getPointerElementType()) {
              NewV = CastInst::Create(Instruction::BitCast, NewV,
                                      ASC->getType(), "", ASC);
            }
            ASC->replaceAllUsesWith(NewV);
            DeadInstructions.push_back(ASC);
            continue;
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *Inst = dyn_cast<Instruction>(V)) {
          // Don't create a copy of the original addrspacecast.
1170 if (U == V && isa<AddrSpaceCastInst>(V)) 1171 continue; 1172 1173 BasicBlock::iterator InsertPos = std::next(Inst->getIterator()); 1174 while (isa<PHINode>(InsertPos)) 1175 ++InsertPos; 1176 U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos)); 1177 } else { 1178 U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), 1179 V->getType())); 1180 } 1181 } 1182 } 1183 1184 if (V->use_empty()) { 1185 if (Instruction *I = dyn_cast<Instruction>(V)) 1186 DeadInstructions.push_back(I); 1187 } 1188 } 1189 1190 for (Instruction *I : DeadInstructions) 1191 RecursivelyDeleteTriviallyDeadInstructions(I); 1192 1193 return true; 1194 } 1195 1196 bool InferAddressSpaces::runOnFunction(Function &F) { 1197 if (skipFunction(F)) 1198 return false; 1199 1200 return InferAddressSpacesImpl( 1201 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F), 1202 FlatAddrSpace) 1203 .run(F); 1204 } 1205 1206 FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) { 1207 return new InferAddressSpaces(AddressSpace); 1208 } 1209 1210 InferAddressSpacesPass::InferAddressSpacesPass() 1211 : FlatAddrSpace(UninitializedAddressSpace) {} 1212 InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace) 1213 : FlatAddrSpace(AddressSpace) {} 1214 1215 PreservedAnalyses InferAddressSpacesPass::run(Function &F, 1216 FunctionAnalysisManager &AM) { 1217 bool Changed = 1218 InferAddressSpacesImpl(&AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace) 1219 .run(F); 1220 if (Changed) { 1221 PreservedAnalyses PA; 1222 PA.preserveSet<CFGAnalyses>(); 1223 return PA; 1224 } 1225 return PreservedAnalyses::all(); 1226 } 1227