//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out-of-bounds index");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out-of-bounds index");
      // If the index is inside the array, compute the index to the requested
      // element and recurse inside the element with the end of the indices
      // list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
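
// Worked example (illustrative, not from the original source): for
//   %T = type { i32, [2 x i16], i8 }
// the flattened leaves are numbered i32 -> 0, i16 -> 1, i16 -> 2, i8 -> 3,
// so ComputeLinearIndex(%T, {1, 1}) -- the second i16 -- returns 2: the loop
// first accumulates 1 for the i32, then jumps one element (offset 1) into
// the array before hitting the base case.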

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout
      // above.
      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
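
// Illustrative example (not from the original source): given
//   %T = type { i32, [2 x float] }
// ComputeValueVTs yields ValueVTs = {i32, f32, f32}, and, if requested,
// Offsets = {0, 4, 8}, assuming a typical data layout where both i32 and
// float are 4 bytes with no padding between the fields. Note that
// computeValueLLTs records its offsets in bits, hence the final "* 8".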

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
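
// Illustrative note (not from the original source): isNoopBitcast treats
//   bitcast i8* %p to i32*
// as free, since pointer-to-pointer bitcasts change no bits. It likewise
// accepts a bitcast between two vector types such as <4 x i32> and
// <4 x float>, but only when both types are legal for the target, so the
// cast is guaranteed not to require any code.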

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a
/// particular scalar component. This records its address; the reverse of this
/// list gives a sequence of indices appropriate for an extractvalue to locate
/// the important value. This value is updated during the function and on exit
/// will indicate similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0)
      return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr instructions with all-zero indices.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedSize());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
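
// Illustrative walkthrough (not from the original source): in
//   %agg = insertvalue { i8*, i32 } undef, i8* %p, 0
//   %val = extractvalue { i8*, i32 } %agg, 0
// getNoopInput(%val, ...) pushes index 0 onto ValLoc while passing through
// the extractvalue, then pops it again at the matching insertvalue and
// returns %p, the earliest source of the value.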

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  // Trace the sub-value needed by the return value as far back up the graph
  // as possible, in the hope that it will intersect with the value produced
  // by the call. In the simple case with no "returned" attribute, the hope is
  // actually that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search will be blocked immediately
  // and the loop is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!)
  // the same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look
  // through extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}
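
// Illustrative example (not from the original source): in
//   %big = tail call i32 @callee()
//   %small = trunc i32 %big to i16
//   ret i16 %small
// slotOnlyDiscardsData succeeds when the target allows the truncate for tail
// calls: tracing the ret operand through the trunc reaches the call itself,
// and the ret only needs 16 of the 32 bits the call provides.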

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return false.
/// SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty aggregate
/// or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or an empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}
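
// Illustrative continuation of the firstRealType example above (not from the
// original source): starting from Path = [1, 1] in
// {[0 x i64], {{}, i32, {}}, i32}, a call to nextRealType skips over the
// trailing empty struct and advances to the outer i32, leaving Path = [2]
// and SubTypes = [Next].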

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    if (isa<DbgInfoIntrinsic>(BBI))
      continue;
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (isa<PseudoProbeInst>(BBI))
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
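
// Illustrative example (not from the original source): the call below is in
// tail call position, because only the return separates it from the end of
// its block:
//   %ret = tail call i32 @callee(i32 %x)
//   ret i32 %ret
// whereas a store (or any other instruction with side effects) between the
// call and the ret would disqualify it under the loop above.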

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // The following attributes are completely benign as far as calling
  // convention goes; they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);
  CallerAttrs.removeAttribute(Attribute::Dereferenceable);
  CalleeAttrs.removeAttribute(Attribute::Dereferenceable);
  CallerAttrs.removeAttribute(Attribute::DereferenceableOrNull);
  CalleeAttrs.removeAttribute(Attribute::DereferenceableOrNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}
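
// Illustrative example (not from the original source): a caller declared as
// "define zeroext i1 @caller()" may only tail-call a callee whose return is
// also zeroext, since otherwise the caller would have to re-extend the value
// after the call. Conversely, a zeroext on the callee alone is harmless when
// the call result is unused, which is why it is dropped above.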

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0)
    return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0)))
    return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be expanded
  // as memcpy in libc, which returns the first argument. On other platforms
  // like arm-none-eabi, memcpy may be expanded as a library call without a
  // return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction; the
      // rest are essentially undef. The type doesn't really matter, but we
      // need *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at
    // this index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
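
// Illustrative example of the libcall special case above (not from the
// original source): on a platform whose memcpy libcall returns its first
// argument, the pattern
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
//   ret i8* %dst
// is still eligible, because the expanded libcall leaves %dst in the return
// register anyway even though the intrinsic itself returns void.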

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}
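
// Illustrative note (not from the original source): getEHScopeMembership
// colors each block with the number of the EH-scope entry block it belongs
// to. For a funclet-style CFG such as
//   entry -> try.body -> catch.pad -> catch.body -> catchret -> cont
// entry and try.body get the entry block's number, catch.pad and catch.body
// get the catch pad's number, and cont (the catchret target) is colored with
// the catchret's funclet color operand, typically the parent function.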