//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Instruction *I) {
  // Among the users of the source value that are compare instructions, if the
  // number of signed predicates is greater than the number of unsigned
  // predicates, we prefer to use SIGN_EXTEND.
  //
  // With this optimization, we would be able to reduce some redundant sign or
  // zero extension instructions, and eventually more machine CSE opportunities
  // can be exposed.
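  //
  // For example, given a value with users like the following (illustrative
  // IR only, not taken from a specific test case):
  //   %c1 = icmp sgt i8 %v, 0
  //   %c2 = icmp slt i8 %v, 10
  //   %c3 = icmp ult i8 %v, 20
  // there are two signed predicates and one unsigned predicate, so
  // SIGN_EXTEND is preferred when %v is promoted to a wider register.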
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const Use &U : I->uses()) {
    if (const auto *CI = dyn_cast<CmpInst>(U.getUser())) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
    if (const auto *CallI = dyn_cast<CallBase>(U.getUser())) {
      if (!CallI->isArgOperand(&U))
        continue;
      unsigned ArgNo = CallI->getArgOperandNo(&U);
      NumOfUnsigned += CallI->paramHasAttr(ArgNo, Attribute::ZExt);
      NumOfSigned += CallI->paramHasAttr(ArgNo, Attribute::SExt);
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  UA = DAG->getUniformityInfo();

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  CallingConv::ID CC = Fn->getCallingConv();

  GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                mf.getDataLayout());
  CanLowerReturn =
      TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext(),
                          Fn->getReturnType());

  // If this personality uses funclets, we need to do a bit more work.
  DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
  EHPersonality Personality = classifyEHPersonality(
      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
  if (isFuncletEHPersonality(Personality)) {
    // Calculate state numbers if we haven't already.
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
    if (Personality == EHPersonality::MSVC_CXX)
      calculateWinCXXEHStateNumbers(&fn, EHInfo);
    else if (isAsynchronousEHPersonality(Personality))
      calculateSEHStateNumbers(&fn, EHInfo);
    else if (Personality == EHPersonality::CoreCLR)
      calculateClrEHStateNumbers(&fn, EHInfo);

    // Collect the catch object allocas referenced by the WinEH handler data
    // so that frame indices can be assigned to them below.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (const AllocaInst *AI = H.CatchObj.Alloca)
          CatchObjects[AI].push_back(&H.CatchObj.FrameIndex);
        else
          H.CatchObj.FrameIndex = INT_MAX;
      }
    }
  }

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  const Align StackAlign = TFI->getStackAlign();
  for (const BasicBlock &BB : *Fn) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        Type *Ty = AI->getAllocatedType();
        Align Alignment = AI->getAlign();

        // Static allocas can be folded into the initial stack frame
        // adjustment. For targets that don't realign the stack, don't
        // do this if there is an extra alignment requirement.
        if (AI->isStaticAlloca() &&
            (TFI->isStackRealignable() || (Alignment <= StackAlign))) {
          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
          uint64_t TySize =
              MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinValue();

          TySize *= CUI->getZExtValue();   // Get total allocated size.
          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
          int FrameIndex = INT_MAX;
          auto Iter = CatchObjects.find(AI);
          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
            FrameIndex = MF->getFrameInfo().CreateFixedObject(
                TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
            MF->getFrameInfo().setObjectAlignment(FrameIndex, Alignment);
          } else {
            FrameIndex = MF->getFrameInfo().CreateStackObject(TySize, Alignment,
                                                              false, AI);
          }

          // Scalable vectors and structures that contain scalable vectors may
          // need a special StackID to distinguish them from other (fixed size)
          // stack objects.
          if (Ty->isScalableTy())
            MF->getFrameInfo().setStackID(FrameIndex,
                                          TFI->getStackIDForScalableVectors());

          StaticAllocaMap[AI] = FrameIndex;
          // Update the catch handler information.
          if (Iter != CatchObjects.end()) {
            for (int *CatchObjPtr : Iter->second)
              *CatchObjPtr = FrameIndex;
          }
        } else {
          // FIXME: Overaligned static allocas should be grouped into
          // a single dynamic allocation instead of using a separate
          // stack allocation for each one.
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo().CreateVariableSizedObject(
              Alignment <= StackAlign ? Align(1) : Alignment, AI);
        }
      } else if (auto *Call = dyn_cast<CallBase>(&I)) {
        // Look for inline asm that clobbers the SP register.
        if (Call->isInlineAsm()) {
          Register SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(Fn->getDataLayout(), TRI, *Call);
          for (TargetLowering::AsmOperandInfo &Op : Ops) {
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
            }
          }
        }
        if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
          switch (II->getIntrinsicID()) {
          case Intrinsic::vastart:
            // Look for calls to the @llvm.va_start intrinsic. We can omit
            // some prologue boilerplate for variadic functions that don't
            // examine their arguments.
            MF->getFrameInfo().setHasVAStart(true);
            break;
          case Intrinsic::fake_use:
            // Look for llvm.fake.uses, so that we can remove loads into fake
            // uses later if necessary.
            MF->setHasFakeUses(true);
            break;
          default:
            break;
          }
        }

        // If we have a musttail call in a variadic function, we need to ensure
        // we forward implicit register parameters.
        if (const auto *CI = dyn_cast<CallInst>(&I)) {
          if (CI->isMustTailCall() && Fn->isVarArg())
            MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
        }

        // Determine if there is a call to setjmp in the machine function.
        if (Call->hasFnAttr(Attribute::ReturnsTwice))
          MF->setExposesReturnsTwice(true);
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
      if (isUsedOutsideOfDefiningBlock(&I))
        if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
          InitializeRegForValue(&I);

      // Decide the preferred extend type for a value. This iterates over all
      // users and therefore isn't cheap, so don't do this at O0.
      if (DAG->getOptLevel() != CodeGenOptLevel::None)
        PreferredExtendType[&I] = getPreferredExtendForValue(&I);
    }
  }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  MBBMap.resize(Fn->getMaxBlockNumber());
  for (const BasicBlock &BB : *Fn) {
    // Don't create MachineBasicBlocks for imaginary EH pad blocks. These
    // blocks are really data, and no instructions can live here.
    if (BB.isEHPad()) {
      BasicBlock::const_iterator PadInst = BB.getFirstNonPHIIt();
      // If this is a non-landingpad EH pad, mark this function as using
      // funclets.
      // FIXME: SEH catchpads do not create EH scope/funclets, so we could avoid
      // setting this in such cases in order to improve frame layout.
      if (!isa<LandingPadInst>(PadInst)) {
        MF->setHasEHScopes(true);
        MF->setHasEHFunclets(true);
        MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
      }
      if (isa<CatchSwitchInst>(PadInst)) {
        assert(BB.begin() == PadInst &&
               "WinEHPrepare failed to remove PHIs from imaginary BBs");
        continue;
      }
      if (isa<FuncletPadInst>(PadInst) &&
          Personality != EHPersonality::Wasm_CXX)
        assert(BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
    }

    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
    MBBMap[BB.getNumber()] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    // Mark landing pad blocks.
    if (BB.isEHPad())
      MBB->setIsEHPad();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (const PHINode &PN : BB.phis()) {
      if (PN.use_empty())
        continue;

      // Skip empty types
      if (PN.getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN.getDebugLoc();
      unsigned PHIReg = ValueMap[&PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  if (isFuncletEHPersonality(Personality)) {
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();

    // Map all BB references in the WinEH data to MBBs.
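    // (The Handler and Cleanup fields below hold IR BasicBlock pointers up to
    // this point; after these loops they refer to the corresponding
    // MachineBasicBlocks.)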
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (H.Handler)
          H.Handler = getMBB(cast<const BasicBlock *>(H.Handler));
      }
    }
    for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
      if (UME.Cleanup)
        UME.Cleanup = getMBB(cast<const BasicBlock *>(UME.Cleanup));
    for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap)
      UME.Handler = getMBB(cast<const BasicBlock *>(UME.Handler));
    for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap)
      CME.Handler = getMBB(cast<const BasicBlock *>(CME.Handler));
  } else if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    calculateWasmEHInfo(&fn, EHInfo);

    // Map all BB references in the Wasm EH data to MBBs.
    DenseMap<BBOrMBB, BBOrMBB> SrcToUnwindDest;
    for (auto &KV : EHInfo.SrcToUnwindDest) {
      const auto *Src = cast<const BasicBlock *>(KV.first);
      const auto *Dest = cast<const BasicBlock *>(KV.second);
      SrcToUnwindDest[getMBB(Src)] = getMBB(Dest);
    }
    EHInfo.SrcToUnwindDest = std::move(SrcToUnwindDest);
    DenseMap<BBOrMBB, SmallPtrSet<BBOrMBB, 4>> UnwindDestToSrcs;
    for (auto &KV : EHInfo.UnwindDestToSrcs) {
      const auto *Dest = cast<const BasicBlock *>(KV.first);
      MachineBasicBlock *DestMBB = getMBB(Dest);
      UnwindDestToSrcs[DestMBB] = SmallPtrSet<BBOrMBB, 4>();
      for (const auto P : KV.second)
        UnwindDestToSrcs[DestMBB].insert(getMBB(cast<const BasicBlock *>(P)));
    }
    EHInfo.UnwindDestToSrcs = std::move(UnwindDestToSrcs);
  }
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  MBBMap.clear();
  ValueMap.clear();
  VirtReg2Value.clear();
  StaticAllocaMap.clear();
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  DescribedArgs.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  RegsWithFixups.clear();
  StatepointStackSlots.clear();
  StatepointRelocationMaps.clear();
  PreferredExtendType.clear();
  PreprocessedDbgDeclares.clear();
  PreprocessedDVRDeclares.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
Register FunctionLoweringInfo::CreateReg(MVT VT, bool isDivergent) {
  return RegInfo->createVirtualRegister(TLI->getRegClassFor(VT, isDivergent));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
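///
/// For example (illustrative, target-dependent): on a 32-bit target where an
/// i64 value is expanded into two i32 parts, a value of type {i64, float}
/// would receive three consecutive virtual registers, two for the i64 member
/// and one for the float member.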
///
Register FunctionLoweringInfo::CreateRegs(Type *Ty, bool isDivergent) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);

  Register FirstReg;
  for (EVT ValueVT : ValueVTs) {
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      Register R = CreateReg(RegisterVT, isDivergent);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

Register FunctionLoweringInfo::CreateRegs(const Value *V) {
  return CreateRegs(V->getType(), UA && UA->isDivergent(V) &&
                                      !TLI->requiresUniformRegister(*MF, V));
}

Register FunctionLoweringInfo::InitializeRegForValue(const Value *V) {
  // Tokens live in vregs only when used for convergence control.
  if (V->getType()->isTokenTy() && !isa<ConvergenceControlInst>(V))
    return 0;
  Register &R = ValueMap[V];
  assert(R == Register() && "Already initialized this value register!");
  assert(VirtReg2Value.empty());
  return R = CreateRegs(V);
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is any-extended
/// to the larger bit width, with the extended bits treated as unknown. The
/// bit width must be no smaller than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(Register Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

  if (BitWidth > LOI->Known.getBitWidth()) {
    LOI->NumSignBits = 1;
    LOI->Known = LOI->Known.anyext(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
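///
/// The merge is conservative: constant incoming values contribute their known
/// bits and sign-bit count directly, non-constant incoming values contribute
/// the LiveOutInfo previously computed for their virtual register, and the
/// per-operand results are intersected (known bits) and minimized
/// (NumSignBits). If any operand's info is unavailable, the destination's
/// LiveOutInfo is marked invalid.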
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getRegisterType(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  auto It = ValueMap.find(PN);
  if (It == ValueMap.end())
    return;

  Register DestReg = It->second;
  if (DestReg == 0)
    return;
  assert(DestReg.isVirtual() && "Expected a virtual reg");
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    DestLOI.Known = KnownBits(BitWidth);
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val;
    if (TLI->signExtendConstant(CI))
      Val = CI->getValue().sext(BitWidth);
    else
      Val = CI->getValue().zext(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.Known = KnownBits::makeConstant(Val);
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!SrcReg.isVirtual()) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
         DestLOI.Known.One.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      DestLOI.Known = KnownBits(BitWidth);
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val;
      if (TLI->signExtendConstant(CI))
        Val = CI->getValue().sext(BitWidth);
      else
        Val = CI->getValue().zext(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.Known.Zero &= ~Val;
      DestLOI.Known.One &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!SrcReg.isVirtual()) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.Known = DestLOI.Known.intersectWith(SrcLOI->Known);
  }
}

/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides previous frame index entry for this argument,
/// if any.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get frame index for the byval argument.
/// If the argument does not have any assigned frame index then INT_MAX is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  auto I = ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return INT_MAX;
}

Register FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
    const Value *CPI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto I = CatchPadExceptionPointers.insert({CPI, 0});
  Register &VReg = I.first->second;
  if (I.second)
    VReg = MRI.createVirtualRegister(RC);
  assert(VReg && "null vreg in exception pointer table!");
  return VReg;
}

const Value *
FunctionLoweringInfo::getValueFromVirtualReg(Register Vreg) {
  // Lazily build the reverse mapping from virtual registers to IR values the
  // first time it is queried.
  if (VirtReg2Value.empty()) {
    SmallVector<EVT, 4> ValueVTs;
    for (auto &P : ValueMap) {
      ValueVTs.clear();
      ComputeValueVTs(*TLI, Fn->getDataLayout(),
                      P.first->getType(), ValueVTs);
      unsigned Reg = P.second;
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          VirtReg2Value[Reg++] = P.first;
      }
    }
  }
  return VirtReg2Value.lookup(Vreg);
}