
1 //===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
11 //===----------------------------------------------------------------------===//
119 /// LimitFloatPrecision - Generate low-precision inline sequences for
124 InsertAssertAlign("insert-assert-align", cl::init(true),
129 LimitFPPrecision("limit-float-precision",
130 cl::desc("Generate low-precision inline sequences "
136 "switch-peel-threshold", cl::Hidden, cl::init(66),
142 // DAG-based analysis from blowing up. For example, alias analysis and
149 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
163 /// getCopyFromParts - Create a value that contains the specified legal parts
218 // Assemble the trailing non-power-of-2 part.
219 unsigned OddParts = NumParts - RoundParts;
282 // zero or sign-extension.
330 if (CI->isInlineAsm())
336 /// getCopyFromPartsVector - Create a value that contains the specified legal
353 // Handle a multi-element vector.
423 // have a vector widening case (e.g. <2 x float> -> <4 x float>).
441 // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
470 *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
474 // Handle cases such as i8 -> <1 x i1>
503 /// getCopyToParts - Create a series of nodes that contain the specified value
533 assert(NumParts == 1 && "No-op copy with multiple parts!");
574 // The value may have changed - recompute ValueVT.
582 "scalar-to-vector conversion failed");
591 if (NumParts & (NumParts - 1)) {
597 unsigned OddParts = NumParts - RoundParts;
605 // The odd parts were reversed by getCopyToParts - unreverse them.
625 SDValue &Part1 = Parts[i+StepSize/2];
627 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
634 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
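getCopyToParts handles a part count that is not a power of two by peeling the odd high parts off first and recursively copying the power-of-two remainder. A minimal sketch of just that splitting arithmetic (plain integers rather than SDValues; splitParts is a made-up name):

#include <bit>
#include <cassert>
#include <cstdio>

// Illustrative arithmetic only: how a non-power-of-2 part count is
// split into a power-of-2 chunk plus the odd high parts.
static void splitParts(unsigned NumParts, unsigned PartBits) {
  assert(NumParts != 0);
  if (NumParts & (NumParts - 1)) { // not a power of two
    unsigned RoundParts = 1u << (std::bit_width(NumParts) - 1);
    unsigned OddParts = NumParts - RoundParts;
    std::printf("%u parts -> %u round parts (%u low bits) + %u odd\n",
                NumParts, RoundParts, RoundParts * PartBits, OddParts);
  } else {
    std::printf("%u parts: already a power of two\n", NumParts);
  }
}

int main() { splitParts(3, 32); } // 3 parts -> 2 round parts (64 low bits) + 1 odd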
655 // fixed/scalable properties. If a target needs to widen a fixed-length type
677 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
682 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
684 // FIXME: Use CONCAT for 2x -> 4x.
688 /// getCopyToPartsVector - Create a series of nodes that contain the specified
704 // Bitconvert vector->vector case.
759 // Handle a multi-element vector.
794 // Bitconvert vector->vector case.
817 // This does something sensible for scalable vectors - see the
924 unsigned NumSignBits = LOI->NumSignBits;
925 unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();
940 FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
944 EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
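Worked example of the two formulas: with RegSize = 32, NumSignBits = 17 gives FromVT = i16 (32 - 17 + 1 = 16), i.e. the register provably holds a sign-extended i16, while NumZeroBits = 24 gives FromVT = i8 (32 - 24 = 8) for the zero-extended case.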
1015 Chain = Chains[NumRegs-1];
1037 Flag.setRegClass(RC->getID());
1096 SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
1130 assert(Pending[i].getNode()->getNumOperands() > 1);
1131 if (Pending[i].getNode()->getOperand(0) == Root)
1186 (Address->use_empty() && !isa<Argument>(Address))) {
1189 << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1193 bool IsParameter = Variable->isParameter() || isa<Argument>(Address);
1202 Address = BCI->getOperand(0);
1207 SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
1226 << " (could not emit func-arg dbg_value)\n");
1234 // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1237 // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
1238 for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
1240 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1241 dropDanglingDebugInfo(Var, It->Expr);
1242 if (It->Values.isKillLocation(It->Expr)) {
1243 handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
1246 SmallVector<Value *> Values(It->Values.location_ops());
1247 if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
1248 It->Values.hasArgList())) {
1250 for (Value *V : It->Values.location_ops())
1253 FnVarLocs->getDILocalVariable(It->VariableID),
1254 It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
1268 // If there is any debug-info attached to this instruction, in the form of
1269 // DbgRecord non-instruction debug-info records.
1272 assert(DLR->getLabel() && "Missing label");
1274 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1330 // Increase the SDNodeOrder if dealing with a non-debug instruction.
1357 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1359 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1364 << I.getModule()->getName() << "]\n";
1398 auto *Undef = UndefValue::get(V->getType());
1430 if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1451 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
1459 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1462 unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1466 assert(Variable->isValidLocationForIntrinsic(DL) &&
1467 "Expected inlined-at fields to agree");
1474 // have some test case that proves this to be correct, we should avoid
1498 auto Undef = UndefValue::get(V->getType());
1519 // Currently we consider only dbg.value intrinsics -- we tell the salvager
1528 // a non-instruction is seen, such as a constant expression or global
1536 Expr->getNumLocationOperands(), Ops,
1566 auto *Undef = UndefValue::get(OrigV->getType());
1607 if (CE->getOpcode() == Instruction::IntToPtr) {
1608 LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1617 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1628 // Only emit func arg dbg value for non-variadic dbg.values for now.
1645 LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1659 isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1664 // We still want the value to appear for the user if possible -- if it has
1668 unsigned Reg = VMI->second;
1671 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1672 V->getType(), std::nullopt);
1679 if (auto VarSize = Var->getSizeInBits())
1681 if (auto Fragment = Expr->getFragmentInfo())
1682 BitsToDescribe = Fragment->SizeInBits;
1690 ? BitsToDescribe - Offset
1721 // Try to fix up any remaining dangling debug info -- and drop it if we can't.
1728 /// getCopyFromRegs - If there was virtual register allocated for the value V
1735 Register InReg = It->second;
1749 /// getValue - Return an SDValue for the given Value.
1759 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1769 /// getNonRegisterValue - Return an SDValue for the given Value, but
1780 N->setDebugLoc(DebugLoc());
1792 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1798 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1808 getValue(CPA->getPointer()), getValue(CPA->getKey()),
1809 getValue(CPA->getAddrDiscriminator()),
1810 getValue(CPA->getDiscriminator()));
1814 unsigned AS = V->getType()->getPointerAddressSpace();
1825 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1829 visit(CE->getOpcode(), *CE);
1837 for (const Use &U : C->operands()) {
1843 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1853 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1854 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1857 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1861 if (isa<ArrayType>(CDS->getType()))
1866 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1871 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1893 return getValue(Equiv->getGlobalValue());
1896 return getValue(NC->getGlobalValue());
1899 assert(C->isNullValue() && "Can only zero this target type!");
1904 VectorType *VecTy = cast<VectorType>(V->getType());
1910 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1912 Ops.push_back(getValue(CV->getOperand(i)));
1919 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1940 SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1943 // If this is an instruction which fast-isel has deferred, select it now.
1948 Inst->getType(), std::nullopt);
1954 return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1963 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1969 CatchPadMBB->setIsEHScopeEntry();
1972 CatchPadMBB->setIsEHFuncletEntry();
1976 // Update machine-CFG edge.
1978 FuncInfo.MBB->addSuccessor(TargetMBB);
1979 TargetMBB->setIsEHCatchretTarget(true);
1982 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1985 // If this is not a fall-through branch or optimizations are switched off,
2001 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
2003 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
2018 FuncInfo.MBB->setIsEHScopeEntry();
2019 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2021 FuncInfo.MBB->setIsEHFuncletEntry();
2022 FuncInfo.MBB->setIsCleanupFuncletEntry();
2058 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2062 UnwindDests.back().first->setIsEHScopeEntry();
2067 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2069 UnwindDests.back().first->setIsEHScopeEntry();
2085 /// The passed-in Prob is the edge probability to EHPadBB.
2092 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2106 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2116 UnwindDests.back().first->setIsEHScopeEntry();
2117 UnwindDests.back().first->setIsEHFuncletEntry();
2121 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2125 UnwindDests.back().first->setIsEHFuncletEntry();
2127 UnwindDests.back().first->setIsEHScopeEntry();
2129 NewEHPadBB = CatchSwitch->getUnwindDest();
2136 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2148 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2152 UnwindDest.first->setIsEHPad();
2155 FuncInfo.MBB->normalizeSuccProbs();
2159 FuncInfo.MBBMap[I.getCleanupPad()->getParent()];
2183 if (I.getParent()->getTerminatingDeoptimizeCall()) {
2190 const Function *F = I.getParent()->getParent();
2197 PointerType::get(F->getContext(),
2207 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2212 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2233 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2238 const Function *F = I.getParent()->getParent();
2241 I.getOperand(0)->getType(), F->getCallingConv(),
2245 if (F->getAttributes().hasRetAttr(Attribute::SExt))
2247 else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2250 LLVMContext &Context = F->getContext();
2251 bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2259 CallingConv::ID CC = F->getCallingConv();
2273 if (I.getOperand(0)->getType()->isPointerTy()) {
2276 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2281 if (j == NumValues - 1)
2304 const Function *F = I.getParent()->getParent();
2306 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2334 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
2339 if (V->getType()->isEmptyTy())
2344 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2346 CopyValueToVirtualRegister(V, VMI->second);
2350 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
2370 if (VI->getParent() == FromBB)
2380 if (FromBB->isEntryBlock())
2396 const BasicBlock *SrcBB = Src->getBasicBlock();
2397 const BasicBlock *DstBB = Dst->getBasicBlock();
2404 return BPI->getEdgeProbability(SrcBB, DstBB);
2411 Src->addSuccessorWithoutProb(Dst);
2415 Src->addSuccessor(Dst, Prob);
2421 return I->getParent() == BB;
2425 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2437 const BasicBlock *BB = CurBB->getBasicBlock();
2446 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2447 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2451 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2456 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2462 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2464 SL->SwitchCases.push_back(CB);
2473 SL->SwitchCases.push_back(CB);
2493 if (Necessary->contains(I))
2498 if (!Deps->try_emplace(I, false).second)
2501 for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)
2502 if (!collectInstructionDeps(Deps, I->getOperand(OpIdx), Necessary,
2534 if (BPI->isEdgeHot(I.getParent(), IfTrue))
2536 else if (BPI->isEdgeHot(I.getParent(), IfFalse))
2547 CostThresh -= Params.UnlikelyBias;
2556 // Use map for stable iteration (to avoid non-determinism of iteration of
2579 for (const auto *U : Ins->users()) {
2635 InBlock(NotCond, CurBB->getBasicBlock())) {
2665 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2666 if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2667 !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2668 !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2677 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2678 CurBB->getParent()->insert(++BBI, TmpBB);
2765 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2766 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2770 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
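The algebra behind this fold: a value is null exactly when every bit is zero, so two null tests collapse into one test of the bitwise OR. A self-contained check of the equivalence (ordinary integers standing in for pointers):

#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t X : {0u, 5u})
    for (uint64_t Y : {0u, 9u}) {
      assert(((X != 0) | (Y != 0)) == ((X | Y) != 0));
      assert(((X == 0) & (Y == 0)) == ((X | Y) == 0));
    }
}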
2783 // Update machine-CFG edges.
2787 // Update machine-CFG edges.
2788 BrMBB->addSuccessor(Succ0MBB);
2790 // If this is not a fall-through branch or optimizations are switched off,
2810 // As long as jumps are not expensive (exceptions for multi-use logic ops,
2827 BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2850 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2853 if (ShouldEmitAsBranches(SL->SwitchCases)) {
2854 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2855 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2856 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2860 visitSwitchCase(SL->SwitchCases[0], BrMBB);
2861 SL->SwitchCases.erase(SL->SwitchCases.begin());
2867 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2868 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2870 SL->SwitchCases.clear();
2883 /// visitSwitchCase - Emits the necessary code to represent a single node in
2894 SwitchBB->normalizeSuccProbs();
2903 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2920 // values are zero-extended. This breaks signed comparisons so truncate
2931 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2932 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2937 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2944 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2954 SwitchBB->normalizeSuccProbs();
2979 /// visitJumpTable - Emit JumpTable node in the current MBB
2983 assert(JT.Reg != -1U && "Should lower JT Header first!");
2992 /// visitJumpTableHeader - This function emits necessary code to produce index
3027 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
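Both the SETULE case-range compare above and this SETUGT jump-table bounds check rely on the same trick: bias by the lower bound, then a single unsigned comparison covers both ends of the range. A small illustration with plain integers (inRange is a made-up helper):

#include <cassert>
#include <cstdint>

// First <= X <= Last as a single unsigned compare on the biased value.
static bool inRange(int32_t X, int32_t First, int32_t Last) {
  return (uint32_t)X - (uint32_t)First <= (uint32_t)Last - (uint32_t)First;
}

int main() {
  assert(inRange(4, 2, 7));
  assert(!inRange(1, 2, 7)); // underflow wraps to a huge unsigned value
  assert(!inRange(8, 2, 7));
}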
3088 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3094 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3112 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3113 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3118 Entry.Ty = FnTy->getParamType(0);
3119 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3126 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
3188 // WebAssembly needs an unreachable instruction after a non-returning call,
3197 /// visitBitTestHeader - This function emits necessary code to produce value
3209 // Determine the type of the test operands.
3238 SwitchBB->normalizeSuccProbs();
3260 /// visitBitTestCase - This function produces one "bit test"
3281 // There is only one zero bit in the range, test for it directly.
3305 SwitchBB->normalizeSuccProbs();
3341 else if (Fn && Fn->isIntrinsic()) {
3342 switch (Fn->getIntrinsicID()) {
3353 // so dtor-funclet not removed by opts
3354 EHPadMBB->setMachineBlockAddressTaken();
3401 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3408 UnwindDest.first->setIsEHPad();
3411 InvokeMBB->normalizeSuccProbs();
3441 Target->setIsInlineAsmBrIndirectTarget();
3442 Target->setMachineBlockAddressTaken();
3443 Target->setLabelMustBeEmitted();
3448 CallBrMBB->normalizeSuccProbs();
3461 assert(FuncInfo.MBB->isEHPad() &&
3467 const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3476 if (LP.getType()->isTokenTy())
3482 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3484 // Get the two live-in registers as SDValues. The physregs have already been
3511 for (JumpTableBlock &JTB : SL->JTCases)
3516 for (BitTestBlock &BTB : SL->BitTestCases)
3524 // Update machine-CFG edges with unique successors.
3535 IndirectBrMBB->normalizeSuccProbs();
3548 Call && Call->doesNotReturn()) {
3552 if (Call->isNonContinuableTrap())
3573 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3574 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3577 Flags.setExact(ExactOp->isExact());
3579 Flags.setDisjoint(DisjointOp->isDisjoint());
3599 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3613 nuw = OFBinOp->hasNoUnsignedWrap();
3614 nsw = OFBinOp->hasNoSignedWrap();
3618 exact = ExactOp->isExact();
3635 cast<PossiblyExactOperator>(&I)->isExact());
3648 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3651 // are zero-extended. This breaks signed comparisons so truncate back to the
3670 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3685 return llvm::all_of(Cond->users(), [](const Value *V) {
3732 // ValueTracking's select pattern matching does not account for -0.0,
3734 // -0.0 is less than +0.0.
3803 EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3816 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3825 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3833 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3841 Flags.setNonNeg(PNI->hasNonNeg());
3856 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3865 // FPTrunc is never a no-op cast, no need to check
3876 // FPExt is never a no-op cast, no need to check
3884 // FPToUI is never a no-op cast, no need to check
3892 // FPToSI is never a no-op cast, no need to check
3900 // UIToFP is never a no-op cast, no need to check
3906 Flags.setNonNeg(PNI->hasNonNeg());
3912 // SIToFP is never a no-op cast, no need to check
3921 // We can either truncate, zero extend, or no-op, accordingly.
3927 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3935 // We can either truncate, zero extend, or no-op, accordingly.
3952 // either a BITCAST or a no-op.
3961 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3973 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3974 unsigned DestAS = I.getType()->getPointerAddressSpace();
4008 Mask = SVI->getShuffleMask();
4027 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
4028 // for targets that support a SPLAT_VECTOR for non-scalable vector types.
4050 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4101 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4105 Idx -= SrcNumElts - PaddedMaskNumElts;
4124 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
4133 Idx -= SrcNumElts;
4168 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4170 Idx -= StartIdx[0];
4190 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
4207 Type *ValTy = Op1->getType();
4240 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4254 Type *AggTy = Op0->getType();
4277 Values[i - LinearIndex] =
4279 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4290 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4295 // Normalize Vector GEP - all scalar operands should be converted to the
4297 bool IsVectorGEP = I.getType()->isVectorTy();
4299 IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4312 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4316 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4343 if (C && isa<VectorType>(C->getType()))
4344 C = C->getSplatValue();
4347 if (CI && CI->isZero())
4350 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4432 return; // getValue will auto-populate this.
4462 Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4466 const uint64_t StackAlignMask = StackAlign.value() - 1U;
4468 // by adding SA-1 to the size. This doesn't overflow because we're computing
4481 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4487 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
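The rounding itself is the usual power-of-two alignment formula: add SA-1, then mask off the low bits. As plain integer arithmetic (roundUp is a made-up helper mirroring the DAG ADD/AND pair):

#include <cassert>
#include <cstdint>

static uint64_t roundUp(uint64_t Size, uint64_t StackAlign) {
  uint64_t StackAlignMask = StackAlign - 1u; // StackAlign is a power of two
  return (Size + StackAlignMask) & ~StackAlignMask;
}

int main() {
  assert(roundUp(13, 16) == 16);
  assert(roundUp(32, 16) == 32);
}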
4494 // transforms that are known not to be poison-safe, such as folding logical
4505 if (CB->hasRetAttr(Attribute::NoUndef))
4506 return CB->getRange();
4523 if (Arg->hasSwiftErrorAttr())
4528 if (Alloca->isSwiftError())
4558 AA->pointsToConstantMemory(MemoryLocation(
4562 // Do not serialize (non-volatile) loads of constant memory with anything.
4567 // Do not serialize non-volatile loads against each other.
4584 // they are side-effect free or do not alias. The optimizer should really
4633 SrcV->getType(), ValueVTs, &Offsets, 0);
4661 !AA->pointsToConstantMemory(MemoryLocation(
4693 if (Arg->hasSwiftErrorAttr())
4698 if (Alloca->isSwiftError())
4706 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4765 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getAlignValue();
4804 TTI.hasConditionalLoadStoreForType(I.getArgOperand(0)->getType())
4825 // When the first GEP operand is a single pointer - it is the uniform base we
4826 // are looking for. If the first operand of the GEP is a splat vector - we
4833 SelectionDAG& DAG = SDB->DAG;
4837 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4841 C = C->getSplatValue();
4845 Base = SDB->getValue(C);
4847 ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4849 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4851 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4856 if (!GEP || GEP->getParent() != CurBB)
4859 if (GEP->getNumOperands() != 2)
4862 const Value *BasePtr = GEP->getPointerOperand();
4863 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4866 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4869 TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4878 Base = SDB->getValue(BasePtr);
4879 Index = SDB->getValue(IndexVal);
4883 DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4896 ->getMaybeAlignValue()
4907 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4939 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getAlignValue();
4970 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4990 TTI.hasConditionalLoadStoreForType(Src0Operand->getType()))
5012 ->getMaybeAlignValue()
5024 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
5197 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
5223 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
5231 bool HasChain = !F->doesNotAccessMemory();
5232 bool OnlyLoad = HasChain && F->onlyReadsMemory();
5236 if (HasChain) { // If this intrinsic has side-effects, chainify it.
5267 EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5269 assert(CI->getBitWidth() <= 64 &&
5286 // Propagate fast-math-flags from IR to node(s).
5296 auto *Token = Bundle->Inputs[0].get();
5322 } else if (!I.getType()->isVoidTy()) {
5329 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5336 if (!I.getType()->isVoidTy()) {
5352 /// GetSignificand - Get the significand and build it into a floating-point
5366 /// GetExponent - Get the exponent:
5368 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
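This is the standard IEEE-754 single-precision exponent extraction: bits 23..30 hold the biased exponent, and subtracting the bias 127 recovers the power of two. Roughly, in scalar C++ (std::bit_cast standing in for the DAG-level BITCAST):

#include <bit>
#include <cassert>
#include <cstdint>

static float exponentOf(float Op) {
  uint32_t Bits = std::bit_cast<uint32_t>(Op);
  int Biased = (Bits & 0x7f800000) >> 23; // biased exponent field
  return (float)(Biased - 127);           // remove the bias
}

int main() {
  assert(exponentOf(8.0f) == 3.0f);  // 8 = 1.0 * 2^3
  assert(exponentOf(0.5f) == -1.0f); // 0.5 = 1.0 * 2^-1
}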
5384 /// getF32Constant - Get 32-bit floating point constant.
5393 // TODO: What fast-math-flags should be set on the floating-point nodes?
5398 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
5411 // For floating-point precision of 6:
5426 // For floating-point precision of 12:
5431 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
5445 // For floating-point precision of 18:
5451 // (0.554906021e-1f +
5452 // (0.961591928e-2f +
5453 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5454 // error 2.47208000*10^(-7), which is better than 18 bits
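The overall scheme for these expansions is exp(x) = 2^(x * log2(e)): the integer part of the product goes into the exponent field, and the fractional part is fed to one of the minimax polynomials above. A scalar sketch of that scheme; the rounded quadratic coefficients here are illustrative stand-ins, not the exact minimax polynomials quoted above, and ldexp replaces the exponent-field bit manipulation:

#include <cmath>
#include <cstdio>

static float expSketch(float x) {
  float t0 = x * 1.442695041f;          // x * log2(e)
  int IntegerPartOfX = (int)t0;         // exact power-of-two part
  float f = t0 - (float)IntegerPartOfX; // FractionalPartOfX
  // Illustrative (rounded) approximation of 2^f on [0, 1):
  float TwoToF = 0.9975f + (0.7356f + 0.2525f * f) * f;
  return std::ldexp(TwoToF, IntegerPartOfX); // scale by 2^IntegerPartOfX
}

int main() {
  std::printf("expSketch(1) = %f vs expf(1) = %f\n", expSketch(1.0f),
              std::exp(1.0f));
}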
5482 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5483 /// limited-precision mode.
5494 // TODO: What fast-math-flags should be set here?
5504 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5505 /// limited-precision mode.
5508 // TODO: What fast-math-flags should be set on the floating-point nodes?
5520 // Get the significand and build it into a floating-point number with
5526 // For floating-point precision of 6:
5529 // -1.1609546f +
5530 // (1.4034025f - 0.23903021f * x) * x;
5541 // For floating-point precision of 12:
5544 // -1.7417939f +
5546 // (-1.4699568f +
5547 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5564 // For floating-point precision of 18:
5567 // -2.1072184f +
5569 // (-3.7029485f +
5571 // (-0.87823314f +
5572 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5603 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5604 /// limited-precision mode.
5607 // TODO: What fast-math-flags should be set on the floating-point nodes?
5616 // Get the significand and build it into a floating-point number with
5621 // floating-point for various degrees of accuracy over [1,2].
5624 // For floating-point precision of 6:
5626 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5637 // For floating-point precision of 12:
5640 // -2.51285454f +
5642 // (-2.12067489f +
5643 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5660 // For floating-point precision of 18:
5663 // -3.0400495f +
5665 // (-5.3420409f +
5667 // (-1.2669343f +
5668 // (0.27515199f -
5669 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
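Conversely, the log2 expansion splits its input into exponent and mantissa: log2(x) = exponent(x) + Log2ofMantissa, with the mantissa rebuilt as a float in [1, 2) and run through a polynomial such as the precision-6 one above. A scalar sketch for positive normal inputs (the bit masks are the standard IEEE-754 fields):

#include <bit>
#include <cmath>
#include <cstdint>
#include <cstdio>

static float log2Sketch(float Op) {
  uint32_t Bits = std::bit_cast<uint32_t>(Op);
  float Exp = (float)((int)((Bits >> 23) & 0xff) - 127); // GetExponent
  // GetSignificand: force the biased exponent field to 127,
  // giving a float x in [1, 2).
  float x = std::bit_cast<float>((Bits & 0x007fffff) | 0x3f800000);
  float Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
  return Exp + Log2ofMantissa;
}

int main() {
  std::printf("log2Sketch(10) = %f vs log2f(10) = %f\n", log2Sketch(10.0f),
              std::log2(10.0f));
}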
5700 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5701 /// limited-precision mode.
5704 // TODO: What fast-math-flags should be set on the floating-point nodes?
5715 // Get the significand and build it into a floating-point number with
5721 // For floating-point precision of 6:
5724 // -0.50419619f +
5725 // (0.60948995f - 0.10380950f * x) * x;
5736 // For floating-point precision of 12:
5739 // -0.64831180f +
5741 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5755 // For floating-point precision of 18:
5758 // -0.84299375f +
5760 // (-1.0688956f +
5762 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5790 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5791 /// limited-precision mode.
5802 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
5803 /// limited-precision mode with x == 10.0f.
5812 IsExp10 = LHSC->isExactlyValue(Ten);
5816 // TODO: What fast-math-flags should be set on the FMUL node?
5832 /// ExpandPowI - Expand a llvm.powi intrinsic.
5839 unsigned Val = RHSC->getSExtValue();
5841 // powi(x, 0) -> 1.0
5849 Val = -Val;
5856 // TODO: Intrinsics should have fast-math-flags that propagate to these
5873 if (RHSC->getSExtValue() < 0)
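ExpandPowI's general case is classic exponentiation by squaring on the absolute exponent, with a reciprocal at the end for negative exponents. A scalar sketch of that structure (powiSketch is a made-up name; the real code builds FMUL/FDIV nodes):

#include <cassert>

static double powiSketch(double X, int Exp) {
  bool Neg = Exp < 0;
  unsigned Val = Neg ? 0u - (unsigned)Exp : (unsigned)Exp;
  double Res = 1.0;      // powi(x, 0) -> 1.0
  double CurSquare = X;  // x, then x^2, x^4, ...
  while (Val) {
    if (Val & 1)
      Res *= CurSquare;  // multiply in the set bits of the exponent
    CurSquare *= CurSquare;
    Val >>= 1;
  }
  return Neg ? 1.0 / Res : Res;
}

int main() {
  assert(powiSketch(2.0, 10) == 1024.0);
  assert(powiSketch(2.0, -3) == 0.125);
}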
5909 unsigned ScaleInt = Scale->getAsZExtVal();
5944 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5952 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5965 for (SDValue Op : N->op_values())
5995 auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
6012 auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
6021 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
6038 bool VariableIsFunctionInputArg = Variable->isParameter() &&
6039 !DL->getInlinedAt();
6076 unsigned ArgNo = Arg->getArgNo();
6079 else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
6116 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6117 Op = MachineOperand::CreateFI(FINode->getIndex());
6130 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
6131 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6139 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
6150 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
6166 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
6167 V->getType(), std::nullopt);
6173 Op = MachineOperand::CreateReg(VMI->second, false);
6186 assert(Variable->isValidLocationForIntrinsic(DL) &&
6187 "Expected inlined-at fields to agree");
6190 if (Op->isReg())
6191 NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
6193 NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
6218 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6261 ->getCalledFunction()
6262 ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
6264 for (const auto *U : PreallocatedSetup->users()) {
6266 const Function *Fn = UseCall->getCalledFunction();
6267 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6280 if (!Expr->isEntryValue() || !hasSingleElement(Values))
6285 assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6294 Register ArgVReg = ArgIt->getSecond();
6296 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6321 auto *Token = Bundle->Inputs[0].get();
6355 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
6437 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6450 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6556 Type *LengthTy = MI.getLength()->getType();
6572 Type *LengthTy = MI.getLength()->getType();
6588 Type *LengthTy = MI.getLength()->getType();
6633 // it is non-variadic.
6716 MMI.setCurrentCallSite(CI->getZExtValue());
6723 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6811 // clang-format off
6835 // clang-format on
6847 // clang-format off
6855 // clang-format on
6930 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6932 convertStrToRoundingMode(cast<MDString>(MD)->getString());
6936 // Propagate fast-math-flags from IR to node(s).
6960 // TODO: Intrinsic calls should have fast-math-flags.
7007 EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
7008 FPClassTest Test = static_cast<FPClassTest>(
7009 cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
7020 SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
7025 SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
7035 // Use GET_FPENV if it is legal or custom. Otherwise use memory-based node
7045 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7072 int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
7142 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
7150 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
7316 Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
7374 // artificial side-effects.
7381 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7389 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7413 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7417 GFI->addStackRoot(FI->getIndex(), TypeMap);
7438 I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7451 cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7463 Args[0].Ty = Args[0].Val->getType();
7514 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7544 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7562 const int FrameIndex = SI->second;
7566 Offset = -1; // Cannot determine offset from alloca to lifetime object.
7574 auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7575 auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7576 auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7641 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7651 TII->get(TargetOpcode::LOCAL_ESCAPE))
7664 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7667 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7669 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7818 GA->getGlobal(), sdl, Val.getValueType(),
7819 GA->getOffset())});
7861 assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
7862 "Non-zero flags not supported yet");
7869 assert(RetTy->isVoidTy() && "Should not return");
7881 Arg.Ty = I.getOperand(Idx)->getType();
7912 // zero-extended to 64 bits when in registers. Thus the mask is 32 bits to
7914 // zero-extended up to 64 bits to match the pointer.
7916 TLI.getValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
7918 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
7956 assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
7958 unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
7959 bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
8038 // If the zero-is-poison flag is set, we can assume the upper limit
8039 // of the result is VF-1.
8041 !cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero();
8043 if (isa<ScalableVectorType>(I.getOperand(0)->getType()))
8080 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8096 Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
8150 assert(Result.getNode()->getNumValues() == 2);
8163 // floating-point exception masks.
8168 // floating-point exception masks or read floating-point exception flags.
8224 ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
8243 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8248 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8253 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
8287 // Do not serialize variable-length loads of constant memory with
8292 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8317 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8380 PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
8421 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8423 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8446 unsigned AS = PtrOperand->getType()->getPointerAddressSpace();
8467 bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8469 // FIXME: Regular fcmps are FPMathOperators which may have fast-math (nnan)
8470 // flags, but calls that don't return floating-point types can't be
8574 auto Constant = OpValues[1]->getAsZExtVal();
8597 VPIntrin.getOperand(0)->getType());
8657 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8663 EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8689 "Non-null chain expected with non-tail call!");
8729 // Avoid emitting tail calls in functions with the disable-tail-calls
8731 auto *Caller = CB.getParent()->getParent();
8732 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8740 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8749 if (V->getType()->isEmptyTy())
8753 Entry.Node = ArgNode; Entry.Ty = V->getType();
8755 Entry.setAttributes(&CB, I - CB.arg_begin());
8770 // might point to function-local memory), we can't meaningfully tail-call.
8779 Value *V = Bundle->Inputs[0];
8782 Entry.Ty = V->getType();
8787 // Check if target-independent constraints permit a tail call here.
8788 // Target-dependent constraints are checked within TLI->LowerCallTo.
8803 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8804 assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8810 auto *Token = Bundle->Inputs[0].get();
8842 // book-keeping.
8860 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8878 // Do not serialize (non-volatile) loads of constant memory with anything.
8879 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8883 // Do not serialize non-volatile loads against each other.
8917 if (CSize && CSize->getZExtValue() == 0) {
8934 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
8935 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
8947 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
8948 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
8950 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
8951 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
8965 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
8990 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
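The idea behind these memcmp folds: when the result is only tested against zero and the size is a small constant, each side becomes one wide integer load and the call becomes a single compare (equality is byte-order independent, so no bswap is needed). The semantic equivalence in plain C++ (memcpy models the unaligned loads; differ4 is a made-up helper):

#include <cassert>
#include <cstdint>
#include <cstring>

// memcmp(LHS, RHS, 4) != 0  <=>  load-i32(LHS) != load-i32(RHS)
static bool differ4(const void *LHS, const void *RHS) {
  uint32_t A, B;
  std::memcpy(&A, LHS, 4);
  std::memcpy(&B, RHS, 4);
  return A != B;
}

int main() {
  char S1[] = "abcd", S2[] = "abcd", S3[] = "abce";
  assert(differ4(S1, S2) == (std::memcmp(S1, S2, 4) != 0));
  assert(differ4(S1, S3) == (std::memcmp(S1, S3, 4) != 0));
}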
9152 /// See if we can lower a unary floating-point operation into an SDNode with
9172 /// See if we can lower a binary floating-point operation into an SDNode with
9203 if (F->isDeclaration()) {
9204 // Is this an LLVM intrinsic or a target-specific intrinsic?
9205 unsigned IID = F->getIntrinsicID();
9208 IID = II->getIntrinsicID(F);
9216 // Check for well-known libc/libm calls. If the function is internal, it
9220 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
9221 F->hasName() && LibInfo->getLibFunc(*F, Func) &&
9222 LibInfo->hasOptimizedCodeGen(Func)) {
9452 const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9453 const Value *Discriminator = PAB->Inputs[1];
9455 assert(Key->getType()->isIntegerTy(32) && "Invalid ptrauth key");
9456 assert(Discriminator->getType()->isIntegerTy(64) &&
9462 if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
9464 return LowerCallTo(CB, getValue(CalleeCPA->getPointer()), CB.isTailCall(),
9467 // Functions should never be ptrauth-called directly.
9471 TargetLowering::PtrAuthInfo PAI = {Key->getZExtValue(),
9480 /// AsmOperandInfo - This contains information for each constraint that we are
9484 /// CallOperand - If this is the result output operand or a clobber
9489 /// AssignedRegs - If this is a register or register class operand, this
9569 Type *Ty = OpVal->getType();
9576 StackID = TFI->getStackIDForScalableVectors();
9589 /// GetRegistersForValue - Assign registers (virtual or physical) for the
9645 // refers to the input address rather than the pointed-to value.
9652 // i64, which can be passed with two i32 values on a 32-bit machine.
9684 TargetRegisterClass::iterator I = RC->begin();
9689 I = std::find(I, RC->end(), AssignedReg);
9690 if (I == RC->end()) {
9697 for (; NumRegs; --NumRegs, ++I) {
9698 assert(I != RC->end() && "Ran out of registers to allocate!");
9712 for (; OperandNo; --OperandNo) {
9714 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9732 if (IA->hasSideEffects())
9734 if (IA->isAlignStack())
9738 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9743 // meaning of an Other constraint can be target-specific and we can't easily
9765 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9767 // In a normal "call dllimport func" instruction (non-inlineasm) it forces
9772 if (Fn && !Fn->hasDLLImportStorageClass())
9779 /// visitInlineAsm - Handle a call to an InlineAsm object.
9784 /// ConstraintOperands - Information about all of the constraints.
9793 bool HasSideEffect = IA->hasSideEffects();
9845 int OpNo = -1;
9847 IA->collectAsmStrs(AsmStrs);
9882 // pc-related, but lea/mov of a function address may use the GOT.
9917 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
9921 IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
9934 // Third pass: Loop over operands to prepare DAG-level operands. As part of
9990 // C_RegisterClass, and a target-defined fashion for
10020 InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10023 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
10035 Register TiedReg = R->getReg();
10036 MVT RegVT = R->getSimpleValueType(0);
10135 AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
10137 GA->getOffset());
10215 ResultTypes = StructResult->elements();
10216 else if (!CallResultType->isVoidTy())
10222 assert((*CurResultType)->isSized() && "Unexpected unsized type");
10233 // class it is put in, e.g. a double in a general-purpose register on a
10234 // 32-bit machine.
10286 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
10288 for (const SDValue &V : Val->op_values())
10358 if (I.getType()->isPointerTy())
10385 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10388 APInt Lo = CR->getUnsignedMin();
10392 APInt Hi = CR->getUnsignedMax();
10402 unsigned NumVals = Op.getNode()->getNumValues();
10432 const Value *V = Call->getOperand(ArgI);
10434 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
10438 Entry.Ty = V->getType();
10445 .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10447 .setDiscardResult(Call->use_empty())
10450 Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
10477 // Things on the stack are pointer-typed, meaning that they are already
10480 Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
10493 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
10514 // Add the STACKMAP operands, starting with DAG house-keeping.
10525 DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
10531 DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
10546 // Set the root to the target-lowered call chain.
10550 FuncInfo.MF->getFrameInfo().setHasStackMap();
10565 bool HasDef = !CB.getType()->isVoidTy();
10571 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10574 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10576 SymbolicCallee->getValueType(0));
10580 unsigned NumArgs = NArgVal->getAsZExtVal();
10583 // Intrinsics include all meta-operands up to but not including CC.
10599 if (CallEnd->getOpcode() == ISD::EH_LABEL)
10600 CallEnd = CallEnd->getOperand(0).getNode();
10601 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10602 CallEnd = CallEnd->getOperand(0).getNode();
10606 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10608 SDNode *Call = CallEnd->getOperand(0).getNode();
10609 bool HasGlue = Call->getGluedNode();
10615 Ops.push_back(*(Call->op_begin()));
10619 Ops.push_back(*(Call->op_end() - 1));
10623 Ops.push_back(*(Call->op_end() - 2));
10625 Ops.push_back(*(Call->op_end() - 1));
10629 Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
10631 Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
10639 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10653 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
10654 Ops.append(Call->op_begin() + 2, e);
10698 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10788 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10792 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
10816 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10817 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10829 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10830 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10833 int DemoteStackIdx = -100;
10866 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10868 // sret demotion isn't compatible with tail-calls, since the sret argument
10878 if (I == RetTys.size() - 1)
10882 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10884 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10892 if (CLI.RetTy->isPointerTy()) {
10895 cast<PointerType>(CLI.RetTy)->getAddressSpace());
10937 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
10948 if (Args[i].Ty->isPointerTy()) {
10951 cast<PointerType>(Args[i].Ty)->getAddressSpace());
10959 // passed InReg - is surely an HVA
11023 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11025 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11035 // Conservatively only handle 'returned' on non-vectors that can be lowered,
11040 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
11041 CLI.RetTy->getPointerAddressSpace() ==
11042 Args[i].Ty->getPointerAddressSpace())) &&
11075 if (j == NumParts - 1)
11083 if (NeedsRegBlock && Value == NumValues - 1)
11084 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11102 // For a tail call, the return value is merely live-out and there aren't
11125 PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
11166 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
11168 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
11203 if (N->getNumValues() == 1) {
11210 assert((N->getNumValues() == Res->getNumValues()) &&
11214 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
11227 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
11235 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
11242 ExtendType = PreferredExtendIt->second;
11250 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
11255 // of virtual registers for all non-dead arguments.
11257 return A->use_empty();
11259 const BasicBlock &Entry = A->getParent()->front();
11260 for (const User *U : A->users())
11261 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
11283 unsigned NumArgs = FuncInfo->Fn->arg_size();
11286 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
11289 V = V->stripPointerCasts();
11291 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
11294 return &Iter.first->second;
11299 // by the store. Any non-store use of an alloca escapes it and any subsequent
11302 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
11303 // Look for stores, and handle non-store uses conservatively.
11323 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11327 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11342 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11344 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11345 Arg->getType()->isEmptyTy() ||
11346 DL.getTypeStoreSize(Arg->getType()) !=
11347 DL.getTypeAllocSize(AI->getAllocatedType()) ||
11348 !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11361 // Stop scanning if we've seen all arguments. This will happen early in -O0
11362 // builds, which is useful, because -O0 builds have large entry blocks and
11381 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11390 const AllocaInst *AI = ArgCopyIter->second.first;
11391 int FixedIndex = FINode->getIndex();
11394 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
11401 Align RequiredAlignment = AI->getAlign();
11426 const StoreInst *SI = ArgCopyIter->second.second;
11440 SelectionDAG &DAG = SDB->DAG;
11441 SDLoc dl = SDB->getCurSDLoc();
11449 if (!FuncInfo->CanLowerReturn) {
11461 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
11484 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
11493 if (Arg.getType()->isPointerTy()) {
11496 cast<PointerType>(Arg.getType())->getAddressSpace());
11504 // passed InReg - is surely an HVA
11550 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11562 // For in-memory arguments, size and alignment should be passed from FE.
11570 MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
11591 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11592 *CurDAG->getContext(), F.getCallingConv(), VT);
11593 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11594 *CurDAG->getContext(), F.getCallingConv(), VT);
11607 if (i == NumRegs - 1)
11612 if (NeedsRegBlock && Value == NumValues - 1)
11613 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11620 SDValue NewRoot = TLI->LowerFormalArguments(
11642 if (!FuncInfo->CanLowerReturn) {
11651 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11657 MachineFunction& MF = SDB->DAG.getMachineFunction();
11660 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11661 FuncInfo->DemoteRegister = SRetReg;
11663 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11687 NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11698 TLI->supportSwiftError() &&
11701 SDB->setUnusedArgValue(&Arg, InVals[i]);
11706 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11711 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11713 unsigned NumParts = TLI->getNumRegistersForCallingConv(
11714 *CurDAG->getContext(), F.getCallingConv(), VT);
11741 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11744 SDB->getCurSDLoc());
11746 SDB->setValue(&Arg, Res);
11759 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11760 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11769 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11771 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11781 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11783 FuncInfo->ValueMap[&Arg] = Reg;
11788 FuncInfo->InitializeRegForValue(&Arg);
11789 SDB->CopyToExportRegsIfNeeded(&Arg);
11806 MF->getInStackSlotVariableDbgInfo()) {
11809 VI.updateStackSlot(I->second);
11831 for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11832 if (!isa<PHINode>(SuccBB->begin())) continue;
11840 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11842 // At this point we know that there is a 1-1 correspondence between LLVM PHI
11845 for (const PHINode &PN : SuccBB->phis()) {
11851 if (PN.getType()->isEmptyTy())
11874 Reg = I->second;
11903 if (++I == FuncInfo.MF->end())
11927 if (++BBI != FuncInfo.MF->end())
11930 unsigned Size = W.LastCluster - W.FirstCluster + 1;
11938 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
11947 const APInt &SmallValue = Small.Low->getValue();
11948 const APInt &BigValue = Big.Low->getValue();
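This merge applies when the two case constants differ in exactly one bit: OR-ing that bit into the tested value collapses both cases onto the larger constant, halving the number of clusters. An exhaustive check of the example from the comment:

#include <cassert>

static bool isSixOrFour(unsigned X) { return (X | 2) == 6; } // merged form

int main() {
  for (unsigned X = 0; X < 256; ++X)
    assert(isSixOrFour(X) == (X == 6 || X == 4));
}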
11971 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
11992 // which case their relative ordering is non-deterministic. So we use Low
11993 // as a tie-breaker as clusters are guaranteed to never overlap.
11998 a.Low->getValue().slt(b.Low->getValue());
12004 --I;
12005 if (I->Prob > W.LastCluster->Prob)
12007 if (I->Kind == CC_Range && I->MBB == NextMBB) {
12018 UnhandledProbs += I->Prob;
12028 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
12030 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
12031 CurMF->insert(BBI, Fallthrough);
12035 UnhandledProbs -= I->Prob;
12037 switch (I->Kind) {
12040 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
12041 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
12044 MachineBasicBlock *JumpMBB = JT->MBB;
12045 CurMF->insert(BBI, JumpMBB);
12047 auto JumpProb = I->Prob;
12053 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
12054 SE = JumpMBB->succ_end();
12058 FallthroughProb -= DefaultProb / 2;
12059 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
12060 JumpMBB->normalizeSuccProbs();
12066 // JTH->FallthroughUnreachable which will use it to suppress the range
12071 // gadget - out-of-bounds inputs that are impossible in correct
12077 Function &CurFunc = CurMF->getFunction();
12078 if (!CurFunc.hasFnAttribute("branch-target-enforcement"))
12079 JTH->FallthroughUnreachable = true;
12082 if (!JTH->FallthroughUnreachable)
12085 CurMBB->normalizeSuccProbs();
12089 JTH->HeaderBB = CurMBB;
12090 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
12095 JTH->Emitted = true;
12101 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
12103 // The bit test blocks haven't been inserted yet; insert them here.
12104 for (BitTestCase &BTC : BTB->Cases)
12105 CurMF->insert(BBI, BTC.ThisBB);
12108 BTB->Parent = CurMBB;
12109 BTB->Default = Fallthrough;
12111 BTB->DefaultProb = UnhandledProbs;
12112 // If the cases in bit test don't form a contiguous range, we evenly
12115 if (!BTB->ContiguousRange) {
12116 BTB->Prob += DefaultProb / 2;
12117 BTB->DefaultProb -= DefaultProb / 2;
12121 BTB->FallthroughUnreachable = true;
12123 // If we're in the right place, emit the bit test header right now.
12126 BTB->Emitted = true;
12133 if (I->Low == I->High) {
12134 // Check Cond == I->Low.
12137 RHS=I->Low;
12140 // Check I->Low <= Cond <= I->High.
12142 LHS = I->Low;
12144 RHS = I->High;
12152 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
12153 getCurSDLoc(), I->Prob, UnhandledProbs);
12158 SL->SwitchCases.push_back(CB);
12171 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
12173 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
12176 SL->computeSplitWorkItemInfo(W);
12178 // Use the first element on the right as pivot since we will make less-than
12187 const ConstantInt *Pivot = PivotCluster->Low;
12195 // between the known lower bound and Pivot - 1.
12197 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
12198 FirstLeft->Low == W.GE &&
12199 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
12200 LeftMBB = FirstLeft->MBB;
12202 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12203 FuncInfo.MF->insert(BBI, LeftMBB);
12214 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
12215 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
12216 RightMBB = FirstRight->MBB;
12218 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
12219 FuncInfo.MF->insert(BBI, RightMBB);
12233 SL->SwitchCases.push_back(CB);
12262 SwitchMBB->getParent()->getFunction().hasMinSize())
12286 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
12287 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12316 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
12337 SwitchMBB->addSuccessor(DefaultMBB);
12345 SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
12347 SL->findBitTestClusters(Clusters, &SI);
12357 C.Low->getValue().print(dbgs(), true);
12359 dbgs() << '-';
12360 C.High->getValue().print(dbgs(), true);
12370 CaseClusterIt Last = Clusters.end() - 1;
12382 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
12385 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
12415 // Use VECTOR_SHUFFLE for the fixed-length vector
12420 Mask.push_back(NumElts - 1 - i);
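For NumElts = 4 this builds the mask {3, 2, 1, 0}: element i of the result takes element NumElts-1-i of the source, i.e. a full reversal of the vector.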
12439 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12464 // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
12506 int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
12519 // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
12529 // INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12548 MachineInstr *MI = MRI.def_begin(Reg)->getParent();
12550 assert(MI->getOpcode() == TargetOpcode::COPY &&
12552 Reg = MI->getOperand(1).getReg();
12553 MI = MRI.def_begin(Reg)->getParent();
12555 if (MI->getOpcode() == TargetOpcode::COPY) {
12557 Reg = MI->getOperand(1).getReg();
12559 MI = MRI.def_begin(Reg)->getParent();
12562 assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12568 // setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12575 cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
12584 // Re-parse the asm constraints string.
12607 FuncInfo.MBB->addLiveIn(OriginalDef);