Lines matching refs:VT (references to the MVT value-type variable VT in LLVM's AArch64FastISel.cpp)

183 bool isTypeLegal(Type *Ty, MVT &VT);
184 bool isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed = false);
188 bool simplifyAddress(Address &Addr, MVT VT);
226 unsigned emitLoad(MVT VT, MVT ResultVT, Address Addr, bool WantZExt = true,
228 bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
230 bool emitStoreRelease(MVT VT, unsigned SrcReg, unsigned AddrReg,
237 unsigned emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm);
266 unsigned materializeInt(const ConstantInt *CI, MVT VT);
267 unsigned materializeFP(const ConstantFP *CFP, MVT VT);
318 static unsigned getImplicitScaleFactor(MVT VT) { in getImplicitScaleFactor() argument
319 switch (VT.SimpleTy) { in getImplicitScaleFactor()
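The matcher elides the switch body after 319; it maps each simple value type to its byte width for scaled addressing, with zero meaning "no implicit scale". A reconstruction from the surrounding backend, worth double-checking against the actual revision:

    static unsigned getImplicitScaleFactor(MVT VT) {
      switch (VT.SimpleTy) {
      default:
        return 0; // invalid -- no implicit scale for this type
      case MVT::i1: // fall-through
      case MVT::i8:
        return 1;
      case MVT::i16:
        return 2;
      case MVT::i32: // fall-through
      case MVT::f32:
        return 4;
      case MVT::i64: // fall-through
      case MVT::f64:
        return 8;
      }
    }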
370 unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) { in materializeInt() argument
371 if (VT > MVT::i64) in materializeInt()
375 return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue()); in materializeInt()
378 const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass in materializeInt()
380 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in materializeInt()
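Lines 370-380 read together: values wider than i64 are rejected, non-zero constants go through fastEmit_i, and zero is materialized as a COPY from the architectural zero register (WZR or XZR). A hedged reconstruction of the elided body (the BuildMI debug-location argument varies across LLVM versions):

    unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {
      if (VT > MVT::i64)
        return 0;

      if (!CI->isZero())
        return fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

      // Zero is cheapest as a copy from the zero register.
      const TargetRegisterClass *RC = (VT == MVT::i64) ? &AArch64::GPR64RegClass
                                                       : &AArch64::GPR32RegClass;
      unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR;
      Register ResultReg = createResultReg(RC);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), ResultReg)
          .addReg(ZeroReg, getKillRegState(true));
      return ResultReg;
    }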
387 unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) { in materializeFP() argument
393 if (VT != MVT::f32 && VT != MVT::f64) in materializeFP()
397 bool Is64Bit = (VT == MVT::f64); in materializeFP()
404 return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm); in materializeFP()
417 Register ResultReg = createResultReg(TLI.getRegClassFor(VT)); in materializeFP()
435 Register ResultReg = createResultReg(TLI.getRegClassFor(VT)); in materializeFP()
540 MVT VT = CEVT.getSimpleVT(); in fastMaterializeConstant() local
544 assert(VT == MVT::i64 && "Expected 64-bit pointers"); in fastMaterializeConstant()
545 return materializeInt(ConstantInt::get(Type::getInt64Ty(*Context), 0), VT); in fastMaterializeConstant()
549 return materializeInt(CI, VT); in fastMaterializeConstant()
551 return materializeFP(CFP, VT); in fastMaterializeConstant()
561 MVT VT; in fastMaterializeFloatZero() local
562 if (!isTypeLegal(CFP->getType(), VT)) in fastMaterializeFloatZero()
565 if (VT != MVT::f32 && VT != MVT::f64) in fastMaterializeFloatZero()
568 bool Is64Bit = (VT == MVT::f64); in fastMaterializeFloatZero()
571 return fastEmitInst_r(Opc, TLI.getRegClassFor(VT), ZReg); in fastMaterializeFloatZero()
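Between lines 568 and 571 the function picks a zero source and an FMOV opcode; the trick is that +0.0 has an all-zero bit pattern, so it can be moved from the integer zero register without a constant-pool load. A sketch of the two elided locals (FMOVWSr/FMOVXDr are the fmov Sd,Wn / fmov Dd,Xn opcodes in the AArch64 backend):

    // Lines 569-570, elided by the matcher (reconstruction):
    unsigned ZReg = Is64Bit ? AArch64::XZR : AArch64::WZR;         // all-zero source
    unsigned Opc  = Is64Bit ? AArch64::FMOVXDr : AArch64::FMOVWSr; // int -> fp move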
982 bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) { in isTypeLegal() argument
991 VT = evt.getSimpleVT(); in isTypeLegal()
994 if (VT == MVT::f128) in isTypeLegal()
999 return TLI.isTypeLegal(VT); in isTypeLegal()
1006 bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) { in isTypeSupported() argument
1010 if (isTypeLegal(Ty, VT)) in isTypeSupported()
1015 if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) in isTypeSupported()
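Lines 1006-1015 show the shape of isTypeSupported: legal types pass through isTypeLegal, and the sub-register-sized integer types are additionally accepted because every caller can widen them. A hedged reconstruction of the full predicate:

    bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) {
      // Vector types are only accepted when the caller opts in.
      if (Ty->isVectorTy() && !IsVectorAllowed)
        return false;

      if (isTypeLegal(Ty, VT))
        return true;

      // i1/i8/i16 are not legal on AArch64, but users of this predicate
      // can sign- or zero-extend them to a legal operation width.
      if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
        return true;

      return false;
    }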
1029 bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) { in simplifyAddress() argument
1033 unsigned ScaleFactor = getImplicitScaleFactor(VT); in simplifyAddress()
1477 MVT VT = EVT.getSimpleVT(); in emitCmp() local
1479 switch (VT.SimpleTy) { in emitCmp()
1487 return emitICmp(VT, LHS, RHS, IsZExt); in emitCmp()
1490 return emitFCmp(VT, LHS, RHS); in emitCmp()
1549 unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm) { in emitAdd_ri_() argument
1552 ResultReg = emitAddSub_ri(false, VT, Op0, -Imm); in emitAdd_ri_()
1554 ResultReg = emitAddSub_ri(true, VT, Op0, Imm); in emitAdd_ri_()
1559 unsigned CReg = fastEmit_i(VT, VT, ISD::Constant, Imm); in emitAdd_ri_()
1563 ResultReg = emitAddSub_rr(true, VT, Op0, CReg); in emitAdd_ri_()
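Lines 1549-1563 sketch a two-stage strategy for add-with-immediate: first try the immediate form (a negative immediate becomes a subtract of its negation), and if the value does not fit the encoding, materialize it and use the register-register form. The elided control flow, reconstructed:

    unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm) {
      unsigned ResultReg;
      if (Imm < 0)
        ResultReg = emitAddSub_ri(false, VT, Op0, -Imm); // sub Op0, -Imm
      else
        ResultReg = emitAddSub_ri(true, VT, Op0, Imm);   // add Op0, Imm

      if (ResultReg)
        return ResultReg;

      // Immediate did not encode: materialize it and add registers.
      unsigned CReg = fastEmit_i(VT, VT, ISD::Constant, Imm);
      if (!CReg)
        return 0;

      return emitAddSub_rr(true, VT, Op0, CReg);
    }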
1656 MVT VT = std::max(MVT::i32, RetVT.SimpleTy); in emitLogicalOp() local
1657 ResultReg = fastEmit_rr(VT, VT, ISDOpc, LHSReg, RHSReg); in emitLogicalOp()
1757 unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr, in emitLoad() argument
1759 if (!TLI.allowsMisalignedMemoryAccesses(VT)) in emitLoad()
1763 if (!simplifyAddress(Addr, VT)) in emitLoad()
1766 unsigned ScaleFactor = getImplicitScaleFactor(VT); in emitLoad()
1834 switch (VT.SimpleTy) { in emitLoad()
1874 if (VT == MVT::i1) { in emitLoad()
1882 if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) { in emitLoad()
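The two tail checks in emitLoad encode AArch64 facts: a loaded i1 must be masked down to a single bit (1874), and a 32-bit load already zeroes the upper half of the X register, so the i64 zero-extension at 1882 is only a subregister insertion. A hedged sketch of that widening (exact BuildMI arguments vary by LLVM version):

    if (WantZExt && RetVT == MVT::i64 && VT <= MVT::i32) {
      // The w-form load implicitly zero-extends; re-wrap the result as the
      // sub_32 lane of a fresh 64-bit register instead of emitting an extend.
      Register Reg64 = createResultReg(&AArch64::GPR64RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(AArch64::SUBREG_TO_REG), Reg64)
          .addImm(0)
          .addReg(ResultReg, getKillRegState(true))
          .addImm(AArch64::sub_32);
      ResultReg = Reg64;
    }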
1895 MVT VT; in selectAddSub() local
1896 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true)) in selectAddSub()
1899 if (VT.isVector()) in selectAddSub()
1907 ResultReg = emitAdd(VT, I->getOperand(0), I->getOperand(1)); in selectAddSub()
1910 ResultReg = emitSub(VT, I->getOperand(0), I->getOperand(1)); in selectAddSub()
1921 MVT VT; in selectLogicalOp() local
1922 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true)) in selectLogicalOp()
1925 if (VT.isVector()) in selectLogicalOp()
1933 ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1)); in selectLogicalOp()
1936 ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1)); in selectLogicalOp()
1939 ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1)); in selectLogicalOp()
1950 MVT VT; in selectLoad() local
1954 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true) || in selectLoad()
1980 MVT RetVT = VT; in selectLoad()
1987 RetVT = VT; in selectLoad()
1992 RetVT = VT; in selectLoad()
1998 emitLoad(VT, RetVT, Addr, WantZExt, createMachineMemOperandFor(I)); in selectLoad()
2021 if (RetVT == MVT::i64 && VT <= MVT::i32) { in selectLoad()
2060 bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg, in emitStoreRelease() argument
2064 switch (VT.SimpleTy) { in emitStoreRelease()
2082 bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr, in emitStore() argument
2084 if (!TLI.allowsMisalignedMemoryAccesses(VT)) in emitStore()
2088 if (!simplifyAddress(Addr, VT)) in emitStore()
2091 unsigned ScaleFactor = getImplicitScaleFactor(VT); in emitStore()
2123 switch (VT.SimpleTy) { in emitStore()
2151 MVT VT; in selectStore() local
2156 if (!isTypeSupported(Op0->getType(), VT, /*IsVectorAllowed=*/true)) in selectStore()
2179 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectStore()
2182 VT = MVT::getIntegerVT(VT.getSizeInBits()); in selectStore()
2183 SrcReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectStore()
2202 return emitStoreRelease(VT, SrcReg, AddrReg, in selectStore()
2212 if (!emitStore(VT, SrcReg, Addr, createMachineMemOperandFor(I))) in selectStore()
2279 MVT VT; in emitCompareAndBranch() local
2280 if (!isTypeSupported(LHS->getType(), VT)) in emitCompareAndBranch()
2283 unsigned BW = VT.getSizeInBits(); in emitCompareAndBranch()
2325 if (VT == MVT::i1) in emitCompareAndBranch()
2374 SrcReg = emitIntExt(VT, SrcReg, MVT::i32, /*isZExt=*/true); in emitCompareAndBranch()
2676 MVT VT; in selectSelect() local
2677 if (!isTypeSupported(I->getType(), VT)) in selectSelect()
2682 switch (VT.SimpleTy) { in selectSelect()
2939 MVT VT = ArgVT.getSimpleVT().SimpleTy; in fastLowerArguments() local
2940 if (VT.isFloatingPoint() && !Subtarget->hasFPARMv8()) in fastLowerArguments()
2943 if (VT.isVector() && in fastLowerArguments()
2947 if (VT >= MVT::i1 && VT <= MVT::i64) in fastLowerArguments()
2949 else if ((VT >= MVT::f16 && VT <= MVT::f64) || VT.is64BitVector() || in fastLowerArguments()
2950 VT.is128BitVector()) in fastLowerArguments()
2977 MVT VT = TLI.getSimpleValueType(DL, Arg.getType()); in fastLowerArguments() local
2980 if (VT >= MVT::i1 && VT <= MVT::i32) { in fastLowerArguments()
2983 VT = MVT::i32; in fastLowerArguments()
2984 } else if (VT == MVT::i64) { in fastLowerArguments()
2987 } else if (VT == MVT::f16) { in fastLowerArguments()
2990 } else if (VT == MVT::f32) { in fastLowerArguments()
2993 } else if ((VT == MVT::f64) || VT.is64BitVector()) { in fastLowerArguments()
2996 } else if (VT.is128BitVector()) { in fastLowerArguments()
3205 MVT VT; in fastLowerCall() local
3206 if (!isTypeLegal(Val->getType(), VT) && in fastLowerCall()
3207 !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) in fastLowerCall()
3211 if (VT.isVector() || VT.getSizeInBits() > 64) in fastLowerCall()
3214 OutVTs.push_back(VT); in fastLowerCall()
3311 MVT VT; in tryEmitSmallMemCpy() local
3314 VT = MVT::i64; in tryEmitSmallMemCpy()
3316 VT = MVT::i32; in tryEmitSmallMemCpy()
3318 VT = MVT::i16; in tryEmitSmallMemCpy()
3320 VT = MVT::i8; in tryEmitSmallMemCpy()
3326 VT = MVT::i32; in tryEmitSmallMemCpy()
3328 VT = MVT::i16; in tryEmitSmallMemCpy()
3330 VT = MVT::i8; in tryEmitSmallMemCpy()
3334 unsigned ResultReg = emitLoad(VT, VT, Src); in tryEmitSmallMemCpy()
3338 if (!emitStore(VT, ResultReg, Dest)) in tryEmitSmallMemCpy()
3341 int64_t Size = VT.getSizeInBits() / 8; in tryEmitSmallMemCpy()
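Lines 3311-3341 are a chunked inline memcpy: pick the widest type that fits the remaining length (bounded by i32/i16/i8 when the pointers are under-aligned), copy one chunk with a load/store pair, and advance by its byte size. The same loop as a self-contained model (smallMemCpy is a hypothetical stand-in, not the backend function):

    #include <cstdint>
    #include <cstring>

    // Widest power-of-two chunk first, constrained by the known alignment.
    void smallMemCpy(uint8_t *Dst, const uint8_t *Src, uint64_t Len,
                     uint64_t Alignment) {
      while (Len) {
        uint64_t Size;
        if (Alignment >= 8)
          Size = Len >= 8 ? 8 : Len >= 4 ? 4 : Len >= 2 ? 2 : 1;
        else if (Alignment == 4)
          Size = Len >= 4 ? 4 : Len >= 2 ? 2 : 1;
        else if (Alignment == 2)
          Size = Len >= 2 ? 2 : 1;
        else
          Size = 1;
        std::memcpy(Dst, Src, Size); // stands in for the emitLoad/emitStore pair
        Dst += Size;
        Src += Size;
        Len -= Size;
      }
    }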
3595 MVT VT; in fastLowerIntrinsicCall() local
3596 if (!isTypeLegal(II->getType(), VT)) in fastLowerIntrinsicCall()
3600 switch (VT.SimpleTy) { in fastLowerIntrinsicCall()
3613 Register ResultReg = createResultReg(TLI.getRegClassFor(VT)); in fastLowerIntrinsicCall()
3631 MVT VT; in fastLowerIntrinsicCall() local
3632 if (!isTypeLegal(RetTy, VT)) in fastLowerIntrinsicCall()
3639 unsigned ResultReg = fastEmit_r(VT, VT, ISD::FSQRT, Op0Reg); in fastLowerIntrinsicCall()
3657 MVT VT; in fastLowerIntrinsicCall() local
3658 if (!isTypeLegal(RetTy, VT)) in fastLowerIntrinsicCall()
3661 if (VT != MVT::i32 && VT != MVT::i64) in fastLowerIntrinsicCall()
3696 ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true); in fastLowerIntrinsicCall()
3700 ResultReg1 = emitAdd(VT, LHS, RHS, /*SetFlags=*/true); in fastLowerIntrinsicCall()
3704 ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true); in fastLowerIntrinsicCall()
3708 ResultReg1 = emitSub(VT, LHS, RHS, /*SetFlags=*/true); in fastLowerIntrinsicCall()
3721 if (VT == MVT::i32) { in fastLowerIntrinsicCall()
3724 fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32); in fastLowerIntrinsicCall()
3731 assert(VT == MVT::i64 && "Unexpected value type."); in fastLowerIntrinsicCall()
3734 MulReg = emitMul_rr(VT, LHSReg, RHSReg); in fastLowerIntrinsicCall()
3735 unsigned SMULHReg = fastEmit_rr(VT, VT, ISD::MULHS, LHSReg, RHSReg); in fastLowerIntrinsicCall()
3736 emitSubs_rs(VT, SMULHReg, MulReg, AArch64_AM::ASR, 63, in fastLowerIntrinsicCall()
3751 if (VT == MVT::i32) { in fastLowerIntrinsicCall()
3758 MulReg = fastEmitInst_extractsubreg(VT, MulReg, AArch64::sub_32); in fastLowerIntrinsicCall()
3760 assert(VT == MVT::i64 && "Unexpected value type."); in fastLowerIntrinsicCall()
3763 MulReg = emitMul_rr(VT, LHSReg, RHSReg); in fastLowerIntrinsicCall()
3764 unsigned UMULHReg = fastEmit_rr(VT, VT, ISD::MULHU, LHSReg, RHSReg); in fastLowerIntrinsicCall()
3765 emitSubs_rr(VT, AArch64::XZR, UMULHReg, /*WantResult=*/false); in fastLowerIntrinsicCall()
3772 ResultReg1 = createResultReg(TLI.getRegClassFor(VT)); in fastLowerIntrinsicCall()
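Lines 3721-3765 implement the multiply-overflow checks: the low half comes from MUL, the high half from SMULH/UMULH, and overflow is detected by comparing the high half against what a non-overflowing product would leave there (the sign-extension of the low half for signed, zero for unsigned). The same predicates in portable C++, using the GCC/Clang __int128 extension:

    #include <cstdint>

    // smul.with.overflow: overflow iff Hi != Lo >> 63, which is the
    // "subs SMULHReg, MulReg, asr #63" sequence at line 3736.
    bool smulOverflows(int64_t A, int64_t B) {
      __int128 Wide = (__int128)A * B;
      int64_t Lo = (int64_t)Wide;
      int64_t Hi = (int64_t)(Wide >> 64);
      return Hi != (Lo >> 63);
    }

    // umul.with.overflow: overflow iff the high half is nonzero, which is
    // the "subs XZR, UMULHReg" comparison at line 3765.
    bool umulOverflows(uint64_t A, uint64_t B) {
      unsigned __int128 Wide = (unsigned __int128)A * B;
      return (uint64_t)(Wide >> 64) != 0;
    }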
4605 MVT VT; in selectMul() local
4606 if (!isTypeSupported(I->getType(), VT, /*IsVectorAllowed=*/true)) in selectMul()
4609 if (VT.isVector()) in selectMul()
4622 MVT SrcVT = VT; in selectMul()
4626 MVT VT; in selectMul() local
4627 if (isValueAvailable(ZExt) && isTypeSupported(ZExt->getSrcTy(), VT)) { in selectMul()
4628 SrcVT = VT; in selectMul()
4635 MVT VT; in selectMul() local
4636 if (isValueAvailable(SExt) && isTypeSupported(SExt->getSrcTy(), VT)) { in selectMul()
4637 SrcVT = VT; in selectMul()
4649 emitLSL_ri(VT, SrcVT, Src0Reg, ShiftVal, IsZExt); in selectMul()
4665 unsigned ResultReg = emitMul_rr(VT, Src0Reg, Src1Reg); in selectMul()
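Lines 4622-4649 strength-reduce a multiply by a power of two into a shift, and fold a feeding zext/sext into it by passing the narrower source type to emitLSL_ri (on AArch64 the extend-plus-shift pair becomes a single bitfield-insert-style instruction). The scalar identity being exploited:

    #include <cstdint>

    // zext(Src) * (1 << Shift) == zext(Src) << Shift: one extending shift
    // instead of an extend followed by a multiply (valid for Shift < 32).
    uint64_t mulPow2ZExt(uint32_t Src, unsigned Shift) {
      return (uint64_t)Src << Shift;
    }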
4839 MVT VT; in selectSDiv() local
4840 if (!isTypeLegal(I->getType(), VT)) in selectSDiv()
4847 if ((VT != MVT::i32 && VT != MVT::i64) || !C || in selectSDiv()
4857 unsigned ResultReg = emitASR_ri(VT, VT, Src0Reg, Lg2); in selectSDiv()
4865 unsigned AddReg = emitAdd_ri_(VT, Src0Reg, Pow2MinusOne); in selectSDiv()
4870 if (!emitICmp_ri(VT, Src0Reg, 0)) in selectSDiv()
4875 if (VT == MVT::i64) { in selectSDiv()
4889 unsigned ZeroReg = (VT == MVT::i64) ? AArch64::XZR : AArch64::WZR; in selectSDiv()
4892 ResultReg = emitAddSub_rs(/*UseAdd=*/false, VT, ZeroReg, SelectReg, in selectSDiv()
4895 ResultReg = emitASR_ri(VT, VT, SelectReg, Lg2); in selectSDiv()
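Lines 4839-4895 implement signed division by a power of two: bias a negative dividend by 2^Lg2 - 1 (the emitAdd_ri_ at 4865 plus the compare/select around 4870), then arithmetic-shift right; the emitAddSub_rs at 4892 covers the negated-divisor case. The core rewrite, as a self-contained check:

    #include <cassert>
    #include <cstdint>

    // sdiv by 2^Lg2 with truncation toward zero (assumes 0 < Lg2 < 63).
    int64_t sdivPow2(int64_t X, unsigned Lg2) {
      int64_t Bias = (int64_t(1) << Lg2) - 1;
      int64_t Adj = X < 0 ? X + Bias : X; // the CMP/CSEL pair
      return Adj >> Lg2;                  // the ASR
    }

    int main() {
      assert(sdivPow2(-7, 1) == -3 && sdivPow2(7, 1) == 3);
      assert(sdivPow2(-8, 3) == -1 && sdivPow2(5, 3) == 0);
      return 0;
    }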
4938 MVT VT = TLI.getPointerTy(DL); in selectGetElementPtr() local
4960 N = emitAdd_ri_(VT, N, TotalOffs); in selectGetElementPtr()
4973 unsigned C = fastEmit_i(VT, VT, ISD::Constant, ElementSize); in selectGetElementPtr()
4976 IdxN = emitMul_rr(VT, IdxN, C); in selectGetElementPtr()
4980 N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN); in selectGetElementPtr()
4986 N = emitAdd_ri_(VT, N, TotalOffs); in selectGetElementPtr()
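In selectGetElementPtr, constant-indexed steps accumulate into TotalOffs and are added once (4960, 4986), while a variable index is scaled by the element size and added as a register (4973-4980), all in the 64-bit pointer type picked at 4938. The flattened address computation (gepAddress is a hypothetical model, not a backend helper):

    #include <cstdint>

    // N = Base + TotalOffs + Idx * ElementSize, on 64-bit pointers.
    uint64_t gepAddress(uint64_t Base, int64_t TotalOffs, uint64_t Idx,
                        uint64_t ElementSize) {
      return Base + TotalOffs + Idx * ElementSize;
    }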
5003 MVT VT; in selectAtomicCmpXchg() local
5004 if (!isTypeLegal(RetTy, VT)) in selectAtomicCmpXchg()
5011 if (VT == MVT::i32) { in selectAtomicCmpXchg()
5015 } else if (VT == MVT::i64) { in selectAtomicCmpXchg()
5045 .addDef(VT == MVT::i32 ? AArch64::WZR : AArch64::XZR) in selectAtomicCmpXchg()
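The selectAtomicCmpXchg tail at 5045 writes a SUBS result to WZR/XZR, i.e. a plain compare of the loaded value against the expected one so the flags can produce the i1 success bit. Semantically, the CMP_SWAP_32/64 pseudo plus that compare is a strong compare-and-swap; a portable model (casModel is hypothetical):

    #include <atomic>
    #include <cstdint>

    // Returns the success bit and the observed old value, mirroring the two
    // results of the cmpxchg instruction being selected.
    bool casModel(std::atomic<uint32_t> &Mem, uint32_t Expected,
                  uint32_t Desired, uint32_t &Old) {
      Old = Expected;
      return Mem.compare_exchange_strong(Old, Desired);
    }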