Lines matching defs:const
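Note: these are search hits, evidently over LLVM's TargetTransformInfo implementation. The leading number on each hit is the matched line in that source file, and most hits are continuation lines of multi-line function signatures, which is why they read as fragments. Nearly every TargetTransformInfo:: method matched below follows the same delegation pattern, visible in full at line 581: the facade forwards each query to the target-supplied TTIImpl object. A minimal sketch of that pattern, assuming the usual TTIImpl member:

// Forwarded verbatim; the target's implementation answers the query.
bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }

unsigned TargetTransformInfo::getCacheLineSize() const {
  return TTIImpl->getCacheLineSize();
}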

55   explicit NoTTIImpl(const DataLayout &DL)
65 if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
71 Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
76 if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
88 const IntrinsicInst *I,
95 ArrayRef<const Value *> Args)
100 for (const Value *Argument : Arguments)
105 ArrayRef<const Value *> Args,
108 const IntrinsicInst *I,
138 const SCEV *EC = SE.getExitCount(L, BB);
141 if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
202 TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
215 unsigned TargetTransformInfo::getInliningThresholdMultiplier() const {
220 TargetTransformInfo::getInliningCostBenefitAnalysisSavingsMultiplier() const {
226 const {
230 int TargetTransformInfo::getInliningLastCallToStaticBonus() const {
235 TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const {
239 unsigned TargetTransformInfo::getCallerAllocaCost(const CallBase *CB,
240 const AllocaInst *AI) const {
244 int TargetTransformInfo::getInlinerVectorBonusPercent() const {
249 Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
250 Type *AccessType, TTI::TargetCostKind CostKind) const {
255 ArrayRef<const Value *> Ptrs, const Value *Base,
256 const TTI::PointersChainInfo &Info, Type *AccessTy,
257 TTI::TargetCostKind CostKind) const {
264 const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
265 BlockFrequencyInfo *BFI) const {
270 TargetTransformInfo::getInstructionCost(const User *U,
271 ArrayRef<const Value *> Operands,
272 enum TargetCostKind CostKind) const {
279 BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
285 InstructionCost TargetTransformInfo::getBranchMispredictPenalty() const {
289 bool TargetTransformInfo::hasBranchDivergence(const Function *F) const {
293 bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
294 if (const auto *Call = dyn_cast<CallBase>(V)) {
301 bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
306 unsigned ToAS) const {
311 unsigned ToAS) const {
315 unsigned TargetTransformInfo::getFlatAddressSpace() const {
320 SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
325 unsigned ToAS) const {
330 unsigned AS) const {
334 unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const {
338 bool TargetTransformInfo::isSingleThreaded() const {
342 std::pair<const Value *, unsigned>
343 TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const {
348 IntrinsicInst *II, Value *OldV, Value *NewV) const {
352 bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
358 TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const {
362 unsigned TargetTransformInfo::getEpilogueVectorizationMinVF() const {
367 TailFoldingInfo *TFI) const {
372 bool IVUpdateMayOverflow) const {
378 IntrinsicInst &II) const {
384 bool &KnownBitsComputed) const {
393 SimplifyAndSetOp) const {
401 OptimizationRemarkEmitter *ORE) const {
406 PeelingPreferences &PP) const {
410 bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
414 bool TargetTransformInfo::isLegalAddScalableImmediate(int64_t Imm) const {
418 bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
427 int64_t ScalableOffset) const {
432 bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
433 const LSRCost &C2) const {
437 bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const {
441 bool TargetTransformInfo::shouldDropLSRSolutionIfLessProfitable() const {
445 bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const {
449 bool TargetTransformInfo::canMacroFuseCmp() const {
456 TargetLibraryInfo *LibInfo) const {
461 TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
462 ScalarEvolution *SE) const {
467 Align Alignment) const {
472 Align Alignment) const {
477 Align Alignment) const {
481 bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
486 ElementCount NumElements) const {
491 Align Alignment) const {
497 const SmallBitVector &OpcodeMask) const {
502 Align Alignment) const {
507 Align Alignment) const {
512 Align Alignment) const {
517 Align Alignment) const {
522 Align Alignment) const {
527 Align Alignment) const {
533 unsigned AddrSpace) const {
539 Type *DataType) const {
543 bool TargetTransformInfo::enableOrderedReductions() const {
547 bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const {
552 unsigned AddrSpace) const {
556 bool TargetTransformInfo::prefersVectorizedAddressing() const {
562 int64_t Scale, unsigned AddrSpace) const {
569 bool TargetTransformInfo::LSRWithInstrQueries() const {
573 bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
577 bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
581 bool TargetTransformInfo::useAA() const { return TTIImpl->useAA(); }
583 bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
587 unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
591 bool TargetTransformInfo::shouldBuildLookupTables() const {
596 Constant *C) const {
600 bool TargetTransformInfo::shouldBuildRelLookupTables() const {
604 bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
609 Intrinsic::ID ID) const {
614 Intrinsic::ID ID, unsigned ScalarOpdIdx) const {
619 Intrinsic::ID ID, int OpdIdx) const {
624 Intrinsic::ID ID, int RetIdx) const {
629 VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
630 TTI::TargetCostKind CostKind, ArrayRef<Value *> VL) const {
636 ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
637 TTI::TargetCostKind CostKind) const {
641 bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
645 bool TargetTransformInfo::supportsTailCalls() const {
649 bool TargetTransformInfo::supportsTailCallFor(const CallBase *CB) const {
654 bool LoopHasReductions) const {
659 TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
663 bool TargetTransformInfo::enableSelectOptimize() const {
668 const Instruction *I) const {
672 bool TargetTransformInfo::enableInterleavedAccessVectorization() const {
676 bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const {
680 bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const {
689 unsigned *Fast) const {
695 TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
699 bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
704 const Instruction *I) const {
708 bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const {
712 InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
720 const APInt &Imm,
721 Type *Ty) const {
728 TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
729 TTI::TargetCostKind CostKind) const {
736 unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
737 TTI::TargetCostKind CostKind, Instruction *Inst) const {
746 const APInt &Imm, Type *Ty,
747 TTI::TargetCostKind CostKind) const {
755 const Instruction &Inst, const Function &Fn) const {
759 unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const {
763 bool TargetTransformInfo::hasConditionalLoadStoreForType(Type *Ty) const {
768 Type *Ty) const {
772 const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const {
777 TargetTransformInfo::RegisterKind K) const {
781 unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
785 std::optional<unsigned> TargetTransformInfo::getMaxVScale() const {
789 std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const {
793 bool TargetTransformInfo::isVScaleKnownToBeAPowerOfTwo() const {
798 TargetTransformInfo::RegisterKind K) const {
803 bool IsScalable) const {
808 unsigned Opcode) const {
813 Type *ScalarValTy) const {
818 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
823 unsigned TargetTransformInfo::getCacheLineSize() const {
829 TargetTransformInfo::getCacheSize(CacheLevel Level) const {
834 TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const {
838 std::optional<unsigned> TargetTransformInfo::getMinPageSize() const {
843 unsigned TargetTransformInfo::getPrefetchDistance() const {
849 unsigned NumPrefetches, bool HasCall) const {
854 unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const {
858 bool TargetTransformInfo::enableWritePrefetching() const {
862 bool TargetTransformInfo::shouldPrefetchAddressSpace(unsigned AS) const {
869 PartialReductionExtendKind OpBExtend, std::optional<unsigned> BinOp) const {
875 unsigned TargetTransformInfo::getMaxInterleaveFactor(ElementCount VF) const {
880 TargetTransformInfo::getOperandInfo(const Value *V) {
885 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
897 if (const auto *ShuffleInst = dyn_cast<ShuffleVectorInst>(V))
901 const Value *Splat = getSplatValue(V);
915 } else if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
943 ArrayRef<const Value *> Args, const Instruction *CxtI,
944 const TargetLibraryInfo *TLibInfo) const {
969 const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const {
979 ArrayRef<const Value *> Args, const Instruction *CxtI) const {
996 TargetTransformInfo::getCastContextHint(const Instruction *I) {
1000 auto getLoadStoreKind = [](const Value *V, unsigned LdStOp, unsigned MaskedOp,
1002 const Instruction *I = dyn_cast<Instruction>(V);
1009 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1041 TTI::TargetCostKind CostKind, const Instruction *I) const {
1051 unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
1059 unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
1070 OperandValueInfo Op2Info, const Instruction *I) const {
1081 Value *Op0, Value *Op1) const {
1094 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const {
1105 TargetTransformInfo::getVectorInstrCost(const Instruction &I, Type *Val,
1107 unsigned Index) const {
1117 Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
1118 TTI::TargetCostKind CostKind) const {
1128 const Instruction *I) const {
1139 TTI::TargetCostKind CostKind) const {
1147 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1148 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
1157 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1158 Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
1168 bool UseMaskForCond, bool UseMaskForGaps) const {
1177 TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1178 TTI::TargetCostKind CostKind) const {
1187 TTI::TargetCostKind CostKind) const {
1193 unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
1199 const SCEV *Ptr) const {
1205 InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
1211 uint64_t TargetTransformInfo::getMaxMemIntrinsicInlineSizeThreshold() const {
1217 TTI::TargetCostKind CostKind) const {
1226 TTI::TargetCostKind CostKind) const {
1235 FastMathFlags FMF, TTI::TargetCostKind CostKind) const {
1242 TTI::TargetCostKind CostKind) const {
1247 TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
1252 MemIntrinsicInfo &Info) const {
1256 unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
1261 IntrinsicInst *Inst, Type *ExpectedType) const {
1268 std::optional<uint32_t> AtomicElementSize) const {
1278 std::optional<uint32_t> AtomicCpySize) const {
1284 bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
1285 const Function *Callee) const {
1290 TargetTransformInfo::getInlineCallPenalty(const Function *F,
1291 const CallBase &Call,
1292 unsigned DefaultCallPenalty) const {
1297 const Function *Caller, const Function *Callee,
1298 const ArrayRef<Type *> &Types) const {
1303 Type *Ty) const {
1308 Type *Ty) const {
1312 unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const {
1316 bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const {
1320 bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const {
1325 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1331 unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const {
1337 const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
1341 bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
1348 VectorType *VecTy) const {
1355 VectorType *VecTy) const {
1359 bool TargetTransformInfo::preferFixedOverScalableIfEqualCost() const {
1364 ReductionFlags Flags) const {
1369 unsigned Opcode, Type *Ty, ReductionFlags Flags) const {
1373 bool TargetTransformInfo::preferEpilogueVectorization() const {
1378 TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
1382 bool TargetTransformInfo::hasArmWideBranch(bool Thumb) const {
1386 uint64_t TargetTransformInfo::getFeatureMask(const Function &F) const {
1390 bool TargetTransformInfo::isMultiversionedFunction(const Function &F) const {
1394 unsigned TargetTransformInfo::getMaxNumArgs() const {
1398 bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
1404 const IntrinsicInst *II) const {
1408 unsigned TargetTransformInfo::getGISelRematGlobalCost() const {
1412 unsigned TargetTransformInfo::getMinTripCountTailFoldingThreshold() const {
1416 bool TargetTransformInfo::supportsScalableVectors() const {
1420 bool TargetTransformInfo::enableScalableVectorization() const {
1425 Align Alignment) const {
1430 Instruction *I, SmallVectorImpl<Use *> &OpsToSink) const {
1434 bool TargetTransformInfo::isVectorShiftByScalarCheap(Type *Ty) const {
1440 Type *ArrayType) const {
1445 const Function &F,
1446 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {
1455 std::function<Result(const Function &)> TTICallback)
1458 TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
1465 TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) {
1489 TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) {
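For context, a hedged sketch of how the queries matched above are consumed. CostDumpPass and its behavior are hypothetical; the APIs it calls (TargetIRAnalysis, the static getOperandInfo matched at line 880, and getArithmeticInstrCost matched near line 943) are the ones listed above.

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

// Hypothetical new-pass-manager pass; illustrative only.
struct CostDumpPass : PassInfoMixin<CostDumpPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    // Per-function result produced by TargetIRAnalysis::run (line 1458).
    const TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
    for (Instruction &I : instructions(F)) {
      if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
        // Classify operands with the static helper (line 880), then ask
        // for a reciprocal-throughput cost (line 943).
        TargetTransformInfo::OperandValueInfo Op1 =
            TargetTransformInfo::getOperandInfo(BO->getOperand(0));
        TargetTransformInfo::OperandValueInfo Op2 =
            TargetTransformInfo::getOperandInfo(BO->getOperand(1));
        InstructionCost Cost = TTI.getArithmeticInstrCost(
            BO->getOpcode(), BO->getType(),
            TargetTransformInfo::TCK_RecipThroughput, Op1, Op2);
        (void)Cost; // e.g. feed into an unrolling or vectorization decision
      }
    }
    return PreservedAnalyses::all();
  }
};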