//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/InstructionCost.h"
#include <functional>
#include <optional>
#include <utility>

namespace llvm {

namespace Intrinsic {
typedef unsigned ID;
}

class AllocaInst;
class AssumptionCache;
class BlockFrequencyInfo;
class DominatorTree;
class BranchInst;
class Function;
class GlobalValue;
class InstCombiner;
class OptimizationRemarkEmitter;
class InterleavedAccessInfo;
class IntrinsicInst;
class LoadInst;
class Loop;
class LoopInfo;
class LoopVectorizationLegality;
class ProfileSummaryInfo;
class RecurrenceDescriptor;
class SCEV;
class ScalarEvolution;
class SmallBitVector;
class StoreInst;
class SwitchInst;
class TargetLibraryInfo;
class Type;
class VPIntrinsic;
struct KnownBits;

/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  /// This is the pointer that the intrinsic is loading from or storing to.
  /// If this is non-null, then analysis/optimization passes can assume that
  /// this intrinsic is functionally equivalent to a load/store from this
  /// pointer.
  Value *PtrVal = nullptr;

  // Ordering for atomic operations.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId = 0;

  bool ReadMem = false;
  bool WriteMem = false;
  bool IsVolatile = false;

  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
};
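// Illustrative sketch (not part of the interface): a pass such as EarlyCSE
// can treat a target intrinsic like an ordinary load once the target has
// filled in a MemIntrinsicInfo for it. The getTgtMemIntrinsic hook and the
// processAsLoad helper used here are assumptions for the example.
//
//   MemIntrinsicInfo Info;
//   if (TTI.getTgtMemIntrinsic(II, Info) && Info.ReadMem && !Info.WriteMem &&
//       Info.isUnordered())
//     // Safe to value-number II against prior loads of Info.PtrVal.
//     processAsLoad(II, Info.PtrVal, Info.MatchingId);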
/// Attributes of a target-dependent hardware loop.
struct HardwareLoopInfo {
  HardwareLoopInfo() = delete;
  HardwareLoopInfo(Loop *L);
  Loop *L = nullptr;
  BasicBlock *ExitBlock = nullptr;
  BranchInst *ExitBranch = nullptr;
  const SCEV *ExitCount = nullptr;
  IntegerType *CountType = nullptr;
  Value *LoopDecrement = nullptr; // Decrement the loop counter by this
                                  // value in every iteration.
  bool IsNestingLegal = false;    // Can a hardware loop be a parent to
                                  // another hardware loop?
  bool CounterInReg = false;      // Should the loop counter be updated in
                                  // the loop via a phi?
  bool PerformEntryTest = false;  // Generate the intrinsic which also performs
                                  // icmp ne zero on the loop counter value and
                                  // produces an i1 to guard the loop entry.
  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                               DominatorTree &DT, bool ForceNestedLoop = false,
                               bool ForceHardwareLoopPHI = false);
  bool canAnalyze(LoopInfo &LI);
};

class IntrinsicCostAttributes {
  const IntrinsicInst *II = nullptr;
  Type *RetTy = nullptr;
  Intrinsic::ID IID;
  SmallVector<Type *, 4> ParamTys;
  SmallVector<const Value *, 4> Arguments;
  FastMathFlags FMF;
  // If ScalarizationCost is invalid, the cost of scalarizing the
  // arguments and the return value will be computed based on types.
  InstructionCost ScalarizationCost = InstructionCost::getInvalid();

public:
  IntrinsicCostAttributes(
      Intrinsic::ID Id, const CallBase &CI,
      InstructionCost ScalarCost = InstructionCost::getInvalid(),
      bool TypeBasedOnly = false);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<Type *> Tys,
      FastMathFlags Flags = FastMathFlags(), const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                          ArrayRef<const Value *> Args);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
      ArrayRef<Type *> Tys, FastMathFlags Flags = FastMathFlags(),
      const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  Intrinsic::ID getID() const { return IID; }
  const IntrinsicInst *getInst() const { return II; }
  Type *getReturnType() const { return RetTy; }
  FastMathFlags getFlags() const { return FMF; }
  InstructionCost getScalarizationCost() const { return ScalarizationCost; }
  const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
  const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }

  bool isTypeBasedOnly() const {
    return Arguments.empty();
  }

  bool skipScalarizationCost() const { return ScalarizationCost.isValid(); }
};
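// Illustrative sketch (not part of the interface): building a type-only cost
// query for a fixed-width ctlz. The getIntrinsicInstrCost entry point is
// assumed here; it is part of this interface but not shown in this excerpt.
//
//   LLVMContext &Ctx = F.getContext();
//   auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
//   IntrinsicCostAttributes Attrs(Intrinsic::ctlz, VecTy,
//                                 {VecTy, Type::getInt1Ty(Ctx)});
//   InstructionCost Cost =
//       TTI.getIntrinsicInstrCost(Attrs, TTI::TCK_RecipThroughput);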
enum class TailFoldingStyle {
  /// Don't use tail folding.
  None,
  /// Use a predicate only to mask operations on data in the loop.
  /// When the VL is not known to be a power of 2, this method requires a
  /// runtime overflow check for the i + VL increment in the loop, because it
  /// compares the scalar induction variable against the trip count rounded
  /// up by VL, which may overflow. When the VL is a power of 2, both the
  /// increment and the rounded-up trip count overflow to 0, which does not
  /// require a runtime check since the loop is exited when the loop
  /// induction variable equals the rounded-up trip count, which are both 0.
  Data,
  /// Same as Data, but avoids using the get.active.lane.mask intrinsic to
  /// calculate the mask and instead implements this with a
  /// splat/stepvector/cmp.
  /// FIXME: Can this kind be removed now that SelectionDAGBuilder expands the
  /// active.lane.mask intrinsic when it is not natively supported?
  DataWithoutLaneMask,
  /// Use a predicate to control both data and control flow.
  /// This method always requires a runtime overflow check for the i + VL
  /// increment inside the loop, because it uses the result directly in the
  /// active.lane.mask to calculate the mask for the next iteration. If the
  /// increment overflows, the mask is no longer correct.
  DataAndControlFlow,
  /// Use a predicate to control both data and control flow, but modify
  /// the trip count so that a runtime overflow check can be avoided
  /// and such that the scalar epilogue loop can always be removed.
  DataAndControlFlowWithoutRuntimeCheck,
  /// Use predicated EVL instructions for tail folding.
  /// Indicates that VP intrinsics should be used.
  DataWithEVL,
};

struct TailFoldingInfo {
  TargetLibraryInfo *TLI;
  LoopVectorizationLegality *LVL;
  InterleavedAccessInfo *IAI;
  TailFoldingInfo(TargetLibraryInfo *TLI, LoopVectorizationLegality *LVL,
                  InterleavedAccessInfo *IAI)
      : TLI(TLI), LVL(LVL), IAI(IAI) {}
};

class TargetTransformInfo;
typedef TargetTransformInfo TTI;

/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  enum PartialReductionExtendKind { PR_None, PR_SignExtend, PR_ZeroExtend };

  /// Get the kind of extension that an instruction represents.
  static PartialReductionExtendKind
  getPartialReductionExtendKind(Instruction *I);

  /// Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// The kind of cost model.
  ///
  /// There are several different cost models that can be customized by the
  /// target. The normalization of each cost model may be target specific.
  /// e.g. TCK_SizeAndLatency should be comparable to target thresholds such as
  /// those derived from MCSchedModel::LoopMicroOpBufferSize etc.
  enum TargetCostKind {
    TCK_RecipThroughput, ///< Reciprocal throughput.
    TCK_Latency,         ///< The latency of the instruction.
    TCK_CodeSize,        ///< Instruction code size.
    TCK_SizeAndLatency   ///< The weighted sum of size and latency.
  };
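  // Illustrative sketch (not part of the interface): passes typically derive
  // the cost kind from the optimization mode they run in. hasOptSize() is the
  // standard Function query.
  //
  //   TTI::TargetCostKind CostKind =
  //       F.hasOptSize() ? TTI::TCK_CodeSize : TTI::TCK_RecipThroughput;
  //   InstructionCost C = TTI.getInstructionCost(&I, CostKind);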
  /// Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs, which
  /// works better with simple integral values. Thus this enum only provides
  /// constants. Also note that the returned costs are signed integers to make
  /// it natural to add, subtract, and test with zero (a common boundary
  /// condition). It is not expected that 2^32 is a realistic cost to be
  /// modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of
  /// code-size cost and execution cost. A free instruction is typically one
  /// that folds into another instruction. For example, reg-to-reg moves can
  /// often be skipped by renaming the registers in the CPU, but they are
  /// still encoded and thus wouldn't be considered 'free' here.
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };

  /// Estimate the cost of a GEP operation when lowered.
  ///
  /// \p PointeeType is the source element type of the GEP.
  /// \p Ptr is the base pointer operand.
  /// \p Operands is the list of indices following the base pointer.
  ///
  /// \p AccessType is a hint as to what type of memory might be accessed by
  /// users of the GEP. getGEPCost will use it to determine if the GEP can be
  /// folded into the addressing mode of a load/store. If AccessType is null,
  /// then the resulting target type based on PointeeType will be used as an
  /// approximation.
  InstructionCost
  getGEPCost(Type *PointeeType, const Value *Ptr,
             ArrayRef<const Value *> Operands, Type *AccessType = nullptr,
             TargetCostKind CostKind = TCK_SizeAndLatency) const;
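  // Illustrative sketch (not part of the interface): querying whether a GEP
  // is expected to fold into its user's addressing mode. GEP and LoadTy are
  // assumed from the caller's context.
  //
  //   SmallVector<const Value *, 4> Indices(GEP->indices());
  //   InstructionCost C =
  //       TTI.getGEPCost(GEP->getSourceElementType(),
  //                      GEP->getPointerOperand(), Indices,
  //                      /*AccessType=*/LoadTy);
  //   bool FoldsIntoAddressing = (C == TTI::TCC_Free);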
  /// Describe known properties for a set of pointers.
  struct PointersChainInfo {
    /// All the GEPs in a set have the same base address.
    unsigned IsSameBaseAddress : 1;
    /// The following properties are only valid if IsSameBaseAddress is set.
    /// True if all pointers are separated by a unit stride.
    unsigned IsUnitStride : 1;
    /// True if the distance between any two neighbouring pointers is a known
    /// value.
    unsigned IsKnownStride : 1;
    unsigned Reserved : 29;

    bool isSameBase() const { return IsSameBaseAddress; }
    bool isUnitStride() const { return IsSameBaseAddress && IsUnitStride; }
    bool isKnownStride() const { return IsSameBaseAddress && IsKnownStride; }

    static PointersChainInfo getUnitStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/1,
              /*IsKnownStride=*/1, 0};
    }
    static PointersChainInfo getKnownStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
              /*IsKnownStride=*/1, 0};
    }
    static PointersChainInfo getUnknownStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
              /*IsKnownStride=*/0, 0};
    }
  };
  static_assert(sizeof(PointersChainInfo) == 4, "Was size increase justified?");

  /// Estimate the cost of a chain of pointer operations (typically the
  /// pointer operands of a chain of loads or stores within the same block)
  /// when lowered.
  /// \p AccessTy is the type of the loads/stores that will ultimately use the
  /// \p Ptrs.
  InstructionCost getPointersChainCost(
      ArrayRef<const Value *> Ptrs, const Value *Base,
      const PointersChainInfo &Info, Type *AccessTy,
      TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const;
  unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const;

  /// \returns The bonus of inlining the last call to a static function.
  int getInliningLastCallToStaticBonus() const;

  /// \returns A value to be added to the inlining threshold.
  unsigned adjustInliningThreshold(const CallBase *CB) const;

  /// \returns The cost of having an Alloca in the caller if not inlined, to
  /// be added to the threshold.
  unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const;

  /// \returns Vector bonus in percent.
  ///
  /// Vector bonuses: We want to more aggressively inline vector-dense kernels
  /// and apply this bonus based on the percentage of vector instructions. A
  /// bonus is applied if the vector instructions exceed 50% and half that
  /// amount is applied if they exceed 10%. Note that these bonuses are
  /// somewhat arbitrary and evolved over time by accident as much as because
  /// they are principled bonuses.
  /// FIXME: It would be nice to base the bonus values on something more
  /// scientific. A target may have no bonus on vector instructions.
  int getInlinerVectorBonusPercent() const;

  /// \return the expected cost of a memcpy, which could e.g. depend on the
  /// source/destination type and alignment and the number of bytes copied.
  InstructionCost getMemcpyCost(const Instruction *I) const;

  /// Returns the maximum memset/memcpy size in bytes for which it is still
  /// profitable to inline the call.
  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const;

  /// \return The estimated number of case clusters when lowering \p SI.
  /// \p JTSize is set to the jump table size only when \p SI is suitable for
  /// a jump table.
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) const;
  /// Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered.
  ///
  /// \p Operands is a list of operands which can be a result of
  /// transformations of the current operands. The number of operands on the
  /// list must equal the number of current operands the IR user has, and
  /// their order on the list must match the order of the current operands.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  InstructionCost getInstructionCost(const User *U,
                                     ArrayRef<const Value *> Operands,
                                     TargetCostKind CostKind) const;

  /// This is a helper function which calls the three-argument
  /// getInstructionCost with \p Operands which are the current operands U
  /// has.
  InstructionCost getInstructionCost(const User *U,
                                     TargetCostKind CostKind) const {
    SmallVector<const Value *, 4> Operands(U->operand_values());
    return getInstructionCost(U, Operands, CostKind);
  }
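  // Illustrative sketch (not part of the interface): summing the size cost of
  // a basic block, e.g. to decide whether duplicating it is acceptable. BB
  // and the DupThreshold limit are assumptions for the example.
  //
  //   InstructionCost BlockCost;
  //   for (const Instruction &I : *BB)
  //     BlockCost += TTI.getInstructionCost(&I, TTI::TCK_CodeSize);
  //   if (BlockCost.isValid() && BlockCost <= DupThreshold)
  //     ...; // Duplication is acceptable.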
  /// If a branch or a select condition is skewed in one direction by more
  /// than this factor, it is very likely to be predicted correctly.
  BranchProbability getPredictableBranchThreshold() const;

  /// Returns the estimated penalty, in latency, of a branch misprediction.
  /// This indicates how aggressively the target wants unpredictable branches
  /// eliminated. A zero return value means extra optimization applied to them
  /// should be minimal.
  InstructionCost getBranchMispredictPenalty() const;

  /// Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to
  /// conditional branches.
  ///
  /// If \p F is passed, it provides a context function. If \p F is known to
  /// only execute in a single-threaded environment, the target may choose to
  /// skip uniformity analysis and assume all values are uniform.
  bool hasBranchDivergence(const Function *F = nullptr) const;

  /// Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent UniformityAnalysis.
  bool isSourceOfDivergence(const Value *V) const;

  /// Returns true for the target-specific set of operations which produce a
  /// uniform result even when taking non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

  /// Query the target whether the specified address space cast from FromAS to
  /// ToAS is valid.
  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  /// Return false if an \p AS0 address cannot possibly alias an \p AS1
  /// address.
  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const;

  /// Returns the address space ID for a target's 'flat' address space. Note
  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
  /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of
  /// memory with different address spaces. Accessing a memory location
  /// through a pointer with this address space is expected to be legal but
  /// slower than accessing the same memory location through a pointer with a
  /// different address space.
  ///
  /// This is for targets with different pointer representations which can
  /// be converted with the addrspacecast instruction. If a pointer is
  /// converted to this address space, optimizations should attempt to replace
  /// the access with the source address space.
  ///
  /// \returns ~0u if the target does not have such a flat address space to
  /// optimize away.
  unsigned getFlatAddressSpace() const;

  /// Return any intrinsic address operand indexes which may be rewritten if
  /// they use a flat address space pointer.
  ///
  /// \returns true if the intrinsic was handled.
  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const;

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  /// Return true if globals in this address space can have initializers other
  /// than `undef`.
  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const;

  unsigned getAssumedAddrSpace(const Value *V) const;

  bool isSingleThreaded() const;

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const;

  /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
  /// NewV, which has a different address space. This should happen for every
  /// operand index that collectFlatAddressOperands returned for the intrinsic.
  /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
  /// new value (which may be the original \p II with modified operands).
  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const;
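  // Illustrative sketch (not part of the interface): the protocol used by a
  // pass like InferAddressSpaces. The target first reports which operands are
  // rewritable, then performs the rewrite once a translation is known. OldPtr
  // and NewPtr are assumed to come from the pass's own analysis.
  //
  //   SmallVector<int, 2> OpIndexes;
  //   if (TTI.collectFlatAddressOperands(OpIndexes, II->getIntrinsicID())) {
  //     Value *NewV = TTI.rewriteIntrinsicWithAddressSpace(II, OldPtr, NewPtr);
  //     if (NewV && NewV != II)
  //       II->replaceAllUsesWith(NewV);
  //   }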
  /// Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately as a call is a single small instruction, but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  struct LSRCost {
    /// TODO: Some of these could be merged. Also, a lexical ordering
    /// isn't always optimal.
    unsigned Insns;
    unsigned NumRegs;
    unsigned AddRecCost;
    unsigned NumIVMuls;
    unsigned NumBaseAdds;
    unsigned ImmCost;
    unsigned SetupCost;
    unsigned ScaleCost;
  };

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getInstructionCost values returned by this API, and the expectation is
    /// that the unrolled loop's instructions when run through that interface
    /// should not exceed this cost. However, this is only an estimate. Also,
    /// specific loops may be unrolled even with a cost above this threshold
    /// if deemed profitable. Set this to UINT_MAX to disable the loop body
    /// cost restriction.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (the value should be no less than 100).
    ///   BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                      MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%,
    /// then we boost the threshold by a factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size,
    /// like OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current
    /// cost threshold and other factors.
    unsigned Count;
    /// Default unroll count for loops with a run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    // Set the maximum unrolling factor. The unrolling factor may be selected
    // using the appropriate cost threshold, but may not exceed this number
    // (set to UINT_MAX to disable). This does not apply in cases where the
    // loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum upper bound of the trip count. Allowing MaxUpperBound
    /// to be overridden by a target gives more flexibility in certain cases.
    /// By default, MaxUpperBound uses UnrollMaxUpperBound, whose value is 8.
    unsigned MaxUpperBound;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount,
    /// but applies even if full unrolling is selected. This allows a target
    /// to fall back to partial unrolling if full unrolling is above
    /// FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents the number of instructions optimized away when the "back
    // edge" becomes a "fall through" in the unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // feeding it.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when
    /// computing the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unrolling to any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using the trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow unrolling of all the iterations of the runtime loop remainder.
    bool UnrollRemainder;
    /// Allow unroll and jam. Used to enable unroll and jam for the target.
    bool UnrollAndJam;
    /// Threshold for unroll and jam, for the inner loop size. The 'Threshold'
    /// value above is used during unroll and jam for the outer loop size.
    /// This value is used in the same manner to limit the size of the inner
    /// loop.
    unsigned UnrollAndJamInnerLoopThreshold;
    /// Don't allow loop unrolling to simulate more than this number of
    /// iterations when checking full unroll profitability.
    unsigned MaxIterationsCountToAnalyze;
    /// Don't disable runtime unrolling for loops which were vectorized.
    bool UnrollVectorizedLoop = false;
    /// Don't allow runtime unrolling if expanding the trip count takes more
    /// than SCEVExpansionBudget.
    unsigned SCEVExpansionBudget;
    /// Allow runtime unrolling of multi-exit loops. Should only be set if the
    /// target determined that multi-exit unrolling is profitable for the
    /// loop. If set to false, fall back to the generic logic to determine
    /// whether multi-exit unrolling is profitable.
    bool RuntimeUnrollMultiExit;
  };
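  // Illustrative sketch (not part of the interface): how a target
  // implementation might nudge these defaults from its own hook. MyTTIImpl
  // and the concrete values are made up for the example.
  //
  //   void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  //                                           TTI::UnrollingPreferences &UP,
  //                                           OptimizationRemarkEmitter *ORE) {
  //     UP.Partial = true;  // Partial unrolling is cheap on this core.
  //     UP.Runtime = true;
  //     UP.MaxCount = 4;    // Limited by the small loop buffer.
  //     UP.Threshold += 50; // Allow somewhat larger unrolled bodies.
  //   }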
  /// Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                               UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const;

  /// Query the target whether it would be profitable to convert the given
  /// loop into a hardware loop.
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const;

  // Query the target for the minimum vectorization factor at which epilogue
  // vectorization should be considered.
  unsigned getEpilogueVectorizationMinVF() const;

  /// Query the target whether it would be preferred to create a predicated
  /// vector loop, which can avoid the need to emit a scalar epilogue loop.
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const;

  /// Query the target for the preferred style of tail folding.
  /// \param IVUpdateMayOverflow Tells whether it is known if the IV update
  /// may (or will never) overflow for the suggested VF/UF in the given loop.
  /// Targets can use this information to select a more optimal tail folding
  /// style. The value conservatively defaults to true, such that no
  /// assumptions are made on overflow.
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const;

  // Parameters that control the loop peeling transformation.
  struct PeelingPreferences {
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, a
    /// peeling factor is selected based on profile information and other
    /// factors.
    unsigned PeelCount;
    /// Allow peeling off loop iterations.
    bool AllowPeeling;
    /// Allow peeling off loop iterations for loop nests.
    bool AllowLoopNestsPeeling;
    /// Allow peeling based on profile. Used to enable peeling off all
    /// iterations based on the provided profile.
    /// If the value is true, the peeling cost model can decide to peel only
    /// some iterations, and in that case it will set this to false.
    bool PeelProfiledIterations;
  };

  /// Get target-customized preferences for the generic loop peeling
  /// transformation. The caller will initialize \p PP with the current
  /// target-independent defaults with information from \p L and \p SE.
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             PeelingPreferences &PP) const;
  /// Targets can implement their own combines for target-specific
  /// intrinsics. This function will be called from the InstCombine pass every
  /// time a target-specific intrinsic is encountered.
  ///
  /// \returns std::nullopt to not do anything target specific, or a value
  /// that will be returned from the InstCombiner. It is also possible to stop
  /// further processing of the intrinsic by returning nullptr.
  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask,
      KnownBits &Known, bool &KnownBitsComputed) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;
  /// @}

  /// \name Scalar Target Information
  /// @{

  /// Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. HW
  /// support is considered "Fast" if it can outperform, or is on par with,
  /// the SW implementation when the population is sparse; otherwise, it is
  /// considered "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// Return true if the specified immediate is a legal add immediate, that
  /// is, the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// Return true if adding the specified scalable immediate is legal, that
  /// is, the target has add instructions which can add a register with the
  /// immediate (multiplied by vscale) without having to materialize the
  /// immediate into a register.
  bool isLegalAddScalableImmediate(int64_t Imm) const;

  /// Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into
  /// a register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// If the target returns true from LSRWithInstrQueries(), I may be valid.
  /// \param ScalableOffset represents a quantity of bytes multiplied by
  /// vscale, an invariant value known only at runtime. Most targets should
  /// not accept a scalable offset.
  ///
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0, Instruction *I = nullptr,
                             int64_t ScalableOffset = 0) const;
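  // Illustrative sketch (not part of the interface): checking whether
  // [BaseReg + 4 * Scale] is a legal mode for a load before LSR commits to a
  // formula. AccessTy and AS are assumed from context.
  //
  //   bool Legal = TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/nullptr,
  //                                          /*BaseOffset=*/0,
  //                                          /*HasBaseReg=*/true,
  //                                          /*Scale=*/4, AS);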
  /// Return true if the LSR cost of C1 is lower than that of C2.
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const;

  /// Return true if LSR's major cost is the number of registers. Targets
  /// which implement their own isLSRCostLess and do not use the number of
  /// registers as the major cost should return false; otherwise return true.
  bool isNumRegsMajorCostOfLSR() const;

  /// Return true if LSR should drop a found solution if it's calculated to be
  /// less profitable than the baseline.
  bool shouldDropLSRSolutionIfLessProfitable() const;

  /// \returns true if LSR should not optimize a chain that includes \p I.
  bool isProfitableLSRChainElement(Instruction *I) const;

  /// Return true if the target can fuse a compare and branch.
  /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
  /// calculation for the instructions in a loop.
  bool canMacroFuseCmp() const;

  /// Return true if the target can save a compare for the loop count; for
  /// example, a hardware loop saves a compare.
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) const;

  enum AddressingModeKind {
    AMK_PreIndexed,
    AMK_PostIndexed,
    AMK_None
  };

  /// Return the preferred addressing mode LSR should make efforts to
  /// generate.
  AddressingModeKind getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const;

  /// Return true if the target supports masked store.
  bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked load.
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports nontemporal store.
  bool isLegalNTStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports nontemporal load.
  bool isLegalNTLoad(Type *DataType, Align Alignment) const;

  /// \returns true if the target supports broadcasting a load to a vector of
  /// type <NumElements x ElementTy>.
  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;

  /// Return true if the target supports masked scatter.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked gather.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
  /// Return true if the target forces scalarizing of llvm.masked.gather
  /// intrinsics.
  bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const;
  /// Return true if the target forces scalarizing of llvm.masked.scatter
  /// intrinsics.
  bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const;

  /// Return true if the target supports masked compress store.
  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked expand load.
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports strided load.
  bool isLegalStridedLoadStore(Type *DataType, Align Alignment) const;

  /// Return true if the target supports interleaved access for the given
  /// vector type \p VTy, interleave factor \p Factor, alignment \p Alignment
  /// and address space \p AddrSpace.
  bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
                                    Align Alignment, unsigned AddrSpace) const;
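  // Illustrative sketch (not part of the interface): a vectorizer deciding
  // how to lower a conditional load. VecTy and Alignment are assumed from
  // context.
  //
  //   if (TTI.isLegalMaskedLoad(VecTy, Alignment))
  //     ...; // Emit llvm.masked.load.
  //   else if (TTI.isLegalMaskedGather(VecTy, Alignment) &&
  //            !TTI.forceScalarizeMaskedGather(VecTy, Alignment))
  //     ...; // Fall back to llvm.masked.gather.
  //   else
  //     ...; // Scalarize with explicit control flow.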
  // Return true if the target supports masked vector histograms.
  bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) const;

  /// Return true if this is an alternating opcode pattern that can be lowered
  /// to a single instruction on the target. On X86 this is the addsub
  /// instruction, which corresponds to a Shuffle + FAdd + FSub pattern in IR.
  /// This function expects two opcodes: \p Opcode0 and \p Opcode1, selected
  /// by \p OpcodeMask. The mask contains one bit per lane and is a `0` when
  /// \p Opcode0 is selected and a `1` when \p Opcode1 is selected.
  /// \p VecTy is the vector type of the instruction to be generated.
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const;

  /// Return true if we should be enabling ordered reductions for the target.
  bool enableOrderedReductions() const;

  /// Return true if the target has a unified operation to calculate division
  /// and remainder. If so, the additional implicit multiplication and
  /// subtraction required to calculate a remainder from a division are free.
  /// This can enable more aggressive transformations for division and
  /// remainder than would typically be allowed using throughput or size cost
  /// models.
  bool hasDivRemOp(Type *DataType, bool IsSigned) const;

  /// Return true if the given instruction (assumed to be a memory access
  /// instruction) has a volatile variant. If that's the case then we can
  /// avoid an addrspacecast to the generic AS for volatile loads/stores. The
  /// default implementation returns false, which prevents address space
  /// inference for volatile loads/stores.
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;

  /// Return true if the target doesn't mind addresses in vectors.
  bool prefersVectorizedAddressing() const;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace = 0) const;
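  // Illustrative sketch (not part of the interface): when hasDivRemOp returns
  // true, a pass such as DivRemPairs can keep both operations instead of
  // rewriting the remainder as X - (X / Y) * Y, since both results come from
  // one instruction.
  //
  //   if (TTI.hasDivRemOp(X->getType(), /*IsSigned=*/true))
  //     ...; // Keep 'sdiv X, Y' and 'srem X, Y' as a pair.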
  /// Return true if the loop strength reduce pass should make
  /// Instruction* based TTI queries to isLegalAddressingMode(). This is
  /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
  /// immediate offset and no index register.
  bool LSRWithInstrQueries() const;

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// Return true if it is profitable to hoist an instruction in the
  /// then/else blocks to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  bool useAA() const;

  /// Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// Returns the estimated number of registers required to represent \p Ty.
  unsigned getRegUsageForType(Type *Ty) const;

  /// Return true if switches should be turned into lookup tables for the
  /// target.
  bool shouldBuildLookupTables() const;

  /// Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// Return true if lookup tables should be turned into relative lookup
  /// tables.
  bool shouldBuildRelLookupTables() const;

  /// Return true if a function which is cold at all call sites should use the
  /// coldcc calling convention.
  bool useColdCCForColdCall(Function &F) const;

  bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) const;

  /// Identifies if the vector form of the intrinsic has a scalar operand.
  bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                          unsigned ScalarOpdIdx) const;

  /// Identifies if the vector form of the intrinsic is overloaded on the type
  /// of the operand at index \p OpdIdx, or on the return type if \p OpdIdx is
  /// -1.
  bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                              int OpdIdx) const;

  /// Identifies if the vector form of the intrinsic that returns a struct is
  /// overloaded at the struct element index \p RetIdx.
  bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
                                                        int RetIdx) const;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors. The involved values may be passed in VL if
  /// Insert is true.
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind,
                                           ArrayRef<Value *> VL = {}) const;

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The (potentially vector) types to use for each
  /// argument are passed via Tys.
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) const;

  /// If the target has efficient vector element load/store instructions, it
  /// can return true here so that insertion/extraction costs are not added
  /// to the scalarization cost of a load/store.
  bool supportsEfficientVectorElementLoadStore() const;

  /// If the target supports tail calls.
  bool supportsTailCalls() const;

  /// If the target supports tail calls on \p CB.
  bool supportsTailCallFor(const CallBase *CB) const;

  /// Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;
  /// Returns options for the expansion of memcmp. IsZeroCmp is true if this
  /// is the expansion of memcmp(p1, p2, s) == 0.
  struct MemCmpExpansionOptions {
    // Return true if memcmp expansion is enabled.
    operator bool() const { return MaxNumLoads > 0; }

    // Maximum number of load operations.
    unsigned MaxNumLoads = 0;

    // The list of available load sizes (in bytes), sorted in decreasing
    // order.
    SmallVector<unsigned, 8> LoadSizes;

    // For memcmp expansion when the memcmp result is only compared equal or
    // not-equal to 0, allow up to this number of load pairs per block. As an
    // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
    //   a0 = load2bytes &a[0]
    //   b0 = load2bytes &b[0]
    //   a2 = load1byte  &a[2]
    //   b2 = load1byte  &b[2]
    //   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
    unsigned NumLoadsPerBlock = 1;

    // Set to true to allow overlapping loads. For example, 7-byte compares
    // can be done with two 4-byte compares instead of 4+2+1-byte compares.
    // This requires all loads in LoadSizes to be doable in an unaligned way.
    bool AllowOverlappingLoads = false;

    // Sometimes, the amount of data that needs to be compared is smaller
    // than the standard register size, but it cannot be loaded with just one
    // load instruction. For example, if the size of the memory comparison is
    // 6 bytes, we can handle it more efficiently by loading all 6 bytes in a
    // single block and generating an 8-byte number, instead of generating
    // two separate blocks with conditional jumps for 4- and 2-byte loads.
    // This approach simplifies the process and produces the comparison
    // result as normal. This array lists the allowed sizes of memcmp tails
    // that can be merged into one block.
    SmallVector<unsigned, 4> AllowedTailExpansions;
  };
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const;
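  // Illustrative sketch (not part of the interface): how a target
  // implementation might enable expansion for a 64-bit core with unaligned
  // access. MyTTIImpl and the concrete values are made up for the example.
  //
  //   TTI::MemCmpExpansionOptions
  //   MyTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  //     TTI::MemCmpExpansionOptions Options;
  //     Options.MaxNumLoads = OptSize ? 4 : 8;
  //     Options.LoadSizes = {8, 4, 2, 1}; // Decreasing order.
  //     Options.AllowOverlappingLoads = true;
  //     if (IsZeroCmp)
  //       Options.NumLoadsPerBlock = 2;   // Allow xor/or trees per block.
  //     return Options;
  //   }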
  /// Should the Select Optimization pass be enabled and run.
  bool enableSelectOptimize() const;

  /// Should the Select Optimization pass treat the given instruction like a
  /// select, potentially converting it to a conditional branch. This can
  /// include select-like instructions like or(zext(c), x) that can be
  /// converted to selects.
  bool shouldTreatInstructionLikeSelect(const Instruction *I) const;

  /// Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// Enable matching of interleaved access groups that contain predicated
  /// accesses or gaps and are therefore vectorized using masked
  /// vector loads/stores.
  bool enableMaskedInterleavedAccessVectorization() const;

  /// Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because the semantics of vector and scalar
  /// floating-point operations may differ. For example, ARM NEON v7 SIMD math
  /// does not support IEEE-754 denormal numbers, while, depending on the
  /// platform, scalar floating-point math does.
  /// This applies to floating-point math operations and calls, not memory
  /// operations, shuffles, or casts.
  bool isFPVectorizationPotentiallyUnsafe() const;

  /// Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      Align Alignment = Align(1),
                                      unsigned *Fast = nullptr) const;

  /// Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// Return true if the cost of the instruction is too high to speculatively
  /// execute and should be kept behind a branch.
  /// This normally just wraps around a getInstructionCost() call, but some
  /// targets might report a low TCK_SizeAndLatency value that is incompatible
  /// with the fixed TCC_Expensive value.
  /// NOTE: This assumes the instruction passes isSafeToSpeculativelyExecute().
  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const;
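  // Illustrative sketch (not part of the interface): SimplifyCFG-style
  // speculation guarded by this query; isSafeToSpeculativelyExecute comes
  // from ValueTracking.
  //
  //   if (isSafeToSpeculativelyExecute(&I) &&
  //       !TTI.isExpensiveToSpeculativelyExecute(&I))
  //     ...; // Hoist I above the branch.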
  /// Return true if it is faster to check if a floating-point value is NaN
  /// (or not-NaN) versus a comparison against a constant FP zero value.
  /// Targets should override this if materializing a 0.0 for comparison is
  /// generally as cheap as checking for ordered/unordered.
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;

  /// Return the expected cost of supporting the floating-point operation
  /// of the specified type.
  InstructionCost getFPOpCost(Type *Ty) const;

  /// Return the expected cost of materializing the given integer
  /// immediate of the specified type.
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) const;

  /// Return the expected cost of materializing the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const;
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) const;

  /// Return the expected cost for the given integer when optimising
  /// for size. This is different from the other integer immediate cost
  /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as Aarch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
  /// more beneficial constant hoisting is).
  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                        const APInt &Imm, Type *Ty) const;

  /// It can be advantageous to detach complex constants from their uses to
  /// make their generation cheaper. This hook allows targets to report when
  /// such transformations might negatively affect the code generation of the
  /// underlying operation. The motivating example is divides, where hoisting
  /// constants prevents the code generator from transforming them into
  /// combinations of simpler operations.
  bool preferToKeepConstantsAttached(const Instruction &Inst,
                                     const Function &Fn) const;

  /// @}

  /// \name Vector Target Information
  /// @{

  /// The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,        ///< Broadcast element 0 to all other elements.
    SK_Reverse,          ///< Reverse the order of the vector.
    SK_Select,           ///< Selects elements from the corresponding lane of
                         ///< either source operand. This is equivalent to a
                         ///< vector select with a constant condition operand.
    SK_Transpose,        ///< Transpose two vectors.
    SK_InsertSubvector,  ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector, ///< ExtractSubvector. Index indicates start offset.
    SK_PermuteTwoSrc,    ///< Merge elements from two source vectors into one
                         ///< with any shuffle mask.
    SK_PermuteSingleSrc, ///< Shuffle elements of a single source vector with
                         ///< any shuffle mask.
    SK_Splice            ///< Concatenates elements from the first input
                         ///< vector with elements of the second input vector,
                         ///< returning a vector of the same type as the input
                         ///< vectors. Index indicates start offset in the
                         ///< first input vector.
  };

  /// Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is a uniform constant.
    OK_NonUniformConstantValue // Operand is a non-uniform constant value.
  };

  /// Additional properties of an operand's values.
  enum OperandValueProperties {
    OP_None = 0,
    OP_PowerOf2 = 1,
    OP_NegatedPowerOf2 = 2,
  };

  // Describes the values an operand can take. We're in the process
  // of migrating uses of OperandValueKind and OperandValueProperties
  // to use this class, and then will change the internal representation.
  struct OperandValueInfo {
    OperandValueKind Kind = OK_AnyValue;
    OperandValueProperties Properties = OP_None;

    bool isConstant() const {
      return Kind == OK_UniformConstantValue ||
             Kind == OK_NonUniformConstantValue;
    }
    bool isUniform() const {
      return Kind == OK_UniformConstantValue || Kind == OK_UniformValue;
    }
    bool isPowerOf2() const {
      return Properties == OP_PowerOf2;
    }
    bool isNegatedPowerOf2() const {
      return Properties == OP_NegatedPowerOf2;
    }

    OperandValueInfo getNoProps() const {
      return {Kind, OP_None};
    }
  };
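  // Illustrative sketch (not part of the interface): telling the cost model
  // that a division's right-hand side is a uniform power-of-2 constant, which
  // many targets lower to a cheap shift-based sequence. getArithmeticInstrCost
  // is declared later in this interface; Ty is assumed from context.
  //
  //   TTI::OperandValueInfo RHSInfo = {TTI::OK_UniformConstantValue,
  //                                    TTI::OP_PowerOf2};
  //   InstructionCost C = TTI.getArithmeticInstrCost(
  //       Instruction::UDiv, Ty, TTI::TCK_RecipThroughput,
  //       /*Opd1Info=*/{TTI::OK_AnyValue, TTI::OP_None}, RHSInfo);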
  /// \return the number of registers in the target-provided register class.
  unsigned getNumberOfRegisters(unsigned ClassID) const;

  /// \return true if the target supports load/store that enables fault
  /// suppression of memory operands when the source condition is false.
  bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const;

  /// \return the target-provided register class ID for the provided type,
  /// accounting for type promotion and other type-legalization techniques
  /// that the target might apply. However, it specifically does not account
  /// for the scalarization or splitting of vector types. Should a vector type
  /// require scalarization or splitting into multiple underlying vector
  /// registers, that type should be mapped to a register class containing no
  /// registers. Specifically, this is designed to provide a simple,
  /// high-level view of the register allocation later performed by the
  /// backend. These register classes don't necessarily map onto the register
  /// classes used by the backend.
  /// FIXME: It's not currently possible to determine how many registers
  /// are used by the provided type.
  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;

  /// \return the target-provided register class name.
  const char *getRegisterClassName(unsigned ClassID) const;

  enum RegisterKind { RGK_Scalar, RGK_FixedWidthVector, RGK_ScalableVector };

  /// \return The width of the largest scalar or vector register type.
  TypeSize getRegisterBitWidth(RegisterKind K) const;

  /// \return The width of the smallest vector register type.
  unsigned getMinVectorRegisterBitWidth() const;

  /// \return The maximum value of vscale if the target specifies an
  /// architectural maximum vector length, and std::nullopt otherwise.
  std::optional<unsigned> getMaxVScale() const;

  /// \return the value of vscale to tune the cost model for.
  std::optional<unsigned> getVScaleForTuning() const;

  /// \return true if vscale is known to be a power of 2.
  bool isVScaleKnownToBeAPowerOfTwo() const;

  /// \return True if the vectorization factor should be chosen to
  /// make the vector of the smallest element type match the size of a
  /// vector register. For wider element types, this could result in
  /// creating vectors that span multiple vector registers.
  /// If false, the vectorization factor will be chosen based on the
  /// size of the widest element type.
  /// \p K Register Kind for vectorization.
  bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;

  /// \return The minimum vectorization factor for types of the given element
  /// bit width, or 0 if there is no minimum VF. The returned value only
  /// applies when shouldMaximizeVectorBandwidth returns true.
  /// If IsScalable is true, the returned ElementCount must be a scalable VF.
  ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;

  /// \return The maximum vectorization factor for types of the given element
  /// bit width and opcode, or 0 if there is no maximum VF.
  /// Currently only used by the SLP vectorizer.
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;

  /// \return The minimum vectorization factor for the store instruction.
  /// Given the initial estimate of the minimum vector factor and the store
  /// value type, it tries to find the lowest possible VF that might still be
  /// profitable for vectorization.
  /// \param VF Initial estimate of the minimum vector factor.
  /// \param ScalarMemTy Scalar memory type of the store operation.
  /// \param ScalarValTy Scalar type of the stored value.
  /// Currently only used by the SLP vectorizer.
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const;

  /// \return True if it should be considered for address type promotion.
  /// \p AllowPromotionWithoutCommonHeader is set to true if promoting \p I is
  /// profitable without finding other extensions fed by the same input.
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
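  // Illustrative sketch (not part of the interface): deriving a fixed-width
  // vectorization factor from the register width, as a vectorizer might for
  // an element type of ElemBits bits (an assumed variable).
  //
  //   TypeSize RegWidth = TTI.getRegisterBitWidth(TTI::RGK_FixedWidthVector);
  //   unsigned VF = std::max<unsigned>(
  //       1, RegWidth.getKnownMinValue() / ElemBits);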
1232   bool shouldConsiderAddressTypePromotion(
1233       const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
1234
1235   /// \return The size of a cache line in bytes.
1236   unsigned getCacheLineSize() const;
1237
1238   /// The possible cache levels.
1239   enum class CacheLevel {
1240     L1D, // The L1 data cache
1241     L2D, // The L2 data cache
1242
1243     // We currently do not model L3 caches, as their sizes differ widely between
1244     // microarchitectures. Also, we do not currently have a use for L3 cache
1245     // size modeling.
1246   };
1247
1248   /// \return The size of the cache level in bytes, if available.
1249   std::optional<unsigned> getCacheSize(CacheLevel Level) const;
1250
1251   /// \return The associativity of the cache level, if available.
1252   std::optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
1253
1254   /// \return The minimum architectural page size for the target.
1255   std::optional<unsigned> getMinPageSize() const;
1256
1257   /// \return How far ahead of a load we should place the prefetch
1258   /// instruction. This is currently measured in number of
1259   /// instructions.
1260   unsigned getPrefetchDistance() const;
1261
1262   /// Some HW prefetchers can handle accesses up to a certain constant stride.
1263   /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
1264   /// and the arguments provided are meant to serve as a basis for deciding this
1265   /// for a particular loop.
1266   ///
1267   /// \param NumMemAccesses Number of memory accesses in the loop.
1268   /// \param NumStridedMemAccesses Number of memory accesses for which
1269   ///                              ScalarEvolution could find a known
1270   ///                              stride.
1271   /// \param NumPrefetches Number of software prefetches that will be
1272   ///                      emitted as determined by the addresses
1273   ///                      involved and the cache line size.
1274   /// \param HasCall True if the loop contains a call.
1275   ///
1276   /// \return This is the minimum stride in bytes where it makes sense to start
1277   ///         adding SW prefetches. The default is 1, i.e. prefetch with any
1278   ///         stride.
1279   unsigned getMinPrefetchStride(unsigned NumMemAccesses,
1280                                 unsigned NumStridedMemAccesses,
1281                                 unsigned NumPrefetches, bool HasCall) const;
1282
1283   /// \return The maximum number of iterations to prefetch ahead. If
1284   /// the required number of iterations is more than this number, no
1285   /// prefetching is performed.
1286   unsigned getMaxPrefetchIterationsAhead() const;
1287
1288   /// \return True if prefetching should also be done for writes.
1289   bool enableWritePrefetching() const;
1290
1291   /// \return True if the target wants to issue a prefetch in address space \p AS.
1292   bool shouldPrefetchAddressSpace(unsigned AS) const;
1293
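  // Illustrative example (editorial sketch, not part of the interface): how a
  // software-prefetching pass might combine these hooks. `TTI` is an assumed
  // TargetTransformInfo instance; the loop statistics and `emitPrefetch` are
  // hypothetical.
  //
  //   unsigned StrideBytes = 64, NumAccesses = 8, NumStrided = 6,
  //            NumPrefetches = 2;
  //   bool HasCall = false;
  //   if (TTI.getPrefetchDistance() > 0 &&
  //       StrideBytes >= TTI.getMinPrefetchStride(NumAccesses, NumStrided,
  //                                               NumPrefetches, HasCall))
  //     emitPrefetch(); // hypothetical helper
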
1294   /// \return The cost of a partial reduction, which is a reduction from a
1295   /// vector to another vector with fewer elements of larger size. They are
1296   /// represented by the llvm.experimental.partial.reduce.add intrinsic, which
1297   /// takes an accumulator and a binary operation operand that itself is fed by
1298   /// two extends. An example of an operation that uses a partial reduction is a
1299   /// dot product, which reduces two vectors to another vector with 4 times
1300   /// fewer elements that are each 4 times larger.
1301   InstructionCost
1302   getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB,
1303                           Type *AccumType, ElementCount VF,
1304                           PartialReductionExtendKind OpAExtend,
1305                           PartialReductionExtendKind OpBExtend,
1306                           std::optional<unsigned> BinOp = std::nullopt) const;
1307
1308   /// \return The maximum interleave factor that any transform should try to
1309   /// perform for this target. This number depends on the level of parallelism
1310   /// and the number of execution units in the CPU.
1311   unsigned getMaxInterleaveFactor(ElementCount VF) const;
1312
1313   /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
1314   static OperandValueInfo getOperandInfo(const Value *V);
1315
1316   /// This is an approximation of reciprocal throughput of a math/logic op.
1317   /// A higher cost indicates less expected throughput.
1318   /// From Agner Fog's guides, reciprocal throughput is "the average number of
1319   /// clock cycles per instruction when the instructions are not part of a
1320   /// limiting dependency chain."
1321   /// Therefore, costs should be scaled to account for multiple execution units
1322   /// on the target that can process this type of instruction. For example, if
1323   /// there are 5 scalar integer units and 2 vector integer units that can
1324   /// calculate an 'add' in a single cycle, this model should indicate that the
1325   /// cost of the vector add instruction is 2.5 times the cost of the scalar
1326   /// add instruction.
1327   /// \p Args is an optional argument which holds the instruction operand
1328   /// values, so the TTI can analyze those values when searching for special
1329   /// cases or optimizations based on those values.
1330   /// \p CxtI is the optional original context instruction, if one exists, to
1331   /// provide even more information.
1332   /// \p TLibInfo is used to search for platform specific vector library
1333   /// functions for instructions that might be converted to calls (e.g. frem).
1334   InstructionCost getArithmeticInstrCost(
1335       unsigned Opcode, Type *Ty,
1336       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1337       TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
1338       TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
1339       ArrayRef<const Value *> Args = {}, const Instruction *CxtI = nullptr,
1340       const TargetLibraryInfo *TLibInfo = nullptr) const;
1341
1342   /// Returns the cost estimate for an alternating opcode pattern that can be
1343   /// lowered to a single instruction on the target. On X86 this is the case for
1344   /// the addsub instruction, which corresponds to a Shuffle + FAdd + FSub
1345   /// pattern in IR. This function expects two opcodes, \p Opcode0 and
1346   /// \p Opcode1, selected by \p OpcodeMask. The mask contains one bit per lane
1347   /// and is `0` when \p Opcode0 is selected and `1` when \p Opcode1 is selected.
1348   /// \p VecTy is the vector type of the instruction to be generated.
1349   InstructionCost getAltInstrCost(
1350       VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
1351       const SmallBitVector &OpcodeMask,
1352       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1353
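  // Illustrative example (editorial sketch, not part of the interface):
  // costing a vector multiply whose second operand is a splatted constant.
  // `TTI` is an assumed TargetTransformInfo instance and `VecTy` an assumed
  // vector type.
  //
  //   InstructionCost Cost = TTI.getArithmeticInstrCost(
  //       Instruction::Mul, VecTy, TTI::TCK_RecipThroughput,
  //       /*Opd1Info=*/{TTI::OK_AnyValue, TTI::OP_None},
  //       /*Opd2Info=*/{TTI::OK_UniformConstantValue, TTI::OP_None});
  //   if (Cost.isValid()) { /* compare against the scalar alternative */ }
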
1354   /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
1355   /// The exact mask may be passed as Mask, or else the array will be empty.
1356   /// The index and subtype parameters are used by the subvector insertion and
1357   /// extraction shuffle kinds to show the insert/extract point and the type of
1358   /// the subvector being inserted/extracted. The operands of the shuffle can be
1359   /// passed through \p Args, which helps improve the cost estimation in some
1360   /// cases, like in broadcast loads.
1361   /// NOTE: For subvector extractions Tp represents the source type.
1362   InstructionCost
1363   getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask = {},
1364                  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1365                  int Index = 0, VectorType *SubTp = nullptr,
1366                  ArrayRef<const Value *> Args = {},
1367                  const Instruction *CxtI = nullptr) const;
1368
1369   /// Represents a hint about the context in which a cast is used.
1370   ///
1371   /// For zext/sext, the context of the cast is the operand, which must be a
1372   /// load of some kind. For trunc, the context of the cast is the single
1373   /// user of the instruction, which must be a store of some kind.
1374   ///
1375   /// This enum allows the vectorizer to give getCastInstrCost an idea of the
1376   /// type of cast it's dealing with, as not every cast is equal. For instance,
1377   /// the zext of a load may be free, but the zext of an interleaving load can
1378   /// be (very) expensive!
1379   ///
1380   /// See \c getCastContextHint to compute a CastContextHint from a cast
1381   /// Instruction*. Callers can use it if they don't need to override the
1382   /// context and just want it to be calculated from the instruction.
1383   ///
1384   /// FIXME: This handles the types of load/store that the vectorizer can
1385   /// produce, which are the cases where the context instruction is most
1386   /// likely to be incorrect. There are other situations where that can happen
1387   /// too, which might be handled here but in the long run a more general
1388   /// solution of costing multiple instructions at the same time may be better.
1389   enum class CastContextHint : uint8_t {
1390     None,          ///< The cast is not used with a load/store of any kind.
1391     Normal,        ///< The cast is used with a normal load/store.
1392     Masked,        ///< The cast is used with a masked load/store.
1393     GatherScatter, ///< The cast is used with a gather/scatter.
1394     Interleave,    ///< The cast is used with an interleaved load/store.
1395     Reversed,      ///< The cast is used with a reversed load/store.
1396   };
1397
1398   /// Calculates a CastContextHint from \p I.
1399   /// This should be used by callers of getCastInstrCost if they wish to
1400   /// determine the context from some instruction.
1401   /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
1402   /// or if it's another type of cast.
1403   static CastContextHint getCastContextHint(const Instruction *I);
1404
1405   /// \return The expected cost of cast instructions, such as bitcast, trunc,
1406   /// zext, etc. If there is an existing instruction that holds Opcode, it
1407   /// may be passed in the 'I' parameter.
1408   InstructionCost
1409   getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1410                    TTI::CastContextHint CCH,
1411                    TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
1412                    const Instruction *I = nullptr) const;
1413
1414   /// \return The expected cost of a sign- or zero-extended vector extract. Use
1415   /// Index = -1 to indicate that there is no information about the index value.
1416   InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
1417                                            VectorType *VecTy,
1418                                            unsigned Index) const;
1419
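  // Illustrative example (editorial sketch, not part of the interface):
  // costing the zext of a load-fed cast, deriving the context hint from the
  // instruction itself. `TTI`, `ZExt`, `DstTy`, and `SrcTy` are assumed to
  // exist in the caller.
  //
  //   TTI::CastContextHint CCH = TTI::getCastContextHint(ZExt);
  //   InstructionCost Cost = TTI.getCastInstrCost(
  //       Instruction::ZExt, DstTy, SrcTy, CCH, TTI::TCK_RecipThroughput, ZExt);
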
1420   /// \return The expected cost of control-flow related instructions such as
1421   /// Phi, Ret, Br, Switch.
1422   InstructionCost
1423   getCFInstrCost(unsigned Opcode,
1424                  TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
1425                  const Instruction *I = nullptr) const;
1426
1427   /// \returns The expected cost of compare and select instructions. If there
1428   /// is an existing instruction that holds Opcode, it may be passed in the
1429   /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
1430   /// is using a compare with the specified predicate as condition. When vector
1431   /// types are passed, \p VecPred must be used for all lanes. For a
1432   /// comparison, the two operands are the natural values. For a select, the
1433   /// two operands are the *value* operands, not the condition operand.
1434   InstructionCost
1435   getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1436                      CmpInst::Predicate VecPred,
1437                      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1438                      OperandValueInfo Op1Info = {OK_AnyValue, OP_None},
1439                      OperandValueInfo Op2Info = {OK_AnyValue, OP_None},
1440                      const Instruction *I = nullptr) const;
1441
1442   /// \return The expected cost of vector Insert and Extract.
1443   /// Use -1 to indicate that there is no information on the index value.
1444   /// This is used when the instruction is not available; a typical use
1445   /// case is to provision the cost of vectorization/scalarization in
1446   /// vectorizer passes.
1447   InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
1448                                      TTI::TargetCostKind CostKind,
1449                                      unsigned Index = -1, Value *Op0 = nullptr,
1450                                      Value *Op1 = nullptr) const;
1451
1452   /// \return The expected cost of vector Insert and Extract.
1453   /// Use -1 to indicate that there is no information on the index value.
1454   /// This is used when the instruction is not available; a typical use
1455   /// case is to provision the cost of vectorization/scalarization in
1456   /// vectorizer passes.
1457   /// \param ScalarUserAndIdx encodes the information about extracts from a
1458   /// vector, with 'Scalar' being the value being extracted, 'User' being the
1459   /// user of the extract (nullptr if the user is not known before
1460   /// vectorization), and 'Idx' being the extract lane.
1461   InstructionCost getVectorInstrCost(
1462       unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
1463       Value *Scalar,
1464       ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) const;
1465
1466   /// \return The expected cost of vector Insert and Extract.
1467   /// This is used when the instruction is available, and the implementation
1468   /// asserts that 'I' is not nullptr.
1469   ///
1470   /// A typical suitable use case is cost estimation when a vector instruction
1471   /// exists (e.g., from basic blocks during transformation).
1472   InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
1473                                      TTI::TargetCostKind CostKind,
1474                                      unsigned Index = -1) const;
1475
1476   /// \return The cost of a replication shuffle of \p VF elements typed \p EltTy
1477   /// \p ReplicationFactor times.
1478   ///
1479   /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
1480   ///   <0,0,0,1,1,1,2,2,2,3,3,3>
1481   InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
1482                                             int VF,
1483                                             const APInt &DemandedDstElts,
1484                                             TTI::TargetCostKind CostKind) const;
1485
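  // Illustrative example (editorial sketch, not part of the interface):
  // estimating the scalarization cost of a 4-lane vector as the sum of one
  // extract per lane. `TTI` and `VecTy` are assumed.
  //
  //   InstructionCost Scalarization = 0;
  //   for (unsigned Lane = 0; Lane < 4; ++Lane)
  //     Scalarization += TTI.getVectorInstrCost(
  //         Instruction::ExtractElement, VecTy, TTI::TCK_RecipThroughput, Lane);
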
1486   /// \return The cost of Load and Store instructions.
1487   InstructionCost
1488   getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1489                   unsigned AddressSpace,
1490                   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1491                   OperandValueInfo OpdInfo = {OK_AnyValue, OP_None},
1492                   const Instruction *I = nullptr) const;
1493
1494   /// \return The cost of VP Load and Store instructions.
1495   InstructionCost
1496   getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1497                     unsigned AddressSpace,
1498                     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1499                     const Instruction *I = nullptr) const;
1500
1501   /// \return The cost of masked Load and Store instructions.
1502   InstructionCost getMaskedMemoryOpCost(
1503       unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1504       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1505
1506   /// \return The cost of a Gather or Scatter operation.
1507   /// \p Opcode - the type of memory access, Load or Store
1508   /// \p DataTy - a vector type of the data to be loaded or stored
1509   /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
1510   /// \p VariableMask - true when the memory access is predicated with a mask
1511   ///                   that is not a compile-time constant
1512   /// \p Alignment - alignment of a single element
1513   /// \p I - the optional original context instruction, if one exists, e.g. the
1514   ///        load/store to transform or the call to the gather/scatter intrinsic
1515   InstructionCost getGatherScatterOpCost(
1516       unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1517       Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1518       const Instruction *I = nullptr) const;
1519
1520   /// \return The cost of strided memory operations.
1521   /// \p Opcode - the type of memory access, Load or Store
1522   /// \p DataTy - a vector type of the data to be loaded or stored
1523   /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
1524   /// \p VariableMask - true when the memory access is predicated with a mask
1525   ///                   that is not a compile-time constant
1526   /// \p Alignment - alignment of a single element
1527   /// \p I - the optional original context instruction, if one exists, e.g. the
1528   ///        load/store to transform or the call to the strided memory intrinsic
1529   InstructionCost getStridedMemoryOpCost(
1530       unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1531       Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1532       const Instruction *I = nullptr) const;
1533
1534   /// \return The cost of the interleaved memory operation.
1535   /// \p Opcode is the memory operation code.
1536   /// \p VecTy is the vector type of the interleaved access.
1537   /// \p Factor is the interleave factor.
1538   /// \p Indices is the indices for interleaved load members (as an interleaved
1539   ///    load allows gaps).
1540   /// \p Alignment is the alignment of the memory operation.
1541   /// \p AddressSpace is the address space of the pointer.
1542   /// \p UseMaskForCond indicates if the memory access is predicated.
1543   /// \p UseMaskForGaps indicates if gaps should be masked.
1544   InstructionCost getInterleavedMemoryOpCost(
1545       unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1546       Align Alignment, unsigned AddressSpace,
1547       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1548       bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
1549
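  // Illustrative example (editorial sketch, not part of the interface):
  // costing a factor-2 de-interleaving load where both members of each group
  // are used, e.g. an <8 x i32> access split into two <4 x i32> sequences.
  // `TTI` and `WideVecTy` are assumed.
  //
  //   unsigned Indices[] = {0, 1}; // both members of each group are used
  //   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
  //       Instruction::Load, WideVecTy, /*Factor=*/2, Indices, Align(16),
  //       /*AddressSpace=*/0);
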
1550   /// A helper function to determine the type of reduction algorithm used
1551   /// for a given \p Opcode and set of FastMathFlags \p FMF.
1552   static bool requiresOrderedReduction(std::optional<FastMathFlags> FMF) {
1553     return FMF && !(*FMF).allowReassoc();
1554   }
1555
1556   /// Calculate the cost of vector reduction intrinsics.
1557   ///
1558   /// This is the cost of reducing the vector value of type \p Ty to a scalar
1559   /// value using the operation denoted by \p Opcode. The FastMathFlags
1560   /// parameter \p FMF indicates what type of reduction we are performing:
1561   ///  1. Tree-wise. This is the typical 'fast' reduction, which involves
1562   ///     successively splitting the vector in half and doing the operation on
1563   ///     each pair of halves until you have a scalar value. For
1564   ///     example:
1565   ///       (v0, v1, v2, v3)
1566   ///       ((v0+v2), (v1+v3), undef, undef)
1567   ///       ((v0+v2+v1+v3), undef, undef, undef)
1568   ///     This is the default behaviour for integer operations, whereas for
1569   ///     floating point we only do this if \p FMF indicates that
1570   ///     reassociation is allowed.
1571   ///  2. Ordered. For a vector with N elements this involves performing N
1572   ///     operations in lane order, starting with an initial scalar value, i.e.
1573   ///       result = InitVal + v0
1574   ///       result = result + v1
1575   ///       result = result + v2
1576   ///       result = result + v3
1577   ///     This is only the case for FP operations and when reassociation is not
1578   ///     allowed.
1579   ///
1580   InstructionCost getArithmeticReductionCost(
1581       unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
1582       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1583
1584   InstructionCost getMinMaxReductionCost(
1585       Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF = FastMathFlags(),
1586       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1587
1588   /// Calculate the cost of an extended reduction pattern, similar to
1589   /// getArithmeticReductionCost of an Add reduction with multiply and optional
1590   /// extensions. This is the cost of:
1591   ///   ResTy vecreduce.add(mul(A, B)), or
1592   ///   ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B))).
1593   InstructionCost getMulAccReductionCost(
1594       bool IsUnsigned, Type *ResTy, VectorType *Ty,
1595       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1596
1597   /// Calculate the cost of an extended reduction pattern, similar to
1598   /// getArithmeticReductionCost of a reduction with an extension.
1599   /// This is the cost of:
1600   ///   ResTy vecreduce.opcode(ext(Ty A)).
1601   InstructionCost getExtendedReductionCost(
1602       unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1603       FastMathFlags FMF,
1604       TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1605
1606   /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
1607   /// Three cases are handled: 1. scalar instruction 2. vector instruction
1608   /// 3. scalar instruction which is to be vectorized.
1609   InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1610                                         TTI::TargetCostKind CostKind) const;
1611
1612   /// \returns The cost of Call instructions.
1613   InstructionCost getCallInstrCost(
1614       Function *F, Type *RetTy, ArrayRef<Type *> Tys,
1615       TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
1616
1617   /// \returns The number of pieces into which the provided type must be
1618   /// split during legalization. Zero is returned when the answer is unknown.
1619   unsigned getNumberOfParts(Type *Tp) const;
1620
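  // Illustrative example (editorial sketch, not part of the interface):
  // costing an fadd reduction and checking whether it must be kept in strict
  // lane order. `TTI`, `VecTy`, and `FMF` (a std::optional<FastMathFlags>)
  // are assumed.
  //
  //   bool Ordered = TTI::requiresOrderedReduction(FMF);
  //   InstructionCost Cost = TTI.getArithmeticReductionCost(
  //       Instruction::FAdd, VecTy, FMF, TTI::TCK_RecipThroughput);
  //   (void)Ordered; // an ordered reduction is usually costed higher
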
1621   /// \returns The cost of the address computation. For most targets this can
1622   /// be merged into the instruction indexing mode. Some targets might want to
1623   /// distinguish between address computation for memory operations on vector
1624   /// types and scalar types. Such targets should override this function.
1625   /// The 'SE' parameter holds a pointer to the scalar evolution object, which
1626   /// is used to get the step value of 'Ptr' in the case of a constant stride.
1627   /// The 'Ptr' parameter holds the SCEV of the access pointer.
1628   InstructionCost getAddressComputationCost(Type *Ty,
1629                                             ScalarEvolution *SE = nullptr,
1630                                             const SCEV *Ptr = nullptr) const;
1631
1632   /// \returns The cost, if any, of keeping values of the given types alive
1633   /// over a callsite.
1634   ///
1635   /// Some types may require the use of register classes that do not have
1636   /// any callee-saved registers, so would require a spill and fill.
1637   InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
1638
1639   /// \returns True if the intrinsic is a supported memory intrinsic. Info
1640   /// will contain additional information: whether the intrinsic may read
1641   /// or write memory, its volatility, and the pointer. Info is undefined
1641   /// if false is returned.
1643   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
1644
1645   /// \returns The maximum element size, in bytes, for an element
1646   /// unordered-atomic memory intrinsic.
1647   unsigned getAtomicMemIntrinsicMaxElementSize() const;
1648
1649   /// \returns A value which is the result of the given memory intrinsic. New
1650   /// instructions may be created to extract the result from the given intrinsic
1651   /// memory operation. Returns nullptr if the target cannot create a result
1652   /// from the given intrinsic.
1653   Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1654                                            Type *ExpectedType) const;
1655
1656   /// \returns The type to use in a loop expansion of a memcpy call.
1657   Type *getMemcpyLoopLoweringType(
1658       LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
1659       unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
1660       std::optional<uint32_t> AtomicElementSize = std::nullopt) const;
1661
1662   /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
1663   /// \param RemainingBytes The number of bytes to copy.
1664   ///
1665   /// Calculates the operand types to use when copying \p RemainingBytes of
1666   /// memory, where source and destination alignments are \p SrcAlign and
1667   /// \p DestAlign respectively.
1668   void getMemcpyLoopResidualLoweringType(
1669       SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1670       unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1671       Align SrcAlign, Align DestAlign,
1672       std::optional<uint32_t> AtomicCpySize = std::nullopt) const;
1673
1674   /// \returns True if the two functions have compatible attributes for inlining
1675   /// purposes.
1676   bool areInlineCompatible(const Function *Caller,
1677                            const Function *Callee) const;
1678
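  // Illustrative example (editorial sketch, not part of the interface):
  // picking the main copy type for an inline memcpy expansion and the residual
  // types for the leftover tail bytes. `TTI`, `Ctx` (an LLVMContext), and
  // `Length` (a Value*) are assumed.
  //
  //   Type *CopyTy = TTI.getMemcpyLoopLoweringType(
  //       Ctx, Length, /*SrcAddrSpace=*/0, /*DestAddrSpace=*/0,
  //       /*SrcAlign=*/Align(8), /*DestAlign=*/Align(8));
  //   SmallVector<Type *, 4> ResidualTys;
  //   TTI.getMemcpyLoopResidualLoweringType(ResidualTys, Ctx,
  //                                         /*RemainingBytes=*/5, 0, 0,
  //                                         Align(8), Align(8));
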
1679   /// Returns a penalty for invoking call \p Call in \p F.
1680   /// For example, if a function F calls a function G, which in turn calls
1681   /// function H, then getInlineCallPenalty(F, H()) would return the
1682   /// penalty of calling H from F, e.g. after inlining G into F.
1683   /// \p DefaultCallPenalty is passed to give a default penalty that
1684   /// the target can amend or override.
1685   unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
1686                                 unsigned DefaultCallPenalty) const;
1687
1688   /// \returns True if the caller and callee agree on how \p Types will be
1689   /// passed to or returned from the callee.
1691   /// \param Types List of types to check.
1692   bool areTypesABICompatible(const Function *Caller, const Function *Callee,
1693                              const ArrayRef<Type *> &Types) const;
1694
1695   /// The type of load/store indexing.
1696   enum MemIndexedMode {
1697     MIM_Unindexed, ///< No indexing.
1698     MIM_PreInc,    ///< Pre-incrementing.
1699     MIM_PreDec,    ///< Pre-decrementing.
1700     MIM_PostInc,   ///< Post-incrementing.
1701     MIM_PostDec    ///< Post-decrementing.
1702   };
1703
1704   /// \returns True if the specified indexed load for the given type is legal.
1705   bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
1706
1707   /// \returns True if the specified indexed store for the given type is legal.
1708   bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
1709
1710   /// \returns The bitwidth of the largest vector type that should be used to
1711   /// load/store in the given address space.
1712   unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
1713
1714   /// \returns True if the load instruction is legal to vectorize.
1715   bool isLegalToVectorizeLoad(LoadInst *LI) const;
1716
1717   /// \returns True if the store instruction is legal to vectorize.
1718   bool isLegalToVectorizeStore(StoreInst *SI) const;
1719
1720   /// \returns True if it is legal to vectorize the given load chain.
1721   bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
1722                                    unsigned AddrSpace) const;
1723
1724   /// \returns True if it is legal to vectorize the given store chain.
1725   bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
1726                                     unsigned AddrSpace) const;
1727
1728   /// \returns True if it is legal to vectorize the given reduction kind.
1729   bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
1730                                    ElementCount VF) const;
1731
1732   /// \returns True if the given type is supported for scalable vectors.
1733   bool isElementTypeLegalForScalableVector(Type *Ty) const;
1734
1735   /// \returns The new vector factor value if the target doesn't support \p
1736   /// SizeInBytes loads or has a better vector factor.
1737   unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1738                                unsigned ChainSizeInBytes,
1739                                VectorType *VecTy) const;
1740
1741   /// \returns The new vector factor value if the target doesn't support \p
1742   /// SizeInBytes stores or has a better vector factor.
1743   unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1744                                 unsigned ChainSizeInBytes,
1745                                 VectorType *VecTy) const;
1746
1747   /// Flags describing the kind of vector reduction.
1748   struct ReductionFlags {
1749     ReductionFlags() = default;
1750     bool IsMaxOp =
1751         false; ///< If the op is a min/max kind, true if it's a max operation.
1752     bool IsSigned = false; ///< Whether the operation is a signed int reduction.
1753     bool NoNaN =
1754         false; ///< If op is an fp min/max, whether NaNs may be present.
1755   };
1756
1757   /// \returns True if the target prefers fixed-width vectorization when the
1758   /// loop vectorizer's cost model assigns an equal cost to the fixed and
1759   /// scalable version of the vectorized loop.
1760   bool preferFixedOverScalableIfEqualCost() const;
1761
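  // Illustrative example (editorial sketch, not part of the interface): a
  // load/store vectorizer might gate merging a 16-byte chain of adjacent
  // loads on the target's legality hooks. `TTI` and `mergeChain` are assumed.
  //
  //   if (TTI.isLegalToVectorizeLoadChain(/*ChainSizeInBytes=*/16, Align(4),
  //                                       /*AddrSpace=*/0) &&
  //       16 * 8 <= TTI.getLoadStoreVecRegBitWidth(/*AddrSpace=*/0))
  //     mergeChain(); // hypothetical helper
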
1762   /// \returns True if the target prefers in-loop reductions.
1763   bool preferInLoopReduction(unsigned Opcode, Type *Ty,
1764                              ReductionFlags Flags) const;
1765
1766   /// \returns True if the target prefers the reduction select to be kept in
1767   /// the loop when tail folding, i.e.
1768   ///   loop:
1769   ///     p = phi (0, s)
1770   ///     a = add (p, x)
1771   ///     s = select (mask, a, p)
1772   ///   vecreduce.add(s)
1773   ///
1774   /// As opposed to the normal scheme of p = phi (0, a), which allows the select
1775   /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
1776   /// by the target, this can lead to cleaner code generation.
1777   bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
1778                                        ReductionFlags Flags) const;
1779
1780   /// Return true if the loop vectorizer should consider vectorizing an
1781   /// otherwise scalar epilogue loop.
1782   bool preferEpilogueVectorization() const;
1783
1784   /// \returns True if the target wants to expand the given reduction intrinsic
1785   /// into a shuffle sequence.
1786   bool shouldExpandReduction(const IntrinsicInst *II) const;
1787
1788   enum struct ReductionShuffle { SplitHalf, Pairwise };
1789
1790   /// \returns The shuffle sequence pattern used to expand the given reduction
1791   /// intrinsic.
1792   ReductionShuffle
1793   getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const;
1794
1795   /// \returns the size cost of rematerializing a GlobalValue address relative
1796   /// to a stack reload.
1797   unsigned getGISelRematGlobalCost() const;
1798
1799   /// \returns the lower bound of a trip count to decide on vectorization
1800   /// while tail-folding.
1801   unsigned getMinTripCountTailFoldingThreshold() const;
1802
1803   /// \returns True if the target supports scalable vectors.
1804   bool supportsScalableVectors() const;
1805
1806   /// \return true when scalable vectorization is preferred.
1807   bool enableScalableVectorization() const;
1808
1809   /// \name Vector Predication Information
1810   /// @{
1811   /// Whether the target supports the %evl parameter of VP intrinsics
1812   /// efficiently in hardware, for the given opcode and type/alignment (see the
1813   /// LLVM Language Reference - "Vector Predication Intrinsics").
1814   /// Use of %evl is discouraged when that is not the case.
1815   bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
1816                              Align Alignment) const;
1817
1818   /// Return true if sinking I's operands to the same basic block as I is
1819   /// profitable, e.g. because the operands can be folded into a target
1820   /// instruction during instruction selection. After calling the function,
1821   /// \p Ops contains the Uses to sink ordered by dominance (dominating users
1822   /// come first).
1823   bool isProfitableToSinkOperands(Instruction *I,
1824                                   SmallVectorImpl<Use *> &Ops) const;
1825
1826   /// Return true if it's significantly cheaper to shift a vector by a uniform
1827   /// scalar than by an amount which will vary across each lane. On x86 before
1828   /// AVX2 for example, there is a "psllw" instruction for the former case, but
1829   /// no simple instruction for a general "a << b" operation on vectors.
1830   /// This should also apply to lowering for vector funnel shifts (rotates).
1831   bool isVectorShiftByScalarCheap(Type *Ty) const;
1832
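  // Illustrative example (editorial sketch, not part of the interface):
  // deciding how to expand a vector reduction intrinsic. `TTI` and `II` (an
  // IntrinsicInst*) are assumed; the shuffle-order comments reflect the enum
  // names, and `expandWithShuffles` is a hypothetical helper.
  //
  //   if (TTI.shouldExpandReduction(II)) {
  //     TTI::ReductionShuffle RS = TTI.getPreferredExpandedReductionShuffle(II);
  //     // SplitHalf: repeatedly fold the upper half onto the lower half.
  //     // Pairwise:  combine adjacent lanes at each step.
  //     expandWithShuffles(II, RS);
  //   }
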
1833   struct VPLegalization {
1834     enum VPTransform {
1835       // keep the predicating parameter
1836       Legal = 0,
1837       // where legal, discard the predicate parameter
1838       Discard = 1,
1839       // transform into something else that is also predicating
1840       Convert = 2
1841     };
1842
1843     // How to transform the EVL parameter.
1844     // Legal:   keep the EVL parameter as it is.
1845     // Discard: ignore the EVL parameter where it is safe to do so.
1846     // Convert: fold the EVL into the mask parameter.
1847     VPTransform EVLParamStrategy;
1848
1849     // How to transform the operator.
1850     // Legal:   the target supports this operator.
1851     // Convert: convert this to a non-VP operation.
1852     // The 'Discard' strategy is invalid.
1853     VPTransform OpStrategy;
1854
1855     bool shouldDoNothing() const {
1856       return (EVLParamStrategy == Legal) && (OpStrategy == Legal);
1857     }
1858     VPLegalization(VPTransform EVLParamStrategy, VPTransform OpStrategy)
1859         : EVLParamStrategy(EVLParamStrategy), OpStrategy(OpStrategy) {}
1860   };
1861
1862   /// \returns How the target needs this vector-predicated operation to be
1863   /// transformed.
1864   VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
1865   /// @}
1866
1867   /// \returns Whether a 32-bit branch instruction is available in Arm or Thumb
1868   /// state.
1869   ///
1870   /// Used by the LowerTypeTests pass, which constructs an IR inline assembler
1871   /// node containing a jump table in a format suitable for the target, so it
1872   /// needs to know what format of jump table it can legally use.
1873   ///
1874   /// For non-Arm targets, this function isn't used. It defaults to returning
1875   /// false, but it shouldn't matter what it returns anyway.
1876   bool hasArmWideBranch(bool Thumb) const;
1877
1878   /// Returns a bitmask constructed from the target-features or fmv-features
1879   /// metadata of a function.
1880   uint64_t getFeatureMask(const Function &F) const;
1881
1882   /// Returns true if this is an instance of a function with multiple versions.
1883   bool isMultiversionedFunction(const Function &F) const;
1884
1885   /// \return The maximum number of function arguments the target supports.
1886   unsigned getMaxNumArgs() const;
1887
1888   /// \return For an array of the given Size, the alignment boundary to pad to.
1889   /// The default is no padding.
1890   unsigned getNumBytesToPadGlobalArray(unsigned Size, Type *ArrayType) const;
1891
1892   /// @}
1893
1894   /// Collect kernel launch bounds for \p F into \p LB.
1895   void collectKernelLaunchBounds(
1896       const Function &F,
1897       SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const;
1898
1899 private:
1900   /// The abstract base class used to type erase specific TTI
1901   /// implementations.
1902   class Concept;
1903
1904   /// The template model for the base class which wraps a concrete
1905   /// implementation in a type erased interface.
1906 template <typename T> class Model; 1907 1908 std::unique_ptr<Concept> TTIImpl; 1909 }; 1910 1911 class TargetTransformInfo::Concept { 1912 public: 1913 virtual ~Concept() = 0; 1914 virtual const DataLayout &getDataLayout() const = 0; 1915 virtual InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr, 1916 ArrayRef<const Value *> Operands, 1917 Type *AccessType, 1918 TTI::TargetCostKind CostKind) = 0; 1919 virtual InstructionCost 1920 getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base, 1921 const TTI::PointersChainInfo &Info, Type *AccessTy, 1922 TTI::TargetCostKind CostKind) = 0; 1923 virtual unsigned getInliningThresholdMultiplier() const = 0; 1924 virtual unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const = 0; 1925 virtual unsigned 1926 getInliningCostBenefitAnalysisProfitableMultiplier() const = 0; 1927 virtual int getInliningLastCallToStaticBonus() const = 0; 1928 virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0; 1929 virtual int getInlinerVectorBonusPercent() const = 0; 1930 virtual unsigned getCallerAllocaCost(const CallBase *CB, 1931 const AllocaInst *AI) const = 0; 1932 virtual InstructionCost getMemcpyCost(const Instruction *I) = 0; 1933 virtual uint64_t getMaxMemIntrinsicInlineSizeThreshold() const = 0; 1934 virtual unsigned 1935 getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize, 1936 ProfileSummaryInfo *PSI, 1937 BlockFrequencyInfo *BFI) = 0; 1938 virtual InstructionCost getInstructionCost(const User *U, 1939 ArrayRef<const Value *> Operands, 1940 TargetCostKind CostKind) = 0; 1941 virtual BranchProbability getPredictableBranchThreshold() = 0; 1942 virtual InstructionCost getBranchMispredictPenalty() = 0; 1943 virtual bool hasBranchDivergence(const Function *F = nullptr) = 0; 1944 virtual bool isSourceOfDivergence(const Value *V) = 0; 1945 virtual bool isAlwaysUniform(const Value *V) = 0; 1946 virtual bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0; 1947 virtual bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const = 0; 1948 virtual unsigned getFlatAddressSpace() = 0; 1949 virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes, 1950 Intrinsic::ID IID) const = 0; 1951 virtual bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0; 1952 virtual bool 1953 canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const = 0; 1954 virtual unsigned getAssumedAddrSpace(const Value *V) const = 0; 1955 virtual bool isSingleThreaded() const = 0; 1956 virtual std::pair<const Value *, unsigned> 1957 getPredicatedAddrSpace(const Value *V) const = 0; 1958 virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, 1959 Value *OldV, 1960 Value *NewV) const = 0; 1961 virtual bool isLoweredToCall(const Function *F) = 0; 1962 virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &, 1963 UnrollingPreferences &UP, 1964 OptimizationRemarkEmitter *ORE) = 0; 1965 virtual void getPeelingPreferences(Loop *L, ScalarEvolution &SE, 1966 PeelingPreferences &PP) = 0; 1967 virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, 1968 AssumptionCache &AC, 1969 TargetLibraryInfo *LibInfo, 1970 HardwareLoopInfo &HWLoopInfo) = 0; 1971 virtual unsigned getEpilogueVectorizationMinVF() = 0; 1972 virtual bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) = 0; 1973 virtual TailFoldingStyle 1974 getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) = 0; 1975 virtual std::optional<Instruction *> instCombineIntrinsic( 1976 InstCombiner &IC, 
IntrinsicInst &II) = 0; 1977 virtual std::optional<Value *> simplifyDemandedUseBitsIntrinsic( 1978 InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, 1979 KnownBits & Known, bool &KnownBitsComputed) = 0; 1980 virtual std::optional<Value *> simplifyDemandedVectorEltsIntrinsic( 1981 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, 1982 APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, 1983 std::function<void(Instruction *, unsigned, APInt, APInt &)> 1984 SimplifyAndSetOp) = 0; 1985 virtual bool isLegalAddImmediate(int64_t Imm) = 0; 1986 virtual bool isLegalAddScalableImmediate(int64_t Imm) = 0; 1987 virtual bool isLegalICmpImmediate(int64_t Imm) = 0; 1988 virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, 1989 int64_t BaseOffset, bool HasBaseReg, 1990 int64_t Scale, unsigned AddrSpace, 1991 Instruction *I, 1992 int64_t ScalableOffset) = 0; 1993 virtual bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1, 1994 const TargetTransformInfo::LSRCost &C2) = 0; 1995 virtual bool isNumRegsMajorCostOfLSR() = 0; 1996 virtual bool shouldDropLSRSolutionIfLessProfitable() const = 0; 1997 virtual bool isProfitableLSRChainElement(Instruction *I) = 0; 1998 virtual bool canMacroFuseCmp() = 0; 1999 virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, 2000 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC, 2001 TargetLibraryInfo *LibInfo) = 0; 2002 virtual AddressingModeKind 2003 getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0; 2004 virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0; 2005 virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0; 2006 virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0; 2007 virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0; 2008 virtual bool isLegalBroadcastLoad(Type *ElementTy, 2009 ElementCount NumElements) const = 0; 2010 virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) = 0; 2011 virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) = 0; 2012 virtual bool forceScalarizeMaskedGather(VectorType *DataType, 2013 Align Alignment) = 0; 2014 virtual bool forceScalarizeMaskedScatter(VectorType *DataType, 2015 Align Alignment) = 0; 2016 virtual bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) = 0; 2017 virtual bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) = 0; 2018 virtual bool isLegalStridedLoadStore(Type *DataType, Align Alignment) = 0; 2019 virtual bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor, 2020 Align Alignment, 2021 unsigned AddrSpace) = 0; 2022 2023 virtual bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) = 0; 2024 virtual bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, 2025 unsigned Opcode1, 2026 const SmallBitVector &OpcodeMask) const = 0; 2027 virtual bool enableOrderedReductions() = 0; 2028 virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0; 2029 virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0; 2030 virtual bool prefersVectorizedAddressing() = 0; 2031 virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, 2032 StackOffset BaseOffset, 2033 bool HasBaseReg, int64_t Scale, 2034 unsigned AddrSpace) = 0; 2035 virtual bool LSRWithInstrQueries() = 0; 2036 virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0; 2037 virtual bool isProfitableToHoist(Instruction *I) = 0; 2038 virtual bool useAA() = 0; 2039 virtual bool isTypeLegal(Type *Ty) = 0; 2040 virtual unsigned 
getRegUsageForType(Type *Ty) = 0; 2041 virtual bool shouldBuildLookupTables() = 0; 2042 virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0; 2043 virtual bool shouldBuildRelLookupTables() = 0; 2044 virtual bool useColdCCForColdCall(Function &F) = 0; 2045 virtual bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) = 0; 2046 virtual bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID, 2047 unsigned ScalarOpdIdx) = 0; 2048 virtual bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, 2049 int OpdIdx) = 0; 2050 virtual bool 2051 isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID, 2052 int RetIdx) = 0; 2053 virtual InstructionCost 2054 getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts, 2055 bool Insert, bool Extract, TargetCostKind CostKind, 2056 ArrayRef<Value *> VL = {}) = 0; 2057 virtual InstructionCost 2058 getOperandsScalarizationOverhead(ArrayRef<const Value *> Args, 2059 ArrayRef<Type *> Tys, 2060 TargetCostKind CostKind) = 0; 2061 virtual bool supportsEfficientVectorElementLoadStore() = 0; 2062 virtual bool supportsTailCalls() = 0; 2063 virtual bool supportsTailCallFor(const CallBase *CB) = 0; 2064 virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0; 2065 virtual MemCmpExpansionOptions 2066 enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0; 2067 virtual bool enableSelectOptimize() = 0; 2068 virtual bool shouldTreatInstructionLikeSelect(const Instruction *I) = 0; 2069 virtual bool enableInterleavedAccessVectorization() = 0; 2070 virtual bool enableMaskedInterleavedAccessVectorization() = 0; 2071 virtual bool isFPVectorizationPotentiallyUnsafe() = 0; 2072 virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context, 2073 unsigned BitWidth, 2074 unsigned AddressSpace, 2075 Align Alignment, 2076 unsigned *Fast) = 0; 2077 virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0; 2078 virtual bool haveFastSqrt(Type *Ty) = 0; 2079 virtual bool isExpensiveToSpeculativelyExecute(const Instruction *I) = 0; 2080 virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0; 2081 virtual InstructionCost getFPOpCost(Type *Ty) = 0; 2082 virtual InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, 2083 const APInt &Imm, Type *Ty) = 0; 2084 virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty, 2085 TargetCostKind CostKind) = 0; 2086 virtual InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx, 2087 const APInt &Imm, Type *Ty, 2088 TargetCostKind CostKind, 2089 Instruction *Inst = nullptr) = 0; 2090 virtual InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, 2091 const APInt &Imm, Type *Ty, 2092 TargetCostKind CostKind) = 0; 2093 virtual bool preferToKeepConstantsAttached(const Instruction &Inst, 2094 const Function &Fn) const = 0; 2095 virtual unsigned getNumberOfRegisters(unsigned ClassID) const = 0; 2096 virtual bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const = 0; 2097 virtual unsigned getRegisterClassForType(bool Vector, 2098 Type *Ty = nullptr) const = 0; 2099 virtual const char *getRegisterClassName(unsigned ClassID) const = 0; 2100 virtual TypeSize getRegisterBitWidth(RegisterKind K) const = 0; 2101 virtual unsigned getMinVectorRegisterBitWidth() const = 0; 2102 virtual std::optional<unsigned> getMaxVScale() const = 0; 2103 virtual std::optional<unsigned> getVScaleForTuning() const = 0; 2104 virtual bool isVScaleKnownToBeAPowerOfTwo() const = 0; 2105 virtual bool 2106 
shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const = 0;
2107   virtual ElementCount getMinimumVF(unsigned ElemWidth,
2108                                     bool IsScalable) const = 0;
2109   virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
2110   virtual unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
2111                                      Type *ScalarValTy) const = 0;
2112   virtual bool shouldConsiderAddressTypePromotion(
2113       const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
2114   virtual unsigned getCacheLineSize() const = 0;
2115   virtual std::optional<unsigned> getCacheSize(CacheLevel Level) const = 0;
2116   virtual std::optional<unsigned> getCacheAssociativity(CacheLevel Level)
2117       const = 0;
2118   virtual std::optional<unsigned> getMinPageSize() const = 0;
2119
2120   /// \return How far ahead of a load we should place the prefetch
2121   /// instruction. This is currently measured in number of
2122   /// instructions.
2123   virtual unsigned getPrefetchDistance() const = 0;
2124
2125   /// \return Some HW prefetchers can handle accesses up to a certain
2126   /// constant stride. This is the minimum stride in bytes where it
2127   /// makes sense to start adding SW prefetches. The default is 1,
2128   /// i.e. prefetch with any stride. Sometimes prefetching is beneficial
2129   /// even below the HW prefetcher limit, and the arguments provided are
2130   /// meant to serve as a basis for deciding this for a particular loop.
2131   virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
2132                                         unsigned NumStridedMemAccesses,
2133                                         unsigned NumPrefetches,
2134                                         bool HasCall) const = 0;
2135
2136   /// \return The maximum number of iterations to prefetch ahead. If
2137   /// the required number of iterations is more than this number, no
2138   /// prefetching is performed.
2139   virtual unsigned getMaxPrefetchIterationsAhead() const = 0;
2140
2141   /// \return True if prefetching should also be done for writes.
2142   virtual bool enableWritePrefetching() const = 0;
2143
2144   /// \return True if the target wants to issue a prefetch in address space \p AS.
2145   virtual bool shouldPrefetchAddressSpace(unsigned AS) const = 0;
2146
2147   /// \return The cost of a partial reduction, which is a reduction from a
2148   /// vector to another vector with fewer elements of larger size. They are
2149   /// represented by the llvm.experimental.partial.reduce.add intrinsic, which
2150   /// takes an accumulator and a binary operation operand that itself is fed by
2151   /// two extends. An example of an operation that uses a partial reduction is a
2152   /// dot product, which reduces two vectors to another vector with 4 times
2153   /// fewer elements that are each 4 times larger.
2154 virtual InstructionCost 2155 getPartialReductionCost(unsigned Opcode, Type *InputTypeA, Type *InputTypeB, 2156 Type *AccumType, ElementCount VF, 2157 PartialReductionExtendKind OpAExtend, 2158 PartialReductionExtendKind OpBExtend, 2159 std::optional<unsigned> BinOp) const = 0; 2160 2161 virtual unsigned getMaxInterleaveFactor(ElementCount VF) = 0; 2162 virtual InstructionCost getArithmeticInstrCost( 2163 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, 2164 OperandValueInfo Opd1Info, OperandValueInfo Opd2Info, 2165 ArrayRef<const Value *> Args, const Instruction *CxtI = nullptr) = 0; 2166 virtual InstructionCost getAltInstrCost( 2167 VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, 2168 const SmallBitVector &OpcodeMask, 2169 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const = 0; 2170 2171 virtual InstructionCost 2172 getShuffleCost(ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask, 2173 TTI::TargetCostKind CostKind, int Index, VectorType *SubTp, 2174 ArrayRef<const Value *> Args, const Instruction *CxtI) = 0; 2175 virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, 2176 Type *Src, CastContextHint CCH, 2177 TTI::TargetCostKind CostKind, 2178 const Instruction *I) = 0; 2179 virtual InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, 2180 VectorType *VecTy, 2181 unsigned Index) = 0; 2182 virtual InstructionCost getCFInstrCost(unsigned Opcode, 2183 TTI::TargetCostKind CostKind, 2184 const Instruction *I = nullptr) = 0; 2185 virtual InstructionCost 2186 getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, 2187 CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, 2188 OperandValueInfo Op1Info, OperandValueInfo Op2Info, 2189 const Instruction *I) = 0; 2190 virtual InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val, 2191 TTI::TargetCostKind CostKind, 2192 unsigned Index, Value *Op0, 2193 Value *Op1) = 0; 2194 2195 /// \param ScalarUserAndIdx encodes the information about extracts from a 2196 /// vector with 'Scalar' being the value being extracted,'User' being the user 2197 /// of the extract(nullptr if user is not known before vectorization) and 2198 /// 'Idx' being the extract lane. 
2199 virtual InstructionCost getVectorInstrCost( 2200 unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index, 2201 Value *Scalar, 2202 ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) = 0; 2203 2204 virtual InstructionCost getVectorInstrCost(const Instruction &I, Type *Val, 2205 TTI::TargetCostKind CostKind, 2206 unsigned Index) = 0; 2207 2208 virtual InstructionCost 2209 getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF, 2210 const APInt &DemandedDstElts, 2211 TTI::TargetCostKind CostKind) = 0; 2212 2213 virtual InstructionCost 2214 getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, 2215 unsigned AddressSpace, TTI::TargetCostKind CostKind, 2216 OperandValueInfo OpInfo, const Instruction *I) = 0; 2217 virtual InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, 2218 Align Alignment, 2219 unsigned AddressSpace, 2220 TTI::TargetCostKind CostKind, 2221 const Instruction *I) = 0; 2222 virtual InstructionCost 2223 getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment, 2224 unsigned AddressSpace, 2225 TTI::TargetCostKind CostKind) = 0; 2226 virtual InstructionCost 2227 getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, 2228 bool VariableMask, Align Alignment, 2229 TTI::TargetCostKind CostKind, 2230 const Instruction *I = nullptr) = 0; 2231 virtual InstructionCost 2232 getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr, 2233 bool VariableMask, Align Alignment, 2234 TTI::TargetCostKind CostKind, 2235 const Instruction *I = nullptr) = 0; 2236 2237 virtual InstructionCost getInterleavedMemoryOpCost( 2238 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, 2239 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, 2240 bool UseMaskForCond = false, bool UseMaskForGaps = false) = 0; 2241 virtual InstructionCost 2242 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty, 2243 std::optional<FastMathFlags> FMF, 2244 TTI::TargetCostKind CostKind) = 0; 2245 virtual InstructionCost 2246 getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF, 2247 TTI::TargetCostKind CostKind) = 0; 2248 virtual InstructionCost getExtendedReductionCost( 2249 unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty, 2250 FastMathFlags FMF, 2251 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0; 2252 virtual InstructionCost getMulAccReductionCost( 2253 bool IsUnsigned, Type *ResTy, VectorType *Ty, 2254 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0; 2255 virtual InstructionCost 2256 getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 2257 TTI::TargetCostKind CostKind) = 0; 2258 virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy, 2259 ArrayRef<Type *> Tys, 2260 TTI::TargetCostKind CostKind) = 0; 2261 virtual unsigned getNumberOfParts(Type *Tp) = 0; 2262 virtual InstructionCost 2263 getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) = 0; 2264 virtual InstructionCost 2265 getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0; 2266 virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst, 2267 MemIntrinsicInfo &Info) = 0; 2268 virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0; 2269 virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, 2270 Type *ExpectedType) = 0; 2271 virtual Type *getMemcpyLoopLoweringType( 2272 LLVMContext &Context, Value *Length, unsigned SrcAddrSpace, 2273 unsigned DestAddrSpace, Align SrcAlign, Align DestAlign, 2274 
std::optional<uint32_t> AtomicElementSize) const = 0; 2275 2276 virtual void getMemcpyLoopResidualLoweringType( 2277 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context, 2278 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace, 2279 Align SrcAlign, Align DestAlign, 2280 std::optional<uint32_t> AtomicCpySize) const = 0; 2281 virtual bool areInlineCompatible(const Function *Caller, 2282 const Function *Callee) const = 0; 2283 virtual unsigned getInlineCallPenalty(const Function *F, const CallBase &Call, 2284 unsigned DefaultCallPenalty) const = 0; 2285 virtual bool areTypesABICompatible(const Function *Caller, 2286 const Function *Callee, 2287 const ArrayRef<Type *> &Types) const = 0; 2288 virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0; 2289 virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0; 2290 virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0; 2291 virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0; 2292 virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0; 2293 virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, 2294 Align Alignment, 2295 unsigned AddrSpace) const = 0; 2296 virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, 2297 Align Alignment, 2298 unsigned AddrSpace) const = 0; 2299 virtual bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc, 2300 ElementCount VF) const = 0; 2301 virtual bool isElementTypeLegalForScalableVector(Type *Ty) const = 0; 2302 virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize, 2303 unsigned ChainSizeInBytes, 2304 VectorType *VecTy) const = 0; 2305 virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize, 2306 unsigned ChainSizeInBytes, 2307 VectorType *VecTy) const = 0; 2308 virtual bool preferFixedOverScalableIfEqualCost() const = 0; 2309 virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty, 2310 ReductionFlags) const = 0; 2311 virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty, 2312 ReductionFlags) const = 0; 2313 virtual bool preferEpilogueVectorization() const = 0; 2314 2315 virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0; 2316 virtual ReductionShuffle 2317 getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const = 0; 2318 virtual unsigned getGISelRematGlobalCost() const = 0; 2319 virtual unsigned getMinTripCountTailFoldingThreshold() const = 0; 2320 virtual bool enableScalableVectorization() const = 0; 2321 virtual bool supportsScalableVectors() const = 0; 2322 virtual bool hasActiveVectorLength(unsigned Opcode, Type *DataType, 2323 Align Alignment) const = 0; 2324 virtual bool 2325 isProfitableToSinkOperands(Instruction *I, 2326 SmallVectorImpl<Use *> &OpsToSink) const = 0; 2327 2328 virtual bool isVectorShiftByScalarCheap(Type *Ty) const = 0; 2329 virtual VPLegalization 2330 getVPLegalizationStrategy(const VPIntrinsic &PI) const = 0; 2331 virtual bool hasArmWideBranch(bool Thumb) const = 0; 2332 virtual uint64_t getFeatureMask(const Function &F) const = 0; 2333 virtual bool isMultiversionedFunction(const Function &F) const = 0; 2334 virtual unsigned getMaxNumArgs() const = 0; 2335 virtual unsigned getNumBytesToPadGlobalArray(unsigned Size, 2336 Type *ArrayType) const = 0; 2337 virtual void collectKernelLaunchBounds( 2338 const Function &F, 2339 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const = 0; 2340 }; 2341 2342 template <typename T> 2343 class TargetTransformInfo::Model final : public 
template <typename T>
class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
  T Impl;

public:
  Model(T Impl) : Impl(std::move(Impl)) {}
  ~Model() override = default;

  const DataLayout &getDataLayout() const override {
    return Impl.getDataLayout();
  }

  InstructionCost
  getGEPCost(Type *PointeeType, const Value *Ptr,
             ArrayRef<const Value *> Operands, Type *AccessType,
             TargetTransformInfo::TargetCostKind CostKind) override {
    return Impl.getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
  }
  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TargetCostKind CostKind) override {
    return Impl.getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
  }
  unsigned getInliningThresholdMultiplier() const override {
    return Impl.getInliningThresholdMultiplier();
  }
  unsigned adjustInliningThreshold(const CallBase *CB) override {
    return Impl.adjustInliningThreshold(CB);
  }
  unsigned getInliningCostBenefitAnalysisSavingsMultiplier() const override {
    return Impl.getInliningCostBenefitAnalysisSavingsMultiplier();
  }
  unsigned getInliningCostBenefitAnalysisProfitableMultiplier() const override {
    return Impl.getInliningCostBenefitAnalysisProfitableMultiplier();
  }
  int getInliningLastCallToStaticBonus() const override {
    return Impl.getInliningLastCallToStaticBonus();
  }
  int getInlinerVectorBonusPercent() const override {
    return Impl.getInlinerVectorBonusPercent();
  }
  unsigned getCallerAllocaCost(const CallBase *CB,
                               const AllocaInst *AI) const override {
    return Impl.getCallerAllocaCost(CB, AI);
  }
  InstructionCost getMemcpyCost(const Instruction *I) override {
    return Impl.getMemcpyCost(I);
  }

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
    return Impl.getMaxMemIntrinsicInlineSizeThreshold();
  }

  InstructionCost getInstructionCost(const User *U,
                                     ArrayRef<const Value *> Operands,
                                     TargetCostKind CostKind) override {
    return Impl.getInstructionCost(U, Operands, CostKind);
  }
  BranchProbability getPredictableBranchThreshold() override {
    return Impl.getPredictableBranchThreshold();
  }
  InstructionCost getBranchMispredictPenalty() override {
    return Impl.getBranchMispredictPenalty();
  }
  bool hasBranchDivergence(const Function *F = nullptr) override {
    return Impl.hasBranchDivergence(F);
  }
  bool isSourceOfDivergence(const Value *V) override {
    return Impl.isSourceOfDivergence(V);
  }

  bool isAlwaysUniform(const Value *V) override {
    return Impl.isAlwaysUniform(V);
  }

  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
    return Impl.isValidAddrSpaceCast(FromAS, ToAS);
  }

  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
    return Impl.addrspacesMayAlias(AS0, AS1);
  }

  unsigned getFlatAddressSpace() override { return Impl.getFlatAddressSpace(); }

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const override {
    return Impl.collectFlatAddressOperands(OpIndexes, IID);
  }

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
    return Impl.isNoopAddrSpaceCast(FromAS, ToAS);
  }

  bool
  canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const override {
    return Impl.canHaveNonUndefGlobalInitializerInAddressSpace(AS);
  }

  unsigned getAssumedAddrSpace(const Value *V) const override {
    return Impl.getAssumedAddrSpace(V);
  }

  bool isSingleThreaded() const override { return Impl.isSingleThreaded(); }

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const override {
    return Impl.getPredicatedAddrSpace(V);
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const override {
    return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
  }

  bool isLoweredToCall(const Function *F) override {
    return Impl.isLoweredToCall(F);
  }
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) override {
    return Impl.getUnrollingPreferences(L, SE, UP, ORE);
  }
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             PeelingPreferences &PP) override {
    return Impl.getPeelingPreferences(L, SE, PP);
  }
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) override {
    return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  }
  unsigned getEpilogueVectorizationMinVF() override {
    return Impl.getEpilogueVectorizationMinVF();
  }
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) override {
    return Impl.preferPredicateOverEpilogue(TFI);
  }
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) override {
    return Impl.getPreferredTailFoldingStyle(IVUpdateMayOverflow);
  }
  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) override {
    return Impl.instCombineIntrinsic(IC, II);
  }
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) override {
    return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                 KnownBitsComputed);
  }
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) override {
    return Impl.simplifyDemandedVectorEltsIntrinsic(
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  }
  bool isLegalAddImmediate(int64_t Imm) override {
    return Impl.isLegalAddImmediate(Imm);
  }
  bool isLegalAddScalableImmediate(int64_t Imm) override {
    return Impl.isLegalAddScalableImmediate(Imm);
  }
  bool isLegalICmpImmediate(int64_t Imm) override {
    return Impl.isLegalICmpImmediate(Imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             Instruction *I, int64_t ScalableOffset) override {
    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
                                      AddrSpace, I, ScalableOffset);
  }
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) override {
    return Impl.isLSRCostLess(C1, C2);
  }
  bool isNumRegsMajorCostOfLSR() override {
    return Impl.isNumRegsMajorCostOfLSR();
  }
  bool shouldDropLSRSolutionIfLessProfitable() const override {
    return Impl.shouldDropLSRSolutionIfLessProfitable();
  }
  bool isProfitableLSRChainElement(Instruction *I) override {
    return Impl.isProfitableLSRChainElement(I);
  }
  bool canMacroFuseCmp() override { return Impl.canMacroFuseCmp(); }
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) override {
    return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
  }
  AddressingModeKind
  getPreferredAddressingMode(const Loop *L,
                             ScalarEvolution *SE) const override {
    return Impl.getPreferredAddressingMode(L, SE);
  }
  bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedStore(DataType, Alignment);
  }
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedLoad(DataType, Alignment);
  }
  bool isLegalNTStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalNTStore(DataType, Alignment);
  }
  bool isLegalNTLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalNTLoad(DataType, Alignment);
  }
  bool isLegalBroadcastLoad(Type *ElementTy,
                            ElementCount NumElements) const override {
    return Impl.isLegalBroadcastLoad(ElementTy, NumElements);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedScatter(DataType, Alignment);
  }
  bool isLegalMaskedGather(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedGather(DataType, Alignment);
  }
  bool forceScalarizeMaskedGather(VectorType *DataType,
                                  Align Alignment) override {
    return Impl.forceScalarizeMaskedGather(DataType, Alignment);
  }
  bool forceScalarizeMaskedScatter(VectorType *DataType,
                                   Align Alignment) override {
    return Impl.forceScalarizeMaskedScatter(DataType, Alignment);
  }
  bool isLegalMaskedCompressStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedCompressStore(DataType, Alignment);
  }
  bool isLegalMaskedExpandLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedExpandLoad(DataType, Alignment);
  }
  bool isLegalStridedLoadStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalStridedLoadStore(DataType, Alignment);
  }
  bool isLegalInterleavedAccessType(VectorType *VTy, unsigned Factor,
                                    Align Alignment,
                                    unsigned AddrSpace) override {
    return Impl.isLegalInterleavedAccessType(VTy, Factor, Alignment, AddrSpace);
  }
  bool isLegalMaskedVectorHistogram(Type *AddrType, Type *DataType) override {
    return Impl.isLegalMaskedVectorHistogram(AddrType, DataType);
  }
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const override {
    return Impl.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
  }
  bool enableOrderedReductions() override {
    return Impl.enableOrderedReductions();
  }
  bool hasDivRemOp(Type *DataType, bool IsSigned) override {
    return Impl.hasDivRemOp(DataType, IsSigned);
  }
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
    return Impl.hasVolatileVariant(I, AddrSpace);
  }
  bool prefersVectorizedAddressing() override {
    return Impl.prefersVectorizedAddressing();
  }
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       StackOffset BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) override {
    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
                                     AddrSpace);
  }
  bool LSRWithInstrQueries() override { return Impl.LSRWithInstrQueries(); }
  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
    return Impl.isTruncateFree(Ty1, Ty2);
  }
  bool isProfitableToHoist(Instruction *I) override {
    return Impl.isProfitableToHoist(I);
  }
  bool useAA() override { return Impl.useAA(); }
  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
  unsigned getRegUsageForType(Type *Ty) override {
    return Impl.getRegUsageForType(Ty);
  }
  bool shouldBuildLookupTables() override {
    return Impl.shouldBuildLookupTables();
  }
  bool shouldBuildLookupTablesForConstant(Constant *C) override {
    return Impl.shouldBuildLookupTablesForConstant(C);
  }
  bool shouldBuildRelLookupTables() override {
    return Impl.shouldBuildRelLookupTables();
  }
  bool useColdCCForColdCall(Function &F) override {
    return Impl.useColdCCForColdCall(F);
  }
  bool isTargetIntrinsicTriviallyScalarizable(Intrinsic::ID ID) override {
    return Impl.isTargetIntrinsicTriviallyScalarizable(ID);
  }

  bool isTargetIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                          unsigned ScalarOpdIdx) override {
    return Impl.isTargetIntrinsicWithScalarOpAtArg(ID, ScalarOpdIdx);
  }

  bool isTargetIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
                                              int OpdIdx) override {
    return Impl.isTargetIntrinsicWithOverloadTypeAtArg(ID, OpdIdx);
  }

  bool isTargetIntrinsicWithStructReturnOverloadAtField(Intrinsic::ID ID,
                                                        int RetIdx) override {
    return Impl.isTargetIntrinsicWithStructReturnOverloadAtField(ID, RetIdx);
  }

  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TargetCostKind CostKind,
                                           ArrayRef<Value *> VL = {}) override {
    return Impl.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                         CostKind, VL);
  }
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TargetCostKind CostKind) override {
    return Impl.getOperandsScalarizationOverhead(Args, Tys, CostKind);
  }

  bool supportsEfficientVectorElementLoadStore() override {
    return Impl.supportsEfficientVectorElementLoadStore();
  }

  bool supportsTailCalls() override { return Impl.supportsTailCalls(); }
  bool supportsTailCallFor(const CallBase *CB) override {
    return Impl.supportsTailCallFor(CB);
  }

  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
    return Impl.enableAggressiveInterleaving(LoopHasReductions);
  }
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const override {
    return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
  }
  bool enableSelectOptimize() override {
    return Impl.enableSelectOptimize();
  }
  bool shouldTreatInstructionLikeSelect(const Instruction *I) override {
    return Impl.shouldTreatInstructionLikeSelect(I);
  }
  bool enableInterleavedAccessVectorization() override {
    return Impl.enableInterleavedAccessVectorization();
  }
  bool enableMaskedInterleavedAccessVectorization() override {
    return Impl.enableMaskedInterleavedAccessVectorization();
  }
  bool isFPVectorizationPotentiallyUnsafe() override {
    return Impl.isFPVectorizationPotentiallyUnsafe();
  }
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) override {
    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                               Alignment, Fast);
  }
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
    return Impl.getPopcntSupport(IntTyWidthInBit);
  }
  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }

  bool isExpensiveToSpeculativelyExecute(const Instruction *I) override {
    return Impl.isExpensiveToSpeculativelyExecute(I);
  }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
    return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
  }

  InstructionCost getFPOpCost(Type *Ty) override {
    return Impl.getFPOpCost(Ty);
  }

  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                        const APInt &Imm, Type *Ty) override {
    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
  }
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) override {
    return Impl.getIntImmCost(Imm, Ty, CostKind);
  }
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) override {
    return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
  }
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) override {
    return Impl.getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  }
  bool preferToKeepConstantsAttached(const Instruction &Inst,
                                     const Function &Fn) const override {
    return Impl.preferToKeepConstantsAttached(Inst, Fn);
  }
  unsigned getNumberOfRegisters(unsigned ClassID) const override {
    return Impl.getNumberOfRegisters(ClassID);
  }
  bool hasConditionalLoadStoreForType(Type *Ty = nullptr) const override {
    return Impl.hasConditionalLoadStoreForType(Ty);
  }
  unsigned getRegisterClassForType(bool Vector,
                                   Type *Ty = nullptr) const override {
    return Impl.getRegisterClassForType(Vector, Ty);
  }
  const char *getRegisterClassName(unsigned ClassID) const override {
    return Impl.getRegisterClassName(ClassID);
  }
  TypeSize getRegisterBitWidth(RegisterKind K) const override {
    return Impl.getRegisterBitWidth(K);
  }
  unsigned getMinVectorRegisterBitWidth() const override {
    return Impl.getMinVectorRegisterBitWidth();
  }
  std::optional<unsigned> getMaxVScale() const override {
    return Impl.getMaxVScale();
  }
  std::optional<unsigned> getVScaleForTuning() const override {
    return Impl.getVScaleForTuning();
  }
  bool isVScaleKnownToBeAPowerOfTwo() const override {
    return Impl.isVScaleKnownToBeAPowerOfTwo();
  }
  bool shouldMaximizeVectorBandwidth(
      TargetTransformInfo::RegisterKind K) const override {
    return Impl.shouldMaximizeVectorBandwidth(K);
  }
  ElementCount getMinimumVF(unsigned ElemWidth,
                            bool IsScalable) const override {
    return Impl.getMinimumVF(ElemWidth, IsScalable);
  }
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
    return Impl.getMaximumVF(ElemWidth, Opcode);
  }
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const override {
    return Impl.getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
  }
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
    return Impl.shouldConsiderAddressTypePromotion(
        I, AllowPromotionWithoutCommonHeader);
  }
  unsigned getCacheLineSize() const override { return Impl.getCacheLineSize(); }
  std::optional<unsigned> getCacheSize(CacheLevel Level) const override {
    return Impl.getCacheSize(Level);
  }
  std::optional<unsigned>
  getCacheAssociativity(CacheLevel Level) const override {
    return Impl.getCacheAssociativity(Level);
  }

  std::optional<unsigned> getMinPageSize() const override {
    return Impl.getMinPageSize();
  }

  /// Return the preferred prefetch distance in terms of instructions.
  unsigned getPrefetchDistance() const override {
    return Impl.getPrefetchDistance();
  }

  /// Return the minimum stride necessary to trigger software
  /// prefetching.
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return Impl.getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                     NumPrefetches, HasCall);
  }

  /// Return the maximum prefetch distance in terms of loop
  /// iterations.
  unsigned getMaxPrefetchIterationsAhead() const override {
    return Impl.getMaxPrefetchIterationsAhead();
  }

  /// \return True if prefetching should also be done for writes.
  bool enableWritePrefetching() const override {
    return Impl.enableWritePrefetching();
  }

  /// \return True if the target wants to issue a prefetch in address space
  /// \p AS.
  bool shouldPrefetchAddressSpace(unsigned AS) const override {
    return Impl.shouldPrefetchAddressSpace(AS);
  }
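
  // The prefetch hooks above are typically consumed together. A sketch of how
  // a prefetch-insertion pass (e.g. LoopDataPrefetch) might combine them; the
  // access statistics and insertPrefetch() here are assumed to be provided by
  // the caller and are not part of this interface:
  //
  //   if (TTI.getPrefetchDistance() != 0 &&
  //       Stride >= TTI.getMinPrefetchStride(NumMemAccesses,
  //                                          NumStridedMemAccesses,
  //                                          NumPrefetches, HasCall) &&
  //       (!IsWrite || TTI.enableWritePrefetching()) &&
  //       TTI.shouldPrefetchAddressSpace(AS))
  //     insertPrefetch(/*...*/);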

  InstructionCost getPartialReductionCost(
      unsigned Opcode, Type *InputTypeA, Type *InputTypeB, Type *AccumType,
      ElementCount VF, PartialReductionExtendKind OpAExtend,
      PartialReductionExtendKind OpBExtend,
      std::optional<unsigned> BinOp = std::nullopt) const override {
    return Impl.getPartialReductionCost(Opcode, InputTypeA, InputTypeB,
                                        AccumType, VF, OpAExtend, OpBExtend,
                                        BinOp);
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) override {
    return Impl.getMaxInterleaveFactor(VF);
  }
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) override {
    return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
  }
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      OperandValueInfo Opd1Info, OperandValueInfo Opd2Info,
      ArrayRef<const Value *> Args,
      const Instruction *CxtI = nullptr) override {
    return Impl.getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Args, CxtI);
  }
  InstructionCost getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
                                  unsigned Opcode1,
                                  const SmallBitVector &OpcodeMask,
                                  TTI::TargetCostKind CostKind) const override {
    return Impl.getAltInstrCost(VecTy, Opcode0, Opcode1, OpcodeMask, CostKind);
  }

  InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args,
                                 const Instruction *CxtI) override {
    return Impl.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args,
                               CxtI);
  }
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) override {
    return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  }
  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy,
                                           unsigned Index) override {
    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  }
  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) override {
    return Impl.getCFInstrCost(Opcode, CostKind, I);
  }
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     OperandValueInfo Op1Info,
                                     OperandValueInfo Op2Info,
                                     const Instruction *I) override {
    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                   Op1Info, Op2Info, I);
  }
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0,
                                     Value *Op1) override {
    return Impl.getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  }
  InstructionCost getVectorInstrCost(
      unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
      Value *Scalar,
      ArrayRef<std::tuple<Value *, User *, int>> ScalarUserAndIdx) override {
    return Impl.getVectorInstrCost(Opcode, Val, CostKind, Index, Scalar,
                                   ScalarUserAndIdx);
  }
  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index) override {
    return Impl.getVectorInstrCost(I, Val, CostKind, Index);
  }
  InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) override {
    return Impl.getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
                                          DemandedDstElts, CostKind);
  }
  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  OperandValueInfo OpInfo,
                                  const Instruction *I) override {
    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
  }
  InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                    unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) override {
    return Impl.getVPMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);
  }
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) override {
    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
  }
  InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
  }
  InstructionCost
  getStridedMemoryOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) override {
    return Impl.getStridedMemoryOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
  }
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond, bool UseMaskForGaps) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
  }
  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) override {
    return Impl.getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  }
  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) override {
    return Impl.getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  }
  InstructionCost
  getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
                           VectorType *Ty, FastMathFlags FMF,
                           TTI::TargetCostKind CostKind) override {
    return Impl.getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
                                         CostKind);
  }
  InstructionCost
  getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty,
                         TTI::TargetCostKind CostKind) override {
    return Impl.getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
  }
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) override {
    return Impl.getIntrinsicInstrCost(ICA, CostKind);
  }
  InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) override {
    return Impl.getCallInstrCost(F, RetTy, Tys, CostKind);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                            const SCEV *Ptr) override {
    return Impl.getAddressComputationCost(Ty, SE, Ptr);
  }
  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
    return Impl.getAtomicMemIntrinsicMaxElementSize();
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  Type *getMemcpyLoopLoweringType(
      LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
      unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
      std::optional<uint32_t> AtomicElementSize) const override {
    return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                          DestAddrSpace, SrcAlign, DestAlign,
                                          AtomicElementSize);
  }
  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      Align SrcAlign, Align DestAlign,
      std::optional<uint32_t> AtomicCpySize) const override {
    Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                           SrcAddrSpace, DestAddrSpace,
                                           SrcAlign, DestAlign, AtomicCpySize);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
  unsigned getInlineCallPenalty(const Function *F, const CallBase &Call,
                                unsigned DefaultCallPenalty) const override {
    return Impl.getInlineCallPenalty(F, Call, DefaultCallPenalty);
  }
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const override {
    return Impl.areTypesABICompatible(Caller, Callee, Types);
  }
  bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
  }
  bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
  }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const override {
    return Impl.isLegalToVectorizeReduction(RdxDesc, VF);
  }
  bool isElementTypeLegalForScalableVector(Type *Ty) const override {
    return Impl.isElementTypeLegalForScalableVector(Ty);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
  bool preferFixedOverScalableIfEqualCost() const override {
    return Impl.preferFixedOverScalableIfEqualCost();
  }
  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const override {
    return Impl.preferInLoopReduction(Opcode, Ty, Flags);
  }
  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       ReductionFlags Flags) const override {
    return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
  }
  bool preferEpilogueVectorization() const override {
    return Impl.preferEpilogueVectorization();
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return Impl.shouldExpandReduction(II);
  }

  ReductionShuffle
  getPreferredExpandedReductionShuffle(const IntrinsicInst *II) const override {
    return Impl.getPreferredExpandedReductionShuffle(II);
  }

  unsigned getGISelRematGlobalCost() const override {
    return Impl.getGISelRematGlobalCost();
  }

  unsigned getMinTripCountTailFoldingThreshold() const override {
    return Impl.getMinTripCountTailFoldingThreshold();
  }

  bool supportsScalableVectors() const override {
    return Impl.supportsScalableVectors();
  }

  bool enableScalableVectorization() const override {
    return Impl.enableScalableVectorization();
  }

  bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                             Align Alignment) const override {
    return Impl.hasActiveVectorLength(Opcode, DataType, Alignment);
  }

  bool isProfitableToSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const override {
    return Impl.isProfitableToSinkOperands(I, Ops);
  }

  bool isVectorShiftByScalarCheap(Type *Ty) const override {
    return Impl.isVectorShiftByScalarCheap(Ty);
  }

  VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const override {
    return Impl.getVPLegalizationStrategy(PI);
  }

  bool hasArmWideBranch(bool Thumb) const override {
    return Impl.hasArmWideBranch(Thumb);
  }

  uint64_t getFeatureMask(const Function &F) const override {
    return Impl.getFeatureMask(F);
  }

  bool isMultiversionedFunction(const Function &F) const override {
    return Impl.isMultiversionedFunction(F);
  }

  unsigned getMaxNumArgs() const override {
    return Impl.getMaxNumArgs();
  }

  unsigned getNumBytesToPadGlobalArray(unsigned Size,
                                       Type *ArrayType) const override {
    return Impl.getNumBytesToPadGlobalArray(Size, ArrayType);
  }

  void collectKernelLaunchBounds(
      const Function &F,
      SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const override {
    Impl.collectKernelLaunchBounds(F, LB);
  }
};

template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}

/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
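///
/// A minimal usage sketch under the new pass manager, assuming the
/// default-constructed, DataLayout-based analysis (real clients typically
/// obtain the analysis from their TargetMachine instead) and an existing
/// Function \c F:
///
/// \code
///   FunctionAnalysisManager FAM;
///   FAM.registerPass([] { return TargetIRAnalysis(); });
///   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
///   bool Scalable = TTI.supportsScalableVectors();
/// \endcode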
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};

/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  std::optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// We must provide a default constructor for the pass but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};

/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

} // namespace llvm

#endif