//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>
using namespace llvm;

static cl::opt<unsigned>
PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
                          cl::desc("Threshold for partial unrolling"),
                          cl::Hidden);

#define DEBUG_TYPE "basictti"

namespace {

class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
  const TargetMachine *TM;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  /// Estimate the cost overhead of an SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) const;

  const TargetLoweringBase *getTLI() const {
    return TM->getSubtargetImpl()->getTargetLowering();
  }

public:
  BasicTTI() : ImmutablePass(ID), TM(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetMachine *TM) : ImmutablePass(ID), TM(TM) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  bool hasBranchDivergence() const override;

  /// \name Scalar TTI Implementations
  /// @{

  bool isLegalAddImmediate(int64_t imm) const override;
  bool isLegalICmpImmediate(int64_t imm) const override;
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                             int64_t BaseOffset, bool HasBaseReg,
                             int64_t Scale) const override;
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                           int64_t BaseOffset, bool HasBaseReg,
                           int64_t Scale) const override;
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTypeLegal(Type *Ty) const override;
  unsigned getJumpBufAlignment() const override;
  unsigned getJumpBufSize() const override;
  bool shouldBuildLookupTables() const override;
  bool haveFastSqrt(Type *Ty) const override;
  void getUnrollingPreferences(const Function *F, Loop *L,
                               UnrollingPreferences &UP) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getMaxInterleaveFactor() const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind, OperandValueProperties,
                                  OperandValueProperties) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCFInstrCost(unsigned Opcode) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;
  unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                 ArrayRef<Type*> Tys) const override;
  unsigned getNumberOfParts(Type *Tp) const override;
  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwise) const override;

  /// @}
};

}

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true,
                   false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetMachine *TM) {
  return new BasicTTI(TM);
}

bool BasicTTI::hasBranchDivergence() const { return false; }

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return getTLI()->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return getTLI()->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->isLegalAddressingMode(AM, Ty);
}

int BasicTTI::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->getScalingFactorCost(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return getTLI()->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = getTLI()->getValueType(Ty);
  return getTLI()->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return getTLI()->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return getTLI()->getJumpBufSize();
}

bool BasicTTI::shouldBuildLookupTables() const {
  const TargetLoweringBase *TLI = getTLI();
  return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
         TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
}

bool BasicTTI::haveFastSqrt(Type *Ty) const {
  const TargetLoweringBase *TLI = getTLI();
  EVT VT = TLI->getValueType(Ty);
  return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}

void BasicTTI::getUnrollingPreferences(const Function *F, Loop *L,
                                       UnrollingPreferences &UP) const {
  // This unrolling functionality is target independent, but to provide some
  // motivation for its intended use, for x86:

  // According to the Intel 64 and IA-32 Architectures Optimization Reference
  // Manual, Intel Core models and later have a loop stream detector (and
  // associated uop queue) that can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have no more than 4 (8 for Nehalem and later) branches
  //    taken, and none of them may be calls.
  //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

  // According to the Software Optimization Guide for AMD Family 15h
  // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
  // and loop buffer which can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have fewer than 16 branches.
  //  - The loop must have less than 40 uops in all executed loop branches.

  // The number of taken branches in a loop is hard to estimate here, and
  // benchmarking has revealed that it is better not to be conservative when
  // estimating the branch count. As a result, we'll ignore the branch limits
  // until someone finds a case where it matters in practice.

  unsigned MaxOps;
  const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>(F);
  if (PartialUnrollingThreshold.getNumOccurrences() > 0)
    MaxOps = PartialUnrollingThreshold;
  else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
    MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
  else
    return;

  // Scan the loop: don't unroll loops with calls.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;

    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
        ImmutableCallSite CS(J);
        if (const Function *F = CS.getCalledFunction()) {
          if (!TopTTI->isLoweredToCall(F))
            continue;
        }

        return;
      }
  }

  // Enable runtime and partial unrolling up to the specified size.
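  // As a rough illustration of the intent: with a 28-entry loop micro-op
  // buffer (the Nehalem-class figure cited above), a loop body estimated at
  // 7 instructions leaves room for roughly a 4x partial unroll. The loop
  // unroller itself compares its size estimate against PartialThreshold and
  // picks the final factor; the value chosen here only bounds that decision.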
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned BasicTTI::getMaxInterleaveFactor() const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind, OperandValueKind,
                                          OperandValueProperties,
                                          OperandValueProperties) const {
  // Check if any of the operands are vector operands.
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
  // Assume that floating point arithmetic operations cost twice as much as
  // integer operations.
  unsigned OpCost = (IsFloat ? 2 : 1);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2 * OpCost;
    return LT.first * 1 * OpCost;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2 * OpCost;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return OpCost;
}

unsigned BasicTTI::getAltShuffleOverhead(Type *Ty) const {
  assert(Ty->isVectorTy() && "Can only shuffle vectors");
  unsigned Cost = 0;
  // The shuffle cost is the cost of extracting each element from its source
  // argument plus the cost of inserting it into the result vector.

  // e.g. a <4 x float> shuffle with mask <0,5,2,7> needs to extract index 0
  // of the first vector, index 1 of the second vector, index 2 of the first
  // vector, and finally index 3 of the second vector, and insert them at
  // indices <0,1,2,3> of the result vector.
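  // The loop below therefore charges one extract and one insert per result
  // element: for the <4 x float> example above that is 4 extracts plus
  // 4 inserts, regardless of which source vector each lane comes from.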
  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }
  return Cost;
}

unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  if (Kind == SK_Alternate) {
    return getAltShuffleOverhead(Tp);
  }
  return 1;
}

unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Check for NOOP conversions.
  if (SrcLT.first == DstLT.first &&
      SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

    // Bitcasts between types that are legalized to the same type are free.
    if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
      return 0;
  }

  if (Opcode == Instruction::Trunc &&
      TLI->isTruncateFree(SrcLT.second, DstLT.second))
    return 0;

  if (Opcode == Instruction::ZExt &&
      TLI->isZExtFree(SrcLT.second, DstLT.second))
    return 0;

  // If the cast is marked as legal (or promote) then assume low cost.
  if (SrcLT.first == DstLT.first &&
      TLI->isOperationLegalOrPromote(ISD, DstLT.second))
    return 1;

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    // Just check the op cost. If the operation is legal then assume it
    // costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Assume that Zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or if the
    // vectors are legalized to different types, estimate the scalarization
    // costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions.
  // This is where we handle bitcasts between vectors and scalars. We need to
  // assume that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
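    // Model that as extracting every element of the source vector (when the
    // source is a vector) plus inserting every element of the destination
    // vector (when the destination is a vector).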
    return (Src->isVectorTy()? getScalarizationOverhead(Src, false, true):0) +
           (Dst->isVectorTy()? getScalarizationOverhead(Dst, true, false):0);

  llvm_unreachable("Unhandled cast");
}

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
      !TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the operation is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}

unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  std::pair<unsigned, MVT> LT =
      getTLI()->getTypeLegalizationCost(Val->getScalarType());

  return LT.first;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

  // Assume that all loads of legal types cost 1.
  unsigned Cost = LT.first;

  if (Src->isVectorTy() &&
      Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
    // This is a vector load that legalizes to a larger type than the vector
    // itself. Unless the corresponding extending load or truncating store is
    // legal, this will scalarize.
    TargetLowering::LegalizeAction LA = TargetLowering::Expand;
    EVT MemVT = getTLI()->getValueType(Src, true);
    if (MemVT.isSimple() && MemVT != MVT::Other) {
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
    }

    if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
      // This is a vector load/store for some illegal type that is scalarized.
      // We must account for the cost of building or decomposing the vector.
      Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                       Opcode == Instruction::Store);
    }
  }

  return Cost;
}

unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
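    // Charge one scalar call per vector lane plus the overhead of inserting
    // the vector result and extracting the vector operands. For example, an
    // unknown intrinsic returning <4 x float> from a single <4 x float>
    // argument is costed as 4 scalar calls plus 4 inserts plus 4 extracts.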
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
      }
    }

    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic call.
  case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
  case Intrinsic::sin:       ISD = ISD::FSIN;       break;
  case Intrinsic::cos:       ISD = ISD::FCOS;       break;
  case Intrinsic::exp:       ISD = ISD::FEXP;       break;
  case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
  case Intrinsic::log:       ISD = ISD::FLOG;       break;
  case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
  case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
  case Intrinsic::fabs:      ISD = ISD::FABS;       break;
  case Intrinsic::minnum:    ISD = ISD::FMINNUM;    break;
  case Intrinsic::maxnum:    ISD = ISD::FMAXNUM;    break;
  case Intrinsic::copysign:  ISD = ISD::FCOPYSIGN;  break;
  case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
  case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
  case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
  case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
  case Intrinsic::rint:      ISD = ISD::FRINT;      break;
  case Intrinsic::round:     ISD = ISD::FROUND;     break;
  case Intrinsic::pow:       ISD = ISD::FPOW;       break;
  case Intrinsic::fma:       ISD = ISD::FMA;        break;
  case Intrinsic::fmuladd:   ISD = ISD::FMA;        break;
  // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return 0;
  case Intrinsic::masked_store:
    return TopTTI->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
  case Intrinsic::masked_load:
    return TopTTI->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
  }

  const TargetLoweringBase *TLI = getTLI();
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2;
  }

  // If we can't lower fmuladd into an FMA estimate the cost as a floating
  // point mul followed by an add.
  if (IID == Intrinsic::fmuladd)
    return TopTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
           TopTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
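  // For example, a <4 x float> call with no direct lowering is charged
  // 10 * (cost of the equivalent scalar call) * 4, i.e. ten times the
  // per-lane scalar cost for each of the four lanes.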
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call; make it expensive.
  return 10;
}

unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
  return LT.first;
}

unsigned BasicTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  return 0;
}

unsigned BasicTTI::getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwise) const {
  assert(Ty->isVectorTy() && "Expect a vector type");
  unsigned NumVecElts = Ty->getVectorNumElements();
  unsigned NumReduxLevels = Log2_32(NumVecElts);
  unsigned ArithCost = NumReduxLevels *
                       TopTTI->getArithmeticInstrCost(Opcode, Ty);
  // Assume the pairwise shuffles add a cost.
  unsigned ShuffleCost =
      NumReduxLevels * (IsPairwise + 1) *
      TopTTI->getShuffleCost(SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
  return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}