//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

static cl::opt<bool>
LsrNoInsnsCost("ppc-lsr-no-insns-cost", cl::Hidden, cl::init(false),
               cl::desc("Do not add instruction count to lsr cost model"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                              TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
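      // (lis loads a 16-bit immediate into the upper halfword, so a value
      // whose low 16 bits are zero takes a single instruction.)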
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

int PPCTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

int PPCTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
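    // (Record-form ("dot") instructions set CR0 from their result, so an
    // explicit compare against zero is unnecessary.)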
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

unsigned
PPCTTIImpl::getUserCost(const User *U, ArrayRef<const Value *> Operands,
                        TTI::TargetCostKind CostKind) {
  // We already implement getCastInstrCost and perform the vector adjustment
  // there.
  if (!isa<CastInst>(U) && U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands, CostKind);
  }

  return BaseT::getUserCost(U, Operands, CostKind);
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB, TargetLibraryInfo *LibInfo,
                             SmallPtrSetImpl<const Value *> &Visited) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value *)> memAddrUsesCTR =
      [&memAddrUsesCTR, &TM, &Visited](const Value *MemAddr) -> bool {
    // No need to traverse again if we already checked this operand.
    if (!Visited.insert(MemAddr).second)
      return false;
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
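      // If the asm clobbers CTR, a CTR-based hardware loop would lose its
      // trip count, so we must not form one.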
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to set_loop_iterations or loop_decrement,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::fma: Opcode = ISD::FMA; break;
          case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
          case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
          case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
          case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
          case Intrinsic::rint: Opcode = ISD::FRINT; break;
          case Intrinsic::lrint: Opcode = ISD::LRINT; break;
          case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round: Opcode = ISD::FROUND; break;
          case Intrinsic::lround: Opcode = ISD::LROUND; break;
          case Intrinsic::llround: Opcode = ISD::LLROUND; break;
          case Intrinsic::minnum: Opcode = ISD::FMINNUM; break;
          case Intrinsic::maxnum: Opcode = ISD::FMAXNUM; break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
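          // The remaining cases map to ISD opcodes; whether they stay inline
          // depends on the legality check below.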
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
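    // (It is lowered to a call to the fmod family of library functions.)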
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert small, short loops to CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  SmallPtrSet<const Value *, 4> Visited;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo, Visited))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getCPUDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions which are cold at
// all call sites when the callers of the functions are not calling any other
// non-coldcc functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. This makes such combining much more
  // likely (compared to only using concatenation unrolling).
  if (ST->getCPUDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  assert(ClassID == GPRRC || ClassID == FPRRC ||
         ClassID == VRRC || ClassID == VSXRC);
  if (ST->hasVSX()) {
    assert(ClassID == GPRRC || ClassID == VSXRC || ClassID == VRRC);
    return ClassID == VSXRC ? 64 : 32;
  }
  assert(ClassID == GPRRC || ClassID == FPRRC || ClassID == VRRC);
  return 32;
}

unsigned PPCTTIImpl::getRegisterClassForType(bool Vector, Type *Ty) const {
  if (Vector)
    return ST->hasVSX() ? VSXRC : VRRC;
  else if (Ty && (Ty->getScalarType()->isFloatTy() ||
                  Ty->getScalarType()->isDoubleTy()))
    return ST->hasVSX() ? VSXRC : FPRRC;
  else if (Ty && (Ty->getScalarType()->isFP128Ty() ||
                  Ty->getScalarType()->isPPC_FP128Ty()))
    return VRRC;
  else if (Ty && Ty->getScalarType()->isHalfTy())
    return VSXRC;
  else
    return GPRRC;
}

const char* PPCTTIImpl::getRegisterClassName(unsigned ClassID) const {

  switch (ClassID) {
  default:
    llvm_unreachable("unknown register class");
    return "PPC::unknown register class";
  case GPRRC: return "PPC::GPRRC";
  case FPRRC: return "PPC::FPRRC";
  case VRRC: return "PPC::VRRC";
  case VSXRC: return "PPC::VSXRC";
  }
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // Starting with P7 we have a cache line size of 128.
  unsigned Directive = ST->getCPUDirective();
  // Assume that Future CPU has the same cache line size as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  // This seems like a reasonable default for the BG/Q (this pass is enabled,
  // by default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getCPUDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: the same for P9 as previous gen until POWER9 scheduling is ready
  // FIXME: the same for P10 as previous gen until POWER10 scheduling is ready
  // Assume that future is the same as the others.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9 || Directive == PPC::DIR_PWR10 ||
      Directive == PPC::DIR_PWR_FUTURE)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets on which there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code relative to scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                           Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
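  // (With Altivec/VSX, a single vperm can apply an arbitrary byte-wise
  // permutation to the concatenation of two source registers, which is why
  // one instruction per legalized register suffices.)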
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I);
  Cost = vectorCostAdjustment(Cost, Opcode, Dst, Src);
  // TODO: Allow non-throughput costs that aren't binary.
  if (CostKind != TTI::TCK_RecipThroughput)
    return Cost == 0 ? 0 : 1;
  return Cost;
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;

  } else if (Val->getScalarType()->isIntegerTy() && Index != -1U) {
    if (ST->hasP9Altivec()) {
      if (ISD == ISD::INSERT_VECTOR_ELT)
        // A move-to VSR and a permute/insert. Assume vector operation cost
        // for both (cost will be 2x on P9).
        return vectorCostAdjustment(2, Opcode, Val, nullptr);

      // It's an extract. Maybe we can do a cheap move-from VSR.
      unsigned EltSize = Val->getScalarSizeInBits();
      if (EltSize == 64) {
        unsigned MfvsrdIndex = ST->isLittleEndian() ? 1 : 0;
        if (Index == MfvsrdIndex)
          return 1;
      } else if (EltSize == 32) {
        unsigned MfvsrwzIndex = ST->isLittleEndian() ? 2 : 1;
        if (Index == MfvsrwzIndex)
          return 1;
      }

      // We need a vector extract (or mfvsrld). Assume vector operation cost.
      // The cost of the load constant for a vector extract is disregarded
      // (invariant, easily schedulable).
      return vectorCostAdjustment(1, Opcode, Val, nullptr);

    } else if (ST->hasDirectMove())
      // Assume permute has standard cost.
      // Assume move-to/move-from VSR have 2x standard cost.
      return 3;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                    CostKind);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || *Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      *Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  assert(Alignment);
  Cost += LT.first * ((SrcBytes / Alignment->value()) - 1);

  // For a vector type, there is also scalarization overhead (only for
  // stores, loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = cast<FixedVectorType>(Src)->getNumElements(); i < e;
         ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           TTI::TargetCostKind CostKind,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost =
      getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
                      CostKind);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor*(LT.first-1);

  return Cost;
}

unsigned PPCTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                           TTI::TargetCostKind CostKind) {
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}

bool PPCTTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // The PowerPC default behaviour here is to give instruction count first
  // priority. If LsrNoInsnsCost is set, fall back to the default
  // implementation instead.
  if (!LsrNoInsnsCost)
    return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                    C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                    C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  else
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
}