//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// This is currently only used for the data prefetch pass which is only enabled
// for BG/Q by default.
static cl::opt<unsigned>
CacheLineSize("ppc-loop-prefetch-cache-line", cl::Hidden, cl::init(64),
              cl::desc("The loop prefetch cache line size"));

static cl::opt<bool>
EnablePPCColdCC("ppc-enable-coldcc", cl::Hidden, cl::init(false),
                cl::desc("Enable using coldcc calling conv for cold "
                         "internal functions"));

// The latency of mtctr is only justified if there are more than 4
// comparisons that will be removed as a result.
static cl::opt<unsigned>
SmallCTRLoopThreshold("min-ctr-loop-threshold", cl::init(4), cl::Hidden,
                      cl::desc("Loops with a constant trip count smaller than "
                               "this value will not use the count register."));

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() != PPCSubtarget::POPCNTD_Unavailable && TyWidth <= 64)
    return ST->hasPOPCNTD() == PPCSubtarget::POPCNTD_Slow ?
             TTI::PSK_SlowHardware : TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}

int PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}

int PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

int PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    LLVM_FALLTHROUGH;
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Zero comparisons can use record-form instructions.
    LLVM_FALLTHROUGH;
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}

unsigned PPCTTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (U->getType()->isVectorTy()) {
    // Instructions that need to be split should cost more.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, U->getType());
    return LT.first * BaseT::getUserCost(U, Operands);
  }

  return BaseT::getUserCost(U, Operands);
}

bool PPCTTIImpl::mightUseCTR(BasicBlock *BB,
                             TargetLibraryInfo *LibInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();

  // Loop through the inline asm constraints and look for something that
  // clobbers ctr.
  auto asmClobbersCTR = [](InlineAsm *IA) {
    InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
    for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
      InlineAsm::ConstraintInfo &C = CIV[i];
      if (C.Type != InlineAsm::isInput)
        for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
          if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
            return true;
    }
    return false;
  };

  // Determining the address of a TLS variable results in a function call in
  // certain TLS models.
  std::function<bool(const Value*)> memAddrUsesCTR =
    [&memAddrUsesCTR, &TM](const Value *MemAddr) -> bool {
    const auto *GV = dyn_cast<GlobalValue>(MemAddr);
    if (!GV) {
      // Recurse to check for constants that refer to TLS global variables.
      if (const auto *CV = dyn_cast<Constant>(MemAddr))
        for (const auto &CO : CV->operands())
          if (memAddrUsesCTR(CO))
            return true;

      return false;
    }

    if (!GV->isThreadLocal())
      return false;
    TLSModel::Model Model = TM.getTLSModel(GV);
    return Model == TLSModel::GeneralDynamic ||
           Model == TLSModel::LocalDynamic;
  };

  auto isLargeIntegerTy = [](bool Is32Bit, Type *Ty) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
      return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

    return false;
  };

  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      // Inline ASM is okay, unless it clobbers the ctr register.
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        if (asmClobbersCTR(IA))
          return true;
        continue;
      }

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to set_loop_iterations or loop_decrement,
          // we're definitely using CTR.
          case Intrinsic::set_loop_iterations:
          case Intrinsic::loop_decrement:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
                       !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
 // let's return it to _setjmp state
#  pragma pop_macro("setjmp")
#  undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
          case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:     Opcode = ISD::FROUND;     break;
          case Intrinsic::minnum:    Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:    Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          EVT EVTy =
              TLI->getValueType(DL, CI->getArgOperand(0)->getType(), true);

          if (EVTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, EVTy))
            continue;
          else if (EVTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, EVTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_f128 values become calls.
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(!TM.isPPC64(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(!TM.isPPC64(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (!TM.isPPC64() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // FREM is always a call.
    if (J->getOpcode() == Instruction::FRem)
      return true;

    if (ST->useSoftFloat()) {
      switch(J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(Operand))
        return true;
  }

  return false;
}

bool PPCTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  const PPCTargetMachine &TM = ST->getTargetMachine();
  TargetSchedModel SchedModel;
  SchedModel.init(ST);

  // Do not convert loops with a small constant trip count to CTR loops.
  unsigned ConstTripCount = SE.getSmallConstantTripCount(L);
  if (ConstTripCount && ConstTripCount < SmallCTRLoopThreshold) {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
    CodeMetrics Metrics;
    for (BasicBlock *BB : L->blocks())
      Metrics.analyzeBasicBlock(BB, *this, EphValues);
    // 6 is an approximate latency for the mtctr instruction.
    if (Metrics.NumInsts <= (6 * SchedModel.getIssueWidth()))
      return false;
  }

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(*I, LibInfo))
      return false;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // If there is an exit edge known to be frequently taken,
  // we should not transform this loop.
  for (auto &BB : ExitingBlocks) {
    Instruction *TI = BB->getTerminator();
    if (!TI) continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      uint64_t TrueWeight = 0, FalseWeight = 0;
      if (!BI->isConditional() ||
          !BI->extractProfMetadata(TrueWeight, FalseWeight))
        continue;

      // If the exit path is more frequent than the loop path,
      // we return here without further analysis for this loop.
      bool TrueIsExit = !L->contains(BI->getSuccessor(0));
      if (( TrueIsExit && FalseWeight < TrueWeight) ||
          (!TrueIsExit && FalseWeight > TrueWeight))
        return false;
    }
  }

  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CountType = TM.isPPC64() ?
    Type::getInt64Ty(C) : Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}

void PPCTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, SE, UP);
}

// This function returns true to allow using coldcc calling convention.
// Returning true results in coldcc being used for functions that are cold at
// all call sites when their callers do not call any other non-coldcc
// functions.
bool PPCTTIImpl::useColdCCForColdCall(Function &F) {
  return EnablePPCColdCC;
}

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  // On the A2, always unroll aggressively. For QPX unaligned loads, we depend
  // on combining the loads generated for consecutive accesses, and failure to
  // do so is particularly expensive. Aggressive interleaving makes such
  // combining much more likely (compared to only using concatenation
  // unrolling).
  if (ST->getDarwinDirective() == PPC::DIR_A2)
    return true;

  return LoopHasReductions;
}

PPCTTIImpl::TTI::MemCmpExpansionOptions
PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  return Options;
}

bool PPCTTIImpl::enableInterleavedAccessVectorization() {
  return true;
}

unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTIImpl::getCacheLineSize() const {
  // Check first if the user specified a custom line size.
  if (CacheLineSize.getNumOccurrences() > 0)
    return CacheLineSize;

  // On P7, P8 or P9 we have a cache line size of 128.
  unsigned Directive = ST->getDarwinDirective();
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 128;

  // On other processors return a default of 64 bytes.
  return 64;
}

unsigned PPCTTIImpl::getPrefetchDistance() const {
  // This seems like a reasonable default for the BG/Q (this pass is enabled, by
  // default, only on the BG/Q).
  return 300;
}

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  // FIXME: Treat P9 the same as the previous generation until POWER9
  // scheduling is ready.
  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8 ||
      Directive == PPC::DIR_PWR9)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

// Adjust the cost of vector instructions on targets where there is overlap
// between the vector and scalar units, thereby reducing the overall throughput
// of vector code wrt. scalar code.
int PPCTTIImpl::vectorCostAdjustment(int Cost, unsigned Opcode, Type *Ty1,
                                     Type *Ty2) {
  if (!ST->vectorsUseTwoUnits() || !Ty1->isVectorTy())
    return Cost;

  std::pair<int, MVT> LT1 = TLI->getTypeLegalizationCost(DL, Ty1);
  // If type legalization involves splitting the vector, we don't want to
  // double the cost at every step - only the last step.
  if (LT1.first != 1 || !LT1.second.isVector())
    return Cost;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  if (TLI->isOperationExpand(ISD, LT1.second))
    return Cost;

  if (Ty2) {
    std::pair<int, MVT> LT2 = TLI->getTypeLegalizationCost(DL, Ty2);
    if (LT2.first != 1 || !LT2.second.isVector())
      return Cost;
  }

  return Cost * 2;
}

int PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                           Opd1PropInfo, Opd2PropInfo);
  return vectorCostAdjustment(Cost, Opcode, Ty, nullptr);
}

int PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). We need one such shuffle instruction for each actual
  // register (this is not true for arbitrary shuffles, but is true for the
  // structured types of shuffles covered by TTI::ShuffleKind).
  return vectorCostAdjustment(LT.first, Instruction::ShuffleVector, Tp,
                              nullptr);
}

int PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  int Cost = BaseT::getCastInstrCost(Opcode, Dst, Src);
  return vectorCostAdjustment(Cost, Opcode, Dst, Src);
}

int PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  int Cost = BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  return vectorCostAdjustment(Cost, Opcode, ValTy, nullptr);
}

int PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  int Cost = BaseT::getVectorInstrCost(Opcode, Val, Index);
  Cost = vectorCostAdjustment(Cost, Opcode, Val, nullptr);

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0 (or #1 if LE).
    if (ISD == ISD::EXTRACT_VECTOR_ELT &&
        Index == (ST->isLittleEndian() ? 1 : 0))
      return 0;

    return Cost;

  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return Cost;
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + Cost;

  return Cost;
}

int PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  int Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  Cost = vectorCostAdjustment(Cost, Opcode, Src, nullptr);

  bool IsAltivecType = ST->hasAltivec() &&
                       (LT.second == MVT::v16i8 || LT.second == MVT::v8i16 ||
                        LT.second == MVT::v4i32 || LT.second == MVT::v4f32);
  bool IsVSXType = ST->hasVSX() &&
                   (LT.second == MVT::v2f64 || LT.second == MVT::v2i64);
  bool IsQPXType = ST->hasQPX() &&
                   (LT.second == MVT::v4f64 || LT.second == MVT::v4f32);

  // VSX has 32b/64b load instructions. Legalization can handle loading of
  // 32b/64b to VSR correctly and cheaply. But BaseT::getMemoryOpCost and
  // PPCTargetLowering can't compute the cost appropriately. So here we
  // explicitly check this case.
  unsigned MemBytes = Src->getPrimitiveSizeInBits();
  if (Opcode == Instruction::Load && ST->hasVSX() && IsAltivecType &&
      (MemBytes == 64 || (ST->hasP8Vector() && MemBytes == 32)))
    return 1;

  // Aligned loads and stores are easy.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (!SrcBytes || !Alignment || Alignment >= SrcBytes)
    return Cost;

  // If we can use the permutation-based load sequence, then this is also
  // relatively cheap (not counting loop-invariant instructions): one load plus
  // one permute (the last load in a series has extra cost, but we're
  // neglecting that here). Note that on the P7, we could do unaligned loads
  // for Altivec types using the VSX instructions, but that's more expensive
  // than using the permutation-based load sequence. On the P8, that's no
  // longer true.
  if (Opcode == Instruction::Load &&
      ((!ST->hasP8Vector() && IsAltivecType) || IsQPXType) &&
      Alignment >= LT.second.getScalarType().getStoreSize())
    return Cost + LT.first; // Add the cost of the permutations.

  // For VSX, we can do unaligned loads and stores on Altivec/VSX types. On the
  // P7, unaligned vector loads are more expensive than the permutation-based
  // load sequence, so that might be used instead, but regardless, the net cost
  // is about the same (not counting loop-invariant instructions).
  if (IsVSXType || (ST->hasVSX() && IsAltivecType))
    return Cost;

  // Newer PPC supports unaligned memory access.
  if (TLI->allowsMisalignedMemoryAccesses(LT.second, 0))
    return Cost;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.

  // Add the cost of each scalar load or store.
  Cost += LT.first*(SrcBytes/Alignment-1);

  // For a vector type, there is also scalarization overhead (only for
  // stores; loads are expanded using the vector-load + permutation sequence,
  // which is much less expensive).
  if (Src->isVectorTy() && Opcode == Instruction::Store)
    for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);

  return Cost;
}

int PPCTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  assert(isa<VectorType>(VecTy) &&
         "Expect a vector type for interleaved memory op");

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, VecTy);

  // First, the cost of the load/store operation.
  int Cost = getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace);

  // PPC, for both Altivec/VSX and QPX, supports cheap arbitrary permutations
  // (at least in the sense that there need only be one non-loop-invariant
  // instruction). For each result vector, we need one shuffle per incoming
  // vector (except that the first shuffle can take two incoming vectors
  // because it does not need to take itself).
  Cost += Factor*(LT.first-1);

  return Cost;
}

bool PPCTTIImpl::canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                            LoopInfo *LI, DominatorTree *DT,
                            AssumptionCache *AC, TargetLibraryInfo *LibInfo) {
  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (canSaveCmp(*I, BI, SE, LI, DT, AC, LibInfo))
      return false; // Stop search.

  HardwareLoopInfo HWLoopInfo(L);

  if (!HWLoopInfo.canAnalyze(*LI))
    return false;

  if (!isHardwareLoopProfitable(L, *SE, *AC, LibInfo, HWLoopInfo))
    return false;

  if (!HWLoopInfo.isHardwareLoopCandidate(*SE, *LI, *DT))
    return false;

  *BI = HWLoopInfo.ExitBranch;
  return true;
}