//===-- PPCCTRLoops.cpp - Identify and generate CTR loops ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass identifies loops where we can generate the PPC branch instructions
// that decrement and test the count register (CTR) (bdnz and friends).
//
// The pattern that defines the induction variable can change depending on
// prior optimizations. For example, the IndVarSimplify phase run by 'opt'
// normalizes induction variables, and the Loop Strength Reduction pass
// run by 'llc' may also make changes to the induction variable.
//
// Criteria for CTR loops:
//  - Countable loops (w/ ind. var for a trip count)
//  - Try inner-most loops first
//  - No nested CTR loops.
//  - No function calls in loops.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "PPC.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/PassSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifndef NDEBUG
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#endif

using namespace llvm;

#define DEBUG_TYPE "ctrloops"

#ifndef NDEBUG
static cl::opt<int> CTRLoopLimit("ppc-max-ctrloop", cl::Hidden, cl::init(-1));
#endif

STATISTIC(NumCTRLoops, "Number of loops converted to CTR loops");

namespace llvm {
  void initializePPCCTRLoopsPass(PassRegistry&);
#ifndef NDEBUG
  void initializePPCCTRLoopsVerifyPass(PassRegistry&);
#endif
}

namespace {
  struct PPCCTRLoops : public FunctionPass {

#ifndef NDEBUG
    static int Counter;
#endif

  public:
    static char ID;

    PPCCTRLoops() : FunctionPass(ID), TM(nullptr) {
      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
    }
    PPCCTRLoops(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
      initializePPCCTRLoopsPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<ScalarEvolutionWrapperPass>();
    }

  private:
    bool mightUseCTR(const Triple &TT, BasicBlock *BB);
    bool convertToCTRLoop(Loop *L);

  private:
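    // Analyses and target information, cached per-function in
    // runOnFunction() for use by the helpers above.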
    PPCTargetMachine *TM;
    LoopInfo *LI;
    ScalarEvolution *SE;
    const DataLayout *DL;
    DominatorTree *DT;
    const TargetLibraryInfo *LibInfo;
    bool PreserveLCSSA;
  };

  char PPCCTRLoops::ID = 0;
#ifndef NDEBUG
  int PPCCTRLoops::Counter = 0;
#endif

#ifndef NDEBUG
  struct PPCCTRLoopsVerify : public MachineFunctionPass {
  public:
    static char ID;

    PPCCTRLoopsVerify() : MachineFunctionPass(ID) {
      initializePPCCTRLoopsVerifyPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

  private:
    MachineDominatorTree *MDT;
  };

  char PPCCTRLoopsVerify::ID = 0;
#endif // NDEBUG
} // end anonymous namespace

INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops",
                    false, false)

FunctionPass *llvm::createPPCCTRLoops(PPCTargetMachine &TM) {
  return new PPCCTRLoops(TM);
}

#ifndef NDEBUG
INITIALIZE_PASS_BEGIN(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                      "PowerPC CTR Loops Verify", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PPCCTRLoopsVerify, "ppc-ctr-loops-verify",
                    "PowerPC CTR Loops Verify", false, false)

FunctionPass *llvm::createPPCCTRLoopsVerify() {
  return new PPCCTRLoopsVerify();
}
#endif // NDEBUG

bool PPCCTRLoops::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DL = &F.getParent()->getDataLayout();
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  LibInfo = TLIP ? &TLIP->getTLI() : nullptr;
  PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);

  bool MadeChange = false;

  for (LoopInfo::iterator I = LI->begin(), E = LI->end();
       I != E; ++I) {
    Loop *L = *I;
    if (!L->getParentLoop())
      MadeChange |= convertToCTRLoop(L);
  }

  return MadeChange;
}

static bool isLargeIntegerTy(bool Is32Bit, Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
    return ITy->getBitWidth() > (Is32Bit ? 32U : 64U);

  return false;
}

// Determining the address of a TLS variable results in a function call in
// certain TLS models.
static bool memAddrUsesCTR(const PPCTargetMachine *TM,
                           const Value *MemAddr) {
  const auto *GV = dyn_cast<GlobalValue>(MemAddr);
  if (!GV) {
    // Recurse to check for constants that refer to TLS global variables.
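    // (A TLS address is often hidden behind a ConstantExpr, e.g. a GEP or
    // bitcast of the thread-local global, so walk constant operands too.)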
    if (const auto *CV = dyn_cast<Constant>(MemAddr))
      for (const auto &CO : CV->operands())
        if (memAddrUsesCTR(TM, CO))
          return true;

    return false;
  }

  if (!GV->isThreadLocal())
    return false;
  if (!TM)
    return true;
  TLSModel::Model Model = TM->getTLSModel(GV);
  return Model == TLSModel::GeneralDynamic || Model == TLSModel::LocalDynamic;
}

bool PPCCTRLoops::mightUseCTR(const Triple &TT, BasicBlock *BB) {
  for (BasicBlock::iterator J = BB->begin(), JE = BB->end();
       J != JE; ++J) {
    if (CallInst *CI = dyn_cast<CallInst>(J)) {
      if (InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue())) {
        // Inline ASM is okay, unless it clobbers the ctr register.
        InlineAsm::ConstraintInfoVector CIV = IA->ParseConstraints();
        for (unsigned i = 0, ie = CIV.size(); i < ie; ++i) {
          InlineAsm::ConstraintInfo &C = CIV[i];
          if (C.Type != InlineAsm::isInput)
            for (unsigned j = 0, je = C.Codes.size(); j < je; ++j)
              if (StringRef(C.Codes[j]).equals_lower("{ctr}"))
                return true;
        }

        continue;
      }

      if (!TM)
        return true;
      const TargetLowering *TLI =
          TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();

      if (Function *F = CI->getCalledFunction()) {
        // Most intrinsics don't become function calls, but some might.
        // sin, cos, exp and log are always calls.
        unsigned Opcode = 0;
        if (F->getIntrinsicID() != Intrinsic::not_intrinsic) {
          switch (F->getIntrinsicID()) {
          default: continue;
          // If we have a call to ppc_is_decremented_ctr_nonzero or ppc_mtctr,
          // we're definitely using CTR.
          case Intrinsic::ppc_is_decremented_ctr_nonzero:
          case Intrinsic::ppc_mtctr:
            return true;

// VisualStudio defines setjmp as _setjmp
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
# pragma push_macro("setjmp")
# undef setjmp
# define setjmp_undefined_for_msvc
#endif

          case Intrinsic::setjmp:

#if defined(_MSC_VER) && defined(setjmp_undefined_for_msvc)
// let's return it to _setjmp state
# pragma pop_macro("setjmp")
# undef setjmp_undefined_for_msvc
#endif

          case Intrinsic::longjmp:

          // Exclude eh_sjlj_setjmp; we don't need to exclude eh_sjlj_longjmp
          // because, although it does clobber the counter register, the
          // control can't then return to inside the loop unless there is also
          // an eh_sjlj_setjmp.
          case Intrinsic::eh_sjlj_setjmp:

          case Intrinsic::memcpy:
          case Intrinsic::memmove:
          case Intrinsic::memset:
          case Intrinsic::powi:
          case Intrinsic::log:
          case Intrinsic::log2:
          case Intrinsic::log10:
          case Intrinsic::exp:
          case Intrinsic::exp2:
          case Intrinsic::pow:
          case Intrinsic::sin:
          case Intrinsic::cos:
            return true;
          case Intrinsic::copysign:
            if (CI->getArgOperand(0)->getType()->getScalarType()->
                isPPC_FP128Ty())
              return true;
            else
              continue; // ISD::FCOPYSIGN is never a library call.
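          // The remaining FP intrinsics lower to library calls only on some
          // subtargets; record the corresponding ISD opcode and defer to the
          // operation-legality check below.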
          case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
          case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
          case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
          case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
          case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
          case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
          case Intrinsic::round:     Opcode = ISD::FROUND;     break;
          case Intrinsic::minnum:    Opcode = ISD::FMINNUM;    break;
          case Intrinsic::maxnum:    Opcode = ISD::FMAXNUM;    break;
          case Intrinsic::umul_with_overflow: Opcode = ISD::UMULO; break;
          case Intrinsic::smul_with_overflow: Opcode = ISD::SMULO; break;
          }
        }

        // PowerPC does not use [US]DIVREM or other library calls for
        // operations on regular types which are not otherwise library calls
        // (i.e. soft float or atomics). If adapting for targets that do,
        // additional care is required here.

        LibFunc Func;
        if (!F->hasLocalLinkage() && F->hasName() && LibInfo &&
            LibInfo->getLibFunc(F->getName(), Func) &&
            LibInfo->hasOptimizedCodeGen(Func)) {
          // Non-read-only functions are never treated as intrinsics.
          if (!CI->onlyReadsMemory())
            return true;

          // Conversion happens only for FP calls.
          if (!CI->getArgOperand(0)->getType()->isFloatingPointTy())
            return true;

          switch (Func) {
          default: return true;
          case LibFunc_copysign:
          case LibFunc_copysignf:
            continue; // ISD::FCOPYSIGN is never a library call.
          case LibFunc_copysignl:
            return true;
          case LibFunc_fabs:
          case LibFunc_fabsf:
          case LibFunc_fabsl:
            continue; // ISD::FABS is never a library call.
          case LibFunc_sqrt:
          case LibFunc_sqrtf:
          case LibFunc_sqrtl:
            Opcode = ISD::FSQRT; break;
          case LibFunc_floor:
          case LibFunc_floorf:
          case LibFunc_floorl:
            Opcode = ISD::FFLOOR; break;
          case LibFunc_nearbyint:
          case LibFunc_nearbyintf:
          case LibFunc_nearbyintl:
            Opcode = ISD::FNEARBYINT; break;
          case LibFunc_ceil:
          case LibFunc_ceilf:
          case LibFunc_ceill:
            Opcode = ISD::FCEIL; break;
          case LibFunc_rint:
          case LibFunc_rintf:
          case LibFunc_rintl:
            Opcode = ISD::FRINT; break;
          case LibFunc_round:
          case LibFunc_roundf:
          case LibFunc_roundl:
            Opcode = ISD::FROUND; break;
          case LibFunc_trunc:
          case LibFunc_truncf:
          case LibFunc_truncl:
            Opcode = ISD::FTRUNC; break;
          case LibFunc_fmin:
          case LibFunc_fminf:
          case LibFunc_fminl:
            Opcode = ISD::FMINNUM; break;
          case LibFunc_fmax:
          case LibFunc_fmaxf:
          case LibFunc_fmaxl:
            Opcode = ISD::FMAXNUM; break;
          }
        }

        if (Opcode) {
          auto &DL = CI->getModule()->getDataLayout();
          MVT VTy = TLI->getSimpleValueType(DL, CI->getArgOperand(0)->getType(),
                                            true);
          if (VTy == MVT::Other)
            return true;

          if (TLI->isOperationLegalOrCustom(Opcode, VTy))
            continue;
          else if (VTy.isVector() &&
                   TLI->isOperationLegalOrCustom(Opcode, VTy.getScalarType()))
            continue;

          return true;
        }
      }

      return true;
    } else if (isa<BinaryOperator>(J) &&
               J->getType()->getScalarType()->isPPC_FP128Ty()) {
      // Most operations on ppc_fp128 values become calls.
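      // (ppc_fp128 is the 128-bit "double-double" format, which has almost no
      // direct hardware support.)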
      return true;
    } else if (isa<UIToFPInst>(J) || isa<SIToFPInst>(J) ||
               isa<FPToUIInst>(J) || isa<FPToSIInst>(J)) {
      CastInst *CI = cast<CastInst>(J);
      if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() ||
          CI->getDestTy()->getScalarType()->isPPC_FP128Ty() ||
          isLargeIntegerTy(TT.isArch32Bit(), CI->getSrcTy()->getScalarType()) ||
          isLargeIntegerTy(TT.isArch32Bit(), CI->getDestTy()->getScalarType()))
        return true;
    } else if (isLargeIntegerTy(TT.isArch32Bit(),
                                J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::UDiv ||
                J->getOpcode() == Instruction::SDiv ||
                J->getOpcode() == Instruction::URem ||
                J->getOpcode() == Instruction::SRem)) {
      return true;
    } else if (TT.isArch32Bit() &&
               isLargeIntegerTy(false, J->getType()->getScalarType()) &&
               (J->getOpcode() == Instruction::Shl ||
                J->getOpcode() == Instruction::AShr ||
                J->getOpcode() == Instruction::LShr)) {
      // Only on PPC32, for 128-bit integers (specifically not 64-bit
      // integers), these might be runtime calls.
      return true;
    } else if (isa<IndirectBrInst>(J) || isa<InvokeInst>(J)) {
      // On PowerPC, indirect jumps use the counter register.
      return true;
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(J)) {
      if (!TM)
        return true;
      const TargetLowering *TLI =
          TM->getSubtargetImpl(*BB->getParent())->getTargetLowering();

      if (SI->getNumCases() + 1 >= (unsigned)TLI->getMinimumJumpTableEntries())
        return true;
    }

    // TM may be null if the pass was created without a target machine, so
    // guard the dereference here (the call paths above already check it).
    if (TM && TM->getSubtargetImpl(*BB->getParent())
                  ->getTargetLowering()
                  ->useSoftFloat()) {
      switch (J->getOpcode()) {
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
      case Instruction::FPTrunc:
      case Instruction::FPExt:
      case Instruction::FPToUI:
      case Instruction::FPToSI:
      case Instruction::UIToFP:
      case Instruction::SIToFP:
      case Instruction::FCmp:
        return true;
      }
    }

    for (Value *Operand : J->operands())
      if (memAddrUsesCTR(TM, Operand))
        return true;
  }

  return false;
}

bool PPCCTRLoops::convertToCTRLoop(Loop *L) {
  bool MadeChange = false;

  const Triple TT =
      Triple(L->getHeader()->getParent()->getParent()->getTargetTriple());
  if (!TT.isArch32Bit() && !TT.isArch64Bit())
    return MadeChange; // Unknown arch. type.

  // Process nested loops first.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    if (convertToCTRLoop(*I)) {
      MadeChange = true;
      DEBUG(dbgs() << "Nested loop converted\n");
    }

  // If a nested loop has been converted, then we can't convert this loop.
  if (MadeChange)
    return MadeChange;

#ifndef NDEBUG
  // Stop trying after reaching the limit (if any).
  int Limit = CTRLoopLimit;
  if (Limit >= 0) {
    if (Counter >= Limit)
      return false;
    Counter++;
  }
#endif

  // We don't want to spill/restore the counter register, and so we don't
  // want to use the counter register if the loop contains calls.
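  // mightUseCTR is conservative: it also rejects anything else that may be
  // lowered to a call or may otherwise clobber the CTR.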
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I)
    if (mightUseCTR(TT, *I))
      return MadeChange;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  BasicBlock *CountedExitBlock = nullptr;
  const SCEV *ExitCount = nullptr;
  BranchInst *CountedExitBranch = nullptr;
  for (SmallVectorImpl<BasicBlock *>::iterator I = ExitingBlocks.begin(),
       IE = ExitingBlocks.end(); I != IE; ++I) {
    const SCEV *EC = SE->getExitCount(L, *I);
    DEBUG(dbgs() << "Exit Count for " << *L << " from block " <<
                    (*I)->getName() << ": " << *EC << "\n");
    if (isa<SCEVCouldNotCompute>(EC))
      continue;
    if (const SCEVConstant *ConstEC = dyn_cast<SCEVConstant>(EC)) {
      if (ConstEC->getValue()->isZero())
        continue;
    } else if (!SE->isLoopInvariant(EC, L))
      continue;

    if (SE->getTypeSizeInBits(EC->getType()) > (TT.isArch64Bit() ? 64 : 32))
      continue;

    // We now have a loop-invariant count of loop iterations (which is not the
    // constant zero) for which we know that this loop will not exit via this
    // exiting block.

    // We need to make sure that this block will run on every loop iteration.
    // For this to be true, we must dominate all blocks with backedges. Such
    // blocks are in-loop predecessors to the header block.
    bool NotAlways = false;
    for (pred_iterator PI = pred_begin(L->getHeader()),
         PIE = pred_end(L->getHeader()); PI != PIE; ++PI) {
      if (!L->contains(*PI))
        continue;

      if (!DT->dominates(*I, *PI)) {
        NotAlways = true;
        break;
      }
    }

    if (NotAlways)
      continue;

    // Make sure this block ends with a conditional branch.
    Instruction *TI = (*I)->getTerminator();
    if (!TI)
      continue;

    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional())
        continue;

      CountedExitBranch = BI;
    } else
      continue;

    // Note that this block may not be the loop latch block, even if the loop
    // has a latch block.
    CountedExitBlock = *I;
    ExitCount = EC;
    break;
  }

  if (!CountedExitBlock)
    return MadeChange;

  BasicBlock *Preheader = L->getLoopPreheader();

  // If we don't have a preheader, then insert one. If we already have a
  // preheader, then we can use it (except if the preheader contains a use of
  // the CTR register because some such uses might be reordered by the
  // selection DAG after the mtctr instruction).
  if (!Preheader || mightUseCTR(TT, Preheader))
    Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
  if (!Preheader)
    return MadeChange;

  DEBUG(dbgs() << "Preheader for exit count: " << Preheader->getName()
               << "\n");

  // Insert the count into the preheader and replace the condition used by the
  // selected branch.
  MadeChange = true;

  SCEVExpander SCEVE(*SE, Preheader->getModule()->getDataLayout(), "loopcnt");
  LLVMContext &C = SE->getContext();
  Type *CountType = TT.isArch64Bit() ? Type::getInt64Ty(C) :
                                       Type::getInt32Ty(C);
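  // SCEV's exit count is the number of iterations before the exit is taken,
  // and bdnz decrements the CTR before testing it, so the CTR must be loaded
  // with exit count + 1 (widened to the CTR's width first if necessary).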
  if (!ExitCount->getType()->isPointerTy() &&
      ExitCount->getType() != CountType)
    ExitCount = SE->getZeroExtendExpr(ExitCount, CountType);
  ExitCount = SE->getAddExpr(ExitCount, SE->getOne(CountType));
  Value *ECValue =
      SCEVE.expandCodeFor(ExitCount, CountType, Preheader->getTerminator());

  IRBuilder<> CountBuilder(Preheader->getTerminator());
  Module *M = Preheader->getParent()->getParent();
  Value *MTCTRFunc = Intrinsic::getDeclaration(M, Intrinsic::ppc_mtctr,
                                               CountType);
  CountBuilder.CreateCall(MTCTRFunc, ECValue);

  IRBuilder<> CondBuilder(CountedExitBranch);
  Value *DecFunc =
      Intrinsic::getDeclaration(M, Intrinsic::ppc_is_decremented_ctr_nonzero);
  Value *NewCond = CondBuilder.CreateCall(DecFunc, {});
  Value *OldCond = CountedExitBranch->getCondition();
  CountedExitBranch->setCondition(NewCond);

  // The false branch must exit the loop.
  if (!L->contains(CountedExitBranch->getSuccessor(0)))
    CountedExitBranch->swapSuccessors();

  // The old condition may be dead now, and may have even created a dead PHI
  // (the original induction variable).
  RecursivelyDeleteTriviallyDeadInstructions(OldCond);
  DeleteDeadPHIs(CountedExitBlock);

  ++NumCTRLoops;
  return MadeChange;
}

#ifndef NDEBUG
static bool clobbersCTR(const MachineInstr &MI) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg()) {
      if (MO.isDef() && (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8))
        return true;
    } else if (MO.isRegMask()) {
      if (MO.clobbersPhysReg(PPC::CTR) || MO.clobbersPhysReg(PPC::CTR8))
        return true;
    }
  }

  return false;
}

static bool verifyCTRBranch(MachineBasicBlock *MBB,
                            MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator BI = I;
  SmallSet<MachineBasicBlock *, 16> Visited;
  SmallVector<MachineBasicBlock *, 8> Preds;
  bool CheckPreds;

  if (I == MBB->begin()) {
    Visited.insert(MBB);
    goto queue_preds;
  } else
    --I;

check_block:
  Visited.insert(MBB);
  if (I == MBB->end())
    goto queue_preds;

  CheckPreds = true;
  for (MachineBasicBlock::iterator IE = MBB->begin();; --I) {
    unsigned Opc = I->getOpcode();
    if (Opc == PPC::MTCTRloop || Opc == PPC::MTCTR8loop) {
      CheckPreds = false;
      break;
    }

    if (I != BI && clobbersCTR(*I)) {
      DEBUG(dbgs() << "BB#" << MBB->getNumber() << " (" <<
                      MBB->getFullName() << ") instruction " << *I <<
                      " clobbers CTR, invalidating " << "BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    if (I == IE)
      break;
  }

  if (!CheckPreds && Preds.empty())
    return true;

  if (CheckPreds) {
queue_preds:
    if (MachineFunction::iterator(MBB) == MBB->getParent()->begin()) {
      DEBUG(dbgs() << "Unable to find a MTCTR instruction for BB#" <<
                      BI->getParent()->getNumber() << " (" <<
                      BI->getParent()->getFullName() << ") instruction " <<
                      *BI << "\n");
      return false;
    }

    for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PIE = MBB->pred_end(); PI != PIE; ++PI)
      Preds.push_back(*PI);
  }

  do {
    MBB = Preds.pop_back_val();
    if (!Visited.count(MBB)) {
      I = MBB->getLastNonDebugInstr();
      goto check_block;
    }
  } while (!Preds.empty());
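  // Every path reaching the branch passed through an MTCTRloop/MTCTR8loop
  // with no intervening CTR clobber.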

  return true;
}

bool PPCCTRLoopsVerify::runOnMachineFunction(MachineFunction &MF) {
  MDT = &getAnalysis<MachineDominatorTree>();

  // Verify that all bdnz/bdz instructions are dominated by a loop mtctr before
  // any other instructions that might clobber the ctr register.
  for (MachineFunction::iterator I = MF.begin(), IE = MF.end();
       I != IE; ++I) {
    MachineBasicBlock *MBB = &*I;
    if (!MDT->isReachableFromEntry(MBB))
      continue;

    for (MachineBasicBlock::iterator MII = MBB->getFirstTerminator(),
         MIIE = MBB->end(); MII != MIIE; ++MII) {
      unsigned Opc = MII->getOpcode();
      if (Opc == PPC::BDNZ8 || Opc == PPC::BDNZ ||
          Opc == PPC::BDZ8  || Opc == PPC::BDZ)
        if (!verifyCTRBranch(MBB, MII))
          llvm_unreachable("Invalid PPC CTR loop!");
    }
  }

  return false;
}
#endif // NDEBUG