//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machine instructions combined");

static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions than this threshold."),
              cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted instrs"),
                                cl::init(false));

#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
}

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}
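
// A rough sketch of the recurrence implemented by getDepth() below; the cycle
// counts are made up for illustration and do not come from any real target:
//
//   Depth(I) = max over vreg uses U of (Depth(Def(U)) + Latency(Def(U) -> I))
//
// e.g. an instruction with one operand defined at depth 1 with operand
// latency 3 and another defined at depth 4 with latency 2 gets
// Depth = max(1 + 3, 4 + 2) = 6 cycles.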

/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map depth is looked up in InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}
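
// Note on the latency computed by getLatency() below: for each register
// defined by NewRoot it uses the operand latency to the first user of that
// register when that user is a dependence inside the trace, and falls back to
// the plain instruction latency otherwise; the result is the maximum over all
// defs. This is a heuristic sketch of the real cost, since later users of the
// same register are not inspected.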

/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute a more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth, // The data dependency chain must be improved.
  Default          // The critical path must not be lengthened.
};

static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

/// Estimate the latency of the new and original instruction sequences by
/// summing up the latencies of the inserted and deleted instructions. This
/// assumes that the inserted and deleted instructions are dependent
/// instruction chains, which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}
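
// For Default-objective patterns, the acceptance test in
// improvesCriticalPathLen() below boils down to
//
//   NewRootDepth + NewRootLatency <= RootDepth + RootLatency (+ RootSlack)
//
// with the slack only added when it is known to be accurate. Worked example
// with made-up cycle counts (not from a real target): NewRootDepth = 6 and
// NewRootLatency = 2 give a new cycle count of 8; RootDepth = 5,
// RootLatency = 2 and RootSlack = 1 give an old cycle count of 8, so the
// substitution is accepted even though the new sequence is one cycle deeper.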

/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. The definition of "improve" may be restricted by specifying that the
/// new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "
               << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    DEBUG(dbgs() << "\tIt MustReduceDepth ");
    DEBUG(NewRootDepth < RootDepth ? dbgs() << "\t and it does it\n"
                                   : dbgs() << "\t but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.

  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency << "\tRootLatency: "
               << RootLatency << "\n\tRootSlack: " << RootSlack
               << " SlackIsAccurate=" << SlackIsAccurate
               << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
               << "\n\tRootDepth + RootLatency + RootSlack = "
               << OldCycleCount;);
  DEBUG(NewCycleCount <= OldCycleCount
            ? dbgs() << "\n\t It IMPROVES PathLen because"
            : dbgs() << "\n\t It DOES NOT improve PathLen because");
  DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
               << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}

/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}
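
// Resource length is the trace's lower bound on execution time that comes
// from functional unit contention rather than from data dependencies.
// preservesResourceLen() below recomputes that bound with the scheduling
// classes of the inserted instructions added and those of the deleted
// instructions removed, and rejects the substitution if the bound grows.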

/// True when the new instructions do not increase resource length.
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  DEBUG(dbgs() << "\t\tResource length before replacement: "
               << ResLenBeforeCombine << " and after: " << ResLenAfterCombine
               << "\n";);
  DEBUG(
      ResLenAfterCombine <= ResLenBeforeCombine
          ? dbgs() << "\t\t As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <= ResLenBeforeCombine;
}

/// \returns true when the new instruction sequence should be generated
/// independent of whether it lengthens the critical path or not.
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
  if (OptSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}

/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
/// depths if requested.
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
///        otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVector<MachineInstr *, 16> InsInstrs,
                                     SmallVector<MachineInstr *, 16> DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     bool IncrementalUpdate) {
  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}
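
// Example of an ordering bug the verification below catches (hypothetical
// latencies): if a pattern saving 1 cycle (RootLatency 4 vs. NewRootLatency 3)
// is generated after a pattern saving 0 cycles (4 vs. 4), the latency
// difference increases from 0 to 1 and the assertion fires, flagging the
// order returned by getMachineCombinerPatterns() as sub-optimal.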

// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found pattern, but did not generate alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}

/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2  Other
    //      \    /               \      |    /
    //      ADD/SUB      =>        MADD/MSUB
    //      (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) is met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.
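
    // In short, the decision for each generated alternative sequence is:
    // substitute unconditionally for throughput patterns inside loops or when
    // doSubstitute() returns true (optimizing for size with a shorter
    // sequence, or no scheduling model available); otherwise substitute only
    // if the critical path does not lengthen and the resource length is
    // preserved.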

    if (!TII->getMachineCombinerPatterns(MI, Patterns))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found pattern, but did not generate alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
      if (!NewInstCount)
        continue;

      DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs) {
          dbgs() << "\t\t" << STI->getSchedInfoStr(*InstrPtr) << ": ";
          InstrPtr->print(dbgs(), false, false, false, TII);
        }
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs) {
          dbgs() << "\t\t" << STI->getSchedInfoStr(*InstrPtr) << ": ";
          InstrPtr->print(dbgs(), false, false, false, TII);
        }
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;

      if (IncrementalUpdate) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      // Substitute when we optimize for code size and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways || doSubstitute(NewInstCount, OldInstCount)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Cleanup instructions of the alternative code sequence. There is no
        // use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->DeleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(SchedModel, STI, TII);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;
  OptSize = MF.getFunction().optForSize();

  DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    DEBUG(dbgs()
          << " Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}