//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machine instructions combined");

static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions than this threshold."),
              cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted instructions"),
                                cl::init(false));

#ifdef EXPENSIVE_CHECKS
static constexpr bool VerifyPatternOrderDefault = true;
#else
static constexpr bool VerifyPatternOrderDefault = false;
#endif

static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(VerifyPatternOrderDefault));

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
} // end anonymous namespace

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHI's have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}
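
// In sketch form, the recurrence computed by getDepth() below is, for each
// inserted instruction I:
//
//   Depth(I) = max over use operands U of I:
//                Depth(Def(U)) + OperandLatency(Def(U), I)
//
// where Def(U) is found either among the other inserted instructions (via the
// InstrIdxForVirtReg map) or in the existing block trace.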
/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map the depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}
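
// getLatency() below estimates NewRoot's latency per def operand by
// inspecting only a single user of that def: if that user is a dependence
// inside Root's trace, the operand latency to it is used; otherwise the
// generic instruction latency of NewRoot is used. This keeps the estimate
// cheap at the cost of ignoring any further users.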
/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute a more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO. Operands are added at the front
    // of a register's use/def chain, so reg_begin() points at NewRoot's own
    // definition of MO; step past it to reach a user.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth, // The data dependency chain must be improved.
  Default          // The critical path must not be lengthened.
};

static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

/// Estimate the latency of the new and original instruction sequences by
/// summing up the latencies of the inserted and deleted instructions. This
/// assumes that the inserted and deleted instructions are dependent
/// instruction chains, which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto *I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}
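
// Summarizing the acceptance test implemented by improvesCriticalPathLen()
// below:
//
//   MustReduceDepth patterns:  NewRootDepth < RootDepth
//   all other patterns:        NewRootDepth + NewRootLatency
//                                <= RootDepth + RootLatency (+ RootSlack)
//
// where RootSlack only participates when the trace data is known to be
// accurate (SlackIsAccurate).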
/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. The definition of "improve" may be restricted by specifying that the
/// new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t and it does it\n"
                   : dbgs() << "\t but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.

  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
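  // Worked example with made-up numbers: NewRootDepth = 6 and
  // NewRootLatency = 4 give NewCycleCount = 10; RootDepth = 5,
  // RootLatency = 6, and RootSlack = 1 (with SlackIsAccurate) give
  // OldCycleCount = 12. Since 10 <= 12, the substitution is accepted.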
  LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount;);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
                 ? dbgs() << "\n\t It IMPROVES PathLen because"
                 : dbgs() << "\n\t It DOES NOT improve PathLen because");
  LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
                    << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}

/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}

/// True when the new instructions do not increase resource length.
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
                    << ResLenBeforeCombine
                    << " and after: " << ResLenAfterCombine << "\n";);
  LLVM_DEBUG(
      ResLenAfterCombine <= ResLenBeforeCombine
          ? dbgs() << "\t\t As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <= ResLenBeforeCombine;
}
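
// Illustrative scenario (made-up) for doSubstitute() below: when optimizing
// for size, a pattern that replaces {MUL, ADD} (OldSize = 2) with a single
// {MADD} (NewSize = 1) satisfies NewSize < OldSize and is substituted without
// consulting the latency-based checks. The same happens whenever the target
// provides no scheduling model or itineraries to evaluate latency with.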
/// \returns true when the new instruction sequence should be generated,
/// independent of whether it lengthens the critical path or not.
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
  if (OptSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}

/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
/// depths if requested.
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
/// otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVectorImpl<MachineInstr *> &InsInstrs,
                                     SmallVectorImpl<MachineInstr *> &DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     bool IncrementalUpdate) {
  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}

// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found pattern, but did not generate alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}
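
// Note: with the default cl::opt settings, verifyPatternOrder() above only
// runs in EXPENSIVE_CHECKS builds (or when
// -machine-combiner-verify-pattern-order is passed explicitly), since it
// regenerates and measures an alternative sequence for every pattern of every
// candidate root instruction.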
/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2  Other
    //      \    /               \      |    /
    //      ADD/SUB      =>        MADD/MSUB
    //      (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.

    if (!TII->getMachineCombinerPatterns(MI, Patterns))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found pattern, but did not generate alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
      if (!NewInstCount)
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;
      if (IncrementalUpdate) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      // Substitute when we optimize for code size and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways || doSubstitute(NewInstCount, OldInstCount)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Cleanup instructions of the alternative code sequence. There is no
        // use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->DeleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;
  OptSize = MF.getFunction().hasOptSize();

  LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    LLVM_DEBUG(
        dbgs()
        << " Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}