//===- MacroFusion.cpp - Macro Fusion -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains the implementation of the DAG scheduling mutation
/// to pair instructions back to back.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MacroFusion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "machine-scheduler"

STATISTIC(NumFused, "Number of instr pairs fused");

using namespace llvm;

static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
    cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static bool isHazard(const SDep &Dep) {
  return Dep.getKind() == SDep::Anti || Dep.getKind() == SDep::Output;
}

static SUnit *getPredClusterSU(const SUnit &SU) {
  for (const SDep &SI : SU.Preds)
    if (SI.isCluster())
      return SI.getSUnit();

  return nullptr;
}

static bool hasLessThanNumFused(const SUnit &SU, unsigned FuseLimit) {
  unsigned Num = 1;
  const SUnit *CurrentSU = &SU;
  while ((CurrentSU = getPredClusterSU(*CurrentSU)) && Num < FuseLimit)
    Num++;
  return Num < FuseLimit;
}

static bool fuseInstructionPair(ScheduleDAGInstrs &DAG, SUnit &FirstSU,
                                SUnit &SecondSU) {
  // Check that neither instr is already paired with another along the edge
  // between them.
  for (SDep &SI : FirstSU.Succs)
    if (SI.isCluster())
      return false;

  for (SDep &SI : SecondSU.Preds)
    if (SI.isCluster())
      return false;
  // Though the reachability checks above could be made more generic,
  // perhaps as part of ScheduleDAGInstrs::addEdge(), since such edges are
  // valid, the extra computation cost makes it less interesting in general
  // cases.

  // Create a single weak edge between the adjacent instrs. The only effect is
  // to cause bottom-up scheduling to heavily prioritize the clustered instrs.
  if (!DAG.addEdge(&SecondSU, SDep(&FirstSU, SDep::Cluster)))
    return false;

  // Adjust the latency between both instrs.
  for (SDep &SI : FirstSU.Succs)
    if (SI.getSUnit() == &SecondSU)
      SI.setLatency(0);

  for (SDep &SI : SecondSU.Preds)
    if (SI.getSUnit() == &FirstSU)
      SI.setLatency(0);

  LLVM_DEBUG(
      dbgs() << "Macro fuse: "; DAG.dumpNodeName(FirstSU); dbgs() << " - ";
      DAG.dumpNodeName(SecondSU); dbgs() << " / ";
      dbgs() << DAG.TII->getName(FirstSU.getInstr()->getOpcode()) << " - "
             << DAG.TII->getName(SecondSU.getInstr()->getOpcode()) << '\n';);

  // Make data dependencies from the FirstSU also dependent on the SecondSU to
  // prevent them from being scheduled between the FirstSU and the SecondSU.
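  // For example, if FirstSU has a data successor X that is not already a
  // predecessor of SecondSU, an artificial edge from SecondSU to X is added
  // below, so the scheduler cannot place X between the fused pair.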
  if (&SecondSU != &DAG.ExitSU)
    for (const SDep &SI : FirstSU.Succs) {
      SUnit *SU = SI.getSUnit();
      if (SI.isWeak() || isHazard(SI) ||
          SU == &DAG.ExitSU || SU == &SecondSU || SU->isPred(&SecondSU))
        continue;
      LLVM_DEBUG(dbgs() << "  Bind "; DAG.dumpNodeName(SecondSU);
                 dbgs() << " - "; DAG.dumpNodeName(*SU); dbgs() << '\n';);
      DAG.addEdge(SU, SDep(&SecondSU, SDep::Artificial));
    }

  // Make the FirstSU also dependent on the dependencies of the SecondSU to
  // prevent them from being scheduled between the FirstSU and the SecondSU.
  if (&FirstSU != &DAG.EntrySU) {
    for (const SDep &SI : SecondSU.Preds) {
      SUnit *SU = SI.getSUnit();
      if (SI.isWeak() || isHazard(SI) || &FirstSU == SU || FirstSU.isSucc(SU))
        continue;
      LLVM_DEBUG(dbgs() << "  Bind "; DAG.dumpNodeName(*SU); dbgs() << " - ";
                 DAG.dumpNodeName(FirstSU); dbgs() << '\n';);
      DAG.addEdge(&FirstSU, SDep(SU, SDep::Artificial));
    }
    // ExitSU comes last by design, which acts like an implicit dependency
    // between ExitSU and any bottom root in the graph. We should transfer
    // this to FirstSU as well.
    if (&SecondSU == &DAG.ExitSU) {
      for (SUnit &SU : DAG.SUnits) {
        if (SU.Succs.empty())
          DAG.addEdge(&FirstSU, SDep(&SU, SDep::Artificial));
      }
    }
  }

  ++NumFused;
  return true;
}

namespace {

/// Post-process the DAG to create cluster edges between instrs that may
/// be fused by the processor into a single operation.
class MacroFusion : public ScheduleDAGMutation {
  ShouldSchedulePredTy shouldScheduleAdjacent;
  bool FuseBlock;
  bool scheduleAdjacentImpl(ScheduleDAGInstrs &DAG, SUnit &AnchorSU);

public:
  MacroFusion(ShouldSchedulePredTy shouldScheduleAdjacent, bool FuseBlock)
      : shouldScheduleAdjacent(shouldScheduleAdjacent), FuseBlock(FuseBlock) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;
};

} // end anonymous namespace

void MacroFusion::apply(ScheduleDAGInstrs *DAG) {
  if (FuseBlock)
    // For each of the SUnits in the scheduling block, try to fuse the instr in
    // it with one in its predecessors.
    for (SUnit &ISU : DAG->SUnits)
      scheduleAdjacentImpl(*DAG, ISU);

  if (DAG->ExitSU.getInstr())
    // Try to fuse the instr in the ExitSU with one in its predecessors.
    scheduleAdjacentImpl(*DAG, DAG->ExitSU);
}

/// Implement the fusion of instr pairs in the scheduling DAG,
/// anchored at the instr in AnchorSU.
bool MacroFusion::scheduleAdjacentImpl(ScheduleDAGInstrs &DAG,
                                       SUnit &AnchorSU) {
  const MachineInstr &AnchorMI = *AnchorSU.getInstr();
  const TargetInstrInfo &TII = *DAG.TII;
  const TargetSubtargetInfo &ST = DAG.MF.getSubtarget();

  // Check if the anchor instr may be fused.
  if (!shouldScheduleAdjacent(TII, ST, nullptr, AnchorMI))
    return false;

  // Explore fusion candidates among the dependencies of the anchor instr.
  for (SDep &Dep : AnchorSU.Preds) {
    // Ignore dependencies other than data or strong ordering.
    if (Dep.isWeak() || isHazard(Dep))
      continue;

    SUnit &DepSU = *Dep.getSUnit();
    if (DepSU.isBoundaryNode())
      continue;

    // Only chain two instructions together at most.
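    // hasLessThanNumFused walks the chain of existing cluster predecessors of
    // the candidate to enforce that limit before querying the target predicate.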
    const MachineInstr *DepMI = DepSU.getInstr();
    if (!hasLessThanNumFused(DepSU, 2) ||
        !shouldScheduleAdjacent(TII, ST, DepMI, AnchorMI))
      continue;

    if (fuseInstructionPair(DAG, DepSU, AnchorSU))
      return true;
  }

  return false;
}

std::unique_ptr<ScheduleDAGMutation>
llvm::createMacroFusionDAGMutation(
    ShouldSchedulePredTy shouldScheduleAdjacent) {
  if (EnableMacroFusion)
    return std::make_unique<MacroFusion>(shouldScheduleAdjacent, true);
  return nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
llvm::createBranchMacroFusionDAGMutation(
    ShouldSchedulePredTy shouldScheduleAdjacent) {
  if (EnableMacroFusion)
    return std::make_unique<MacroFusion>(shouldScheduleAdjacent, false);
  return nullptr;
}
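
// Illustrative usage (a sketch, not part of this file): a target's
// createMachineScheduler hook typically registers this mutation together with
// a target-specific predicate. The predicate name below is a hypothetical
// placeholder; real targets supply their own shouldScheduleAdjacent callbacks.
//
//   // Return true if FirstMI and SecondMI may be fused back to back. FirstMI
//   // is null when the scheduler only asks whether SecondMI can act as the
//   // anchor of a fused pair.
//   static bool shouldScheduleAdjacentExample(const TargetInstrInfo &TII,
//                                             const TargetSubtargetInfo &STI,
//                                             const MachineInstr *FirstMI,
//                                             const MachineInstr &SecondMI);
//
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(
//       createMacroFusionDAGMutation(shouldScheduleAdjacentExample));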