//===- MCSchedule.cpp - Scheduling ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the default scheduling model.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <optional>
#include <type_traits>

using namespace llvm;

static_assert(std::is_trivial_v<MCSchedModel>,
              "MCSchedModel is required to be a trivial type");
const MCSchedModel MCSchedModel::Default = {DefaultIssueWidth,
                                            DefaultMicroOpBufferSize,
                                            DefaultLoopMicroOpBufferSize,
                                            DefaultLoadLatency,
                                            DefaultHighLatency,
                                            DefaultMispredictPenalty,
                                            false,
                                            true,
                                            /*EnableIntervals=*/false,
                                            0,
                                            nullptr,
                                            nullptr,
                                            0,
                                            0,
                                            nullptr,
                                            nullptr};

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  int Latency = 0;
  for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
       DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI.getWriteLatencyEntry(&SCDesc, DefIdx);
    // Early exit if we found an invalid latency.
    if (WLEntry->Cycles < 0)
      return WLEntry->Cycles;
    Latency = std::max(Latency, static_cast<int>(WLEntry->Cycles));
  }
  return Latency;
}

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      unsigned SchedClass) const {
  const MCSchedClassDesc &SCDesc = *getSchedClassDesc(SchedClass);
  if (!SCDesc.isValid())
    return 0;
  if (!SCDesc.isVariant())
    return MCSchedModel::computeInstrLatency(STI, SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  return MCSchedModel::computeInstrLatency<MCSubtargetInfo, MCInstrInfo,
                                           InstrItineraryData, MCInst>(
      STI, MCII, Inst,
      [&](const MCSchedClassDesc *SCDesc) -> const MCSchedClassDesc * {
        if (!SCDesc->isValid())
          return nullptr;

        unsigned CPUID = getProcessorID();
        unsigned SchedClass = 0;
        while (SCDesc->isVariant()) {
          SchedClass =
              STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
          SCDesc = getSchedClassDesc(SchedClass);
        }

        if (!SchedClass) {
          assert(false && "unsupported variant scheduling class");
          return nullptr;
        }

        return SCDesc;
      });
}

double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  std::optional<double> Throughput;
  const MCSchedModel &SM = STI.getSchedModel();
  const MCWriteProcResEntry *I = STI.getWriteProcResBegin(&SCDesc);
  const MCWriteProcResEntry *E = STI.getWriteProcResEnd(&SCDesc);
  for (; I != E; ++I) {
    if (!I->ReleaseAtCycle)
      continue;
    unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
    double Temp = NumUnits * 1.0 / I->ReleaseAtCycle;
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / *Throughput;

  // If no throughput value was calculated, assume that we can execute at the
  // maximum issue width scaled by number of micro-ops for the schedule class.
  return ((double)SCDesc.NumMicroOps) / SM.IssueWidth;
}

double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);

  // If there's no valid class, assume that the instruction executes/completes
  // at the maximum issue width.
  if (!SCDesc->isValid())
    return 1.0 / IssueWidth;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::getReciprocalThroughput(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}

double
MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
                                      const InstrItineraryData &IID) {
  std::optional<double> Throughput;
  const InstrStage *I = IID.beginStage(SchedClass);
  const InstrStage *E = IID.endStage(SchedClass);
  for (; I != E; ++I) {
    if (!I->getCycles())
      continue;
    double Temp = llvm::popcount(I->getUnits()) * 1.0 / I->getCycles();
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / *Throughput;

  // If there are no execution resources specified for this class, then assume
  // that it can execute at the maximum default issue width.
  return 1.0 / DefaultIssueWidth;
}

unsigned
MCSchedModel::getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                       unsigned WriteResourceID) {
  if (Entries.empty())
    return 0;

  int DelayCycles = 0;
  for (const MCReadAdvanceEntry &E : Entries) {
    if (E.WriteResourceID != WriteResourceID)
      continue;
    DelayCycles = std::min(DelayCycles, E.Cycles);
  }

  return std::abs(DelayCycles);
}
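
// Usage sketch (illustrative only, not part of the API surface defined here):
// a caller holding a populated MCSubtargetInfo `STI`, MCInstrInfo `MCII`, and a
// decoded MCInst `Inst` can query the model like so, with variant scheduling
// classes resolved internally before the per-class tables are consulted:
//
//   const MCSchedModel &SM = STI.getSchedModel();
//   int Latency = SM.computeInstrLatency(STI, MCII, Inst);
//   double RThroughput = SM.getReciprocalThroughput(STI, MCII, Inst);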