//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// R600 Machine Scheduler interface
//
//===----------------------------------------------------------------------===//

#include "R600MachineScheduler.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  const R600Subtarget &ST = DAG->MF.getSubtarget<R600Subtarget>();
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  VLIW5 = !ST.hasCaymanISA();
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
  OccupedSlotsMask = 31;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
  AluInstCount = 0;
  FetchInstCount = 0;
}

void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}

static unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
  assert(GPRCount && "GPRCount cannot be 0");
  return 248 / GPRCount;
}

SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = nullptr;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check if we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
    // We use the heuristic provided by the AMD Accelerated Parallel Processing
    // OpenCL Programming Guide:
    // The approx. number of WF that allows TEX instructions to hide ALU
    // instructions is:
    //   500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
    float ALUFetchRationEstimate =
        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
        (FetchInstCount + Available[IDFetch].size());
    if (ALUFetchRationEstimate == 0) {
      AllowSwitchFromAlu = true;
    } else {
      unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
      LLVM_DEBUG(dbgs() << NeededWF << " approx. Wavefronts Required\n");
      // We assume the local GPR requirements to be "dominated" by the
      // requirement of the TEX clause (which consumes 128-bit regs); ALU
      // instructions before and after the TEX clause are indeed likely to
      // consume or generate values from/for it.
      // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause.
      // We assume that fetch instructions are either TnXYZW = TEX TnXYZW
      // (need one GPR) or TmXYZW = TnXYZW (need 2 GPRs).
      // (TODO: use RegisterPressure)
      // If we are going to use too many GPRs, we flush the Fetch instructions
      // to lower register pressure on 128-bit regs.
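      // Illustration (example numbers, not from the guide): with 8 fetch
      // instructions available, NearRegisterRequirement is 16 GPRs, which
      // limits us to 248 / 16 = 15 wavefronts; if NeededWF exceeds that we
      // allow switching away from the ALU clause.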
      unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
      if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
        AllowSwitchFromAlu = true;
    }
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
              (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // Try to pick an ALU instruction.
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // Try to pick a FETCH instruction.
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // Try to pick any other instruction.
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  LLVM_DEBUG(if (SU) {
    dbgs() << " ** Pick node **\n";
    SU->dump(DAG);
  } else {
    dbgs() << "NO NODE \n";
    for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
      const SUnit &S = DAG->SUnits[i];
      if (!S.isScheduled)
        S.dump(DAG);
    }
  });

  return SU;
}

void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    LLVM_DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask |= 31;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    AluInstCount++;
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
           E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        // Inline literals count toward the number of emitted instructions.
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  LLVM_DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  } else
    FetchInstCount++;
}

static bool
isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != AMDGPU::COPY)
    return false;

  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  LLVM_DEBUG(dbgs() << "Top Releasing "; SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  LLVM_DEBUG(dbgs() << "Bottom Releasing "; SU->dump(DAG););
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // There is no export clause; we can schedule one as soon as it's ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}

R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  if (TII->isTransOnly(*MI))
    return AluTrans;

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
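    // All of these are treated as occupying the four vector slots of the
    // instruction group.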
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
  default:
    break;
  }

  // Does the instruction take a whole IG?
  // XXX: Is it possible to add a helper function in R600InstrInfo that can
  // be used here and in R600PacketizerList::isSoloInstruction()?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
      MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
    return AluT_XYZW;
  }

  if (TII->isLDSInstr(MI->getOpcode())) {
    return AluT_X;
  }

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  // LDS src registers cannot be used in the Trans slot.
  if (TII->readsLDSSrcReg(*MI))
    return AluT_XYZW;

  return AluAny;
}

int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}

SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
  if (Q.empty())
    return nullptr;
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
       It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate) &&
        (!AnyALU || !TII->isVectorOnly(*SU->getInstr()))) {
      InstructionsGroupCandidate.pop_back();
      Q.erase((It + 1).base());
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return nullptr;
}

void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  LLVM_DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
//  if (HwGen == R600Subtarget::NORTHERN_ISLANDS)
//    OccupedSlotsMask |= 16;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
  if (DstIndex == -1) {
    return;
  }
  unsigned DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is def and used in the same inst
  // and we try to constrain its regclass.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
       E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the regclass of DestReg to assign it to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}

SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

unsigned R600SchedStrategy::AvailablesAluCount() const {
  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
      AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
      AvailableAlus[AluPredX].size();
}

SUnit* R600SchedStrategy::pickAlu() {
  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
    if (!OccupedSlotsMask) {
      // Bottom-up scheduling: predX must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluPredX], false);
      }
      // Flush physical reg copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluDiscarded], false);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask |= 15;
        return PopInst(AvailableAlus[AluT_XYZW], false);
      }
    }
    bool TransSlotOccuped = OccupedSlotsMask & 16;
    if (!TransSlotOccuped && VLIW5) {
      if (!AvailableAlus[AluTrans].empty()) {
        OccupedSlotsMask |= 16;
        return PopInst(AvailableAlus[AluTrans], false);
      }
      SUnit *SU = AttemptFillSlot(3, true);
      if (SU) {
        OccupedSlotsMask |= 16;
        return SU;
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan, false);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return nullptr;
}

SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = nullptr;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}
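
// Note: this strategy is not a standalone pass; it is plugged into the
// generic MachineScheduler by wrapping it in a ScheduleDAGMILive. The exact
// factory and registration live in the AMDGPU target machine code; the
// sketch below is illustrative only:
//
//   static ScheduleDAGInstrs *
//   createR600MachineScheduler(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
//   }
//
//   static MachineSchedRegistry R600SchedRegistry("r600",
//       "Run R600's custom scheduler", createR600MachineScheduler);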