//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack.  Each Scalar ALU controls the operations of 64 Vector
/// ALUs.  The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU).  Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC    // Turn off the lanes that ran the IF block
/// S_CBRANCH_EXECZ label1             // Use our branch optimization
///                                    // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0  // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0     // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI, bool ExecModified);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void splitBlockLiveIns(const MachineBasicBlock &MBB,
                         const MachineInstr &MI,
                         MachineBasicBlock &LoopBB,
                         MachineBasicBlock &RemainderBB,
                         unsigned SaveReg,
                         const MachineOperand &IdxReg);

  void emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB, DebugLoc DL,
                              MachineInstr *MovRel,
                              const MachineOperand &IdxReg,
                              int Offset);

  bool loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  bool indirectSrc(MachineInstr &MI);
  bool indirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

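// These opcodes emit no machine code, so they cost nothing to "execute" with
// EXEC = 0 and do not count toward the skip threshold below.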
static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

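// Returns true if the code between From (inclusive) and To (exclusive) should
// be skipped with a branch when EXEC is zero: either it contains at least
// SkipThreshold real instructions, or it contains an S_CBRANCH_VCC* that must
// not be executed with all lanes disabled.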
bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {
  unsigned NumInstr = 0;
  MachineFunction *MF = From->getParent();

  for (MachineFunction::iterator MBBI(From), ToI(To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      if (++NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

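// Insert an S_CBRANCH_EXECZ before From that jumps to To, but only when
// shouldSkip decides the region in between is worth branching over.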
void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {
  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

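// After a kill the entire wavefront may be dead. When lowering a kill in a
// pixel shader, insert a check that exports a null pixel and terminates the
// wavefront as soon as EXEC reaches zero, rather than executing the rest of
// the shader with no active lanes.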
void SILowerControlFlow::SkipIfDead(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions.
  // (The branch offset is in dwords: EXP is two dwords, S_ENDPGM is one.)
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
          .addImm(0)
          .addImm(0x09) // V_008DFC_SQ_EXP_NULL
          .addImm(0)
          .addImm(1)
          .addImm(1)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

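// Lower SI_IF. Emits:
//   Reg = S_AND_SAVEEXEC_B64 Vcc   // Reg = EXEC; EXEC &= Vcc
//   Reg = S_XOR_B64 EXEC, Reg      // Reg = the lanes that take the ELSE path
// plus an optional S_CBRANCH_EXECZ that skips the THEN block when no lane
// is left active.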
void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
          .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH), Reg)
    .addOperand(MI.getOperand(2));

  MI.eraseFromParent();
}

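// Lower SI_ELSE. Emits, at the top of the block:
//   Dst = S_OR_SAVEEXEC_B64 Src    // Dst = EXEC; EXEC |= saved ELSE mask
// and before the pseudo:
//   EXEC = S_XOR_B64 EXEC, Dst     // turn off the lanes that ran the THEN side
// plus an optional S_CBRANCH_EXECZ that skips to the matching SI_END_CF.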
void SILowerControlFlow::Else(MachineInstr &MI, bool ExecModified) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
          .addReg(Src); // Saved EXEC

  if (ExecModified) {
    // Adjust the saved exec to account for the modifications during the flow
    // block that contains the ELSE. This can happen when WQM mode is switched
    // off.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64), Dst)
            .addReg(AMDGPU::EXEC)
            .addReg(Dst);
  }

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  // Insert a pseudo terminator to help keep the verifier happy.
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::SI_MASK_BRANCH), Dst)
    .addOperand(MI.getOperand(2));

  MI.eraseFromParent();
}

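// Lower SI_BREAK: OR the currently active lanes into the accumulated break
// mask, so the SI_LOOP at the loop bottom can turn them off:
//   Dst = S_OR_B64 EXEC, Src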
void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  MI.eraseFromParent();
}

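// Lower SI_IF_BREAK: add the lanes where the break condition (Vcc) holds to
// the accumulated break mask:
//   Dst = S_OR_B64 Vcc, Src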
void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Vcc)
          .addReg(Src);

  MI.eraseFromParent();
}

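// Lower SI_ELSE_BREAK: merge the break mask accumulated on the ELSE path
// with the one saved before it:
//   Dst = S_OR_B64 Saved, Src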
void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Saved)
          .addReg(Src);

  MI.eraseFromParent();
}

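// Lower SI_LOOP: turn off the lanes that have hit a break, then branch back
// to the loop header while any lane remains active:
//   EXEC = S_ANDN2_B64 EXEC, Src
//   S_CBRANCH_EXECNZ <loop header>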
void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

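// Lower SI_END_CF: re-enable the lanes that were saved and turned off at the
// matching SI_IF/SI_ELSE. The S_OR_B64 goes at the first non-PHI position so
// it runs before the rest of the join block.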
void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  MI.eraseFromParent();
}

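// An S_BRANCH to the immediately following block is a fall-through, so the
// branch can simply be deleted.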
void SILowerControlFlow::Branch(MachineInstr &MI) {
  MachineBasicBlock *MBB = MI.getOperand(0).getMBB();
  if (MBB == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

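// Lower SI_KILL: deactivate every lane whose kill operand is negative. A
// known-negative immediate clears EXEC outright; otherwise V_CMPX_LE_F32
// keeps only the lanes where 0 <= Op, updating EXEC as a side effect.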
void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  CallingConv::ID CallConv = MBB.getParent()->getFunction()->getCallingConv();
  // Kill is only allowed in pixel / geometry shaders.
  assert(CallConv == CallingConv::AMDGPU_PS ||
         CallConv == CallingConv::AMDGPU_GS);
#endif

  // Clear this thread from the exec mask if the operand is negative.
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
              .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
           .addImm(0)
           .addOperand(Op);
  }

  MI.eraseFromParent();
}

// All currently live registers must remain so in the remainder block.
void SILowerControlFlow::splitBlockLiveIns(const MachineBasicBlock &MBB,
                                           const MachineInstr &MI,
                                           MachineBasicBlock &LoopBB,
                                           MachineBasicBlock &RemainderBB,
                                           unsigned SaveReg,
                                           const MachineOperand &IdxReg) {
  LivePhysRegs RemainderLiveRegs(TRI);

  RemainderLiveRegs.addLiveOuts(MBB);
  for (MachineBasicBlock::const_reverse_iterator I = MBB.rbegin(), E(&MI);
       I != E; ++I) {
    RemainderLiveRegs.stepBackward(*I);
  }

  // Add reg defined in loop body.
  RemainderLiveRegs.addReg(SaveReg);

  if (const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val)) {
    if (!Val->isUndef()) {
      RemainderLiveRegs.addReg(Val->getReg());
      LoopBB.addLiveIn(Val->getReg());
    }
  }

  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  for (unsigned Reg : RemainderLiveRegs) {
    if (MRI.isAllocatable(Reg))
      RemainderBB.addLiveIn(Reg);
  }

  const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  if (!Src->isUndef())
    LoopBB.addLiveIn(Src->getReg());

  if (!IdxReg.isUndef())
    LoopBB.addLiveIn(IdxReg.getReg());
  LoopBB.sortUniqueLiveIns();
}

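// Emit the body of the waterfall loop that reads a per-lane index out of a
// VGPR: V_READFIRSTLANE_B32 pulls the index of the first remaining lane into
// M0, EXEC is restricted to the lanes sharing that index, MovRel runs for
// them, and those lanes are then removed from EXEC until none are left.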
void SILowerControlFlow::emitLoadM0FromVGPRLoop(MachineBasicBlock &LoopBB,
                                                DebugLoc DL,
                                                MachineInstr *MovRel,
                                                const MachineOperand &IdxReg,
                                                int Offset) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  // Read the next index value into VCC (lower 32 bits) <- also the loop target
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), AMDGPU::VCC_LO)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Move index from VCC into M0
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
    .addReg(AMDGPU::VCC_LO);

  // Compare the just read M0 value to all possible Idx values
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
    .addReg(AMDGPU::M0)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Update EXEC, save the original EXEC value to VCC
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
    .addReg(AMDGPU::VCC);

  if (Offset) {
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .addReg(AMDGPU::M0)
      .addImm(Offset);
  }

  // Do the actual move
  LoopBB.insert(I, MovRel);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(AMDGPU::VCC);

  // Loop back to V_READFIRSTLANE_B32 if there are still lanes left to handle
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);
}

// Returns true if a new block was inserted.
bool SILowerControlFlow::loadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  if (AMDGPU::SReg_32RegClass.contains(Idx->getReg())) {
    if (Offset) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()))
        .addImm(Offset);
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(Idx->getReg(), getUndefRegState(Idx->isUndef()));
    }

    MBB.insert(I, MovRel);
    MI.eraseFromParent();
    return false;
  }

  MachineFunction &MF = *MBB.getParent();
  MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  SaveOp->setIsDead(false);
  unsigned Save = SaveOp->getReg();

  // Reading from a VGPR requires looping over all workitems in the wavefront.
  assert(AMDGPU::SReg_64RegClass.contains(Save) &&
         AMDGPU::VGPR_32RegClass.contains(Idx->getReg()));

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), Save)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF.insert(MBBI, LoopBB);
  MF.insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  splitBlockLiveIns(MBB, MI, *LoopBB, *RemainderBB, Save, *Idx);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessors(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  emitLoadM0FromVGPRLoop(*LoopBB, DL, MovRel, *Idx, Offset);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(Save);

  MI.eraseFromParent();
  return true;
}

/// \param VecReg The register which holds element zero of the vector being
///               addressed into.
/// \param[out] Reg The base register to use in the indirect addressing
///                 instruction.
/// \param[in,out] Offset As an input, this is the constant offset part of the
///                       indirect index, e.g. v0 = v[VecReg + Offset].
///                       As an output, this is a constant value that needs
///                       to be added to the value stored in M0.
void SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg,
                                                     unsigned &Reg,
                                                     int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

// Return true if a new block was inserted.
bool SILowerControlFlow::indirectSrc(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  int Off = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(SrcVec->getReg(), Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
    .addReg(Reg, getUndefRegState(SrcVec->isUndef()))
    .addReg(SrcVec->getReg(), RegState::Implicit);

  return loadM0(MI, MovRel, Off);
}

// Return true if a new block was inserted.
bool SILowerControlFlow::indirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
    .addReg(Reg, RegState::Define)
    .addReg(Val->getReg(), getUndefRegState(Val->isUndef()))
    .addReg(Dst, RegState::Implicit);

  return loadM0(MI, MovRel, Off);
}

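// Lower all control-flow and indirect-addressing pseudos in the function,
// tracking the control-flow nesting depth so kills can be resolved at the
// right point.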
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  MachineFunction::iterator NextBB;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock::iterator I, Next;
    bool ExecModified = false;

    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      for (const auto &Def : I->defs()) {
        if (Def.isReg() && Def.isDef() && Def.getReg() == AMDGPU::EXEC) {
          ExecModified = true;
          break;
        }
      }

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI, ExecModified);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC_V1:
        case AMDGPU::SI_INDIRECT_SRC_V2:
        case AMDGPU::SI_INDIRECT_SRC_V4:
        case AMDGPU::SI_INDIRECT_SRC_V8:
        case AMDGPU::SI_INDIRECT_SRC_V16:
          if (indirectSrc(MI)) {
            // The block was split at this point. We can safely skip the newly
            // inserted loop block and continue with the following block, which
            // contains the rest of this block's instructions.
            NextBB = std::next(BI);
            BE = MF.end();
            Next = MBB.end();
          }

          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          if (indirectDst(MI)) {
            // The block was split at this point. We can safely skip the newly
            // inserted loop block and continue with the following block, which
            // contains the rest of this block's instructions.
            NextBB = std::next(BI);
            BE = MF.end();
            Next = MBB.end();
          }

          break;

        case AMDGPU::S_ENDPGM: {
          if (MFI->returnsVoid())
            break;

          // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
          // because external bytecode will be appended at the end.
          if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
            // S_ENDPGM is not the last instruction. Add an empty block at
            // the end and jump there.
            if (!EmptyMBBAtEnd) {
              EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
              MF.insert(MF.end(), EmptyMBBAtEnd);
            }

            MBB.addSuccessor(EmptyMBBAtEnd);
            BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
                    .addMBB(EmptyMBBAtEnd);
          }

          I->eraseFromParent();
          break;
        }
      }
    }
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair.
    MFI->setHasFlatInstructions(true);
  }

  return true;
}