//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack.  Each Scalar ALU controls the operations of 64 Vector
/// ALUs.  The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU).  Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits wide, one bit
/// per Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0 // Save the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Restrict EXEC to the ELSE lanes
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
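///
/// Loops are lowered in the same style (a sketch based on the Loop() and
/// Break() lowerings below; register names are illustrative):
///
/// %SGPR0 = S_OR_B64 %EXEC, %SGPR0   // SI_BREAK: record lanes leaving the loop
/// %EXEC = S_ANDN2_B64 %EXEC, %SGPR0 // SI_LOOP: disable the exited lanes
/// S_CBRANCH_EXECNZ label_loop       // Repeat while any lane is still active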
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

#define DEBUG_TYPE "si-lower-control-flow"

namespace {

class SILowerControlFlow : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  static char ID;

  SILowerControlFlow() :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow pseudo instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace

char SILowerControlFlow::ID = 0;

INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
                "SI lower control flow", false, false)

char &llvm::SILowerControlFlowPassID = SILowerControlFlow::ID;

FunctionPass *llvm::createSILowerControlFlowPass() {
  return new SILowerControlFlow();
}

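// Estimate whether it is profitable to insert a skip over [From, To):
// returns true when the region contains at least SkipThreshold instructions,
// or a uniform loop branch (S_CBRANCH_VCCNZ) that must not execute while
// EXEC = 0.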
bool SILowerControlFlow::shouldSkip(MachineBasicBlock *From,
                                    MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled()) {
        // When a uniform loop is inside non-uniform control flow, the branch
        // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
        // when EXEC = 0. We should skip the loop lest it becomes infinite.
        if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ)
          return true;

        if (++NumInstr >= SkipThreshold)
          return true;
      }
    }
  }

  return false;
}

void SILowerControlFlow::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

void SILowerControlFlow::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
          .addImm(0)
          .addImm(0x09) // V_008DFC_SQ_EXP_NULL
          .addImm(0)
          .addImm(1)
          .addImm(1)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

void SILowerControlFlow::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
          .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlow::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

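  // Emit at the top of the block (after any PHIs) so the exec update for
  // the else region happens before anything else in this block executes.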
  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
          .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

void SILowerControlFlow::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Vcc)
          .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Saved)
          .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlow::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

void SILowerControlFlow::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

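  // Re-enable the lanes saved at the matching SI_IF/SI_ELSE, at the top of
  // the join block so they are restored before its instructions run.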
  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlow::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

void SILowerControlFlow::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  const SIMachineFunctionInfo *MFI
    = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  // Kill is only allowed in pixel / geometry shaders.
  assert(MFI->getShaderType() == ShaderType::PIXEL ||
         MFI->getShaderType() == ShaderType::GEOMETRY);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
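    // Bit 31 is the IEEE-754 sign bit, so this fires for any immediate
    // whose bit pattern is negative, including -0.0.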
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
              .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
           .addImm(0)
           .addOperand(Op);
  }

  MI.eraseFromParent();
}

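// Load the index operand into M0 and insert MovRel. A uniform (SGPR) index
// needs only a move/add; a divergent (VGPR) index requires a "waterfall"
// loop. A sketch of the divergent case (the branch offset is in instruction
// words and must match the sequence actually emitted below):
//
//   S_MOV_B64 Save, EXEC
// loop:
//   V_READFIRSTLANE_B32 VCC_LO, Idx  // pick the index of the first lane
//   S_MOV_B32 M0, VCC_LO
//   V_CMP_EQ_U32 VCC, M0, Idx        // select all lanes sharing that index
//   S_AND_SAVEEXEC_B64 VCC, VCC      // run only those lanes
//   S_ADD_I32 M0, M0, Offset         // optional constant offset
//   <MovRel>                         // the actual indirect move
//   S_XOR_B64 EXEC, EXEC, VCC        // retire the handled lanes
//   S_CBRANCH_EXECNZ loop            // loop while lanes remain
//   S_MOV_B64 EXEC, Save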
void SILowerControlFlow::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
              .addReg(Idx)
              .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
              .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {

    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
            .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
            .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
            .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
            .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
              .addReg(AMDGPU::M0)
              .addImm(Offset);
    }
    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
            .addReg(AMDGPU::EXEC)
            .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
            .addReg(Save);

  }
  MI.eraseFromParent();
}

/// \param VecReg The register which holds element zero of the vector being
///               addressed into.
/// \param[out] Reg The base register to use in the indirect addressing
///                 instruction.
/// \param[in,out] Offset As an input, this is the constant offset part of the
///                       indirect Index. e.g. v0 = v[VecReg + Offset]. As an
///                       output, this is a constant value that needs to be
///                       added to the value stored in M0.
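///
/// For example (registers illustrative): addressing element 2 of a vector
/// whose sub0 is v4 gives RegIdx = 4 + 2 = 6, so Reg = v6 and Offset becomes
/// 0. If RegIdx would be negative, Reg is clamped to the first register of
/// the class and the negative remainder is left in Offset to be added to M0
/// at run time.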
void SILowerControlFlow::computeIndirectRegAndOffset(unsigned VecReg,
                                                     unsigned &Reg,
                                                     int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

void SILowerControlFlow::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
            .addReg(Reg)
            .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

void SILowerControlFlow::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
            .addReg(Reg, RegState::Define)
            .addReg(Val)
            .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

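  // Depth tracks SI_IF/SI_LOOP nesting. A SI_KILL seen while Depth > 0 is
  // deferred via HaveKill until the matching SI_END_CF restores the full
  // exec mask, where SkipIfDead can test whether every lane is dead.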
  bool HaveKill = false;
  bool NeedWQM = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock *EmptyMBBAtEnd = nullptr;
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      if (TII->isWQM(MI) || TII->isDS(MI))
        NeedWQM = true;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI))
        NeedFlat = true;

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC_V1:
        case AMDGPU::SI_INDIRECT_SRC_V2:
        case AMDGPU::SI_INDIRECT_SRC_V4:
        case AMDGPU::SI_INDIRECT_SRC_V8:
        case AMDGPU::SI_INDIRECT_SRC_V16:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;

        case AMDGPU::S_ENDPGM: {
          if (MF.getInfo<SIMachineFunctionInfo>()->returnsVoid())
            break;

          // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
          // because external bytecode will be appended at the end.
          if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
            // S_ENDPGM is not the last instruction. Add an empty block at
            // the end and jump there.
            if (!EmptyMBBAtEnd) {
              EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
              MF.insert(MF.end(), EmptyMBBAtEnd);
            }

            MBB.addSuccessor(EmptyMBBAtEnd);
            BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
                    .addMBB(EmptyMBBAtEnd);
          }

          I->eraseFromParent();
          break;
        }
      }
    }
  }

  if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  if (NeedFlat && MFI->IsKernel) {
    // TODO: What to use with function calls?
    // We will need to initialize the flat scratch register pair here; for
    // now just record that the function uses flat instructions.
    MFI->setHasFlatInstructions(true);
  }

  return true;
}