1 //===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This pass lowers the pseudo control flow instructions to real
11 /// machine instructions.
12 ///
13 /// All control flow is handled using predicated instructions and
14 /// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
15 /// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
16 /// by writing to the 64-bit EXEC register (each bit corresponds to a
17 /// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
21 ///
22 /// For example:
23 /// %vcc = V_CMP_GT_F32 %vgpr1, %vgpr2
24 /// %sgpr0 = SI_IF %vcc
25 /// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0
26 /// %sgpr0 = SI_ELSE %sgpr0
27 /// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0
28 /// SI_END_CF %sgpr0
29 ///
30 /// becomes:
31 ///
32 /// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc // Save and update the exec mask
33 /// %sgpr0 = S_XOR_B64 %sgpr0, %exec // Clear live bits from saved exec mask
34 /// S_CBRANCH_EXECZ label0 // This instruction is an optional
35 /// // optimization which allows us to
36 /// // branch if all the bits of
37 /// // EXEC are zero.
38 /// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0 // Do the IF block of the branch
39 ///
40 /// label0:
41 /// %sgpr0 = S_OR_SAVEEXEC_B64 %sgpr0 // Restore the exec mask for the Then
42 /// // block
43 /// %exec = S_XOR_B64 %sgpr0, %exec // Update the exec mask
///  S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                    // instruction again.
///  %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0 // Do the ELSE block
47 /// label1:
48 /// %exec = S_OR_B64 %exec, %sgpr0 // Re-enable saved exec mask bits
49 //===----------------------------------------------------------------------===//
50
51 #include "AMDGPU.h"
52 #include "GCNSubtarget.h"
53 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
54 #include "llvm/ADT/SmallSet.h"
55 #include "llvm/CodeGen/LiveIntervals.h"
56 #include "llvm/CodeGen/LiveVariables.h"
57 #include "llvm/CodeGen/MachineDominators.h"
58 #include "llvm/CodeGen/MachineFunctionPass.h"
59 #include "llvm/Target/TargetMachine.h"
60
61 using namespace llvm;
62
63 #define DEBUG_TYPE "si-lower-control-flow"
64
65 static cl::opt<bool>
66 RemoveRedundantEndcf("amdgpu-remove-redundant-endcf",
67 cl::init(true), cl::ReallyHidden);
68
69 namespace {
70
71 class SILowerControlFlow : public MachineFunctionPass {
72 private:
73 const SIRegisterInfo *TRI = nullptr;
74 const SIInstrInfo *TII = nullptr;
75 LiveIntervals *LIS = nullptr;
76 LiveVariables *LV = nullptr;
77 MachineDominatorTree *MDT = nullptr;
78 MachineRegisterInfo *MRI = nullptr;
79 SetVector<MachineInstr*> LoweredEndCf;
80 DenseSet<Register> LoweredIf;
81 SmallSet<MachineBasicBlock *, 4> KillBlocks;
82
83 const TargetRegisterClass *BoolRC = nullptr;
84 unsigned AndOpc;
85 unsigned OrOpc;
86 unsigned XorOpc;
87 unsigned MovTermOpc;
88 unsigned Andn2TermOpc;
89 unsigned XorTermrOpc;
90 unsigned OrTermrOpc;
91 unsigned OrSaveExecOpc;
92 unsigned Exec;
93
94 bool EnableOptimizeEndCf = false;
95
96 bool hasKill(const MachineBasicBlock *Begin, const MachineBasicBlock *End);
97
98 void emitIf(MachineInstr &MI);
99 void emitElse(MachineInstr &MI);
100 void emitIfBreak(MachineInstr &MI);
101 void emitLoop(MachineInstr &MI);
102
103 MachineBasicBlock *emitEndCf(MachineInstr &MI);
104
105 void lowerInitExec(MachineBasicBlock *MBB, MachineInstr &MI);
106
107 void findMaskOperands(MachineInstr &MI, unsigned OpNo,
108 SmallVectorImpl<MachineOperand> &Src) const;
109
110 void combineMasks(MachineInstr &MI);
111
112 bool removeMBBifRedundant(MachineBasicBlock &MBB);
113
114 MachineBasicBlock *process(MachineInstr &MI);
115
116 // Skip to the next instruction, ignoring debug instructions, and trivial
117 // block boundaries (blocks that have one (typically fallthrough) successor,
  // and the successor has one predecessor).
119 MachineBasicBlock::iterator
120 skipIgnoreExecInstsTrivialSucc(MachineBasicBlock &MBB,
121 MachineBasicBlock::iterator It) const;
122
123 /// Find the insertion point for a new conditional branch.
124 MachineBasicBlock::iterator
  skipToUncondBrOrEnd(MachineBasicBlock &MBB,
126 MachineBasicBlock::iterator I) const {
127 assert(I->isTerminator());
128
129 // FIXME: What if we had multiple pre-existing conditional branches?
130 MachineBasicBlock::iterator End = MBB.end();
131 while (I != End && !I->isUnconditionalBranch())
132 ++I;
133 return I;
134 }
135
136 // Remove redundant SI_END_CF instructions.
137 void optimizeEndCf();
138
139 public:
140 static char ID;
141
  SILowerControlFlow() : MachineFunctionPass(ID) {}
143
144 bool runOnMachineFunction(MachineFunction &MF) override;
145
  StringRef getPassName() const override {
147 return "SI Lower control flow pseudo instructions";
148 }
149
  void getAnalysisUsage(AnalysisUsage &AU) const override {
151 AU.addUsedIfAvailable<LiveIntervals>();
152 // Should preserve the same set that TwoAddressInstructions does.
153 AU.addPreserved<MachineDominatorTree>();
154 AU.addPreserved<SlotIndexes>();
155 AU.addPreserved<LiveIntervals>();
156 AU.addPreservedID(LiveVariablesID);
157 MachineFunctionPass::getAnalysisUsage(AU);
158 }
159 };
160
161 } // end anonymous namespace
162
163 char SILowerControlFlow::ID = 0;
164
165 INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
166 "SI lower control flow", false, false)
167
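// Mark (or clear) the implicit SCC def on a freshly built exec-mask
// instruction as dead, so later passes know whether the SCC result is
// actually consumed.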
static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
169 MachineOperand &ImpDefSCC = MI.getOperand(3);
170 assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
171
172 ImpDefSCC.setIsDead(IsDead);
173 }
174
175 char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;
176
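// Return true if any block reachable from Begin's successors (stopping at
// End) was recorded in KillBlocks, i.e. contains a kill or demote.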
bool SILowerControlFlow::hasKill(const MachineBasicBlock *Begin,
178 const MachineBasicBlock *End) {
179 DenseSet<const MachineBasicBlock*> Visited;
180 SmallVector<MachineBasicBlock *, 4> Worklist(Begin->successors());
181
182 while (!Worklist.empty()) {
183 MachineBasicBlock *MBB = Worklist.pop_back_val();
184
185 if (MBB == End || !Visited.insert(MBB).second)
186 continue;
187 if (KillBlocks.contains(MBB))
188 return true;
189
190 Worklist.append(MBB->succ_begin(), MBB->succ_end());
191 }
192
193 return false;
194 }
195
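// A SI_IF is "simple" if the saved exec mask it produces has exactly one
// non-debug use, and that use is the matching SI_END_CF.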
static bool isSimpleIf(const MachineInstr &MI, const MachineRegisterInfo *MRI) {
197 Register SaveExecReg = MI.getOperand(0).getReg();
198 auto U = MRI->use_instr_nodbg_begin(SaveExecReg);
199
200 if (U == MRI->use_instr_nodbg_end() ||
201 std::next(U) != MRI->use_instr_nodbg_end() ||
202 U->getOpcode() != AMDGPU::SI_END_CF)
203 return false;
204
205 return true;
206 }
207
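// Lower SI_IF: copy EXEC, AND it with the condition to disable lanes that
// fail the condition, and (for a non-simple if) XOR the two so the saved
// mask holds only the lanes that were turned off here and must be re-enabled
// at the matching SI_END_CF.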
void SILowerControlFlow::emitIf(MachineInstr &MI) {
209 MachineBasicBlock &MBB = *MI.getParent();
210 const DebugLoc &DL = MI.getDebugLoc();
211 MachineBasicBlock::iterator I(&MI);
212 Register SaveExecReg = MI.getOperand(0).getReg();
213 MachineOperand& Cond = MI.getOperand(1);
214 assert(Cond.getSubReg() == AMDGPU::NoSubRegister);
215
216 MachineOperand &ImpDefSCC = MI.getOperand(4);
217 assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
218
219 // If there is only one use of save exec register and that use is SI_END_CF,
220 // we can optimize SI_IF by returning the full saved exec mask instead of
221 // just cleared bits.
222 bool SimpleIf = isSimpleIf(MI, MRI);
223
224 if (SimpleIf) {
225 // Check for SI_KILL_*_TERMINATOR on path from if to endif.
    // If there is any such terminator, simplifications are not safe.
227 auto UseMI = MRI->use_instr_nodbg_begin(SaveExecReg);
228 SimpleIf = !hasKill(MI.getParent(), UseMI->getParent());
229 }
230
231 // Add an implicit def of exec to discourage scheduling VALU after this which
232 // will interfere with trying to form s_and_saveexec_b64 later.
233 Register CopyReg = SimpleIf ? SaveExecReg
234 : MRI->createVirtualRegister(BoolRC);
235 MachineInstr *CopyExec =
236 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
237 .addReg(Exec)
238 .addReg(Exec, RegState::ImplicitDefine);
239 LoweredIf.insert(CopyReg);
240
241 Register Tmp = MRI->createVirtualRegister(BoolRC);
242
243 MachineInstr *And =
244 BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp)
245 .addReg(CopyReg)
246 .add(Cond);
247 if (LV)
248 LV->replaceKillInstruction(Cond.getReg(), MI, *And);
249
250 setImpSCCDefDead(*And, true);
251
252 MachineInstr *Xor = nullptr;
253 if (!SimpleIf) {
254 Xor =
255 BuildMI(MBB, I, DL, TII->get(XorOpc), SaveExecReg)
256 .addReg(Tmp)
257 .addReg(CopyReg);
258 setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
259 }
260
  // Use a copy that is a terminator to get correct spill code placement with
  // fast regalloc.
263 MachineInstr *SetExec =
264 BuildMI(MBB, I, DL, TII->get(MovTermOpc), Exec)
265 .addReg(Tmp, RegState::Kill);
266 if (LV)
267 LV->getVarInfo(Tmp).Kills.push_back(SetExec);
268
269 // Skip ahead to the unconditional branch in case there are other terminators
270 // present.
271 I = skipToUncondBrOrEnd(MBB, I);
272
273 // Insert the S_CBRANCH_EXECZ instruction which will be optimized later
274 // during SIRemoveShortExecBranches.
275 MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
276 .add(MI.getOperand(2));
277
278 if (!LIS) {
279 MI.eraseFromParent();
280 return;
281 }
282
283 LIS->InsertMachineInstrInMaps(*CopyExec);
284
  // Replace with the AND so we don't need to fix the live interval for the
  // condition register.
287 LIS->ReplaceMachineInstrInMaps(MI, *And);
288
289 if (!SimpleIf)
290 LIS->InsertMachineInstrInMaps(*Xor);
291 LIS->InsertMachineInstrInMaps(*SetExec);
292 LIS->InsertMachineInstrInMaps(*NewBr);
293
294 LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
295 MI.eraseFromParent();
296
297 // FIXME: Is there a better way of adjusting the liveness? It shouldn't be
298 // hard to add another def here but I'm not sure how to correctly update the
299 // valno.
300 LIS->removeInterval(SaveExecReg);
301 LIS->createAndComputeVirtRegInterval(SaveExecReg);
302 LIS->createAndComputeVirtRegInterval(Tmp);
303 if (!SimpleIf)
304 LIS->createAndComputeVirtRegInterval(CopyReg);
305 }
306
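// Lower SI_ELSE: S_OR_SAVEEXEC at the top of the block merges the mask saved
// by SI_IF back into EXEC, the XOR terminator then switches EXEC to the
// "else" lanes, and S_CBRANCH_EXECZ skips the block when no such lanes
// remain.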
void SILowerControlFlow::emitElse(MachineInstr &MI) {
308 MachineBasicBlock &MBB = *MI.getParent();
309 const DebugLoc &DL = MI.getDebugLoc();
310
311 Register DstReg = MI.getOperand(0).getReg();
312
313 MachineBasicBlock::iterator Start = MBB.begin();
314
315 // This must be inserted before phis and any spill code inserted before the
316 // else.
317 Register SaveReg = MRI->createVirtualRegister(BoolRC);
318 MachineInstr *OrSaveExec =
319 BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
320 .add(MI.getOperand(1)); // Saved EXEC
321 if (LV)
322 LV->replaceKillInstruction(MI.getOperand(1).getReg(), MI, *OrSaveExec);
323
324 MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();
325
326 MachineBasicBlock::iterator ElsePt(MI);
327
328 // This accounts for any modification of the EXEC mask within the block and
329 // can be optimized out pre-RA when not required.
330 MachineInstr *And = BuildMI(MBB, ElsePt, DL, TII->get(AndOpc), DstReg)
331 .addReg(Exec)
332 .addReg(SaveReg);
333
334 if (LIS)
335 LIS->InsertMachineInstrInMaps(*And);
336
337 MachineInstr *Xor =
338 BuildMI(MBB, ElsePt, DL, TII->get(XorTermrOpc), Exec)
339 .addReg(Exec)
340 .addReg(DstReg);
341
342 // Skip ahead to the unconditional branch in case there are other terminators
343 // present.
344 ElsePt = skipToUncondBrOrEnd(MBB, ElsePt);
345
346 MachineInstr *Branch =
347 BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
348 .addMBB(DestBB);
349
350 if (!LIS) {
351 MI.eraseFromParent();
352 return;
353 }
354
355 LIS->RemoveMachineInstrFromMaps(MI);
356 MI.eraseFromParent();
357
358 LIS->InsertMachineInstrInMaps(*OrSaveExec);
359
360 LIS->InsertMachineInstrInMaps(*Xor);
361 LIS->InsertMachineInstrInMaps(*Branch);
362
363 LIS->removeInterval(DstReg);
364 LIS->createAndComputeVirtRegInterval(DstReg);
365 LIS->createAndComputeVirtRegInterval(SaveReg);
366
367 // Let this be recomputed.
368 LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
369 }
370
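// Lower SI_IF_BREAK: OR the break condition (masked with EXEC unless it is
// already exec-masked by a VALU compare in the same block) into the
// accumulated loop-exit mask.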
void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
372 MachineBasicBlock &MBB = *MI.getParent();
373 const DebugLoc &DL = MI.getDebugLoc();
374 auto Dst = MI.getOperand(0).getReg();
375
376 // Skip ANDing with exec if the break condition is already masked by exec
377 // because it is a V_CMP in the same basic block. (We know the break
378 // condition operand was an i1 in IR, so if it is a VALU instruction it must
379 // be one with a carry-out.)
380 bool SkipAnding = false;
381 if (MI.getOperand(1).isReg()) {
382 if (MachineInstr *Def = MRI->getUniqueVRegDef(MI.getOperand(1).getReg())) {
383 SkipAnding = Def->getParent() == MI.getParent()
384 && SIInstrInfo::isVALU(*Def);
385 }
386 }
387
388 // AND the break condition operand with exec, then OR that into the "loop
389 // exit" mask.
390 MachineInstr *And = nullptr, *Or = nullptr;
391 if (!SkipAnding) {
392 Register AndReg = MRI->createVirtualRegister(BoolRC);
393 And = BuildMI(MBB, &MI, DL, TII->get(AndOpc), AndReg)
394 .addReg(Exec)
395 .add(MI.getOperand(1));
396 if (LV)
397 LV->replaceKillInstruction(MI.getOperand(1).getReg(), MI, *And);
398 Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
399 .addReg(AndReg)
400 .add(MI.getOperand(2));
401 if (LIS)
402 LIS->createAndComputeVirtRegInterval(AndReg);
403 } else {
404 Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
405 .add(MI.getOperand(1))
406 .add(MI.getOperand(2));
407 if (LV)
408 LV->replaceKillInstruction(MI.getOperand(1).getReg(), MI, *Or);
409 }
410 if (LV)
411 LV->replaceKillInstruction(MI.getOperand(2).getReg(), MI, *Or);
412
413 if (LIS) {
414 if (And)
415 LIS->InsertMachineInstrInMaps(*And);
416 LIS->ReplaceMachineInstrInMaps(MI, *Or);
417 }
418
419 MI.eraseFromParent();
420 }
421
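// Lower SI_LOOP: clear the lanes that want to exit (ANDN2 of EXEC with the
// accumulated break mask) and branch back to the loop header while any lanes
// are still active.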
void SILowerControlFlow::emitLoop(MachineInstr &MI) {
423 MachineBasicBlock &MBB = *MI.getParent();
424 const DebugLoc &DL = MI.getDebugLoc();
425
426 MachineInstr *AndN2 =
427 BuildMI(MBB, &MI, DL, TII->get(Andn2TermOpc), Exec)
428 .addReg(Exec)
429 .add(MI.getOperand(0));
430
431 auto BranchPt = skipToUncondBrOrEnd(MBB, MI.getIterator());
432 MachineInstr *Branch =
433 BuildMI(MBB, BranchPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
434 .add(MI.getOperand(1));
435
436 if (LIS) {
437 LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
438 LIS->InsertMachineInstrInMaps(*Branch);
439 }
440
441 MI.eraseFromParent();
442 }
443
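// Walk forward from It, skipping instructions that cannot read EXEC and
// following single-successor block chains, and return the first instruction
// that may read EXEC (or MBB.end() if none is found).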
444 MachineBasicBlock::iterator
SILowerControlFlow::skipIgnoreExecInstsTrivialSucc(
446 MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
447
448 SmallSet<const MachineBasicBlock *, 4> Visited;
449 MachineBasicBlock *B = &MBB;
450 do {
451 if (!Visited.insert(B).second)
452 return MBB.end();
453
454 auto E = B->end();
455 for ( ; It != E; ++It) {
456 if (TII->mayReadEXEC(*MRI, *It))
457 break;
458 }
459
460 if (It != E)
461 return It;
462
463 if (B->succ_size() != 1)
464 return MBB.end();
465
466 // If there is one trivial successor, advance to the next block.
467 MachineBasicBlock *Succ = *B->succ_begin();
468
469 It = Succ->begin();
470 B = Succ;
471 } while (true);
472 }
473
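// Lower SI_END_CF: OR the lanes saved at the corresponding SI_IF/SI_ELSE back
// into EXEC. The block is split first when the saved mask is redefined
// earlier in the block, so the restore can be emitted as a terminator.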
MachineBasicBlock *SILowerControlFlow::emitEndCf(MachineInstr &MI) {
475 MachineBasicBlock &MBB = *MI.getParent();
476 const DebugLoc &DL = MI.getDebugLoc();
477
478 MachineBasicBlock::iterator InsPt = MBB.begin();
479
480 // If we have instructions that aren't prolog instructions, split the block
481 // and emit a terminator instruction. This ensures correct spill placement.
482 // FIXME: We should unconditionally split the block here.
483 bool NeedBlockSplit = false;
484 Register DataReg = MI.getOperand(0).getReg();
485 for (MachineBasicBlock::iterator I = InsPt, E = MI.getIterator();
486 I != E; ++I) {
487 if (I->modifiesRegister(DataReg, TRI)) {
488 NeedBlockSplit = true;
489 break;
490 }
491 }
492
493 unsigned Opcode = OrOpc;
494 MachineBasicBlock *SplitBB = &MBB;
495 if (NeedBlockSplit) {
496 SplitBB = MBB.splitAt(MI, /*UpdateLiveIns*/true, LIS);
497 if (MDT && SplitBB != &MBB) {
498 MachineDomTreeNode *MBBNode = (*MDT)[&MBB];
499 SmallVector<MachineDomTreeNode *> Children(MBBNode->begin(),
500 MBBNode->end());
501 MachineDomTreeNode *SplitBBNode = MDT->addNewBlock(SplitBB, &MBB);
502 for (MachineDomTreeNode *Child : Children)
503 MDT->changeImmediateDominator(Child, SplitBBNode);
504 }
505 Opcode = OrTermrOpc;
506 InsPt = MI;
507 }
508
509 MachineInstr *NewMI =
510 BuildMI(MBB, InsPt, DL, TII->get(Opcode), Exec)
511 .addReg(Exec)
512 .add(MI.getOperand(0));
513 if (LV) {
514 LV->replaceKillInstruction(DataReg, MI, *NewMI);
515
516 if (SplitBB != &MBB) {
517 // Track the set of registers defined in the split block so we don't
518 // accidentally add the original block to AliveBlocks.
519 DenseSet<Register> SplitDefs;
520 for (MachineInstr &X : *SplitBB) {
521 for (MachineOperand &Op : X.operands()) {
522 if (Op.isReg() && Op.isDef() && Op.getReg().isVirtual())
523 SplitDefs.insert(Op.getReg());
524 }
525 }
526
527 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
528 Register Reg = Register::index2VirtReg(i);
529 LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
530
531 if (VI.AliveBlocks.test(MBB.getNumber()))
532 VI.AliveBlocks.set(SplitBB->getNumber());
533 else {
534 for (MachineInstr *Kill : VI.Kills) {
535 if (Kill->getParent() == SplitBB && !SplitDefs.contains(Reg))
536 VI.AliveBlocks.set(MBB.getNumber());
537 }
538 }
539 }
540 }
541 }
542
543 LoweredEndCf.insert(NewMI);
544
545 if (LIS)
546 LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
547
548 MI.eraseFromParent();
549
550 if (LIS)
551 LIS->handleMove(*NewMI);
552 return SplitBB;
553 }
554
// Collect the replacement operands for a logical operation: either the single
// operand itself (exec or a non-virtual register), or the two source operands
// if it was defined by another equivalent operation or a copy.
void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
558 SmallVectorImpl<MachineOperand> &Src) const {
559 MachineOperand &Op = MI.getOperand(OpNo);
560 if (!Op.isReg() || !Op.getReg().isVirtual()) {
561 Src.push_back(Op);
562 return;
563 }
564
565 MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
566 if (!Def || Def->getParent() != MI.getParent() ||
567 !(Def->isFullCopy() || (Def->getOpcode() == MI.getOpcode())))
568 return;
569
570 // Make sure we do not modify exec between def and use.
  // A copy with an implicitly defined exec, as inserted earlier, is an
  // exception; it does not really modify exec.
573 for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
574 if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
575 !(I->isCopy() && I->getOperand(0).getReg() != Exec))
576 return;
577
578 for (const auto &SrcOp : Def->explicit_operands())
579 if (SrcOp.isReg() && SrcOp.isUse() &&
580 (SrcOp.getReg().isVirtual() || SrcOp.getReg() == Exec))
581 Src.push_back(SrcOp);
582 }
583
584 // Search and combine pairs of equivalent instructions, like
585 // S_AND_B64 x, (S_AND_B64 x, y) => S_AND_B64 x, y
586 // S_OR_B64 x, (S_OR_B64 x, y) => S_OR_B64 x, y
587 // One of the operands is exec mask.
void SILowerControlFlow::combineMasks(MachineInstr &MI) {
589 assert(MI.getNumExplicitOperands() == 3);
590 SmallVector<MachineOperand, 4> Ops;
591 unsigned OpToReplace = 1;
592 findMaskOperands(MI, 1, Ops);
593 if (Ops.size() == 1) OpToReplace = 2; // First operand can be exec or its copy
594 findMaskOperands(MI, 2, Ops);
595 if (Ops.size() != 3) return;
596
597 unsigned UniqueOpndIdx;
598 if (Ops[0].isIdenticalTo(Ops[1])) UniqueOpndIdx = 2;
599 else if (Ops[0].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
600 else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
601 else return;
602
603 Register Reg = MI.getOperand(OpToReplace).getReg();
604 MI.removeOperand(OpToReplace);
605 MI.addOperand(Ops[UniqueOpndIdx]);
606 if (MRI->use_empty(Reg))
607 MRI->getUniqueVRegDef(Reg)->eraseFromParent();
608 }
609
void SILowerControlFlow::optimizeEndCf() {
  // If the only instruction immediately following this END_CF is another
  // END_CF in the only successor, we can avoid emitting the exec mask restore
  // here.
613 if (!EnableOptimizeEndCf)
614 return;
615
616 for (MachineInstr *MI : reverse(LoweredEndCf)) {
617 MachineBasicBlock &MBB = *MI->getParent();
618 auto Next =
619 skipIgnoreExecInstsTrivialSucc(MBB, std::next(MI->getIterator()));
620 if (Next == MBB.end() || !LoweredEndCf.count(&*Next))
621 continue;
    // Only skip the inner END_CF if the outer END_CF belongs to a SI_IF.
    // If it belongs to a SI_ELSE, the saved mask has an inverted value.
624 Register SavedExec
625 = TII->getNamedOperand(*Next, AMDGPU::OpName::src1)->getReg();
626 assert(SavedExec.isVirtual() && "Expected saved exec to be src1!");
627
628 const MachineInstr *Def = MRI->getUniqueVRegDef(SavedExec);
629 if (Def && LoweredIf.count(SavedExec)) {
630 LLVM_DEBUG(dbgs() << "Skip redundant "; MI->dump());
631 if (LIS)
632 LIS->RemoveMachineInstrFromMaps(*MI);
633 Register Reg;
634 if (LV)
635 Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg();
636 MI->eraseFromParent();
637 if (LV)
638 LV->recomputeForSingleDefVirtReg(Reg);
639 removeMBBifRedundant(MBB);
640 }
641 }
642 }
643
MachineBasicBlock *SILowerControlFlow::process(MachineInstr &MI) {
645 MachineBasicBlock &MBB = *MI.getParent();
646 MachineBasicBlock::iterator I(MI);
647 MachineInstr *Prev = (I != MBB.begin()) ? &*(std::prev(I)) : nullptr;
648
649 MachineBasicBlock *SplitBB = &MBB;
650
651 switch (MI.getOpcode()) {
652 case AMDGPU::SI_IF:
653 emitIf(MI);
654 break;
655
656 case AMDGPU::SI_ELSE:
657 emitElse(MI);
658 break;
659
660 case AMDGPU::SI_IF_BREAK:
661 emitIfBreak(MI);
662 break;
663
664 case AMDGPU::SI_LOOP:
665 emitLoop(MI);
666 break;
667
668 case AMDGPU::SI_WATERFALL_LOOP:
669 MI.setDesc(TII->get(AMDGPU::S_CBRANCH_EXECNZ));
670 break;
671
672 case AMDGPU::SI_END_CF:
673 SplitBB = emitEndCf(MI);
674 break;
675
676 default:
677 assert(false && "Attempt to process unsupported instruction");
678 break;
679 }
680
681 MachineBasicBlock::iterator Next;
682 for (I = Prev ? Prev->getIterator() : MBB.begin(); I != MBB.end(); I = Next) {
683 Next = std::next(I);
684 MachineInstr &MaskMI = *I;
685 switch (MaskMI.getOpcode()) {
686 case AMDGPU::S_AND_B64:
687 case AMDGPU::S_OR_B64:
688 case AMDGPU::S_AND_B32:
689 case AMDGPU::S_OR_B32:
690 // Cleanup bit manipulations on exec mask
691 combineMasks(MaskMI);
692 break;
693 default:
694 I = MBB.end();
695 break;
696 }
697 }
698
699 return SplitBB;
700 }
701
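// Lower SI_INIT_EXEC and SI_INIT_EXEC_FROM_INPUT: either set EXEC to an
// immediate mask, or compute it from a packed thread-count input with
// S_BFE/S_BFM, falling back to S_CMP + S_CMOV for a full wave.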
void SILowerControlFlow::lowerInitExec(MachineBasicBlock *MBB,
703 MachineInstr &MI) {
704 MachineFunction &MF = *MBB->getParent();
705 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
706 bool IsWave32 = ST.isWave32();
707
708 if (MI.getOpcode() == AMDGPU::SI_INIT_EXEC) {
709 // This should be before all vector instructions.
710 BuildMI(*MBB, MBB->begin(), MI.getDebugLoc(),
711 TII->get(IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64), Exec)
712 .addImm(MI.getOperand(0).getImm());
713 if (LIS)
714 LIS->RemoveMachineInstrFromMaps(MI);
715 MI.eraseFromParent();
716 return;
717 }
718
719 // Extract the thread count from an SGPR input and set EXEC accordingly.
720 // Since BFM can't shift by 64, handle that case with CMP + CMOV.
721 //
722 // S_BFE_U32 count, input, {shift, 7}
723 // S_BFM_B64 exec, count, 0
724 // S_CMP_EQ_U32 count, 64
725 // S_CMOV_B64 exec, -1
726 Register InputReg = MI.getOperand(0).getReg();
727 MachineInstr *FirstMI = &*MBB->begin();
728 if (InputReg.isVirtual()) {
729 MachineInstr *DefInstr = MRI->getVRegDef(InputReg);
730 assert(DefInstr && DefInstr->isCopy());
731 if (DefInstr->getParent() == MBB) {
732 if (DefInstr != FirstMI) {
        // If the `InputReg` is defined in the current block, we also need to
734 // move that instruction to the beginning of the block.
735 DefInstr->removeFromParent();
736 MBB->insert(FirstMI, DefInstr);
737 if (LIS)
738 LIS->handleMove(*DefInstr);
739 } else {
        // If the first instruction is the definition, move the pointer past it.
741 FirstMI = &*std::next(FirstMI->getIterator());
742 }
743 }
744 }
745
746 // Insert instruction sequence at block beginning (before vector operations).
747 const DebugLoc DL = MI.getDebugLoc();
748 const unsigned WavefrontSize = ST.getWavefrontSize();
749 const unsigned Mask = (WavefrontSize << 1) - 1;
750 Register CountReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
751 auto BfeMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_BFE_U32), CountReg)
752 .addReg(InputReg)
753 .addImm((MI.getOperand(1).getImm() & Mask) | 0x70000);
754 if (LV)
755 LV->recomputeForSingleDefVirtReg(InputReg);
756 auto BfmMI =
757 BuildMI(*MBB, FirstMI, DL,
758 TII->get(IsWave32 ? AMDGPU::S_BFM_B32 : AMDGPU::S_BFM_B64), Exec)
759 .addReg(CountReg)
760 .addImm(0);
761 auto CmpMI = BuildMI(*MBB, FirstMI, DL, TII->get(AMDGPU::S_CMP_EQ_U32))
762 .addReg(CountReg, RegState::Kill)
763 .addImm(WavefrontSize);
764 if (LV)
765 LV->getVarInfo(CountReg).Kills.push_back(CmpMI);
766 auto CmovMI =
767 BuildMI(*MBB, FirstMI, DL,
768 TII->get(IsWave32 ? AMDGPU::S_CMOV_B32 : AMDGPU::S_CMOV_B64),
769 Exec)
770 .addImm(-1);
771
772 if (!LIS) {
773 MI.eraseFromParent();
774 return;
775 }
776
777 LIS->RemoveMachineInstrFromMaps(MI);
778 MI.eraseFromParent();
779
780 LIS->InsertMachineInstrInMaps(*BfeMI);
781 LIS->InsertMachineInstrInMaps(*BfmMI);
782 LIS->InsertMachineInstrInMaps(*CmpMI);
783 LIS->InsertMachineInstrInMaps(*CmovMI);
784
785 LIS->removeInterval(InputReg);
786 LIS->createAndComputeVirtRegInterval(InputReg);
787 LIS->createAndComputeVirtRegInterval(CountReg);
788 }
789
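// Erase MBB if it contains nothing but debug instructions and an
// unconditional branch: rewire its predecessors to its single successor and
// patch up the fallthrough with a branch or a block reordering.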
bool SILowerControlFlow::removeMBBifRedundant(MachineBasicBlock &MBB) {
791 for (auto &I : MBB.instrs()) {
792 if (!I.isDebugInstr() && !I.isUnconditionalBranch())
793 return false;
794 }
795
796 assert(MBB.succ_size() == 1 && "MBB has more than one successor");
797
798 MachineBasicBlock *Succ = *MBB.succ_begin();
799 MachineBasicBlock *FallThrough = nullptr;
800
801 while (!MBB.predecessors().empty()) {
802 MachineBasicBlock *P = *MBB.pred_begin();
803 if (P->getFallThrough() == &MBB)
804 FallThrough = P;
805 P->ReplaceUsesOfBlockWith(&MBB, Succ);
806 }
807 MBB.removeSuccessor(Succ);
808 if (LIS) {
809 for (auto &I : MBB.instrs())
810 LIS->RemoveMachineInstrFromMaps(I);
811 }
812 if (MDT) {
813 // If Succ, the single successor of MBB, is dominated by MBB, MDT needs
814 // updating by changing Succ's idom to the one of MBB; otherwise, MBB must
815 // be a leaf node in MDT and could be erased directly.
816 if (MDT->dominates(&MBB, Succ))
817 MDT->changeImmediateDominator(MDT->getNode(Succ),
818 MDT->getNode(&MBB)->getIDom());
819 MDT->eraseNode(&MBB);
820 }
821 MBB.clear();
822 MBB.eraseFromParent();
823 if (FallThrough && !FallThrough->isLayoutSuccessor(Succ)) {
824 if (!Succ->canFallThrough()) {
825 MachineFunction *MF = FallThrough->getParent();
826 MachineFunction::iterator FallThroughPos(FallThrough);
827 MF->splice(std::next(FallThroughPos), Succ);
828 } else
829 BuildMI(*FallThrough, FallThrough->end(),
830 FallThrough->findBranchDebugLoc(), TII->get(AMDGPU::S_BRANCH))
831 .addMBB(Succ);
832 }
833
834 return true;
835 }
836
bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
838 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
839 TII = ST.getInstrInfo();
840 TRI = &TII->getRegisterInfo();
841 EnableOptimizeEndCf =
842 RemoveRedundantEndcf && MF.getTarget().getOptLevel() > CodeGenOpt::None;
843
844 // This doesn't actually need LiveIntervals, but we can preserve them.
845 LIS = getAnalysisIfAvailable<LiveIntervals>();
846 // This doesn't actually need LiveVariables, but we can preserve them.
847 LV = getAnalysisIfAvailable<LiveVariables>();
848 MDT = getAnalysisIfAvailable<MachineDominatorTree>();
849 MRI = &MF.getRegInfo();
850 BoolRC = TRI->getBoolRC();
851
852 if (ST.isWave32()) {
853 AndOpc = AMDGPU::S_AND_B32;
854 OrOpc = AMDGPU::S_OR_B32;
855 XorOpc = AMDGPU::S_XOR_B32;
856 MovTermOpc = AMDGPU::S_MOV_B32_term;
857 Andn2TermOpc = AMDGPU::S_ANDN2_B32_term;
858 XorTermrOpc = AMDGPU::S_XOR_B32_term;
859 OrTermrOpc = AMDGPU::S_OR_B32_term;
860 OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32;
861 Exec = AMDGPU::EXEC_LO;
862 } else {
863 AndOpc = AMDGPU::S_AND_B64;
864 OrOpc = AMDGPU::S_OR_B64;
865 XorOpc = AMDGPU::S_XOR_B64;
866 MovTermOpc = AMDGPU::S_MOV_B64_term;
867 Andn2TermOpc = AMDGPU::S_ANDN2_B64_term;
868 XorTermrOpc = AMDGPU::S_XOR_B64_term;
869 OrTermrOpc = AMDGPU::S_OR_B64_term;
870 OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64;
871 Exec = AMDGPU::EXEC;
872 }
873
874 // Compute set of blocks with kills
875 const bool CanDemote =
876 MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;
877 for (auto &MBB : MF) {
878 bool IsKillBlock = false;
879 for (auto &Term : MBB.terminators()) {
880 if (TII->isKillTerminator(Term.getOpcode())) {
881 KillBlocks.insert(&MBB);
882 IsKillBlock = true;
883 break;
884 }
885 }
886 if (CanDemote && !IsKillBlock) {
887 for (auto &MI : MBB) {
888 if (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1) {
889 KillBlocks.insert(&MBB);
890 break;
891 }
892 }
893 }
894 }
895
896 bool Changed = false;
897 MachineFunction::iterator NextBB;
898 for (MachineFunction::iterator BI = MF.begin();
899 BI != MF.end(); BI = NextBB) {
900 NextBB = std::next(BI);
901 MachineBasicBlock *MBB = &*BI;
902
903 MachineBasicBlock::iterator I, E, Next;
904 E = MBB->end();
905 for (I = MBB->begin(); I != E; I = Next) {
906 Next = std::next(I);
907 MachineInstr &MI = *I;
908 MachineBasicBlock *SplitMBB = MBB;
909
910 switch (MI.getOpcode()) {
911 case AMDGPU::SI_IF:
912 case AMDGPU::SI_ELSE:
913 case AMDGPU::SI_IF_BREAK:
914 case AMDGPU::SI_WATERFALL_LOOP:
915 case AMDGPU::SI_LOOP:
916 case AMDGPU::SI_END_CF:
917 SplitMBB = process(MI);
918 Changed = true;
919 break;
920
921 // FIXME: find a better place for this
922 case AMDGPU::SI_INIT_EXEC:
923 case AMDGPU::SI_INIT_EXEC_FROM_INPUT:
924 lowerInitExec(MBB, MI);
925 if (LIS)
926 LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
927 Changed = true;
928 break;
929
930 default:
931 break;
932 }
933
934 if (SplitMBB != MBB) {
935 MBB = Next->getParent();
936 E = MBB->end();
937 }
938 }
939 }
940
941 optimizeEndCf();
942
943 LoweredEndCf.clear();
944 LoweredIf.clear();
945 KillBlocks.clear();
946
947 return Changed;
948 }
949