//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
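  // Discriminated by Kind below: exactly one of the union members is live.
  // OpToFold is used only for register and global-address folds.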
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
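    // e.g. an immediate that would fold into src2 of V_MAC_F32_e64 must be
    // checked against src2 of the resulting V_MAD_F32 instead.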
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned MadOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(MadOpc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
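      // e.g. for ImmToFold = 0x12340000 the low half is zero, so below we
      // fold the 16-bit value 0x1234 and use op_sel to select the high half.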
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

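  // Shrinking an e64 add/sub to e32 retargets the carry-out to the implicit
  // VCC def, so the fold is only attempted while VCC is dead here.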
  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << *MI << " due to vcc liveness\n");
      return false;
    }

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (const FoldCandidate &Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
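      // e.g. src2 of V_MAC_F32_e64 must be a VGPR tied to the destination,
      // but the same operand becomes foldable in the untied V_MAD_F32 form.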
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
}

static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::any_of(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }))
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  const MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

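  // Walk the REG_SEQUENCE inputs: each lane must ultimately be a move of one
  // and the same inline immediate (a splat) for the fold to be possible.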
  int64_t Imm;
  MachineOperand *Op;
  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    const MachineOperand &Sub = Def->getOperand(I);
    if (!Sub.isReg() || Sub.getSubReg())
      return false;
    MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub.getReg());
    while (SubDef && !SubDef->isMoveImmediate() &&
           !SubDef->getOperand(1).isImm() && TII->isFoldableCopy(*SubDef))
      SubDef = MRI.getUniqueVRegDef(SubDef->getOperand(1).getReg());
    if (!SubDef || !SubDef->isMoveImmediate() || !SubDef->getOperand(1).isImm())
      return false;
    Op = &SubDef->getOperand(1);
    auto SubImm = Op->getImm();
    if (I == 1) {
      if (!TII->isInlineConstant(SubDef->getOperand(1), OpTy))
        return false;

      Imm = SubImm;
      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  if (!TII->isOperandLegal(*UseMI, UseOpIdx, Op))
    return false;

  FoldList.push_back(FoldCandidate(UseMI, UseOpIdx, Op));
  return true;
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
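  //
  // e.g. an operand feeding sub0 of
  //   %rs = REG_SEQUENCE %src, sub0, %other, sub1
  // is instead considered for folding into each user of %rs that reads sub0.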
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.
    MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
                           SOff->getReg() != MFI->getStackPtrOffsetReg()))
      return;

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    SOff->setReg(MFI->getStackPtrOffsetReg());
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();

    // Don't fold into a copy to a physical register. Doing so would interfere
    // with the register coalescer's logic which would avoid redundant
    // initializations.
    if (DestReg.isPhysical())
      return;

    const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);

    Register SrcReg = UseMI->getOperand(1).getReg();
    if (SrcReg.isVirtual()) { // XXX - This can be an assert?
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold
                        << "\n into " << *UseMI << '\n');
      unsigned Size = TII->getOpSize(*UseMI, 1);
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
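  // e.g. a use of sub0 of a 64-bit materialized constant can only fold the
  // low 32 bits, and a use of sub1 only the high 32 bits.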
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
      Opc == AMDGPU::V_CNDMASK_B32_e64    ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
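    // A cndmask whose sources are identical and unmodified produces the same
    // value for either condition, so it degenerates into a copy (or a mov of
    // the immediate).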
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
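// The fold below then pushes the clamp bit onto the defining instruction,
// e.g.
//   %x = V_ADD_F32 %a, %b
//   %y = V_MAX_F32 %x, %x, clamp
// becomes %x = V_ADD_F32 %a, %b, clamp, and %y is replaced with %x.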
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
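// e.g.
//   %x = V_ADD_F32 %a, %b
//   %y = V_MUL_F32 2.0, %x
// becomes %x = V_ADD_F32 %a, %b, mul:2, and %y is replaced with %x.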
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;

    MachineOperand *CurrentKnownM0Val = nullptr;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);

        // Saw an unknown clobber of m0, so we no longer know what it is.
        if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
          CurrentKnownM0Val = nullptr;
        continue;
      }

      // Specially track simple redefs of m0 to the same value in a block, so we
      // can erase the later ones.
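      // e.g. the second of two identical "s_mov_b32 m0, -1" defs with no
      // intervening clobber of m0 is erased here.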
      if (MI.getOperand(0).getReg() == AMDGPU::M0) {
        MachineOperand &NewM0Val = MI.getOperand(1);
        if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
          MI.eraseFromParent();
          continue;
        }

        // We aren't tracking other physical registers.
        CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
          nullptr : &NewM0Val;
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}