xref: /llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp (revision 4d000d24889670bb433eb3379e8936c6fb1ab615)
1 //===-- SIFoldOperands.cpp - Fold operands ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
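/// This pass folds immediates, frame indexes, and register copies into their
/// uses, and also folds clamp and output-modifier (omod) patterns into the
/// defining instructions.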
8 //===----------------------------------------------------------------------===//
9 //
10 
11 #include "AMDGPU.h"
12 #include "AMDGPUSubtarget.h"
13 #include "SIInstrInfo.h"
14 #include "SIMachineFunctionInfo.h"
15 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
16 #include "llvm/ADT/DepthFirstIterator.h"
17 #include "llvm/CodeGen/LiveIntervals.h"
18 #include "llvm/CodeGen/MachineFunctionPass.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
23 #include "llvm/Target/TargetMachine.h"
24 
25 #define DEBUG_TYPE "si-fold-operands"
26 using namespace llvm;
27 
28 namespace {
29 
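// Describes one pending fold: the use instruction and operand slot being
// folded into, what is being folded (a register operand, immediate, or frame
// index), and whether the fold requires commuting the user or shrinking it to
// a 32-bit encoding.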
30 struct FoldCandidate {
31   MachineInstr *UseMI;
32   union {
33     MachineOperand *OpToFold;
34     uint64_t ImmToFold;
35     int FrameIndexToFold;
36   };
37   int ShrinkOpcode;
38   unsigned char UseOpNo;
39   MachineOperand::MachineOperandType Kind;
40   bool Commuted;
41 
42   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
43                 bool Commuted_ = false,
44                 int ShrinkOp = -1) :
45     UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
46     Kind(FoldOp->getType()),
47     Commuted(Commuted_) {
48     if (FoldOp->isImm()) {
49       ImmToFold = FoldOp->getImm();
50     } else if (FoldOp->isFI()) {
51       FrameIndexToFold = FoldOp->getIndex();
52     } else {
53       assert(FoldOp->isReg());
54       OpToFold = FoldOp;
55     }
56   }
57 
58   bool isFI() const {
59     return Kind == MachineOperand::MO_FrameIndex;
60   }
61 
62   bool isImm() const {
63     return Kind == MachineOperand::MO_Immediate;
64   }
65 
66   bool isReg() const {
67     return Kind == MachineOperand::MO_Register;
68   }
69 
70   bool isCommuted() const {
71     return Commuted;
72   }
73 
74   bool needsShrink() const {
75     return ShrinkOpcode != -1;
76   }
77 
78   int getShrinkOpcode() const {
79     return ShrinkOpcode;
80   }
81 };
82 
83 class SIFoldOperands : public MachineFunctionPass {
84 public:
85   static char ID;
86   MachineRegisterInfo *MRI;
87   const SIInstrInfo *TII;
88   const SIRegisterInfo *TRI;
89   const GCNSubtarget *ST;
90 
91   void foldOperand(MachineOperand &OpToFold,
92                    MachineInstr *UseMI,
93                    unsigned UseOpIdx,
94                    SmallVectorImpl<FoldCandidate> &FoldList,
95                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
96 
97   void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
98 
99   const MachineOperand *isClamp(const MachineInstr &MI) const;
100   bool tryFoldClamp(MachineInstr &MI);
101 
102   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
103   bool tryFoldOMod(MachineInstr &MI);
104 
105 public:
106   SIFoldOperands() : MachineFunctionPass(ID) {
107     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
108   }
109 
110   bool runOnMachineFunction(MachineFunction &MF) override;
111 
112   StringRef getPassName() const override { return "SI Fold Operands"; }
113 
114   void getAnalysisUsage(AnalysisUsage &AU) const override {
115     AU.setPreservesCFG();
116     MachineFunctionPass::getAnalysisUsage(AU);
117   }
118 };
119 
120 } // End anonymous namespace.
121 
122 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
123                 "SI Fold Operands", false, false)
124 
125 char SIFoldOperands::ID = 0;
126 
127 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
128 
129 // Wrapper around isInlineConstant that understands special cases when
130 // instruction types are replaced during operand folding.
131 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
132                                      const MachineInstr &UseMI,
133                                      unsigned OpNo,
134                                      const MachineOperand &OpToFold) {
135   if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
136     return true;
137 
138   unsigned Opc = UseMI.getOpcode();
139   switch (Opc) {
140   case AMDGPU::V_MAC_F32_e64:
141   case AMDGPU::V_MAC_F16_e64:
142   case AMDGPU::V_FMAC_F32_e64: {
143     // Special case for mac. Since this is replaced with mad when folded into
144     // src2, we need to check the legality for the final instruction.
145     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
146     if (static_cast<int>(OpNo) == Src2Idx) {
147       bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
148       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
149 
150       unsigned NewOpc = IsFMA ?
151         AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
152       const MCInstrDesc &MadDesc = TII->get(NewOpc);
153       return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
154     }
155     return false;
156   }
157   default:
158     return false;
159   }
160 }
161 
162 FunctionPass *llvm::createSIFoldOperandsPass() {
163   return new SIFoldOperands();
164 }
165 
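// Apply a recorded fold to its use instruction. Returns false if the fold
// turns out to be illegal, e.g. op_sel is already set on a packed operand, or
// VCC is live at the point where a 64-bit encoding would have to be shrunk to
// 32 bits.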
166 static bool updateOperand(FoldCandidate &Fold,
167                           const SIInstrInfo &TII,
168                           const TargetRegisterInfo &TRI,
169                           const GCNSubtarget &ST) {
170   MachineInstr *MI = Fold.UseMI;
171   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
172   assert(Old.isReg());
173 
174   if (Fold.isImm()) {
175     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
176         AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
177                                        ST.hasInv2PiInlineImm())) {
178       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
179       // already set.
180       unsigned Opcode = MI->getOpcode();
181       int OpNo = MI->getOperandNo(&Old);
182       int ModIdx = -1;
183       if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
184         ModIdx = AMDGPU::OpName::src0_modifiers;
185       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
186         ModIdx = AMDGPU::OpName::src1_modifiers;
187       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
188         ModIdx = AMDGPU::OpName::src2_modifiers;
189       assert(ModIdx != -1);
190       ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
191       MachineOperand &Mod = MI->getOperand(ModIdx);
192       unsigned Val = Mod.getImm();
193       if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
194         return false;
195       // Only apply the following transformation if that operand requires
196       // a packed immediate.
197       switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
198       case AMDGPU::OPERAND_REG_IMM_V2FP16:
199       case AMDGPU::OPERAND_REG_IMM_V2INT16:
200       case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
201       case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
202         // If upper part is all zero we do not need op_sel_hi.
203         if (!isUInt<16>(Fold.ImmToFold)) {
204           if (!(Fold.ImmToFold & 0xffff)) {
205             Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
206             Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
207             Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
208             return true;
209           }
210           Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
211           Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
212           return true;
213         }
214         break;
215       default:
216         break;
217       }
218     }
219   }
220 
221   if ((Fold.isImm() || Fold.isFI()) && Fold.needsShrink()) {
222     MachineBasicBlock *MBB = MI->getParent();
223     auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
224     if (Liveness != MachineBasicBlock::LQR_Dead)
225       return false;
226 
227     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
228     int Op32 = Fold.getShrinkOpcode();
229     MachineOperand &Dst0 = MI->getOperand(0);
230     MachineOperand &Dst1 = MI->getOperand(1);
231     assert(Dst0.isDef() && Dst1.isDef());
232 
233     bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
234 
235     const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
236     unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);
237 
238     MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
239 
240     if (HaveNonDbgCarryUse) {
241       BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
242         .addReg(AMDGPU::VCC, RegState::Kill);
243     }
244 
245     // Keep the old instruction around to avoid breaking iterators, but
246     // replace it with a dummy instruction to remove uses.
247     //
248     // FIXME: We should not have to invert how this pass looks at operands to
249     // avoid this. It should track the set of foldable movs instead of
250     // searching for uses when examining a use.
251     Dst0.setReg(NewReg0);
252     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
253       MI->RemoveOperand(I);
254     MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
255 
256     if (Fold.isCommuted())
257       TII.commuteInstruction(*Inst32, false);
258     return true;
259   }
260 
261   assert(!Fold.needsShrink() && "not handled");
262 
263   if (Fold.isImm()) {
264     Old.ChangeToImmediate(Fold.ImmToFold);
265     return true;
266   }
267 
268   if (Fold.isFI()) {
269     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
270     return true;
271   }
272 
273   MachineOperand *New = Fold.OpToFold;
274   Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
275   Old.setIsUndef(New->isUndef());
276   return true;
277 }
278 
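// Return true if MI is already recorded as the user of a pending fold.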
279 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
280                               const MachineInstr *MI) {
281   for (auto Candidate : FoldList) {
282     if (Candidate.UseMI == MI)
283       return true;
284   }
285   return false;
286 }
287 
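// Try to record a fold of OpToFold into operand OpNo of MI, switching
// mac/fmac to mad/fma, rewriting s_setreg_b32 to its immediate form, or
// commuting the instruction when that is what makes the operand legal.
// Returns true if a FoldCandidate was appended to FoldList.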
288 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
289                              MachineInstr *MI, unsigned OpNo,
290                              MachineOperand *OpToFold,
291                              const SIInstrInfo *TII) {
292   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
293 
294     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
295     unsigned Opc = MI->getOpcode();
296     if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
297          Opc == AMDGPU::V_FMAC_F32_e64) &&
298         (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
299       bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
300       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
301       unsigned NewOpc = IsFMA ?
302         AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
303 
304       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
305       // to fold the operand.
306       MI->setDesc(TII->get(NewOpc));
307       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
308       if (FoldAsMAD) {
309         MI->untieRegOperand(OpNo);
310         return true;
311       }
312       MI->setDesc(TII->get(Opc));
313     }
314 
315     // Special case for s_setreg_b32
316     if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
317       MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
318       FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
319       return true;
320     }
321 
322     // If we are already folding into another operand of MI, then
323     // we can't commute the instruction, otherwise we risk making the
324     // other fold illegal.
325     if (isUseMIInFoldList(FoldList, MI))
326       return false;
327 
328     unsigned CommuteOpNo = OpNo;
329 
330     // Operand is not legal, so try to commute the instruction to
331     // see if this makes it possible to fold.
332     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
333     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
334     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
335 
336     if (CanCommute) {
337       if (CommuteIdx0 == OpNo)
338         CommuteOpNo = CommuteIdx1;
339       else if (CommuteIdx1 == OpNo)
340         CommuteOpNo = CommuteIdx0;
341     }
342 
343 
344     // One of the operands might be an immediate, and OpNo may refer to it
345     // after the call to commuteInstruction() below. Such situations are
346     // avoided here explicitly, as OpNo must be a register operand to be a
347     // candidate for memory folding.
348     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
349                        !MI->getOperand(CommuteIdx1).isReg()))
350       return false;
351 
352     if (!CanCommute ||
353         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
354       return false;
355 
356     if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
357       if ((Opc == AMDGPU::V_ADD_I32_e64 ||
358            Opc == AMDGPU::V_SUB_I32_e64 ||
359            Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
360           (OpToFold->isImm() || OpToFold->isFI())) {
361         MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
362 
363         // Verify the other operand is a VGPR, otherwise we would violate the
364         // constant bus restriction.
365         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
366         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
367         if (!OtherOp.isReg() ||
368             !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
369           return false;
370 
371         assert(MI->getOperand(1).isDef());
372 
373         // Make sure to get the 32-bit version of the commuted opcode.
374         unsigned MaybeCommutedOpc = MI->getOpcode();
375         int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
376 
377         FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
378                                          Op32));
379         return true;
380       }
381 
382       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
383       return false;
384     }
385 
386     FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
387     return true;
388   }
389 
390   FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
391   return true;
392 }
393 
394 // If the use operand doesn't care about the value, this may be an operand only
395 // used for register indexing, in which case it is unsafe to fold.
396 static bool isUseSafeToFold(const SIInstrInfo *TII,
397                             const MachineInstr &MI,
398                             const MachineOperand &UseMO) {
399   return !UseMO.isUndef() && !TII->isSDWA(MI);
400   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
401 }
402 
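// Attempt to fold OpToFold into operand UseOpIdx of UseMI. REG_SEQUENCE uses
// are folded recursively into their users, and copies are rewritten into mov
// instructions where needed. Successful candidates are appended to FoldList;
// rewritten copies are recorded in CopiesToReplace so their implicit operands
// can be added afterwards.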
403 void SIFoldOperands::foldOperand(
404   MachineOperand &OpToFold,
405   MachineInstr *UseMI,
406   unsigned UseOpIdx,
407   SmallVectorImpl<FoldCandidate> &FoldList,
408   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
409   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
410 
411   if (!isUseSafeToFold(TII, *UseMI, UseOp))
412     return;
413 
414   // FIXME: Fold operands with subregs.
415   if (UseOp.isReg() && OpToFold.isReg()) {
416     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
417       return;
418 
419     // Only fold a full copy into a tied operand; don't fold subregister
420     // extracts, since a subregister use tied to a full register def doesn't
421     // really make sense. E.g. don't fold:
422     //
423     // %1 = COPY %0:sub1
424     // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
425     //
426     //  into
427     // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
428     if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
429       return;
430   }
431 
432   // Special case for REG_SEQUENCE: We can't fold literals into
433   // REG_SEQUENCE instructions, so we have to fold them into the
434   // uses of REG_SEQUENCE.
435   if (UseMI->isRegSequence()) {
436     unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
437     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
438 
439     MachineRegisterInfo::use_iterator Next;
440     for (MachineRegisterInfo::use_iterator
441            RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
442          RSUse != RSE; RSUse = Next) {
443       Next = std::next(RSUse);
444 
445       MachineInstr *RSUseMI = RSUse->getParent();
446       if (RSUse->getSubReg() != RegSeqDstSubReg)
447         continue;
448 
449       foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
450                   CopiesToReplace);
451     }
452 
453     return;
454   }
455 
456 
457   bool FoldingImm = OpToFold.isImm();
458 
459   if (FoldingImm && UseMI->isCopy()) {
460     unsigned DestReg = UseMI->getOperand(0).getReg();
461     const TargetRegisterClass *DestRC
462       = TargetRegisterInfo::isVirtualRegister(DestReg) ?
463       MRI->getRegClass(DestReg) :
464       TRI->getPhysRegClass(DestReg);
465 
466     unsigned SrcReg  = UseMI->getOperand(1).getReg();
467     if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
468         TargetRegisterInfo::isVirtualRegister(SrcReg)) {
469       const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
470       if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
471         MachineRegisterInfo::use_iterator NextUse;
472         SmallVector<FoldCandidate, 4> CopyUses;
473         for (MachineRegisterInfo::use_iterator
474                Use = MRI->use_begin(DestReg), E = MRI->use_end();
475              Use != E; Use = NextUse) {
476           NextUse = std::next(Use);
477           FoldCandidate FC = FoldCandidate(Use->getParent(),
478                                            Use.getOperandNo(),
479                                            &UseMI->getOperand(1));
480           CopyUses.push_back(FC);
481         }
482         for (auto &F : CopyUses) {
483           foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
484                       FoldList, CopiesToReplace);
485         }
485       }
486     }
487 
488     // In order to fold immediates into copies, we need to change the
489     // copy to a MOV.
490 
491     unsigned MovOp = TII->getMovOpcode(DestRC);
492     if (MovOp == AMDGPU::COPY)
493       return;
494 
495     UseMI->setDesc(TII->get(MovOp));
496     CopiesToReplace.push_back(UseMI);
497   } else {
498     if (UseMI->isCopy() && OpToFold.isReg() &&
499         TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
500         TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
501         TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
502         !UseMI->getOperand(1).getSubReg()) {
503       UseMI->getOperand(1).setReg(OpToFold.getReg());
504       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
505       UseMI->getOperand(1).setIsKill(false);
506       CopiesToReplace.push_back(UseMI);
507       OpToFold.setIsKill(false);
508       return;
509     }
510 
511     unsigned UseOpc = UseMI->getOpcode();
512     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
513         (UseOpc == AMDGPU::V_READLANE_B32 &&
514          (int)UseOpIdx ==
515          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
516       // %vgpr = V_MOV_B32 imm
517       // %sgpr = V_READFIRSTLANE_B32 %vgpr
518       // =>
519       // %sgpr = S_MOV_B32 imm
520       if (FoldingImm) {
521         if (execMayBeModifiedBeforeUse(*MRI,
522                                        UseMI->getOperand(UseOpIdx).getReg(),
523                                        *OpToFold.getParent(),
524                                        UseMI))
525           return;
526 
527         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
528 
529         // FIXME: ChangeToImmediate should clear subreg
530         UseMI->getOperand(1).setSubReg(0);
531         UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
532         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
533         return;
534       }
535 
536       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
537         if (execMayBeModifiedBeforeUse(*MRI,
538                                        UseMI->getOperand(UseOpIdx).getReg(),
539                                        *OpToFold.getParent(),
540                                        UseMI))
541           return;
542 
543         // %vgpr = COPY %sgpr0
544         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
545         // =>
546         // %sgpr1 = COPY %sgpr0
547         UseMI->setDesc(TII->get(AMDGPU::COPY));
548         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
549         return;
550       }
551     }
552 
553     const MCInstrDesc &UseDesc = UseMI->getDesc();
554 
555     // Don't fold into target-independent nodes. Target-independent opcodes
556     // don't have defined register classes.
557     if (UseDesc.isVariadic() ||
558         UseOp.isImplicit() ||
559         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
560       return;
561   }
562 
563   if (!FoldingImm) {
564     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
565 
566     // FIXME: We could try to change the instruction from 64-bit to 32-bit
567     // to enable more folding opportunities. The shrink operands pass
568     // already does this.
569     return;
570   }
571 
572 
573   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
574   const TargetRegisterClass *FoldRC =
575     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
576 
577   // Split 64-bit constants into 32-bits for folding.
578   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
579     unsigned UseReg = UseOp.getReg();
580     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
581 
582     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
583       return;
584 
585     APInt Imm(64, OpToFold.getImm());
586     if (UseOp.getSubReg() == AMDGPU::sub0) {
587       Imm = Imm.getLoBits(32);
588     } else {
589       assert(UseOp.getSubReg() == AMDGPU::sub1);
590       Imm = Imm.getHiBits(32);
591     }
592 
593     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
594     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
595     return;
596   }
597 
598 
599 
600   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
601 }
602 
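// Constant-fold a 32-bit bitwise or shift operation. Returns true and sets
// Result if Opcode is one of the handled opcodes.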
603 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
604                                   uint32_t LHS, uint32_t RHS) {
605   switch (Opcode) {
606   case AMDGPU::V_AND_B32_e64:
607   case AMDGPU::V_AND_B32_e32:
608   case AMDGPU::S_AND_B32:
609     Result = LHS & RHS;
610     return true;
611   case AMDGPU::V_OR_B32_e64:
612   case AMDGPU::V_OR_B32_e32:
613   case AMDGPU::S_OR_B32:
614     Result = LHS | RHS;
615     return true;
616   case AMDGPU::V_XOR_B32_e64:
617   case AMDGPU::V_XOR_B32_e32:
618   case AMDGPU::S_XOR_B32:
619     Result = LHS ^ RHS;
620     return true;
621   case AMDGPU::V_LSHL_B32_e64:
622   case AMDGPU::V_LSHL_B32_e32:
623   case AMDGPU::S_LSHL_B32:
624     // The instruction ignores the high bits for out of bounds shifts.
625     Result = LHS << (RHS & 31);
626     return true;
627   case AMDGPU::V_LSHLREV_B32_e64:
628   case AMDGPU::V_LSHLREV_B32_e32:
629     Result = RHS << (LHS & 31);
630     return true;
631   case AMDGPU::V_LSHR_B32_e64:
632   case AMDGPU::V_LSHR_B32_e32:
633   case AMDGPU::S_LSHR_B32:
634     Result = LHS >> (RHS & 31);
635     return true;
636   case AMDGPU::V_LSHRREV_B32_e64:
637   case AMDGPU::V_LSHRREV_B32_e32:
638     Result = RHS >> (LHS & 31);
639     return true;
640   case AMDGPU::V_ASHR_I32_e64:
641   case AMDGPU::V_ASHR_I32_e32:
642   case AMDGPU::S_ASHR_I32:
643     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
644     return true;
645   case AMDGPU::V_ASHRREV_I32_e64:
646   case AMDGPU::V_ASHRREV_I32_e32:
647     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
648     return true;
649   default:
650     return false;
651   }
652 }
653 
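// Return the 32-bit move opcode for the scalar or vector register file.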
654 static unsigned getMovOpc(bool IsScalar) {
655   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
656 }
657 
658 /// Remove any leftover implicit operands from mutating the instruction. e.g.
659 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
660 /// anymore.
661 static void stripExtraCopyOperands(MachineInstr &MI) {
662   const MCInstrDesc &Desc = MI.getDesc();
663   unsigned NumOps = Desc.getNumOperands() +
664                     Desc.getNumImplicitUses() +
665                     Desc.getNumImplicitDefs();
666 
667   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
668     MI.RemoveOperand(I);
669 }
670 
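// Rewrite MI to a copy-like opcode and drop any implicit operands the new
// opcode no longer needs.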
671 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
672   MI.setDesc(NewDesc);
673   stripExtraCopyOperands(MI);
674 }
675 
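// If Op is a virtual register defined by a move-immediate, return the
// immediate source operand of that def; otherwise return Op itself.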
676 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
677                                                MachineOperand &Op) {
678   if (Op.isReg()) {
679     // If this has a subregister, it obviously is a register source.
680     if (Op.getSubReg() != AMDGPU::NoSubRegister ||
681         !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
682       return &Op;
683 
684     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
685     if (Def && Def->isMoveImmediate()) {
686       MachineOperand &ImmSrc = Def->getOperand(1);
687       if (ImmSrc.isImm())
688         return &ImmSrc;
689     }
690   }
691 
692   return &Op;
693 }
694 
695 // Try to simplify operations with a constant that may appear after instruction
696 // selection.
697 // TODO: See if a frame index with a fixed offset can fold.
698 static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
699                               const SIInstrInfo *TII,
700                               MachineInstr *MI,
701                               MachineOperand *ImmOp) {
702   unsigned Opc = MI->getOpcode();
703   if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
704       Opc == AMDGPU::S_NOT_B32) {
705     MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
706     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
707     return true;
708   }
709 
710   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
711   if (Src1Idx == -1)
712     return false;
713 
714   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
715   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
716   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
717 
718   if (!Src0->isImm() && !Src1->isImm())
719     return false;
720 
721   if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
722     if (Src0->isImm() && Src0->getImm() == 0) {
723       // v_lshl_or_b32 0, X, Y -> copy Y
724       // v_lshl_or_b32 0, X, K -> v_mov_b32 K
725       bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
726       MI->RemoveOperand(Src1Idx);
727       MI->RemoveOperand(Src0Idx);
728 
729       MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
730       return true;
731     }
732   }
733 
734   // and k0, k1 -> v_mov_b32 (k0 & k1)
735   // or k0, k1 -> v_mov_b32 (k0 | k1)
736   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
737   if (Src0->isImm() && Src1->isImm()) {
738     int32_t NewImm;
739     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
740       return false;
741 
742     const SIRegisterInfo &TRI = TII->getRegisterInfo();
743     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
744 
745     // Be careful to change the right operand, src0 may belong to a different
746     // instruction.
747     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
748     MI->RemoveOperand(Src1Idx);
749     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
750     return true;
751   }
752 
753   if (!MI->isCommutable())
754     return false;
755 
756   if (Src0->isImm() && !Src1->isImm()) {
757     std::swap(Src0, Src1);
758     std::swap(Src0Idx, Src1Idx);
759   }
760 
761   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
762   if (Opc == AMDGPU::V_OR_B32_e64 ||
763       Opc == AMDGPU::V_OR_B32_e32 ||
764       Opc == AMDGPU::S_OR_B32) {
765     if (Src1Val == 0) {
766       // y = or x, 0 => y = copy x
767       MI->RemoveOperand(Src1Idx);
768       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
769     } else if (Src1Val == -1) {
770       // y = or x, -1 => y = v_mov_b32 -1
771       MI->RemoveOperand(Src1Idx);
772       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
773     } else
774       return false;
775 
776     return true;
777   }
778 
779   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
780       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
781       MI->getOpcode() == AMDGPU::S_AND_B32) {
782     if (Src1Val == 0) {
783       // y = and x, 0 => y = v_mov_b32 0
784       MI->RemoveOperand(Src0Idx);
785       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
786     } else if (Src1Val == -1) {
787       // y = and x, -1 => y = copy x
788       MI->RemoveOperand(Src1Idx);
789       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
790       stripExtraCopyOperands(*MI);
791     } else
792       return false;
793 
794     return true;
795   }
796 
797   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
798       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
799       MI->getOpcode() == AMDGPU::S_XOR_B32) {
800     if (Src1Val == 0) {
801       // y = xor x, 0 => y = copy x
802       MI->RemoveOperand(Src1Idx);
803       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
804       return true;
805     }
806   }
807 
808   return false;
809 }
810 
811 // Try to fold an instruction into a simpler one
812 static bool tryFoldInst(const SIInstrInfo *TII,
813                         MachineInstr *MI) {
814   unsigned Opc = MI->getOpcode();
815 
816   if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
817       Opc == AMDGPU::V_CNDMASK_B32_e64    ||
818       Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
819     const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
820     const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
821     int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
822     int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
823     if (Src1->isIdenticalTo(*Src0) &&
824         (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
825         (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
826       LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
827       auto &NewDesc =
828           TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
829       int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
830       if (Src2Idx != -1)
831         MI->RemoveOperand(Src2Idx);
832       MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
833       if (Src1ModIdx != -1)
834         MI->RemoveOperand(Src1ModIdx);
835       if (Src0ModIdx != -1)
836         MI->RemoveOperand(Src0ModIdx);
837       mutateCopyOp(*MI, NewDesc);
838       LLVM_DEBUG(dbgs() << *MI << '\n');
839       return true;
840     }
841   }
842 
843   return false;
844 }
845 
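// Fold the source operand of a foldable copy or mov (an immediate, frame
// index, or register) into all uses of its destination register, then apply
// the collected fold candidates.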
846 void SIFoldOperands::foldInstOperand(MachineInstr &MI,
847                                      MachineOperand &OpToFold) const {
848   // We need to mutate the operands of new mov instructions to add implicit
849   // uses of EXEC, but adding them invalidates the use_iterator, so defer
850   // this.
851   SmallVector<MachineInstr *, 4> CopiesToReplace;
852   SmallVector<FoldCandidate, 4> FoldList;
853   MachineOperand &Dst = MI.getOperand(0);
854 
855   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
856   if (FoldingImm) {
857     unsigned NumLiteralUses = 0;
858     MachineOperand *NonInlineUse = nullptr;
859     int NonInlineUseOpNo = -1;
860 
861     MachineRegisterInfo::use_iterator NextUse;
862     for (MachineRegisterInfo::use_iterator
863            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
864          Use != E; Use = NextUse) {
865       NextUse = std::next(Use);
866       MachineInstr *UseMI = Use->getParent();
867       unsigned OpNo = Use.getOperandNo();
868 
869       // Folding the immediate may reveal operations that can be constant
870       // folded or replaced with a copy. This can happen for example after
871       // frame indices are lowered to constants or from splitting 64-bit
872       // constants.
873       //
874       // We may also encounter cases where one or both operands are
875       // immediates materialized into a register, which would ordinarily not
876       // be folded due to multiple uses or operand constraints.
877 
878       if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
879         LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');
880 
881         // Some constant folding cases change the same immediate's use to a new
882         // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
883         // again. The same constant folded instruction could also have a second
884         // use operand.
885         NextUse = MRI->use_begin(Dst.getReg());
886         FoldList.clear();
887         continue;
888       }
889 
890       // Try to fold any inline immediate uses, and then only fold other
891       // constants if they have one use.
892       //
893       // The legality of the inline immediate must be checked based on the use
894       // operand, not the defining instruction, because 32-bit instructions
895       // with 32-bit inline immediate sources may be used to materialize
896       // constants used in 16-bit operands.
897       //
898       // e.g. it is unsafe to fold:
899       //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
900       //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
901 
902       // Folding immediates with more than one use will increase program size.
903       // FIXME: This will also reduce register usage, which may be better
904       // in some cases. A better heuristic is needed.
905       if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
906         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
907       } else {
908         if (++NumLiteralUses == 1) {
909           NonInlineUse = &*Use;
910           NonInlineUseOpNo = OpNo;
911         }
912       }
913     }
914 
915     if (NumLiteralUses == 1) {
916       MachineInstr *UseMI = NonInlineUse->getParent();
917       foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
918     }
919   } else {
920     // Folding register.
921     SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
922     for (MachineRegisterInfo::use_iterator
923            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
924          Use != E; ++Use) {
925       UsesToProcess.push_back(Use);
926     }
927     for (auto U : UsesToProcess) {
928       MachineInstr *UseMI = U->getParent();
929 
930       foldOperand(OpToFold, UseMI, U.getOperandNo(),
931         FoldList, CopiesToReplace);
932     }
933   }
934 
935   MachineFunction *MF = MI.getParent()->getParent();
936   // Make sure we add EXEC uses to any new v_mov instructions created.
937   for (MachineInstr *Copy : CopiesToReplace)
938     Copy->addImplicitDefUseOperands(*MF);
939 
940   for (FoldCandidate &Fold : FoldList) {
941     if (updateOperand(Fold, *TII, *TRI, *ST)) {
942       // Clear kill flags.
943       if (Fold.isReg()) {
944         assert(Fold.OpToFold && Fold.OpToFold->isReg());
945         // FIXME: Probably shouldn't bother trying to fold if not an
946         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
947         // copies.
948         MRI->clearKillFlags(Fold.OpToFold->getReg());
949       }
950       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
951                         << static_cast<int>(Fold.UseOpNo) << " of "
952                         << *Fold.UseMI << '\n');
953       tryFoldInst(TII, Fold.UseMI);
954     } else if (Fold.isCommuted()) {
955       // Restoring instruction's original operand order if fold has failed.
956       TII->commuteInstruction(*Fold.UseMI, false);
957     }
958   }
959 }
960 
961 // Clamp patterns are canonically selected to v_max_* instructions, so only
962 // handle them.
963 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
964   unsigned Op = MI.getOpcode();
965   switch (Op) {
966   case AMDGPU::V_MAX_F32_e64:
967   case AMDGPU::V_MAX_F16_e64:
968   case AMDGPU::V_MAX_F64:
969   case AMDGPU::V_PK_MAX_F16: {
970     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
971       return nullptr;
972 
973     // Make sure sources are identical.
974     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
975     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
976     if (!Src0->isReg() || !Src1->isReg() ||
977         Src0->getReg() != Src1->getReg() ||
978         Src0->getSubReg() != Src1->getSubReg() ||
979         Src0->getSubReg() != AMDGPU::NoSubRegister)
980       return nullptr;
981 
982     // Can't fold the clamp if this instruction also has an output modifier set.
983     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
984       return nullptr;
985 
986     unsigned Src0Mods
987       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
988     unsigned Src1Mods
989       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
990 
991     // Having a 0 op_sel_hi would require swizzling the output in the source
992     // instruction, which we can't do.
993     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
994                                                       : 0u;
995     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
996       return nullptr;
997     return Src0;
998   }
999   default:
1000     return nullptr;
1001   }
1002 }
1003 
1004 // We obviously have multiple uses in a clamp since the register is used twice
1005 // in the same instruction.
1006 static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
1007   int Count = 0;
1008   for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
1009        I != E; ++I) {
1010     if (++Count > 1)
1011       return false;
1012   }
1013 
1014   return true;
1015 }
1016 
1017 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
1018 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
1019   const MachineOperand *ClampSrc = isClamp(MI);
1020   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
1021     return false;
1022 
1023   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
1024 
1025   // The type of clamp must be compatible.
1026   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
1027     return false;
1028 
1029   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1030   if (!DefClamp)
1031     return false;
1032 
1033   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
1034                     << '\n');
1035 
1036   // Clamp is applied after omod, so it is OK if omod is set.
1037   DefClamp->setImm(1);
1038   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1039   MI.eraseFromParent();
1040   return true;
1041 }
1042 
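// Map a multiplication constant (0.5, 2.0, or 4.0 in f32 or f16 encoding) to
// the corresponding output-modifier value, or SIOutMods::NONE if it does not
// match one of those values.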
1043 static int getOModValue(unsigned Opc, int64_t Val) {
1044   switch (Opc) {
1045   case AMDGPU::V_MUL_F32_e64: {
1046     switch (static_cast<uint32_t>(Val)) {
1047     case 0x3f000000: // 0.5
1048       return SIOutMods::DIV2;
1049     case 0x40000000: // 2.0
1050       return SIOutMods::MUL2;
1051     case 0x40800000: // 4.0
1052       return SIOutMods::MUL4;
1053     default:
1054       return SIOutMods::NONE;
1055     }
1056   }
1057   case AMDGPU::V_MUL_F16_e64: {
1058     switch (static_cast<uint16_t>(Val)) {
1059     case 0x3800: // 0.5
1060       return SIOutMods::DIV2;
1061     case 0x4000: // 2.0
1062       return SIOutMods::MUL2;
1063     case 0x4400: // 4.0
1064       return SIOutMods::MUL4;
1065     default:
1066       return SIOutMods::NONE;
1067     }
1068   }
1069   default:
1070     llvm_unreachable("invalid mul opcode");
1071   }
1072 }
1073 
1074 // FIXME: Does this really not support denormals with f16?
1075 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
1076 // handled, so will anything other than that break?
1077 std::pair<const MachineOperand *, int>
1078 SIFoldOperands::isOMod(const MachineInstr &MI) const {
1079   unsigned Op = MI.getOpcode();
1080   switch (Op) {
1081   case AMDGPU::V_MUL_F32_e64:
1082   case AMDGPU::V_MUL_F16_e64: {
1083     // If output denormals are enabled, omod is ignored.
1084     if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
1085         (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
1086       return std::make_pair(nullptr, SIOutMods::NONE);
1087 
1088     const MachineOperand *RegOp = nullptr;
1089     const MachineOperand *ImmOp = nullptr;
1090     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1091     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1092     if (Src0->isImm()) {
1093       ImmOp = Src0;
1094       RegOp = Src1;
1095     } else if (Src1->isImm()) {
1096       ImmOp = Src1;
1097       RegOp = Src0;
1098     } else
1099       return std::make_pair(nullptr, SIOutMods::NONE);
1100 
1101     int OMod = getOModValue(Op, ImmOp->getImm());
1102     if (OMod == SIOutMods::NONE ||
1103         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1104         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1105         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1106         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1107       return std::make_pair(nullptr, SIOutMods::NONE);
1108 
1109     return std::make_pair(RegOp, OMod);
1110   }
1111   case AMDGPU::V_ADD_F32_e64:
1112   case AMDGPU::V_ADD_F16_e64: {
1113     // If output denormals are enabled, omod is ignored.
1114     if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
1115         (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
1116       return std::make_pair(nullptr, SIOutMods::NONE);
1117 
1118     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
1119     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1120     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1121 
1122     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
1123         Src0->getSubReg() == Src1->getSubReg() &&
1124         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1125         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1126         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1127         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1128       return std::make_pair(Src0, SIOutMods::MUL2);
1129 
1130     return std::make_pair(nullptr, SIOutMods::NONE);
1131   }
1132   default:
1133     return std::make_pair(nullptr, SIOutMods::NONE);
1134   }
1135 }
1136 
1137 // FIXME: Does this need to check IEEE bit on function?
1138 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
1139   const MachineOperand *RegOp;
1140   int OMod;
1141   std::tie(RegOp, OMod) = isOMod(MI);
1142   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
1143       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1144       !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
1145     return false;
1146 
1147   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
1148   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1149   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
1150     return false;
1151 
1152   // Clamp is applied after omod. If the source already has clamp set, don't
1153   // fold it.
1154   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1155     return false;
1156 
1157   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
1158 
1159   DefOMod->setImm(OMod);
1160   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1161   MI.eraseFromParent();
1162   return true;
1163 }
1164 
1165 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
1166   if (skipFunction(MF.getFunction()))
1167     return false;
1168 
1169   MRI = &MF.getRegInfo();
1170   ST = &MF.getSubtarget<GCNSubtarget>();
1171   TII = ST->getInstrInfo();
1172   TRI = &TII->getRegisterInfo();
1173 
1174   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1175 
1176   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
1177   // correctly handle signed zeros.
1178   //
1179   // FIXME: Also need to check strictfp
1180   bool IsIEEEMode = MFI->getMode().IEEE;
1181   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
1182 
1183   for (MachineBasicBlock *MBB : depth_first(&MF)) {
1184     MachineBasicBlock::iterator I, Next;
1185     for (I = MBB->begin(); I != MBB->end(); I = Next) {
1186       Next = std::next(I);
1187       MachineInstr &MI = *I;
1188 
1189       tryFoldInst(TII, &MI);
1190 
1191       if (!TII->isFoldableCopy(MI)) {
1192         // TODO: Omod might be OK if there is NSZ only on the source
1193         // instruction, and not the omod multiply.
1194         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
1195             !tryFoldOMod(MI))
1196           tryFoldClamp(MI);
1197         continue;
1198       }
1199 
1200       MachineOperand &OpToFold = MI.getOperand(1);
1201       bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
1202 
1203       // FIXME: We could also be folding things like TargetIndexes.
1204       if (!FoldingImm && !OpToFold.isReg())
1205         continue;
1206 
1207       if (OpToFold.isReg() &&
1208           !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
1209         continue;
1210 
1211       // Prevent folding operands backwards in the function. For example,
1212       // the COPY below must not be folded to use the constant 1 defined later:
1213       //
1214       //    %3 = COPY %vgpr0; VGPR_32:%3
1215       //    ...
1216       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1217       MachineOperand &Dst = MI.getOperand(0);
1218       if (Dst.isReg() &&
1219           !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
1220         continue;
1221 
1222       foldInstOperand(MI, OpToFold);
1223     }
1224   }
1225   return false;
1226 }
1227