//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
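//
// For example (an illustrative sketch, with modifier operands elided):
// folding an immediate into src2 of
//   %3 = V_MAC_F32_e64 %0, %1, %2
// rewrites the instruction to V_MAD_F32, so the immediate must be
// inline-legal for V_MAD_F32 rather than for the original V_MAC_F32.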
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned MadOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(MadOpc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
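//
// For example (a hypothetical MIR sketch, operand details simplified): in a
// MUBUF scratch access such as
//   BUFFER_STORE_DWORD_OFFEN %val, %vaddr, %srsrc, %soffset, 0, ...
// a %vaddr that is really a copy of a frame index may take the frame index
// directly, since the operand checked here is the vaddr of the access.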
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
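      //
      // For example (a sketch): a V2F16 immediate 0xbc000000 has a zero low
      // half, so only the high half 0xbc00 is folded as a 16-bit immediate,
      // with op_sel/op_sel_hi updated to compensate.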
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
    if (Liveness != MachineBasicBlock::LQR_Dead)
      return false;

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
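    //
    // For example (an illustrative sketch): an S_SETREG_B32 whose source is
    // a materialized constant
    //   %0 = S_MOV_B32 42
    //   S_SETREG_B32 %0, <hwreg>
    // can instead be encoded as S_SETREG_IMM32_B32 42, <hwreg>.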
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless the source
    // is a full copy, since a subregister use tied to a full register def
    // doesn't really make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    // into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
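  //
  // For example (an illustrative sketch), given
  //   %1 = S_MOV_B32 0
  //   %2 = REG_SEQUENCE %1, %subreg.sub0, %1, %subreg.sub1
  // the 0 is folded into each user of %2 that reads the matching
  // subregister, rather than into the REG_SEQUENCE itself.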
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.
    MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
                           SOff->getReg() != MFI->getStackPtrOffsetReg()))
      return;

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    SOff->setReg(MFI->getStackPtrOffsetReg());
    return;
  }

  bool FoldingImmLike = OpToFold.isImm() || OpToFold.isFI();

  if (FoldingImmLike && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned SrcReg = UseMI->getOperand(1).getReg();
    if (TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVGPRs(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator Use = MRI->use_begin(DestReg),
                                               E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList,
                      CopiesToReplace);
        }
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
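  //
  // For example (a sketch): with %0 = S_MOV_B64 <imm>, a use of %0.sub0 can
  // fold the low 32 bits of <imm> and a use of %0.sub1 the high 32 bits.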
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
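//
// For example (an illustrative sketch):
//   %2 = V_CNDMASK_B32_e64 0, %0, 0, %0, %vcc
// selects the same value on both paths and has no source modifiers set, so
// it degenerates to COPY %0.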
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
      Opc == AMDGPU::V_CNDMASK_B32_e64    ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
        FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
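//
// For example (an illustrative sketch), a clamp of %0 is selected as
//   %1 = V_MAX_F32_e64 0, %0, 0, %0, 1, 0
// i.e. max(%0, %0) with the clamp bit set, and tryFoldClamp later folds the
// clamp bit into the instruction defining %0.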
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
1113 SIFoldOperands::isOMod(const MachineInstr &MI) const {
1114   unsigned Op = MI.getOpcode();
1115   switch (Op) {
1116   case AMDGPU::V_MUL_F32_e64:
1117   case AMDGPU::V_MUL_F16_e64: {
1118     // If output denormals are enabled, omod is ignored.
1119     if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
1120         (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
1121       return std::make_pair(nullptr, SIOutMods::NONE);
1122 
1123     const MachineOperand *RegOp = nullptr;
1124     const MachineOperand *ImmOp = nullptr;
1125     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1126     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1127     if (Src0->isImm()) {
1128       ImmOp = Src0;
1129       RegOp = Src1;
1130     } else if (Src1->isImm()) {
1131       ImmOp = Src1;
1132       RegOp = Src0;
1133     } else
1134       return std::make_pair(nullptr, SIOutMods::NONE);
1135 
1136     int OMod = getOModValue(Op, ImmOp->getImm());
1137     if (OMod == SIOutMods::NONE ||
1138         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1139         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1140         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1141         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1142       return std::make_pair(nullptr, SIOutMods::NONE);
1143 
1144     return std::make_pair(RegOp, OMod);
1145   }
1146   case AMDGPU::V_ADD_F32_e64:
1147   case AMDGPU::V_ADD_F16_e64: {
1148     // If output denormals are enabled, omod is ignored.
1149     if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
1150         (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
1151       return std::make_pair(nullptr, SIOutMods::NONE);
1152 
1153     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
1154     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1155     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1156 
1157     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
1158         Src0->getSubReg() == Src1->getSubReg() &&
1159         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1160         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1161         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1162         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1163       return std::make_pair(Src0, SIOutMods::MUL2);
1164 
1165     return std::make_pair(nullptr, SIOutMods::NONE);
1166   }
1167   default:
1168     return std::make_pair(nullptr, SIOutMods::NONE);
1169   }
1170 }
1171 
1172 // FIXME: Does this need to check IEEE bit on function?
1173 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
1174   const MachineOperand *RegOp;
1175   int OMod;
1176   std::tie(RegOp, OMod) = isOMod(MI);
1177   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
1178       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1179       !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
1180     return false;
1181 
1182   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
1183   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1184   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
1185     return false;
1186 
1187   // Clamp is applied after omod. If the source already has clamp set, don't
1188   // fold it.
1189   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1190     return false;
1191 
1192   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
1193 
1194   DefOMod->setImm(OMod);
1195   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1196   MI.eraseFromParent();
1197   return true;
1198 }
1199 
1200 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
1201   if (skipFunction(MF.getFunction()))
1202     return false;
1203 
1204   MRI = &MF.getRegInfo();
1205   ST = &MF.getSubtarget<GCNSubtarget>();
1206   TII = ST->getInstrInfo();
1207   TRI = &TII->getRegisterInfo();
1208   MFI = MF.getInfo<SIMachineFunctionInfo>();
1209 
1210   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
1211   // correctly handle signed zeros.
1212   //
1213   // FIXME: Also need to check strictfp
1214   bool IsIEEEMode = MFI->getMode().IEEE;
1215   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
1216 
1217   for (MachineBasicBlock *MBB : depth_first(&MF)) {
1218     MachineBasicBlock::iterator I, Next;
1219     for (I = MBB->begin(); I != MBB->end(); I = Next) {
1220       Next = std::next(I);
1221       MachineInstr &MI = *I;
1222 
1223       tryFoldInst(TII, &MI);
1224 
1225       if (!TII->isFoldableCopy(MI)) {
1226         // TODO: Omod might be OK if there is NSZ only on the source
1227         // instruction, and not the omod multiply.
1228         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
1229             !tryFoldOMod(MI))
1230           tryFoldClamp(MI);
1231         continue;
1232       }
1233 
1234       MachineOperand &OpToFold = MI.getOperand(1);
1235       bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
1236 
1237       // FIXME: We could also be folding things like TargetIndexes.
1238       if (!FoldingImm && !OpToFold.isReg())
1239         continue;
1240 
1241       if (OpToFold.isReg() &&
1242           !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
1243         continue;
1244 
1245       // Prevent folding operands backwards in the function. For example,
1246       // the COPY opcode must not be replaced by 1 in this example:
1247       //
1248       //    %3 = COPY %vgpr0; VGPR_32:%3
1249       //    ...
1250       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1251       MachineOperand &Dst = MI.getOperand(0);
1252       if (Dst.isReg() &&
1253           !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
1254         continue;
1255 
1256       foldInstOperand(MI, OpToFold);
1257     }
1258   }
1259   return false;
1260 }
1261