//===-- SIFoldOperands.cpp - Fold operands ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//
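//
// A brief sketch of what this pass does (illustrative MIR, not taken from a
// test): immediates, frame indexes, and the sources of foldable copies are
// folded directly into their using instructions, e.g.
//
//   %0:vgpr_32 = V_MOV_B32_e32 42, implicit $exec
//   %1:vgpr_32 = V_ADD_I32_e32 %0, %2, implicit-def $vcc, implicit $exec
// =>
//   %1:vgpr_32 = V_ADD_I32_e32 42, %2, implicit-def $vcc, implicit $exec
//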

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

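// A pending fold: records which operand of UseMI should be rewritten and
// what to put there. The union below is discriminated by Kind.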
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      unsigned MadOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(MadOpc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;
      // Only apply the following transformation if that operand requires
      // a packed immediate.
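      // For an immediate with a nonzero high half, two shapes are handled
      // below (illustrative): 0xHHHH0000 folds the high half with OP_SEL_0
      // set and OP_SEL_1 cleared, and 0xHHHHLLLL folds the low half with
      // only OP_SEL_1 cleared. An immediate that fits in 16 bits falls
      // through to the plain immediate fold further down.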
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI);
    if (Liveness != MachineBasicBlock::LQR_Dead)
      return false;

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    unsigned NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an immediate operand, and OpNo may refer
    // to it after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true,
                                         Op32));
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, CommuteOpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  unsigned UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }) != FoldList.end())
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  const MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  int64_t Imm;
  MachineOperand *Op;
  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    const MachineOperand &Sub = Def->getOperand(I);
    if (!Sub.isReg() || Sub.getSubReg())
      return false;
    MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub.getReg());
    while (SubDef && !SubDef->isMoveImmediate() &&
           !SubDef->getOperand(1).isImm() && TII->isFoldableCopy(*SubDef))
      SubDef = MRI.getUniqueVRegDef(SubDef->getOperand(1).getReg());
    if (!SubDef || !SubDef->isMoveImmediate() || !SubDef->getOperand(1).isImm())
      return false;
    Op = &SubDef->getOperand(1);
    auto SubImm = Op->getImm();
    if (I == 1) {
      if (!TII->isInlineConstant(SubDef->getOperand(1), OpTy))
        return false;

      Imm = SubImm;
      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  FoldList.push_back(FoldCandidate(UseMI, UseOpIdx, Op));
  return true;
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't
    // really make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    //  into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
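  // e.g. (illustrative):
  //   %2 = REG_SEQUENCE %0, sub0, %1, sub1
  //   ... = SOME_USER %2 ...
  // A constant feeding %0 is folded into the matching operand of SOME_USER
  // rather than into the REG_SEQUENCE itself.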
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.
    MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
                           SOff->getReg() != MFI->getStackPtrOffsetReg()))
      return;

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
    SOff->setReg(MFI->getStackPtrOffsetReg());
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC = Register::isVirtualRegister(DestReg)
                                            ? MRI->getRegClass(DestReg)
                                            : TRI->getPhysRegClass(DestReg);

    unsigned SrcReg = UseMI->getOperand(1).getReg();
    if (Register::isVirtualRegister(DestReg) &&
        Register::isVirtualRegister(SrcReg)) {
      const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(),
                                           &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        Register::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
        TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
        TRI->isVectorRegister(*MRI, UseMI->getOperand(1).getReg()) &&
        !UseMI->getOperand(1).getSubReg()) {
      unsigned Size = TII->getOpSize(*UseMI, 1);
      UseMI->getOperand(1).setReg(OpToFold.getReg());
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);
      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes.  Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
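  // e.g. (illustrative) a sub0 use of a 64-bit materialized constant folds
  // the low 32 bits, and a sub1 use folds the high 32 bits.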
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
      Opc == AMDGPU::V_CNDMASK_B32_e64    ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.

      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
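// e.g. (illustrative, VOP3 modifier operands elided):
//   %1 = V_RCP_F32_e64 %0
//   %2 = V_MAX_F32_e64 %1, %1, clamp
// =>
//   %1 = V_RCP_F32_e64 %0, clamp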
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
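// e.g. (illustrative, modifier operands elided): for
//   %1 = V_MUL_F32_e64 2.0, %0
// isOMod returns the %0 operand and SIOutMods::MUL2; tryFoldOMod can then
// set omod on %0's defining instruction and replace all uses of %1 with it.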
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}