xref: /llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp (revision dbda30e2947b0c1339bb080b21475b2a44ca5fd5)
1 //===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
8 /// Fold operands (immediates, frame indexes, and registers) into their uses.
9 //===----------------------------------------------------------------------===//
10 
11 #include "AMDGPU.h"
12 #include "GCNSubtarget.h"
13 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
14 #include "SIMachineFunctionInfo.h"
15 #include "llvm/ADT/DepthFirstIterator.h"
16 #include "llvm/CodeGen/MachineFunctionPass.h"
17 
18 #define DEBUG_TYPE "si-fold-operands"
19 using namespace llvm;
20 
21 namespace {
22 
23 struct FoldCandidate {
24   MachineInstr *UseMI;
25   union {
26     MachineOperand *OpToFold;
27     uint64_t ImmToFold;
28     int FrameIndexToFold;
29   };
30   int ShrinkOpcode;
31   unsigned UseOpNo;
32   MachineOperand::MachineOperandType Kind;
33   bool Commuted;
34 
35   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
36                 bool Commuted_ = false,
37                 int ShrinkOp = -1) :
38     UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
39     Kind(FoldOp->getType()),
40     Commuted(Commuted_) {
41     if (FoldOp->isImm()) {
42       ImmToFold = FoldOp->getImm();
43     } else if (FoldOp->isFI()) {
44       FrameIndexToFold = FoldOp->getIndex();
45     } else {
46       assert(FoldOp->isReg() || FoldOp->isGlobal());
47       OpToFold = FoldOp;
48     }
49   }
50 
51   bool isFI() const {
52     return Kind == MachineOperand::MO_FrameIndex;
53   }
54 
55   bool isImm() const {
56     return Kind == MachineOperand::MO_Immediate;
57   }
58 
59   bool isReg() const {
60     return Kind == MachineOperand::MO_Register;
61   }
62 
63   bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }
64 
65   bool isCommuted() const {
66     return Commuted;
67   }
68 
69   bool needsShrink() const {
70     return ShrinkOpcode != -1;
71   }
72 
73   int getShrinkOpcode() const {
74     return ShrinkOpcode;
75   }
76 };
77 
78 class SIFoldOperands : public MachineFunctionPass {
79 public:
80   static char ID;
81   MachineRegisterInfo *MRI;
82   const SIInstrInfo *TII;
83   const SIRegisterInfo *TRI;
84   const GCNSubtarget *ST;
85   const SIMachineFunctionInfo *MFI;
86 
87   void foldOperand(MachineOperand &OpToFold,
88                    MachineInstr *UseMI,
89                    int UseOpIdx,
90                    SmallVectorImpl<FoldCandidate> &FoldList,
91                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
92 
93   bool tryFoldCndMask(MachineInstr &MI) const;
94   bool tryFoldZeroHighBits(MachineInstr &MI) const;
95   bool foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
96 
97   const MachineOperand *isClamp(const MachineInstr &MI) const;
98   bool tryFoldClamp(MachineInstr &MI);
99 
100   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
101   bool tryFoldOMod(MachineInstr &MI);
102   bool tryFoldRegSequence(MachineInstr &MI);
103   bool tryFoldLCSSAPhi(MachineInstr &MI);
104   bool tryFoldLoad(MachineInstr &MI);
105 
106 public:
107   SIFoldOperands() : MachineFunctionPass(ID) {
108     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
109   }
110 
111   bool runOnMachineFunction(MachineFunction &MF) override;
112 
113   StringRef getPassName() const override { return "SI Fold Operands"; }
114 
115   void getAnalysisUsage(AnalysisUsage &AU) const override {
116     AU.setPreservesCFG();
117     MachineFunctionPass::getAnalysisUsage(AU);
118   }
119 };
120 
121 } // End anonymous namespace.
122 
123 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
124                 "SI Fold Operands", false, false)
125 
126 char SIFoldOperands::ID = 0;
127 
128 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
129 
130 // Map a multiply-accumulate opcode to its multiply-add equivalent, if any.
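    // The MAC/FMAC forms tie src2 to the destination, so an operand that is not
    // legal there can sometimes be folded by first rewriting the instruction to
    // the untied MAD/FMA form (see tryAddToFoldList).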
131 static unsigned macToMad(unsigned Opc) {
132   switch (Opc) {
133   case AMDGPU::V_MAC_F32_e64:
134     return AMDGPU::V_MAD_F32_e64;
135   case AMDGPU::V_MAC_F16_e64:
136     return AMDGPU::V_MAD_F16_e64;
137   case AMDGPU::V_FMAC_F32_e64:
138     return AMDGPU::V_FMA_F32_e64;
139   case AMDGPU::V_FMAC_F16_e64:
140     return AMDGPU::V_FMA_F16_gfx9_e64;
141   case AMDGPU::V_FMAC_LEGACY_F32_e64:
142     return AMDGPU::V_FMA_LEGACY_F32_e64;
143   case AMDGPU::V_FMAC_F64_e64:
144     return AMDGPU::V_FMA_F64_e64;
145   }
146   return AMDGPU::INSTRUCTION_LIST_END;
147 }
148 
149 // TODO: Add a heuristic for when the frame index might not fit in the
150 // addressing mode immediate offset, to avoid materializing it in loops.
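    // A frame index may fold into the vaddr of a MUBUF access, or into the
    // saddr of a FLAT scratch access (or its vaddr when no saddr is present).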
151 static bool frameIndexMayFold(const SIInstrInfo *TII,
152                               const MachineInstr &UseMI,
153                               int OpNo,
154                               const MachineOperand &OpToFold) {
155   if (!OpToFold.isFI())
156     return false;
157 
158   if (TII->isMUBUF(UseMI))
159     return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
160                                               AMDGPU::OpName::vaddr);
161   if (!TII->isFLATScratch(UseMI))
162     return false;
163 
164   int SIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
165                                         AMDGPU::OpName::saddr);
166   if (OpNo == SIdx)
167     return true;
168 
169   int VIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
170                                         AMDGPU::OpName::vaddr);
171   return OpNo == VIdx && SIdx == -1;
172 }
173 
174 FunctionPass *llvm::createSIFoldOperandsPass() {
175   return new SIFoldOperands();
176 }
177 
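    // Apply a fold candidate: rewrite the use operand in place to the folded
    // immediate, frame index, global address, or register, shrinking the user
    // to its 32-bit encoding first when the candidate requires it.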
178 static bool updateOperand(FoldCandidate &Fold,
179                           const SIInstrInfo &TII,
180                           const TargetRegisterInfo &TRI,
181                           const GCNSubtarget &ST) {
182   MachineInstr *MI = Fold.UseMI;
183   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
184   assert(Old.isReg());
185 
186   if (Fold.isImm()) {
187     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
188         !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
189         (!ST.hasDOTOpSelHazard() ||
190          !(MI->getDesc().TSFlags & SIInstrFlags::IsDOT)) &&
191         AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
192                                       ST.hasInv2PiInlineImm())) {
193       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
194       // already set.
195       unsigned Opcode = MI->getOpcode();
196       int OpNo = MI->getOperandNo(&Old);
197       int ModIdx = -1;
198       if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
199         ModIdx = AMDGPU::OpName::src0_modifiers;
200       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
201         ModIdx = AMDGPU::OpName::src1_modifiers;
202       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
203         ModIdx = AMDGPU::OpName::src2_modifiers;
204       assert(ModIdx != -1);
205       ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
206       MachineOperand &Mod = MI->getOperand(ModIdx);
207       unsigned Val = Mod.getImm();
208       if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
209         // Only apply the following transformation if that operand requires
210         // a packed immediate.
211         switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
212         case AMDGPU::OPERAND_REG_IMM_V2FP16:
213         case AMDGPU::OPERAND_REG_IMM_V2INT16:
214         case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
215         case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
216           // If upper part is all zero we do not need op_sel_hi.
217           if (!isUInt<16>(Fold.ImmToFold)) {
218             if (!(Fold.ImmToFold & 0xffff)) {
219               Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
220               Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
221               Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
222               return true;
223             }
224             Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
225             Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
226             return true;
227           }
228           break;
229         default:
230           break;
231         }
232       }
233     }
234   }
235 
236   if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
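        // Shrinking to the 32-bit encoding makes the carry-out an implicit def
        // of VCC, so VCC must be dead here; any real use of the old carry def
        // is satisfied by copying VCC into it below.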
237     MachineBasicBlock *MBB = MI->getParent();
238     auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
239     if (Liveness != MachineBasicBlock::LQR_Dead) {
240       LLVM_DEBUG(dbgs() << "Not shrinking " << *MI << " due to vcc liveness\n");
241       return false;
242     }
243 
244     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
245     int Op32 = Fold.getShrinkOpcode();
246     MachineOperand &Dst0 = MI->getOperand(0);
247     MachineOperand &Dst1 = MI->getOperand(1);
248     assert(Dst0.isDef() && Dst1.isDef());
249 
250     bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
251 
252     const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
253     Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
254 
255     MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
256 
257     if (HaveNonDbgCarryUse) {
258       BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
259         .addReg(AMDGPU::VCC, RegState::Kill);
260     }
261 
262     // Keep the old instruction around to avoid breaking iterators, but
263     // replace it with a dummy instruction to remove uses.
264     //
265     // FIXME: We should not invert how this pass looks at operands to avoid
266     // this. Should track set of foldable movs instead of looking for uses
267     // when looking at a use.
268     Dst0.setReg(NewReg0);
269     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
270       MI->removeOperand(I);
271     MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
272 
273     if (Fold.isCommuted())
274       TII.commuteInstruction(*Inst32, false);
275     return true;
276   }
277 
278   assert(!Fold.needsShrink() && "not handled");
279 
280   if (Fold.isImm()) {
281     if (Old.isTied()) {
282       int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode());
283       if (NewMFMAOpc == -1)
284         return false;
285       MI->setDesc(TII.get(NewMFMAOpc));
286       MI->untieRegOperand(0);
287     }
288     Old.ChangeToImmediate(Fold.ImmToFold);
289     return true;
290   }
291 
292   if (Fold.isGlobal()) {
293     Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
294                    Fold.OpToFold->getTargetFlags());
295     return true;
296   }
297 
298   if (Fold.isFI()) {
299     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
300     return true;
301   }
302 
303   MachineOperand *New = Fold.OpToFold;
304   Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
305   Old.setIsUndef(New->isUndef());
306   return true;
307 }
308 
309 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
310                               const MachineInstr *MI) {
311   for (const FoldCandidate &Candidate : FoldList) {
312     if (Candidate.UseMI == MI)
313       return true;
314   }
315   return false;
316 }
317 
318 static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
319                                 MachineInstr *MI, unsigned OpNo,
320                                 MachineOperand *FoldOp, bool Commuted = false,
321                                 int ShrinkOp = -1) {
322   // Skip additional folding on the same operand.
323   for (FoldCandidate &Fold : FoldList)
324     if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
325       return;
326   LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
327                     << " operand " << OpNo << "\n  " << *MI);
328   FoldList.emplace_back(MI, OpNo, FoldOp, Commuted, ShrinkOp);
329 }
330 
331 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
332                              MachineInstr *MI, unsigned OpNo,
333                              MachineOperand *OpToFold,
334                              const SIInstrInfo *TII) {
335   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
336     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
337     unsigned Opc = MI->getOpcode();
338     unsigned NewOpc = macToMad(Opc);
339     if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
340       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
341       // to fold the operand.
342       MI->setDesc(TII->get(NewOpc));
343       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
344       if (FoldAsMAD) {
345         MI->untieRegOperand(OpNo);
346         return true;
347       }
348       MI->setDesc(TII->get(Opc));
349     }
350 
351     // Special case for s_setreg_b32
352     if (OpToFold->isImm()) {
353       unsigned ImmOpc = 0;
354       if (Opc == AMDGPU::S_SETREG_B32)
355         ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
356       else if (Opc == AMDGPU::S_SETREG_B32_mode)
357         ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
358       if (ImmOpc) {
359         MI->setDesc(TII->get(ImmOpc));
360         appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
361         return true;
362       }
363     }
364 
365     // If we are already folding into another operand of MI, then
366     // we can't commute the instruction, otherwise we risk making the
367     // other fold illegal.
368     if (isUseMIInFoldList(FoldList, MI))
369       return false;
370 
371     unsigned CommuteOpNo = OpNo;
372 
373     // Operand is not legal, so try to commute the instruction to
374     // see if this makes it possible to fold.
375     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
376     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
377     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
378 
379     if (CanCommute) {
380       if (CommuteIdx0 == OpNo)
381         CommuteOpNo = CommuteIdx1;
382       else if (CommuteIdx1 == OpNo)
383         CommuteOpNo = CommuteIdx0;
384     }
385 
386 
387     // One of the operands might be an immediate, and OpNo may refer to it
388     // after the call to commuteInstruction() below. Such situations are
389     // avoided here explicitly, as OpNo must be a register operand to be a
390     // candidate for folding.
391     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
392                        !MI->getOperand(CommuteIdx1).isReg()))
393       return false;
394 
395     if (!CanCommute ||
396         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
397       return false;
398 
399     if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
400       if ((Opc == AMDGPU::V_ADD_CO_U32_e64 ||
401            Opc == AMDGPU::V_SUB_CO_U32_e64 ||
402            Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
403           (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
404         MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
405 
406         // Verify the other operand is a VGPR, otherwise we would violate the
407         // constant bus restriction.
408         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
409         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
410         if (!OtherOp.isReg() ||
411             !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
412           return false;
413 
414         assert(MI->getOperand(1).isDef());
415 
416         // Make sure to get the 32-bit version of the commuted opcode.
417         unsigned MaybeCommutedOpc = MI->getOpcode();
418         int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
419 
420         appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
421         return true;
422       }
423 
424       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
425       return false;
426     }
427 
428     appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
429     return true;
430   }
431 
432   // Check the case where we might introduce a second constant operand to a
433   // scalar instruction
434   if (TII->isSALU(MI->getOpcode())) {
435     const MCInstrDesc &InstDesc = MI->getDesc();
436     const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
437     const SIRegisterInfo &SRI = TII->getRegisterInfo();
438 
439     // Fine if the operand can be encoded as an inline constant
440     if (TII->isLiteralConstantLike(*OpToFold, OpInfo)) {
441       if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
442           !TII->isInlineConstant(*OpToFold, OpInfo)) {
443         // Otherwise check for another constant
444         for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
445           auto &Op = MI->getOperand(i);
446           if (OpNo != i &&
447               TII->isLiteralConstantLike(Op, OpInfo)) {
448             return false;
449           }
450         }
451       }
452     }
453   }
454 
455   appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
456   return true;
457 }
458 
459 // If the use operand doesn't care about the value, this may be an operand only
460 // used for register indexing, in which case it is unsafe to fold.
461 static bool isUseSafeToFold(const SIInstrInfo *TII,
462                             const MachineInstr &MI,
463                             const MachineOperand &UseMO) {
464   if (UseMO.isUndef() || TII->isSDWA(MI))
465     return false;
466 
467   switch (MI.getOpcode()) {
468   case AMDGPU::V_MOV_B32_e32:
469   case AMDGPU::V_MOV_B32_e64:
470   case AMDGPU::V_MOV_B64_PSEUDO:
471   case AMDGPU::V_MOV_B64_e32:
472   case AMDGPU::V_MOV_B64_e64:
473     // Do not fold into an indirect mov.
474     return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0);
475   }
476 
477   return true;
478   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
479 }
480 
481 // Find the def of UseReg, check whether it is a reg_sequence, and find the
482 // initializer for each subreg, tracking each one back to a foldable inline
483 // immediate where possible. Returns true on success.
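    // For example, for
    //   %0 = S_MOV_B32 0
    //   %1 = REG_SEQUENCE %0, %subreg.sub0, %2, %subreg.sub1
    // Defs receives {%0's immediate operand, sub0} and {%2, sub1}.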
484 static bool getRegSeqInit(
485     SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
486     Register UseReg, uint8_t OpTy,
487     const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
488   MachineInstr *Def = MRI.getVRegDef(UseReg);
489   if (!Def || !Def->isRegSequence())
490     return false;
491 
492   for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
493     MachineOperand *Sub = &Def->getOperand(I);
494     assert(Sub->isReg());
495 
496     for (MachineInstr *SubDef = MRI.getVRegDef(Sub->getReg());
497          SubDef && Sub->isReg() && Sub->getReg().isVirtual() &&
498          !Sub->getSubReg() && TII->isFoldableCopy(*SubDef);
499          SubDef = MRI.getVRegDef(Sub->getReg())) {
500       MachineOperand *Op = &SubDef->getOperand(1);
501       if (Op->isImm()) {
502         if (TII->isInlineConstant(*Op, OpTy))
503           Sub = Op;
504         break;
505       }
506       if (!Op->isReg() || Op->getReg().isPhysical())
507         break;
508       Sub = Op;
509     }
510 
511     Defs.emplace_back(Sub, Def->getOperand(I + 1).getImm());
512   }
513 
514   return true;
515 }
516 
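    // Try to directly fold an immediate into an operand that accepts inline
    // constants (AC or C operand types): either the immediate itself, the
    // immediate behind a foldable copy, or a reg_sequence whose initializers
    // are all the same inline immediate.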
517 static bool tryToFoldACImm(const SIInstrInfo *TII,
518                            const MachineOperand &OpToFold,
519                            MachineInstr *UseMI,
520                            unsigned UseOpIdx,
521                            SmallVectorImpl<FoldCandidate> &FoldList) {
522   const MCInstrDesc &Desc = UseMI->getDesc();
523   const MCOperandInfo *OpInfo = Desc.OpInfo;
524   if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
525     return false;
526 
527   uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
528   if ((OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
529        OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) &&
530       (OpTy < AMDGPU::OPERAND_REG_INLINE_C_FIRST ||
531        OpTy > AMDGPU::OPERAND_REG_INLINE_C_LAST))
532     return false;
533 
534   if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
535       TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
536     UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
537     return true;
538   }
539 
540   if (!OpToFold.isReg())
541     return false;
542 
543   Register UseReg = OpToFold.getReg();
544   if (!UseReg.isVirtual())
545     return false;
546 
547   if (isUseMIInFoldList(FoldList, UseMI))
548     return false;
549 
550   MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
551 
552   // Maybe it is just a COPY of an immediate itself.
553   MachineInstr *Def = MRI.getVRegDef(UseReg);
554   MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
555   if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) {
556     MachineOperand &DefOp = Def->getOperand(1);
557     if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) &&
558         TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
559       UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm());
560       return true;
561     }
562   }
563 
564   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
565   if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
566     return false;
567 
568   int32_t Imm;
569   for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
570     const MachineOperand *Op = Defs[I].first;
571     if (!Op->isImm())
572       return false;
573 
574     auto SubImm = Op->getImm();
575     if (!I) {
576       Imm = SubImm;
577       if (!TII->isInlineConstant(*Op, OpTy) ||
578           !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
579         return false;
580 
581       continue;
582     }
583     if (Imm != SubImm)
584       return false; // Can only fold splat constants
585   }
586 
587   appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
588   return true;
589 }
590 
591 void SIFoldOperands::foldOperand(
592   MachineOperand &OpToFold,
593   MachineInstr *UseMI,
594   int UseOpIdx,
595   SmallVectorImpl<FoldCandidate> &FoldList,
596   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
597   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
598 
599   if (!isUseSafeToFold(TII, *UseMI, UseOp))
600     return;
601 
602   // FIXME: Fold operands with subregs.
603   if (UseOp.isReg() && OpToFold.isReg()) {
604     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
605       return;
606   }
607 
608   // Special case for REG_SEQUENCE: We can't fold literals into
609   // REG_SEQUENCE instructions, so we have to fold them into the
610   // uses of REG_SEQUENCE.
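      // For example, with
      //   %v = V_MOV_B32_e32 <literal>
      //   %rs = REG_SEQUENCE %v, %subreg.sub0, %x, %subreg.sub1
      // the literal is folded into the users of %rs that read sub0 instead.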
611   if (UseMI->isRegSequence()) {
612     Register RegSeqDstReg = UseMI->getOperand(0).getReg();
613     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
614 
615     for (auto &RSUse : make_early_inc_range(MRI->use_nodbg_operands(RegSeqDstReg))) {
616       MachineInstr *RSUseMI = RSUse.getParent();
617 
618       if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
619                          RSUseMI->getOperandNo(&RSUse), FoldList))
620         continue;
621 
622       if (RSUse.getSubReg() != RegSeqDstSubReg)
623         continue;
624 
625       foldOperand(OpToFold, RSUseMI, RSUseMI->getOperandNo(&RSUse), FoldList,
626                   CopiesToReplace);
627     }
628 
629     return;
630   }
631 
632   if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
633     return;
634 
635   if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
636     // Verify that this is a stack access.
637     // FIXME: Should probably use stack pseudos before frame lowering.
638 
639     if (TII->isMUBUF(*UseMI)) {
640       if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
641           MFI->getScratchRSrcReg())
642         return;
643 
644       // Ensure this is either relative to the current frame or the current
645       // wave.
646       MachineOperand &SOff =
647           *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
648       if (!SOff.isImm() || SOff.getImm() != 0)
649         return;
650     }
651 
652     // A frame index will resolve to a positive constant, so it should always be
653     // safe to fold the addressing mode, even pre-GFX9.
654     UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
655 
656     if (TII->isFLATScratch(*UseMI) &&
657         AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
658                                    AMDGPU::OpName::vaddr) != -1 &&
659         AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
660                                    AMDGPU::OpName::saddr) == -1) {
661       unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(UseMI->getOpcode());
662       UseMI->setDesc(TII->get(NewOpc));
663     }
664 
665     return;
666   }
667 
668   bool FoldingImmLike =
669       OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
670 
671   if (FoldingImmLike && UseMI->isCopy()) {
672     Register DestReg = UseMI->getOperand(0).getReg();
673     Register SrcReg = UseMI->getOperand(1).getReg();
674     assert(SrcReg.isVirtual());
675 
676     const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
677 
678     // Don't fold into a copy to a physical register with the same class. Doing
679     // so would interfere with the register coalescer's logic which would avoid
680     // redundant initializations.
681     if (DestReg.isPhysical() && SrcRC->contains(DestReg))
682       return;
683 
684     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
685     if (!DestReg.isPhysical()) {
686       if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
687         SmallVector<FoldCandidate, 4> CopyUses;
688         for (auto &Use : MRI->use_nodbg_operands(DestReg)) {
689           // There's no point trying to fold into an implicit operand.
690           if (Use.isImplicit())
691             continue;
692 
693           CopyUses.emplace_back(Use.getParent(),
694                                 Use.getParent()->getOperandNo(&Use),
695                                 &UseMI->getOperand(1));
696         }
697         for (auto &F : CopyUses) {
698           foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList, CopiesToReplace);
699         }
700       }
701 
702       if (DestRC == &AMDGPU::AGPR_32RegClass &&
703           TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
704         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
705         UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
706         CopiesToReplace.push_back(UseMI);
707         return;
708       }
709     }
710 
711     // In order to fold immediates into copies, we need to change the
712     // copy to a MOV.
713 
714     unsigned MovOp = TII->getMovOpcode(DestRC);
715     if (MovOp == AMDGPU::COPY)
716       return;
717 
718     UseMI->setDesc(TII->get(MovOp));
719     MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
720     MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
721     while (ImpOpI != ImpOpE) {
722       MachineInstr::mop_iterator Tmp = ImpOpI;
723       ImpOpI++;
724       UseMI->removeOperand(UseMI->getOperandNo(Tmp));
725     }
726     CopiesToReplace.push_back(UseMI);
727   } else {
728     if (UseMI->isCopy() && OpToFold.isReg() &&
729         UseMI->getOperand(0).getReg().isVirtual() &&
730         !UseMI->getOperand(1).getSubReg()) {
731       LLVM_DEBUG(dbgs() << "Folding " << OpToFold << "\n into " << *UseMI);
732       unsigned Size = TII->getOpSize(*UseMI, 1);
733       Register UseReg = OpToFold.getReg();
734       UseMI->getOperand(1).setReg(UseReg);
735       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
736       UseMI->getOperand(1).setIsKill(false);
737       CopiesToReplace.push_back(UseMI);
738       OpToFold.setIsKill(false);
739 
740       // Remove kill flags as kills may now be out of order with uses.
741       MRI->clearKillFlags(OpToFold.getReg());
742 
743       // Storing a value into an AGPR is tricky: v_accvgpr_write_b32 can only
744       // accept a VGPR or an inline immediate. Recreate the reg_sequence with
745       // its initializers right here, so that immediates are rematerialized and
746       // copies through different register classes are avoided.
747       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
748       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
749           getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
750                         *MRI)) {
751         const DebugLoc &DL = UseMI->getDebugLoc();
752         MachineBasicBlock &MBB = *UseMI->getParent();
753 
754         UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
755         for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
756           UseMI->removeOperand(I);
757 
758         MachineInstrBuilder B(*MBB.getParent(), UseMI);
759         DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
760         SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
761         for (unsigned I = 0; I < Size / 4; ++I) {
762           MachineOperand *Def = Defs[I].first;
763           TargetInstrInfo::RegSubRegPair CopyToVGPR;
764           if (Def->isImm() &&
765               TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
766             int64_t Imm = Def->getImm();
767 
768             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
769             BuildMI(MBB, UseMI, DL,
770                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm);
771             B.addReg(Tmp);
772           } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
773             auto Src = getRegSubRegPair(*Def);
774             Def->setIsKill(false);
775             if (!SeenAGPRs.insert(Src)) {
776               // We cannot build a reg_sequence out of the same registers; they
777               // must be copied. Better to do it here, before copyPhysReg() creates
778               // several reads to do the AGPR->VGPR->AGPR copy.
779               CopyToVGPR = Src;
780             } else {
781               B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
782                        Src.SubReg);
783             }
784           } else {
785             assert(Def->isReg());
786             Def->setIsKill(false);
787             auto Src = getRegSubRegPair(*Def);
788 
789             // A direct copy from an SGPR to an AGPR is not possible. To avoid
790             // copyPhysReg() later expanding it into SGPR->VGPR->AGPR copies,
791             // create the copy here and track whether we already have one.
792             if (TRI->isSGPRReg(*MRI, Src.Reg)) {
793               CopyToVGPR = Src;
794             } else {
795               auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
796               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
797               B.addReg(Tmp);
798             }
799           }
800 
801           if (CopyToVGPR.Reg) {
802             Register Vgpr;
803             if (VGPRCopies.count(CopyToVGPR)) {
804               Vgpr = VGPRCopies[CopyToVGPR];
805             } else {
806               Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
807               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
808               VGPRCopies[CopyToVGPR] = Vgpr;
809             }
810             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
811             BuildMI(MBB, UseMI, DL,
812                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr);
813             B.addReg(Tmp);
814           }
815 
816           B.addImm(Defs[I].second);
817         }
818         LLVM_DEBUG(dbgs() << "Folded " << *UseMI);
819         return;
820       }
821 
822       if (Size != 4)
823         return;
824       if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
825           TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
826         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
827       else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
828                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
829         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64));
830       else if (ST->hasGFX90AInsts() &&
831                TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
832                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
833         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32));
834       return;
835     }
836 
837     unsigned UseOpc = UseMI->getOpcode();
838     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
839         (UseOpc == AMDGPU::V_READLANE_B32 &&
840          (int)UseOpIdx ==
841          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
842       // %vgpr = V_MOV_B32 imm
843       // %sgpr = V_READFIRSTLANE_B32 %vgpr
844       // =>
845       // %sgpr = S_MOV_B32 imm
846       if (FoldingImmLike) {
847         if (execMayBeModifiedBeforeUse(*MRI,
848                                        UseMI->getOperand(UseOpIdx).getReg(),
849                                        *OpToFold.getParent(),
850                                        *UseMI))
851           return;
852 
853         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
854 
855         if (OpToFold.isImm())
856           UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
857         else
858           UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
859         UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
860         return;
861       }
862 
863       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
864         if (execMayBeModifiedBeforeUse(*MRI,
865                                        UseMI->getOperand(UseOpIdx).getReg(),
866                                        *OpToFold.getParent(),
867                                        *UseMI))
868           return;
869 
870         // %vgpr = COPY %sgpr0
871         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
872         // =>
873         // %sgpr1 = COPY %sgpr0
874         UseMI->setDesc(TII->get(AMDGPU::COPY));
875         UseMI->getOperand(1).setReg(OpToFold.getReg());
876         UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
877         UseMI->getOperand(1).setIsKill(false);
878         UseMI->removeOperand(2); // Remove exec read (or src1 for readlane)
879         return;
880       }
881     }
882 
883     const MCInstrDesc &UseDesc = UseMI->getDesc();
884 
885     // Don't fold into target-independent nodes. Target-independent opcodes
886     // don't have defined register classes.
887     if (UseDesc.isVariadic() ||
888         UseOp.isImplicit() ||
889         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
890       return;
891   }
892 
893   if (!FoldingImmLike) {
894     if (OpToFold.isReg() && ST->needsAlignedVGPRs()) {
895       // Don't fold if OpToFold doesn't hold an aligned register.
896       const TargetRegisterClass *RC =
897           TRI->getRegClassForReg(*MRI, OpToFold.getReg());
898       if (TRI->hasVectorRegisters(RC) && OpToFold.getSubReg()) {
899         unsigned SubReg = OpToFold.getSubReg();
900         const TargetRegisterClass *SubRC = TRI->getSubRegClass(RC, SubReg);
901         RC = TRI->getCompatibleSubRegClass(RC, SubRC, SubReg);
902         if (RC)
903           RC = SubRC;
904       }
905 
906       if (!RC || !TRI->isProperlyAlignedRC(*RC))
907         return;
908     }
909 
910     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
911 
912     // FIXME: We could try to change the instruction from 64-bit to 32-bit
913     // to enable more folding opportunities.  The shrink operands pass
914     // already does this.
915     return;
916   }
917 
918 
919   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
920   const TargetRegisterClass *FoldRC =
921     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
922 
923   // Split 64-bit constants into 32-bit halves for folding.
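      // For example, with a materialized 64-bit constant 0x0000000200000001, a
      // use of sub0 is offered the 32-bit immediate 1 and a use of sub1 the
      // immediate 2.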
924   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
925     Register UseReg = UseOp.getReg();
926     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
927 
928     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
929       return;
930 
931     APInt Imm(64, OpToFold.getImm());
932     if (UseOp.getSubReg() == AMDGPU::sub0) {
933       Imm = Imm.getLoBits(32);
934     } else {
935       assert(UseOp.getSubReg() == AMDGPU::sub1);
936       Imm = Imm.getHiBits(32);
937     }
938 
939     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
940     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
941     return;
942   }
943 
944 
945 
946   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
947 }
948 
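    // Constant-fold a 32-bit bitwise or shift instruction over two known
    // operand values, returning the folded value in Result. Returns false for
    // opcodes this does not know how to evaluate.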
949 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
950                                   uint32_t LHS, uint32_t RHS) {
951   switch (Opcode) {
952   case AMDGPU::V_AND_B32_e64:
953   case AMDGPU::V_AND_B32_e32:
954   case AMDGPU::S_AND_B32:
955     Result = LHS & RHS;
956     return true;
957   case AMDGPU::V_OR_B32_e64:
958   case AMDGPU::V_OR_B32_e32:
959   case AMDGPU::S_OR_B32:
960     Result = LHS | RHS;
961     return true;
962   case AMDGPU::V_XOR_B32_e64:
963   case AMDGPU::V_XOR_B32_e32:
964   case AMDGPU::S_XOR_B32:
965     Result = LHS ^ RHS;
966     return true;
967   case AMDGPU::S_XNOR_B32:
968     Result = ~(LHS ^ RHS);
969     return true;
970   case AMDGPU::S_NAND_B32:
971     Result = ~(LHS & RHS);
972     return true;
973   case AMDGPU::S_NOR_B32:
974     Result = ~(LHS | RHS);
975     return true;
976   case AMDGPU::S_ANDN2_B32:
977     Result = LHS & ~RHS;
978     return true;
979   case AMDGPU::S_ORN2_B32:
980     Result = LHS | ~RHS;
981     return true;
982   case AMDGPU::V_LSHL_B32_e64:
983   case AMDGPU::V_LSHL_B32_e32:
984   case AMDGPU::S_LSHL_B32:
985     // The instruction ignores the high bits for out of bounds shifts.
986     Result = LHS << (RHS & 31);
987     return true;
988   case AMDGPU::V_LSHLREV_B32_e64:
989   case AMDGPU::V_LSHLREV_B32_e32:
990     Result = RHS << (LHS & 31);
991     return true;
992   case AMDGPU::V_LSHR_B32_e64:
993   case AMDGPU::V_LSHR_B32_e32:
994   case AMDGPU::S_LSHR_B32:
995     Result = LHS >> (RHS & 31);
996     return true;
997   case AMDGPU::V_LSHRREV_B32_e64:
998   case AMDGPU::V_LSHRREV_B32_e32:
999     Result = RHS >> (LHS & 31);
1000     return true;
1001   case AMDGPU::V_ASHR_I32_e64:
1002   case AMDGPU::V_ASHR_I32_e32:
1003   case AMDGPU::S_ASHR_I32:
1004     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
1005     return true;
1006   case AMDGPU::V_ASHRREV_I32_e64:
1007   case AMDGPU::V_ASHRREV_I32_e32:
1008     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
1009     return true;
1010   default:
1011     return false;
1012   }
1013 }
1014 
1015 static unsigned getMovOpc(bool IsScalar) {
1016   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1017 }
1018 
1019 /// Remove any leftover implicit operands from mutating the instruction. e.g.
1020 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
1021 /// anymore.
1022 static void stripExtraCopyOperands(MachineInstr &MI) {
1023   const MCInstrDesc &Desc = MI.getDesc();
1024   unsigned NumOps = Desc.getNumOperands() +
1025                     Desc.getNumImplicitUses() +
1026                     Desc.getNumImplicitDefs();
1027 
1028   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
1029     MI.removeOperand(I);
1030 }
1031 
1032 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
1033   MI.setDesc(NewDesc);
1034   stripExtraCopyOperands(MI);
1035 }
1036 
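     // If Op is a virtual register whose def is a move-immediate, return that
     // def's immediate operand; otherwise return Op itself.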
1037 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
1038                                                MachineOperand &Op) {
1039   if (Op.isReg()) {
1040     // If this has a subregister, it obviously is a register source.
1041     if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
1042       return &Op;
1043 
1044     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
1045     if (Def && Def->isMoveImmediate()) {
1046       MachineOperand &ImmSrc = Def->getOperand(1);
1047       if (ImmSrc.isImm())
1048         return &ImmSrc;
1049     }
1050   }
1051 
1052   return &Op;
1053 }
1054 
1055 // Try to simplify operations with a constant that may appear after instruction
1056 // selection.
1057 // TODO: See if a frame index with a fixed offset can fold.
1058 static bool tryConstantFoldOp(MachineRegisterInfo &MRI, const SIInstrInfo *TII,
1059                               MachineInstr *MI) {
1060   unsigned Opc = MI->getOpcode();
1061 
1062   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1063   if (Src0Idx == -1)
1064     return false;
1065   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
1066 
1067   if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
1068        Opc == AMDGPU::S_NOT_B32) &&
1069       Src0->isImm()) {
1070     MI->getOperand(1).ChangeToImmediate(~Src0->getImm());
1071     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
1072     return true;
1073   }
1074 
1075   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1076   if (Src1Idx == -1)
1077     return false;
1078   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
1079 
1080   if (!Src0->isImm() && !Src1->isImm())
1081     return false;
1082 
1083   // and k0, k1 -> v_mov_b32 (k0 & k1)
1084   // or k0, k1 -> v_mov_b32 (k0 | k1)
1085   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
1086   if (Src0->isImm() && Src1->isImm()) {
1087     int32_t NewImm;
1088     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
1089       return false;
1090 
1091     const SIRegisterInfo &TRI = TII->getRegisterInfo();
1092     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
1093 
1094     // Be careful to change the right operand; src0 may belong to a different
1095     // instruction.
1096     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
1097     MI->removeOperand(Src1Idx);
1098     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
1099     return true;
1100   }
1101 
1102   if (!MI->isCommutable())
1103     return false;
1104 
1105   if (Src0->isImm() && !Src1->isImm()) {
1106     std::swap(Src0, Src1);
1107     std::swap(Src0Idx, Src1Idx);
1108   }
1109 
1110   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
1111   if (Opc == AMDGPU::V_OR_B32_e64 ||
1112       Opc == AMDGPU::V_OR_B32_e32 ||
1113       Opc == AMDGPU::S_OR_B32) {
1114     if (Src1Val == 0) {
1115       // y = or x, 0 => y = copy x
1116       MI->removeOperand(Src1Idx);
1117       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1118     } else if (Src1Val == -1) {
1119       // y = or x, -1 => y = v_mov_b32 -1
1120       MI->removeOperand(Src1Idx);
1121       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
1122     } else
1123       return false;
1124 
1125     return true;
1126   }
1127 
1128   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
1129       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
1130       MI->getOpcode() == AMDGPU::S_AND_B32) {
1131     if (Src1Val == 0) {
1132       // y = and x, 0 => y = v_mov_b32 0
1133       MI->removeOperand(Src0Idx);
1134       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
1135     } else if (Src1Val == -1) {
1136       // y = and x, -1 => y = copy x
1137       MI->removeOperand(Src1Idx);
1138       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1139       stripExtraCopyOperands(*MI);
1140     } else
1141       return false;
1142 
1143     return true;
1144   }
1145 
1146   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
1147       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
1148       MI->getOpcode() == AMDGPU::S_XOR_B32) {
1149     if (Src1Val == 0) {
1150       // y = xor x, 0 => y = copy x
1151       MI->removeOperand(Src1Idx);
1152       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1153       return true;
1154     }
1155   }
1156 
1157   return false;
1158 }
1159 
1160 // Try to fold a V_CNDMASK with identical sources into a simpler copy or move.
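     // For example (ignoring source modifiers):
     //   %d = V_CNDMASK_B32_e64 0, %a, 0, %a, %cc
     // =>
     //   %d = COPY %a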
1161 bool SIFoldOperands::tryFoldCndMask(MachineInstr &MI) const {
1162   unsigned Opc = MI.getOpcode();
1163   if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 &&
1164       Opc != AMDGPU::V_CNDMASK_B64_PSEUDO)
1165     return false;
1166 
1167   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1168   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1169   if (!Src1->isIdenticalTo(*Src0)) {
1170     auto *Src0Imm = getImmOrMaterializedImm(*MRI, *Src0);
1171     auto *Src1Imm = getImmOrMaterializedImm(*MRI, *Src1);
1172     if (!Src1Imm->isIdenticalTo(*Src0Imm))
1173       return false;
1174   }
1175 
1176   int Src1ModIdx =
1177       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1178   int Src0ModIdx =
1179       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1180   if ((Src1ModIdx != -1 && MI.getOperand(Src1ModIdx).getImm() != 0) ||
1181       (Src0ModIdx != -1 && MI.getOperand(Src0ModIdx).getImm() != 0))
1182     return false;
1183 
1184   LLVM_DEBUG(dbgs() << "Folded " << MI << " into ");
1185   auto &NewDesc =
1186       TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
1187   int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
1188   if (Src2Idx != -1)
1189     MI.removeOperand(Src2Idx);
1190   MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
1191   if (Src1ModIdx != -1)
1192     MI.removeOperand(Src1ModIdx);
1193   if (Src0ModIdx != -1)
1194     MI.removeOperand(Src0ModIdx);
1195   mutateCopyOp(MI, NewDesc);
1196   LLVM_DEBUG(dbgs() << MI);
1197   return true;
1198 }
1199 
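     // Fold away an AND with 0xffff when the instruction defining the other
     // operand is already known to zero the high 16 bits of its result.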
1200 bool SIFoldOperands::tryFoldZeroHighBits(MachineInstr &MI) const {
1201   if (MI.getOpcode() != AMDGPU::V_AND_B32_e64 &&
1202       MI.getOpcode() != AMDGPU::V_AND_B32_e32)
1203     return false;
1204 
1205   MachineOperand *Src0 = getImmOrMaterializedImm(*MRI, MI.getOperand(1));
1206   if (!Src0->isImm() || Src0->getImm() != 0xffff)
1207     return false;
1208 
1209   Register Src1 = MI.getOperand(2).getReg();
1210   MachineInstr *SrcDef = MRI->getVRegDef(Src1);
1211   if (ST->zeroesHigh16BitsOfDest(SrcDef->getOpcode())) {
1212     Register Dst = MI.getOperand(0).getReg();
1213     MRI->replaceRegWith(Dst, SrcDef->getOperand(0).getReg());
1214     MI.eraseFromParent();
1215     return true;
1216   }
1217 
1218   return false;
1219 }
1220 
1221 bool SIFoldOperands::foldInstOperand(MachineInstr &MI,
1222                                      MachineOperand &OpToFold) const {
1223   // We need to mutate the operands of new mov instructions to add implicit
1224   // uses of EXEC, but adding them invalidates the use_iterator, so defer
1225   // this.
1226   SmallVector<MachineInstr *, 4> CopiesToReplace;
1227   SmallVector<FoldCandidate, 4> FoldList;
1228   MachineOperand &Dst = MI.getOperand(0);
1229   bool Changed = false;
1230 
1231   if (OpToFold.isImm()) {
1232     for (auto &UseMI :
1233          make_early_inc_range(MRI->use_nodbg_instructions(Dst.getReg()))) {
1234       // Folding the immediate may reveal operations that can be constant
1235       // folded or replaced with a copy. This can happen for example after
1236       // frame indices are lowered to constants or from splitting 64-bit
1237       // constants.
1238       //
1239       // We may also encounter cases where one or both operands are
1240       // immediates materialized into a register, which would ordinarily not
1241       // be folded due to multiple uses or operand constraints.
1242       if (tryConstantFoldOp(*MRI, TII, &UseMI)) {
1243         LLVM_DEBUG(dbgs() << "Constant folded " << UseMI);
1244         Changed = true;
1245       }
1246     }
1247   }
1248 
1249   SmallVector<MachineOperand *, 4> UsesToProcess;
1250   for (auto &Use : MRI->use_nodbg_operands(Dst.getReg()))
1251     UsesToProcess.push_back(&Use);
1252   for (auto U : UsesToProcess) {
1253     MachineInstr *UseMI = U->getParent();
1254     foldOperand(OpToFold, UseMI, UseMI->getOperandNo(U), FoldList,
1255                 CopiesToReplace);
1256   }
1257 
1258   if (CopiesToReplace.empty() && FoldList.empty())
1259     return Changed;
1260 
1261   MachineFunction *MF = MI.getParent()->getParent();
1262   // Make sure we add EXEC uses to any new v_mov instructions created.
1263   for (MachineInstr *Copy : CopiesToReplace)
1264     Copy->addImplicitDefUseOperands(*MF);
1265 
1266   for (FoldCandidate &Fold : FoldList) {
1267     assert(!Fold.isReg() || Fold.OpToFold);
1268     if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
1269       Register Reg = Fold.OpToFold->getReg();
1270       MachineInstr *DefMI = Fold.OpToFold->getParent();
1271       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
1272           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
1273         continue;
1274     }
1275     if (updateOperand(Fold, *TII, *TRI, *ST)) {
1276       // Clear kill flags.
1277       if (Fold.isReg()) {
1278         assert(Fold.OpToFold && Fold.OpToFold->isReg());
1279         // FIXME: Probably shouldn't bother trying to fold if not an
1280         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
1281         // copies.
1282         MRI->clearKillFlags(Fold.OpToFold->getReg());
1283       }
1284       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
1285                         << static_cast<int>(Fold.UseOpNo) << " of "
1286                         << *Fold.UseMI);
1287     } else if (Fold.isCommuted()) {
1288       // Restore the instruction's original operand order, as the fold failed.
1289       TII->commuteInstruction(*Fold.UseMI, false);
1290     }
1291   }
1292   return true;
1293 }
1294 
1295 // Clamp patterns are canonically selected to v_max_* instructions, so only
1296 // handle them.
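     // For example, %r = V_MAX_F32_e64 0, %x, 0, %x, /*clamp*/ 1, /*omod*/ 0
     // is a clamp of %x.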
1297 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
1298   unsigned Op = MI.getOpcode();
1299   switch (Op) {
1300   case AMDGPU::V_MAX_F32_e64:
1301   case AMDGPU::V_MAX_F16_e64:
1302   case AMDGPU::V_MAX_F64_e64:
1303   case AMDGPU::V_PK_MAX_F16: {
1304     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
1305       return nullptr;
1306 
1307     // Make sure sources are identical.
1308     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1309     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1310     if (!Src0->isReg() || !Src1->isReg() ||
1311         Src0->getReg() != Src1->getReg() ||
1312         Src0->getSubReg() != Src1->getSubReg() ||
1313         Src0->getSubReg() != AMDGPU::NoSubRegister)
1314       return nullptr;
1315 
1316     // Can't fold up if we have modifiers.
1317     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1318       return nullptr;
1319 
1320     unsigned Src0Mods
1321       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
1322     unsigned Src1Mods
1323       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
1324 
1325     // Having a 0 op_sel_hi would require swizzling the output in the source
1326     // instruction, which we can't do.
1327     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
1328                                                       : 0u;
1329     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
1330       return nullptr;
1331     return Src0;
1332   }
1333   default:
1334     return nullptr;
1335   }
1336 }
1337 
1338 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
1339 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
1340   const MachineOperand *ClampSrc = isClamp(MI);
1341   if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg()))
1342     return false;
1343 
1344   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
1345 
1346   // The type of clamp must be compatible.
1347   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
1348     return false;
1349 
1350   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1351   if (!DefClamp)
1352     return false;
1353 
1354   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def);
1355 
1356   // Clamp is applied after omod, so it is OK if omod is set.
1357   DefClamp->setImm(1);
1358   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1359   MI.eraseFromParent();
1360 
1361   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
1362   // instruction, so we might as well convert it to the more flexible VOP3-only
1363   // mad/fma form.
1364   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
1365     Def->eraseFromParent();
1366 
1367   return true;
1368 }
1369 
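     // Map a multiplier constant to the matching output-modifier encoding
     // (div:2, mul:2 or mul:4), or SIOutMods::NONE if there is none.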
1370 static int getOModValue(unsigned Opc, int64_t Val) {
1371   switch (Opc) {
1372   case AMDGPU::V_MUL_F64_e64: {
1373     switch (Val) {
1374     case 0x3fe0000000000000: // 0.5
1375       return SIOutMods::DIV2;
1376     case 0x4000000000000000: // 2.0
1377       return SIOutMods::MUL2;
1378     case 0x4010000000000000: // 4.0
1379       return SIOutMods::MUL4;
1380     default:
1381       return SIOutMods::NONE;
1382     }
1383   }
1384   case AMDGPU::V_MUL_F32_e64: {
1385     switch (static_cast<uint32_t>(Val)) {
1386     case 0x3f000000: // 0.5
1387       return SIOutMods::DIV2;
1388     case 0x40000000: // 2.0
1389       return SIOutMods::MUL2;
1390     case 0x40800000: // 4.0
1391       return SIOutMods::MUL4;
1392     default:
1393       return SIOutMods::NONE;
1394     }
1395   }
1396   case AMDGPU::V_MUL_F16_e64: {
1397     switch (static_cast<uint16_t>(Val)) {
1398     case 0x3800: // 0.5
1399       return SIOutMods::DIV2;
1400     case 0x4000: // 2.0
1401       return SIOutMods::MUL2;
1402     case 0x4400: // 4.0
1403       return SIOutMods::MUL4;
1404     default:
1405       return SIOutMods::NONE;
1406     }
1407   }
1408   default:
1409     llvm_unreachable("invalid mul opcode");
1410   }
1411 }
1412 
1413 // FIXME: Does this really not support denormals with f16?
1414 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
1415 // handled, so will anything other than that break?
1416 std::pair<const MachineOperand *, int>
1417 SIFoldOperands::isOMod(const MachineInstr &MI) const {
1418   unsigned Op = MI.getOpcode();
1419   switch (Op) {
1420   case AMDGPU::V_MUL_F64_e64:
1421   case AMDGPU::V_MUL_F32_e64:
1422   case AMDGPU::V_MUL_F16_e64: {
1423     // If output denormals are enabled, omod is ignored.
1424     if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1425         ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F16_e64) &&
1426          MFI->getMode().FP64FP16OutputDenormals))
1427       return std::make_pair(nullptr, SIOutMods::NONE);
1428 
1429     const MachineOperand *RegOp = nullptr;
1430     const MachineOperand *ImmOp = nullptr;
1431     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1432     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1433     if (Src0->isImm()) {
1434       ImmOp = Src0;
1435       RegOp = Src1;
1436     } else if (Src1->isImm()) {
1437       ImmOp = Src1;
1438       RegOp = Src0;
1439     } else
1440       return std::make_pair(nullptr, SIOutMods::NONE);
1441 
1442     int OMod = getOModValue(Op, ImmOp->getImm());
1443     if (OMod == SIOutMods::NONE ||
1444         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1445         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1446         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1447         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1448       return std::make_pair(nullptr, SIOutMods::NONE);
1449 
1450     return std::make_pair(RegOp, OMod);
1451   }
1452   case AMDGPU::V_ADD_F64_e64:
1453   case AMDGPU::V_ADD_F32_e64:
1454   case AMDGPU::V_ADD_F16_e64: {
1455     // If output denormals are enabled, omod is ignored.
1456     if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1457         ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F16_e64) &&
1458          MFI->getMode().FP64FP16OutputDenormals))
1459       return std::make_pair(nullptr, SIOutMods::NONE);
1460 
1461     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
1462     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1463     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1464 
1465     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
1466         Src0->getSubReg() == Src1->getSubReg() &&
1467         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1468         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1469         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1470         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1471       return std::make_pair(Src0, SIOutMods::MUL2);
1472 
1473     return std::make_pair(nullptr, SIOutMods::NONE);
1474   }
1475   default:
1476     return std::make_pair(nullptr, SIOutMods::NONE);
1477   }
1478 }
1479 
1480 // FIXME: Does this need to check IEEE bit on function?
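     // Fold a multiply or add recognized by isOMod into the omod field of the
     // instruction that defines its register source, then erase MI. The source
     // must have a single use and no omod/clamp already set.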
1481 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
1482   const MachineOperand *RegOp;
1483   int OMod;
1484   std::tie(RegOp, OMod) = isOMod(MI);
1485   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
1486       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1487       !MRI->hasOneNonDBGUser(RegOp->getReg()))
1488     return false;
1489 
1490   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
1491   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1492   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
1493     return false;
1494 
1495   // Clamp is applied after omod. If the source already has clamp set, don't
1496   // fold it.
1497   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1498     return false;
1499 
1500   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def);
1501 
1502   DefOMod->setImm(OMod);
1503   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1504   MI.eraseFromParent();
1505 
1506   // Use of output modifiers forces VOP3 encoding for a VOP2 mac/fmac
1507   // instruction, so we might as well convert it to the more flexible VOP3-only
1508   // mad/fma form.
1509   if (TII->convertToThreeAddress(*Def, nullptr, nullptr))
1510     Def->eraseFromParent();
1511 
1512   return true;
1513 }
1514 
1515 // Try to fold a reg_sequence with vgpr output and agpr inputs into an
1516 // instruction which can take an agpr. So far that means a store.
1517 bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
1518   assert(MI.isRegSequence());
1519   auto Reg = MI.getOperand(0).getReg();
1520 
1521   if (!ST->hasGFX90AInsts() || !TRI->isVGPR(*MRI, Reg) ||
1522       !MRI->hasOneNonDBGUse(Reg))
1523     return false;
1524 
1525   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
1526   if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER, TII, *MRI))
1527     return false;
1528 
1529   for (auto &Def : Defs) {
1530     const auto *Op = Def.first;
1531     if (!Op->isReg())
1532       return false;
1533     if (TRI->isAGPR(*MRI, Op->getReg()))
1534       continue;
1535     // Maybe this is a COPY from AREG
1536     const MachineInstr *SubDef = MRI->getVRegDef(Op->getReg());
1537     if (!SubDef || !SubDef->isCopy() || SubDef->getOperand(1).getSubReg())
1538       return false;
1539     if (!TRI->isAGPR(*MRI, SubDef->getOperand(1).getReg()))
1540       return false;
1541   }
1542 
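       // Follow the single chain of COPY uses to the instruction that actually
       // consumes the reg_sequence result.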
1543   MachineOperand *Op = &*MRI->use_nodbg_begin(Reg);
1544   MachineInstr *UseMI = Op->getParent();
1545   while (UseMI->isCopy() && !Op->getSubReg()) {
1546     Reg = UseMI->getOperand(0).getReg();
1547     if (!TRI->isVGPR(*MRI, Reg) || !MRI->hasOneNonDBGUse(Reg))
1548       return false;
1549     Op = &*MRI->use_nodbg_begin(Reg);
1550     UseMI = Op->getParent();
1551   }
1552 
1553   if (Op->getSubReg())
1554     return false;
1555 
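       // The consuming operand must take a vector superclass (AGPR or VGPR),
       // otherwise an AGPR result cannot be substituted here.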
1556   unsigned OpIdx = Op - &UseMI->getOperand(0);
1557   const MCInstrDesc &InstDesc = UseMI->getDesc();
1558   const TargetRegisterClass *OpRC =
1559       TII->getRegClass(InstDesc, OpIdx, TRI, *MI.getMF());
1560   if (!OpRC || !TRI->isVectorSuperClass(OpRC))
1561     return false;
1562 
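       // Rebuild the REG_SEQUENCE with an AGPR destination, reading the AGPR
       // inputs directly instead of going through the intermediate VGPR copies.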
1563   const auto *NewDstRC = TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg));
1564   auto Dst = MRI->createVirtualRegister(NewDstRC);
1565   auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1566                     TII->get(AMDGPU::REG_SEQUENCE), Dst);
1567 
1568   for (unsigned I = 0; I < Defs.size(); ++I) {
1569     MachineOperand *Def = Defs[I].first;
1570     Def->setIsKill(false);
1571     if (TRI->isAGPR(*MRI, Def->getReg())) {
1572       RS.add(*Def);
1573     } else { // This is a copy
1574       MachineInstr *SubDef = MRI->getVRegDef(Def->getReg());
1575       SubDef->getOperand(1).setIsKill(false);
1576       RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
1577     }
1578     RS.addImm(Defs[I].second);
1579   }
1580 
1581   Op->setReg(Dst);
1582   if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
1583     Op->setReg(Reg);
1584     RS->eraseFromParent();
1585     return false;
1586   }
1587 
1588   LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI);
1589 
1590   // Erase the REG_SEQUENCE eagerly, unless we followed a chain of COPY users,
1591   // in which case we can erase them all later in runOnMachineFunction.
1592   if (MRI->use_nodbg_empty(MI.getOperand(0).getReg()))
1593     MI.eraseFromParent();
1594   return true;
1595 }
1596 
1597 // Try to hoist an AGPR-to-VGPR copy out of the loop across an LCSSA PHI.
1598 // This should allow folding of an AGPR into a consumer which may support it.
1599 // I.e.:
1600 //
1601 // loop:                             // loop:
1602 //   %1:vreg = COPY %0:areg          // exit:
1603 // exit:                          => //   %1:areg = PHI %0:areg, %loop
1604 //   %2:vreg = PHI %1:vreg, %loop    //   %2:vreg = COPY %1:areg
1605 bool SIFoldOperands::tryFoldLCSSAPhi(MachineInstr &PHI) {
1606   assert(PHI.isPHI());
1607 
1608   if (PHI.getNumExplicitOperands() != 3) // Single input LCSSA PHI
1609     return false;
1610 
1611   Register PhiIn = PHI.getOperand(1).getReg();
1612   Register PhiOut = PHI.getOperand(0).getReg();
1613   if (PHI.getOperand(1).getSubReg() ||
1614       !TRI->isVGPR(*MRI, PhiIn) || !TRI->isVGPR(*MRI, PhiOut))
1615     return false;
1616 
1617   // A single use should not matter for correctness, but if it has another use
1618   // inside the loop we may perform the copy twice in the worst case.
1619   if (!MRI->hasOneNonDBGUse(PhiIn))
1620     return false;
1621 
1622   MachineInstr *Copy = MRI->getVRegDef(PhiIn);
1623   if (!Copy || !Copy->isCopy())
1624     return false;
1625 
1626   Register CopyIn = Copy->getOperand(1).getReg();
1627   if (!TRI->isAGPR(*MRI, CopyIn) || Copy->getOperand(1).getSubReg())
1628     return false;
1629 
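       // Rewrite the PHI to flow the AGPR value directly and re-emit the
       // AGPR-to-VGPR copy after the PHIs in the exit block.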
1630   const TargetRegisterClass *ARC = MRI->getRegClass(CopyIn);
1631   Register NewReg = MRI->createVirtualRegister(ARC);
1632   PHI.getOperand(1).setReg(CopyIn);
1633   PHI.getOperand(0).setReg(NewReg);
1634 
1635   MachineBasicBlock *MBB = PHI.getParent();
1636   BuildMI(*MBB, MBB->getFirstNonPHI(), Copy->getDebugLoc(),
1637           TII->get(AMDGPU::COPY), PhiOut)
1638     .addReg(NewReg, RegState::Kill);
1639   Copy->eraseFromParent(); // We know this copy had a single use.
1640 
1641   LLVM_DEBUG(dbgs() << "Folded " << PHI);
1642 
1643   return true;
1644 }
1645 
1646 // Attempt to convert VGPR load to an AGPR load.
1647 bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
1648   assert(MI.mayLoad());
1649   if (!ST->hasGFX90AInsts() || MI.getNumExplicitDefs() != 1)
1650     return false;
1651 
1652   MachineOperand &Def = MI.getOperand(0);
1653   if (!Def.isDef())
1654     return false;
1655 
1656   Register DefReg = Def.getReg();
1657 
1658   if (DefReg.isPhysical() || !TRI->isVGPR(*MRI, DefReg))
1659     return false;
1660 
1661   SmallVector<const MachineInstr*, 8> Users;
1662   SmallVector<Register, 8> MoveRegs;
1663   for (const MachineInstr &I : MRI->use_nodbg_instructions(DefReg)) {
1664     Users.push_back(&I);
1665   }
1666   if (Users.empty())
1667     return false;
1668 
1669   // Check that all uses are a copy to an agpr or a reg_sequence producing an agpr.
1670   while (!Users.empty()) {
1671     const MachineInstr *I = Users.pop_back_val();
1672     if (!I->isCopy() && !I->isRegSequence())
1673       return false;
1674     Register DstReg = I->getOperand(0).getReg();
1675     if (TRI->isAGPR(*MRI, DstReg))
1676       continue;
1677     MoveRegs.push_back(DstReg);
1678     for (const MachineInstr &U : MRI->use_nodbg_instructions(DstReg)) {
1679       Users.push_back(&U);
1680     }
1681   }
1682 
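       // Tentatively switch the loaded value to the equivalent AGPR class and
       // revert if the load cannot legally define an AGPR.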
1683   const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
1684   MRI->setRegClass(DefReg, TRI->getEquivalentAGPRClass(RC));
1685   if (!TII->isOperandLegal(MI, 0, &Def)) {
1686     MRI->setRegClass(DefReg, RC);
1687     return false;
1688   }
1689 
1690   while (!MoveRegs.empty()) {
1691     Register Reg = MoveRegs.pop_back_val();
1692     MRI->setRegClass(Reg, TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg)));
1693   }
1694 
1695   LLVM_DEBUG(dbgs() << "Folded " << MI);
1696 
1697   return true;
1698 }
1699 
1700 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
1701   if (skipFunction(MF.getFunction()))
1702     return false;
1703 
1704   MRI = &MF.getRegInfo();
1705   ST = &MF.getSubtarget<GCNSubtarget>();
1706   TII = ST->getInstrInfo();
1707   TRI = &TII->getRegisterInfo();
1708   MFI = MF.getInfo<SIMachineFunctionInfo>();
1709 
1710   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
1711   // correctly handle signed zeros.
1712   //
1713   // FIXME: Also need to check strictfp
1714   bool IsIEEEMode = MFI->getMode().IEEE;
1715   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
1716 
1717   bool Changed = false;
1718   for (MachineBasicBlock *MBB : depth_first(&MF)) {
1719     MachineOperand *CurrentKnownM0Val = nullptr;
1720     for (auto &MI : make_early_inc_range(*MBB)) {
1721       Changed |= tryFoldCndMask(MI);
1722 
1723       if (tryFoldZeroHighBits(MI)) {
1724         Changed = true;
1725         continue;
1726       }
1727 
1728       if (MI.isRegSequence() && tryFoldRegSequence(MI)) {
1729         Changed = true;
1730         continue;
1731       }
1732 
1733       if (MI.isPHI() && tryFoldLCSSAPhi(MI)) {
1734         Changed = true;
1735         continue;
1736       }
1737 
1738       if (MI.mayLoad() && tryFoldLoad(MI)) {
1739         Changed = true;
1740         continue;
1741       }
1742 
1743       if (!TII->isFoldableCopy(MI)) {
1744         // Saw an unknown clobber of m0, so we no longer know what it is.
1745         if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
1746           CurrentKnownM0Val = nullptr;
1747 
1748         // TODO: Omod might be OK if there is NSZ only on the source
1749         // instruction, and not the omod multiply.
1750         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
1751             !tryFoldOMod(MI))
1752           Changed |= tryFoldClamp(MI);
1753 
1754         continue;
1755       }
1756 
1757       // Specially track simple redefs of m0 to the same value in a block, so we
1758       // can erase the later ones.
1759       if (MI.getOperand(0).getReg() == AMDGPU::M0) {
1760         MachineOperand &NewM0Val = MI.getOperand(1);
1761         if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1762           MI.eraseFromParent();
1763           Changed = true;
1764           continue;
1765         }
1766 
1767         // We aren't tracking other physical registers.
1768         CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
1769           nullptr : &NewM0Val;
1770         continue;
1771       }
1772 
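           // MI is a foldable copy that does not redefine m0; try to fold its
           // source operand into every user of its result.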
1773       MachineOperand &OpToFold = MI.getOperand(1);
1774       bool FoldingImm =
1775           OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1776 
1777       // FIXME: We could also be folding things like TargetIndexes.
1778       if (!FoldingImm && !OpToFold.isReg())
1779         continue;
1780 
1781       if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
1782         continue;
1783 
1784       // Prevent folding operands backwards in the function. For example, the
1785       // COPY below must not be replaced by 1:
1786       //
1787       //    %3 = COPY %vgpr0; VGPR_32:%3
1788       //    ...
1789       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1790       if (!MI.getOperand(0).getReg().isVirtual())
1791         continue;
1792 
1793       Changed |= foldInstOperand(MI, OpToFold);
1794 
1795       // If we managed to fold all uses of this copy then we might as well
1796       // delete it now.
1797       // The only reason we need to follow chains of copies here is that
1798       // tryFoldRegSequence looks forward through copies before folding a
1799       // REG_SEQUENCE into its eventual users.
1800       auto *InstToErase = &MI;
1801       while (MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
1802         auto &SrcOp = InstToErase->getOperand(1);
1803         auto SrcReg = SrcOp.isReg() ? SrcOp.getReg() : Register();
1804         InstToErase->eraseFromParent();
1805         Changed = true;
1806         InstToErase = nullptr;
1807         if (!SrcReg || SrcReg.isPhysical())
1808           break;
1809         InstToErase = MRI->getVRegDef(SrcReg);
1810         if (!InstToErase || !TII->isFoldableCopy(*InstToErase))
1811           break;
1812       }
1813       if (InstToErase && InstToErase->isRegSequence() &&
1814           MRI->use_nodbg_empty(InstToErase->getOperand(0).getReg())) {
1815         InstToErase->eraseFromParent();
1816         Changed = true;
1817       }
1818     }
1819   }
1820   return Changed;
1821 }
1822