//===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);
  bool tryFoldRegSequence(MachineInstr &MI);
  bool tryFoldLCSSAPhi(MachineInstr &MI);
  bool tryFoldLoad(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Map multiply-accumulate opcode to corresponding multiply-add opcode if any.
static unsigned macToMad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
    return AMDGPU::V_MAD_F32_e64;
  case AMDGPU::V_MAC_F16_e64:
    return AMDGPU::V_MAD_F16_e64;
  case AMDGPU::V_FMAC_F32_e64:
    return AMDGPU::V_FMA_F32_e64;
  case AMDGPU::V_FMAC_F16_e64:
    return AMDGPU::V_FMA_F16_gfx9_e64;
  case AMDGPU::V_FMAC_LEGACY_F32_e64:
    return AMDGPU::V_FMA_LEGACY_F32_e64;
  case AMDGPU::V_FMAC_F64_e64:
    return AMDGPU::V_FMA_F64_e64;
  }
  return AMDGPU::INSTRUCTION_LIST_END;
}

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  unsigned NewOpc = macToMad(Opc);
  if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
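    //
    // e.g. folding an inline constant K into src2 of
    //   %d = V_MAC_F32_e64 %a, %b, %c
    // produces
    //   %d = V_MAD_F32_e64 %a, %b, K
    // so K must be an inline constant for the mad's src2, not the mac's.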
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      const MCInstrDesc &MadDesc = TII->get(NewOpc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
  }

  return false;
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  if (!OpToFold.isFI())
    return false;

  if (TII->isMUBUF(UseMI))
    return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                              AMDGPU::OpName::vaddr);
  if (!TII->isFLATScratch(UseMI))
    return false;

  int SIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                        AMDGPU::OpName::saddr);
  if (OpNo == SIdx)
    return true;

  int VIdx = AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                        AMDGPU::OpName::vaddr);
  return OpNo == VIdx && SIdx == -1;
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

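// Apply a fold candidate to its use instruction: rewrite the old operand in
// place, first shrinking the instruction to its 32-bit encoding if the fold
// was recorded as requiring that. Returns false if the fold cannot be
// applied.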
static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isFoldableLiteralV216(Fold.ImmToFold,
                                      ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) {
        // Only apply the following transformation if that operand requires
        // a packed immediate.
        switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
        case AMDGPU::OPERAND_REG_IMM_V2FP16:
        case AMDGPU::OPERAND_REG_IMM_V2INT16:
        case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
        case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
          // If the upper part is all zero we do not need op_sel_hi.
          if (!isUInt<16>(Fold.ImmToFold)) {
            if (!(Fold.ImmToFold & 0xffff)) {
              Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
              Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
              Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
              return true;
            }
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
            return true;
          }
          break;
        default:
          break;
        }
      }
    }
  }

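  // An immediate-like fold that was recorded as needing a shrink can only be
  // applied to the 32-bit (VOP2) encoding, which implicitly defines VCC as
  // the carry output, so VCC must be dead at this point.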
  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << *MI << " due to vcc liveness\n");
      return false;
    }

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
                    << " operand " << OpNo << "\n  " << *MI << '\n');
  FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
}

static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    unsigned NewOpc = macToMad(Opc);
    if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) {
      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (OpToFold->isImm()) {
      unsigned ImmOpc = 0;
      if (Opc == AMDGPU::S_SETREG_B32)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32;
      else if (Opc == AMDGPU::S_SETREG_B32_mode)
        ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode;
      if (ImmOpc) {
        MI->setDesc(TII->get(ImmOpc));
        appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
        return true;
      }
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction; otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of the operands might be an immediate, and OpNo may refer to it
    // after the call to commuteInstruction() below. Such situations are
    // avoided here explicitly, as OpNo must be a register operand to be a
    // candidate for folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_CO_U32_e64 ||
           Opc == AMDGPU::V_SUB_CO_U32_e64 ||
           Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx =
            CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
    return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction.
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
    const SIRegisterInfo &SRI = TII->getRegisterInfo();

    // Fine if the operand can be encoded as an inline constant.
    if (OpToFold->isImm()) {
      if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
          !TII->isInlineConstant(*OpToFold, OpInfo)) {
        // Otherwise check for another constant.
        for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
          auto &Op = MI->getOperand(i);
          if (OpNo != i &&
              TII->isLiteralConstantLike(Op, OpInfo)) {
            return false;
          }
        }
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  if (UseMO.isUndef() || TII->isSDWA(MI))
    return false;

  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    // Do not fold into an indirect mov.
    return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0);
  }

  return true;
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

// Find a def of the UseReg, check if it is a reg_sequence and find initializers
// for each subreg, tracking it to a foldable inline immediate if possible.
// Returns true on success.
496     SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
497     Register UseReg, uint8_t OpTy,
498     const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
499   MachineInstr *Def = MRI.getVRegDef(UseReg);
500   if (!Def || !Def->isRegSequence())
501     return false;
502 
503   for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
504     MachineOperand *Sub = &Def->getOperand(I);
505     assert (Sub->isReg());
506 
507     for (MachineInstr *SubDef = MRI.getVRegDef(Sub->getReg());
508          SubDef && Sub->isReg() && !Sub->getSubReg() &&
509          TII->isFoldableCopy(*SubDef);
510          SubDef = MRI.getVRegDef(Sub->getReg())) {
511       MachineOperand *Op = &SubDef->getOperand(1);
512       if (Op->isImm()) {
513         if (TII->isInlineConstant(*Op, OpTy))
514           Sub = Op;
515         break;
516       }
517       if (!Op->isReg())
518         break;
519       Sub = Op;
520     }
521 
522     Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
523   }
524 
525   return true;
526 }
527 
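// Try to fold OpToFold into operand UseOpIdx of UseMI when that operand
// accepts an inline constant ("AC" or "C" operand types): either OpToFold is
// itself a legal inline immediate, a foldable copy of one, or a reg_sequence
// whose initializers are all the same inline immediate (a splat).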
static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if ((OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
       OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) &&
      (OpTy < AMDGPU::OPERAND_REG_INLINE_C_FIRST ||
       OpTy > AMDGPU::OPERAND_REG_INLINE_C_LAST))
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!UseReg.isVirtual())
    return false;

  if (llvm::any_of(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI;
      }))
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();

  // Maybe it is just a COPY of an immediate itself.
  MachineInstr *Def = MRI.getVRegDef(UseReg);
  MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
  if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) {
    MachineOperand &DefOp = Def->getOperand(1);
    if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) &&
        TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) {
      UseMI->getOperand(UseOpIdx).ChangeToImmediate(DefOp.getImm());
      return true;
    }
  }

  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
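  //
  // e.g. a literal reaching
  //   %vec = REG_SEQUENCE %k, sub0, %x, sub1
  // is instead folded into the users of %vec that read the matching
  // subregister.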
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (auto &RSUse :
         make_early_inc_range(MRI->use_nodbg_operands(RegSeqDstReg))) {
      MachineInstr *RSUseMI = RSUse.getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUseMI->getOperandNo(&RSUse), FoldList))
        continue;

      if (RSUse.getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUseMI->getOperandNo(&RSUse), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.

    if (TII->isMUBUF(*UseMI)) {
      if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
          MFI->getScratchRSrcReg())
        return;

      // Ensure this is either relative to the current frame or the current
      // wave.
      MachineOperand &SOff =
          *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
      if (!SOff.isImm() || SOff.getImm() != 0)
        return;
    }

    // A frame index will resolve to a positive constant, so it should always
    // be safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());

    if (TII->isFLATScratch(*UseMI) &&
        AMDGPU::getNamedOperandIdx(UseMI->getOpcode(),
                                   AMDGPU::OpName::vaddr) != -1) {
      unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(UseMI->getOpcode());
      UseMI->setDesc(TII->get(NewOpc));
    }

    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();
    Register SrcReg = UseMI->getOperand(1).getReg();
    assert(SrcReg.isVirtual());

    const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);

    // Don't fold into a copy to a physical register with the same class.
    // Doing so would interfere with the register coalescer's logic, which
    // would avoid redundant initializations.
    if (DestReg.isPhysical() && SrcRC->contains(DestReg))
      return;

    const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
    if (!DestReg.isPhysical()) {
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        SmallVector<FoldCandidate, 4> CopyUses;
        for (auto &Use :
             make_early_inc_range(MRI->use_nodbg_operands(DestReg))) {
          // There's no point trying to fold into an implicit operand.
          if (Use.isImplicit())
            continue;

          FoldCandidate FC = FoldCandidate(
              Use.getParent(), Use.getParent()->getOperandNo(&Use),
              &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto &F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList,
                      CopiesToReplace);
        }
      }

      if (DestRC == &AMDGPU::AGPR_32RegClass &&
          TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
        UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        CopiesToReplace.push_back(UseMI);
        return;
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold
                        << "\n into " << *UseMI << '\n');
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);

      // It is very tricky to store a value into an AGPR: v_accvgpr_write_b32
      // can only accept a VGPR or an inline immediate. Recreate the
      // reg_sequence with its initializers right here, so we rematerialize
      // immediates and avoid copies via different register classes.
      SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
      if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
                        *MRI)) {
        const DebugLoc &DL = UseMI->getDebugLoc();
        MachineBasicBlock &MBB = *UseMI->getParent();

        UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
        for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
          UseMI->RemoveOperand(I);

        MachineInstrBuilder B(*MBB.getParent(), UseMI);
        DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
        SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
        for (unsigned I = 0; I < Size / 4; ++I) {
          MachineOperand *Def = Defs[I].first;
          TargetInstrInfo::RegSubRegPair CopyToVGPR;
          if (Def->isImm() &&
              TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
            int64_t Imm = Def->getImm();

            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm);
            B.addReg(Tmp);
          } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
            auto Src = getRegSubRegPair(*Def);
            Def->setIsKill(false);
            if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers;
              // they must be copied. Better to do it here, before
              // copyPhysReg() creates several reads to do the
              // AGPR->VGPR->AGPR copy.
              CopyToVGPR = Src;
            } else {
              B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
                       Src.SubReg);
            }
          } else {
            assert(Def->isReg());
            Def->setIsKill(false);
            auto Src = getRegSubRegPair(*Def);

            // A direct copy from an SGPR to an AGPR is not possible. To avoid
            // copyPhysReg() later exploding it into SGPR->VGPR->AGPR copies,
            // create the copy here and track whether we already have one.
            if (TRI->isSGPRReg(*MRI, Src.Reg)) {
              CopyToVGPR = Src;
            } else {
              auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
              B.addReg(Tmp);
            }
          }

          if (CopyToVGPR.Reg) {
            Register Vgpr;
            if (VGPRCopies.count(CopyToVGPR)) {
              Vgpr = VGPRCopies[CopyToVGPR];
            } else {
              Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
              VGPRCopies[CopyToVGPR] = Vgpr;
            }
            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr);
            B.addReg(Tmp);
          }

          B.addImm(Defs[I].second);
        }
        LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
        return;
      }

      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64));
      else if (ST->hasGFX90AInsts() &&
               TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
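  //
  // e.g. a sub0 use of a register defined by
  //   %k = S_MOV_B64 <imm>
  // can fold just the low 32 bits of the immediate.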
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

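// Constant-fold the 32-bit binary operation Opcode over LHS and RHS into
// Result. Returns false for opcodes this does not know how to evaluate.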
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::S_XNOR_B32:
    Result = ~(LHS ^ RHS);
    return true;
  case AMDGPU::S_NAND_B32:
    Result = ~(LHS & RHS);
    return true;
  case AMDGPU::S_NOR_B32:
    Result = ~(LHS | RHS);
    return true;
  case AMDGPU::S_ANDN2_B32:
    Result = LHS & ~RHS;
    return true;
  case AMDGPU::S_ORN2_B32:
    Result = LHS | ~RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister || !Op.getReg().isVirtual())
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one.
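//
// e.g. a v_cndmask_b32 whose two sources are identical and unmodified
// produces the same value for either condition, so it degenerates into a
// copy (or a mov of the immediate).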
static bool tryFoldCndMask(const SIInstrInfo *TII,
                           MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
      Opc == AMDGPU::V_CNDMASK_B32_e64    ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

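// Fold OpToFold, the source operand of the foldable instruction MI, into the
// non-debug uses of MI's destination register, then apply the collected fold
// candidates.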
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    bool Again;
    do {
      Again = false;
      for (auto &Use :
           make_early_inc_range(MRI->use_nodbg_operands(Dst.getReg()))) {
        MachineInstr *UseMI = Use.getParent();
        unsigned OpNo = UseMI->getOperandNo(&Use);

        // Folding the immediate may reveal operations that can be constant
        // folded or replaced with a copy. This can happen for example after
        // frame indices are lowered to constants or from splitting 64-bit
        // constants.
        //
        // We may also encounter cases where one or both operands are
        // immediates materialized into a register, which would ordinarily not
        // be folded due to multiple uses or operand constraints.

        if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
          LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

          // Some constant folding cases change the same immediate's use to a
          // new instruction, e.g. and x, 0 -> 0. Make sure we re-visit the
          // user; the same constant-folded instruction could also have a
          // second use operand.
          FoldList.clear();
          Again = true;
          break;
        }

        // Try to fold any inline immediate uses, and then only fold other
        // constants if they have one use.
        //
        // The legality of the inline immediate must be checked based on the use
        // operand, not the defining instruction, because 32-bit instructions
        // with 32-bit inline immediate sources may be used to materialize
        // constants used in 16-bit operands.
        //
        // e.g. it is unsafe to fold:
        //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
        //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

        // Folding immediates with more than one use will increase program size.
        // FIXME: This will also reduce register usage, which may be better
        // in some cases. A better heuristic is needed.
        if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
          foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
        } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
          foldOperand(OpToFold, UseMI, OpNo, FoldList,
                      CopiesToReplace);
        } else {
          if (++NumLiteralUses == 1) {
            NonInlineUse = &Use;
            NonInlineUseOpNo = OpNo;
          }
        }
      }
    } while (Again);

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector<MachineOperand *, 4> UsesToProcess;
    for (auto &Use : MRI->use_nodbg_operands(Dst.getReg()))
      UsesToProcess.push_back(&Use);
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, UseMI->getOperandNo(U), FoldList,
                  CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  SmallPtrSet<MachineInstr *, 16> Folded;
  for (FoldCandidate &Fold : FoldList) {
    assert(!Fold.isReg() || Fold.OpToFold);
    if (Folded.count(Fold.UseMI))
      continue;
    if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      if (tryFoldCndMask(TII, Fold.UseMI))
        Folded.insert(Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restoring instruction's original operand order if fold has failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
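//
// e.g. with the clamp bit set,
//   %y = V_MAX_F32_e64 %x, %x, clamp
// clamps %x to [0.0, 1.0], so the clamp can instead be set on the
// instruction that defines %x.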
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64_e64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !MRI->hasOneNonDBGUser(ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

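// Map a multiplier immediate to the equivalent output-modifier encoding:
// 0.5 -> div:2, 2.0 -> mul:2, 4.0 -> mul:4, anything else -> none.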
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F64_e64: {
    switch (Val) {
    case 0x3fe0000000000000: // 0.5
      return SIOutMods::DIV2;
    case 0x4000000000000000: // 2.0
      return SIOutMods::MUL2;
    case 0x4010000000000000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
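//
// Recognize a multiply or add that is equivalent to an output modifier, e.g.
//   %y = V_MUL_F32_e64 %x, 2.0
// or the canonicalized form
//   %y = V_ADD_F32_e64 %x, %x
// both of which can become omod mul:2 on the instruction defining %x.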
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F64_e64:
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F16_e64) &&
         MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F64_e64:
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F16_e64) &&
         MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
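// A sketch of the rewrite performed below (illustrative MIR, operand
// encodings simplified; uses of %1 are redirected to %0 and the multiply is
// erased):
//   %0:vgpr_32 = V_ADD_F32_e64 0, %a, 0, %b, 0, /*omod*/ 0
//   %1:vgpr_32 = V_MUL_F32_e64 0, 2.0, 0, %0, 0, 0
// =>
//   %0:vgpr_32 = V_ADD_F32_e64 0, %a, 0, %b, 0, /*omod*/ MUL2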
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !MRI->hasOneNonDBGUser(RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

// Try to fold a reg_sequence with vgpr output and agpr inputs into an
// instruction which can take an agpr. So far that means a store.
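// An illustrative case (MIR sketch, instruction shapes simplified):
//   %2:vreg_64 = REG_SEQUENCE %0:agpr_32, %subreg.sub0, %1:agpr_32, %subreg.sub1
//   GLOBAL_STORE_DWORDX2 %ptr, %2, ...
// =>
//   %3:areg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
//   GLOBAL_STORE_DWORDX2 %ptr, %3, ...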
bool SIFoldOperands::tryFoldRegSequence(MachineInstr &MI) {
  assert(MI.isRegSequence());
  auto Reg = MI.getOperand(0).getReg();

  if (!ST->hasGFX90AInsts() || !TRI->isVGPR(*MRI, Reg) ||
      !MRI->hasOneNonDBGUse(Reg))
    return false;

  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, Reg, MCOI::OPERAND_REGISTER, TII, *MRI))
    return false;

  for (auto &Def : Defs) {
    const auto *Op = Def.first;
    if (!Op->isReg())
      return false;
    if (TRI->isAGPR(*MRI, Op->getReg()))
      continue;
    // Maybe this is a COPY from AREG.
    const MachineInstr *SubDef = MRI->getVRegDef(Op->getReg());
    if (!SubDef || !SubDef->isCopy() || SubDef->getOperand(1).getSubReg())
      return false;
    if (!TRI->isAGPR(*MRI, SubDef->getOperand(1).getReg()))
      return false;
  }

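  // Follow the chain of single-use, full-register copies from the
  // reg_sequence result down to the instruction that actually consumes it.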
  MachineOperand *Op = &*MRI->use_nodbg_begin(Reg);
  MachineInstr *UseMI = Op->getParent();
  while (UseMI->isCopy() && !Op->getSubReg()) {
    Reg = UseMI->getOperand(0).getReg();
    if (!TRI->isVGPR(*MRI, Reg) || !MRI->hasOneNonDBGUse(Reg))
      return false;
    Op = &*MRI->use_nodbg_begin(Reg);
    UseMI = Op->getParent();
  }

  if (Op->getSubReg())
    return false;

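  // Operands of a MachineInstr are stored contiguously, so pointer
  // arithmetic recovers the index of the use operand within UseMI.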
  unsigned OpIdx = Op - &UseMI->getOperand(0);
  const MCInstrDesc &InstDesc = UseMI->getDesc();
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  switch (OpInfo.RegClass) {
  case AMDGPU::AV_32RegClassID:  LLVM_FALLTHROUGH;
  case AMDGPU::AV_64RegClassID:  LLVM_FALLTHROUGH;
  case AMDGPU::AV_96RegClassID:  LLVM_FALLTHROUGH;
  case AMDGPU::AV_128RegClassID: LLVM_FALLTHROUGH;
  case AMDGPU::AV_160RegClassID:
    break;
  default:
    return false;
  }

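  // Rebuild the reg_sequence with an equivalent AGPR register class,
  // reading through each vgpr-from-agpr copy to its agpr source.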
  const auto *NewDstRC = TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg));
  auto Dst = MRI->createVirtualRegister(NewDstRC);
  auto RS = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::REG_SEQUENCE), Dst);

  for (unsigned I = 0; I < Defs.size(); ++I) {
    MachineOperand *Def = Defs[I].first;
    Def->setIsKill(false);
    if (TRI->isAGPR(*MRI, Def->getReg())) {
      RS.add(*Def);
    } else { // This is a copy.
      MachineInstr *SubDef = MRI->getVRegDef(Def->getReg());
      SubDef->getOperand(1).setIsKill(false);
      RS.addReg(SubDef->getOperand(1).getReg(), 0, Def->getSubReg());
    }
    RS.addImm(Defs[I].second);
  }

  Op->setReg(Dst);
  if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) {
    Op->setReg(Reg);
    RS->eraseFromParent();
    return false;
  }

  LLVM_DEBUG(dbgs() << "Folded " << *RS << " into " << *UseMI << '\n');

  return true;
}

// Try to hoist an AGPR to VGPR copy out of the loop across an LCSSA PHI.
// This should allow folding of an AGPR into a consumer which may support it.
// I.e.:
//
// loop:                             // loop:
//   %1:vreg = COPY %0:areg          // exit:
// exit:                          => //   %1:areg = PHI %0:areg, %loop
//   %2:vreg = PHI %1:vreg, %loop    //   %2:vreg = COPY %1:areg
bool SIFoldOperands::tryFoldLCSSAPhi(MachineInstr &PHI) {
  assert(PHI.isPHI());

  if (PHI.getNumExplicitOperands() != 3) // Single input LCSSA PHI
    return false;

  Register PhiIn = PHI.getOperand(1).getReg();
  Register PhiOut = PHI.getOperand(0).getReg();
  if (PHI.getOperand(1).getSubReg() ||
      !TRI->isVGPR(*MRI, PhiIn) || !TRI->isVGPR(*MRI, PhiOut))
    return false;

  // A single use should not matter for correctness, but if the copy result
  // has another use inside the loop we may end up performing the copy twice
  // in the worst case.
  if (!MRI->hasOneNonDBGUse(PhiIn))
    return false;

  MachineInstr *Copy = MRI->getVRegDef(PhiIn);
  if (!Copy || !Copy->isCopy())
    return false;

  Register CopyIn = Copy->getOperand(1).getReg();
  if (!TRI->isAGPR(*MRI, CopyIn) || Copy->getOperand(1).getSubReg())
    return false;

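  // Route the AGPR value through the PHI directly and re-create the
  // AGPR-to-VGPR copy after the PHI, outside of the loop.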
  const TargetRegisterClass *ARC = MRI->getRegClass(CopyIn);
  Register NewReg = MRI->createVirtualRegister(ARC);
  PHI.getOperand(1).setReg(CopyIn);
  PHI.getOperand(0).setReg(NewReg);

  MachineBasicBlock *MBB = PHI.getParent();
  BuildMI(*MBB, MBB->getFirstNonPHI(), Copy->getDebugLoc(),
          TII->get(AMDGPU::COPY), PhiOut)
    .addReg(NewReg, RegState::Kill);
  Copy->eraseFromParent(); // We know this copy had a single use.

  LLVM_DEBUG(dbgs() << "Folded " << PHI << '\n');

  return true;
}

// Attempt to convert VGPR load to an AGPR load.
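// An illustrative case (MIR sketch): for
//   %0:vgpr_32 = GLOBAL_LOAD_DWORD %ptr, ...
//   %1:agpr_32 = COPY %0
// the load result %0 is retyped to agpr_32, turning the COPY into an
// agpr-to-agpr copy that later passes can clean up.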
bool SIFoldOperands::tryFoldLoad(MachineInstr &MI) {
  assert(MI.mayLoad());
  if (!ST->hasGFX90AInsts() || !MI.getNumOperands())
    return false;

  MachineOperand &Def = MI.getOperand(0);
  if (!Def.isDef())
    return false;

  Register DefReg = Def.getReg();

  if (DefReg.isPhysical() || !TRI->isVGPR(*MRI, DefReg))
    return false;

  SmallVector<const MachineInstr*, 8> Users;
  SmallVector<Register, 8> MoveRegs;
  for (const MachineInstr &I : MRI->use_nodbg_instructions(DefReg)) {
    Users.push_back(&I);
  }
  if (Users.empty())
    return false;

  // Check that all uses are a copy to an agpr or a reg_sequence producing
  // an agpr.
  while (!Users.empty()) {
    const MachineInstr *I = Users.pop_back_val();
    if (!I->isCopy() && !I->isRegSequence())
      return false;
    Register DstReg = I->getOperand(0).getReg();
    if (TRI->isAGPR(*MRI, DstReg))
      continue;
    MoveRegs.push_back(DstReg);
    for (const MachineInstr &U : MRI->use_nodbg_instructions(DstReg)) {
      Users.push_back(&U);
    }
  }

  const TargetRegisterClass *RC = MRI->getRegClass(DefReg);
  MRI->setRegClass(DefReg, TRI->getEquivalentAGPRClass(RC));
  if (!TII->isOperandLegal(MI, 0, &Def)) {
    MRI->setRegClass(DefReg, RC);
    return false;
  }

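  // The retyped load definition is legal, so migrate every intermediate
  // destination register to its equivalent AGPR class as well.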
  while (!MoveRegs.empty()) {
    Register Reg = MoveRegs.pop_back_val();
    MRI->setRegClass(Reg, TRI->getEquivalentAGPRClass(MRI->getRegClass(Reg)));
  }

  LLVM_DEBUG(dbgs() << "Folded " << MI << '\n');

  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if the IEEE bit is enabled. omod also does
  // not correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

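  // depth_first visits blocks reachable from the entry in depth-first
  // preorder; blocks unreachable from the entry are not processed.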
  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineOperand *CurrentKnownM0Val = nullptr;
    for (auto &MI : make_early_inc_range(*MBB)) {
      tryFoldCndMask(TII, &MI);

      if (MI.isRegSequence() && tryFoldRegSequence(MI))
        continue;

      if (MI.isPHI() && tryFoldLCSSAPhi(MI))
        continue;

      if (MI.mayLoad() && tryFoldLoad(MI))
        continue;

      if (!TII->isFoldableCopy(MI)) {
        // Saw an unknown clobber of m0, so we no longer know what it is.
        if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
          CurrentKnownM0Val = nullptr;

        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);

        continue;
      }

      // Specially track simple redefs of m0 to the same value in a block, so
      // we can erase the later ones.
      if (MI.getOperand(0).getReg() == AMDGPU::M0) {
        MachineOperand &NewM0Val = MI.getOperand(1);
        if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
          MI.eraseFromParent();
          continue;
        }

        // We aren't tracking other physical registers.
        CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
          nullptr : &NewM0Val;
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !OpToFold.getReg().isVirtual())
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Dst.getReg().isVirtual())
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return true;
}