xref: /llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp (revision c7dcacf16a680f6a5ef4cbe15ff9ca40f7d128b8)
1 //===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
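/// SIFoldOperands folds immediates, frame indices and foldable copies into the
/// instructions that use them, and also folds clamp and output modifiers back
/// into the instructions that define their sources.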
8 //===----------------------------------------------------------------------===//
9 //
10 
11 #include "AMDGPU.h"
12 #include "AMDGPUSubtarget.h"
13 #include "SIInstrInfo.h"
14 #include "SIMachineFunctionInfo.h"
15 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
16 #include "llvm/ADT/DepthFirstIterator.h"
17 #include "llvm/ADT/SetVector.h"
18 #include "llvm/CodeGen/LiveIntervals.h"
19 #include "llvm/CodeGen/MachineFunctionPass.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/raw_ostream.h"
24 #include "llvm/Target/TargetMachine.h"
25 
26 #define DEBUG_TYPE "si-fold-operands"
27 using namespace llvm;
28 
29 namespace {
30 
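// A deferred fold: the use instruction and operand number to rewrite, what to
// fold into it (register, global address, immediate or frame index), whether
// the use had to be commuted to make the fold legal, and an optional 32-bit
// opcode to shrink the use to when the fold requires the VOP2 encoding.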
31 struct FoldCandidate {
32   MachineInstr *UseMI;
33   union {
34     MachineOperand *OpToFold;
35     uint64_t ImmToFold;
36     int FrameIndexToFold;
37   };
38   int ShrinkOpcode;
39   unsigned char UseOpNo;
40   MachineOperand::MachineOperandType Kind;
41   bool Commuted;
42 
43   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
44                 bool Commuted_ = false,
45                 int ShrinkOp = -1) :
46     UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
47     Kind(FoldOp->getType()),
48     Commuted(Commuted_) {
49     if (FoldOp->isImm()) {
50       ImmToFold = FoldOp->getImm();
51     } else if (FoldOp->isFI()) {
52       FrameIndexToFold = FoldOp->getIndex();
53     } else {
54       assert(FoldOp->isReg() || FoldOp->isGlobal());
55       OpToFold = FoldOp;
56     }
57   }
58 
59   bool isFI() const {
60     return Kind == MachineOperand::MO_FrameIndex;
61   }
62 
63   bool isImm() const {
64     return Kind == MachineOperand::MO_Immediate;
65   }
66 
67   bool isReg() const {
68     return Kind == MachineOperand::MO_Register;
69   }
70 
71   bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }
72 
73   bool isCommuted() const {
74     return Commuted;
75   }
76 
77   bool needsShrink() const {
78     return ShrinkOpcode != -1;
79   }
80 
81   int getShrinkOpcode() const {
82     return ShrinkOpcode;
83   }
84 };
85 
86 class SIFoldOperands : public MachineFunctionPass {
87 public:
88   static char ID;
89   MachineRegisterInfo *MRI;
90   const SIInstrInfo *TII;
91   const SIRegisterInfo *TRI;
92   const GCNSubtarget *ST;
93   const SIMachineFunctionInfo *MFI;
94 
95   void foldOperand(MachineOperand &OpToFold,
96                    MachineInstr *UseMI,
97                    int UseOpIdx,
98                    SmallVectorImpl<FoldCandidate> &FoldList,
99                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
100 
101   void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
102 
103   const MachineOperand *isClamp(const MachineInstr &MI) const;
104   bool tryFoldClamp(MachineInstr &MI);
105 
106   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
107   bool tryFoldOMod(MachineInstr &MI);
108 
109 public:
110   SIFoldOperands() : MachineFunctionPass(ID) {
111     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
112   }
113 
114   bool runOnMachineFunction(MachineFunction &MF) override;
115 
116   StringRef getPassName() const override { return "SI Fold Operands"; }
117 
118   void getAnalysisUsage(AnalysisUsage &AU) const override {
119     AU.setPreservesCFG();
120     MachineFunctionPass::getAnalysisUsage(AU);
121   }
122 };
123 
124 } // End anonymous namespace.
125 
126 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
127                 "SI Fold Operands", false, false)
128 
129 char SIFoldOperands::ID = 0;
130 
131 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
132 
133 // Wrapper around isInlineConstant that understands special cases when
134 // instruction types are replaced during operand folding.
135 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
136                                      const MachineInstr &UseMI,
137                                      unsigned OpNo,
138                                      const MachineOperand &OpToFold) {
139   if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
140     return true;
141 
142   unsigned Opc = UseMI.getOpcode();
143   switch (Opc) {
144   case AMDGPU::V_MAC_F32_e64:
145   case AMDGPU::V_MAC_F16_e64:
146   case AMDGPU::V_FMAC_F32_e64:
147   case AMDGPU::V_FMAC_F16_e64: {
148     // Special case for mac. Since this is replaced with mad when folded into
149     // src2, we need to check the legality for the final instruction.
150     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
151     if (static_cast<int>(OpNo) == Src2Idx) {
152       bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
153                    Opc == AMDGPU::V_FMAC_F16_e64;
154       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
155                    Opc == AMDGPU::V_FMAC_F32_e64;
156 
157       unsigned NewOpc = IsFMA ?
158         (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
159         (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
160       const MCInstrDesc &MadDesc = TII->get(NewOpc);
161       return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
162     }
163     return false;
164   }
165   default:
166     return false;
167   }
168 }
169 
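// Returns true if OpToFold is a frame index appearing as the vaddr operand of
// a MUBUF or scratch FLAT instruction, where it may be foldable into the
// addressing mode.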
170 // TODO: Add a heuristic for when the frame index might not fit in the
171 // addressing mode immediate offset, to avoid materializing it in loops.
172 static bool frameIndexMayFold(const SIInstrInfo *TII,
173                               const MachineInstr &UseMI,
174                               int OpNo,
175                               const MachineOperand &OpToFold) {
176   return OpToFold.isFI() &&
177     (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
178     OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
179 }
180 
181 FunctionPass *llvm::createSIFoldOperandsPass() {
182   return new SIFoldOperands();
183 }
184 
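// Perform the rewrite described by Fold on its use instruction. Returns false
// if the fold turns out to be unsafe at this point, e.g. due to op_sel
// constraints on a packed operand, or because VCC is not dead when the use
// must be shrunk to its 32-bit encoding.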
185 static bool updateOperand(FoldCandidate &Fold,
186                           const SIInstrInfo &TII,
187                           const TargetRegisterInfo &TRI,
188                           const GCNSubtarget &ST) {
189   MachineInstr *MI = Fold.UseMI;
190   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
191   assert(Old.isReg());
192 
193   if (Fold.isImm()) {
194     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
195         !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
196         AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
197                                        ST.hasInv2PiInlineImm())) {
198       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
199       // already set.
200       unsigned Opcode = MI->getOpcode();
201       int OpNo = MI->getOperandNo(&Old);
202       int ModIdx = -1;
203       if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
204         ModIdx = AMDGPU::OpName::src0_modifiers;
205       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
206         ModIdx = AMDGPU::OpName::src1_modifiers;
207       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
208         ModIdx = AMDGPU::OpName::src2_modifiers;
209       assert(ModIdx != -1);
210       ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
211       MachineOperand &Mod = MI->getOperand(ModIdx);
212       unsigned Val = Mod.getImm();
213       if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
214         return false;
215       // Only apply the following transformation if that operand requires
216       // a packed immediate.
217       switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
218       case AMDGPU::OPERAND_REG_IMM_V2FP16:
219       case AMDGPU::OPERAND_REG_IMM_V2INT16:
220       case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
221       case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
222         // If upper part is all zero we do not need op_sel_hi.
223         if (!isUInt<16>(Fold.ImmToFold)) {
224           if (!(Fold.ImmToFold & 0xffff)) {
225             Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
226             Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
227             Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
228             return true;
229           }
230           Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
231           Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
232           return true;
233         }
234         break;
235       default:
236         break;
237       }
238     }
239   }
240 
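  // When the fold was recorded with a shrink opcode (e.g. for V_ADD_I32_e64),
  // the use must be rewritten to its 32-bit VOP2 form, which writes its carry
  // output to VCC, so this is only safe if VCC is dead here. The original wide
  // instruction is kept around as a dummy IMPLICIT_DEF so that iterators over
  // its uses are not invalidated.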
241   if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
242     MachineBasicBlock *MBB = MI->getParent();
243     auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
244     if (Liveness != MachineBasicBlock::LQR_Dead) {
245       LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
246       return false;
247     }
248 
249     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
250     int Op32 = Fold.getShrinkOpcode();
251     MachineOperand &Dst0 = MI->getOperand(0);
252     MachineOperand &Dst1 = MI->getOperand(1);
253     assert(Dst0.isDef() && Dst1.isDef());
254 
255     bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
256 
257     const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
258     Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
259 
260     MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
261 
262     if (HaveNonDbgCarryUse) {
263       BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
264         .addReg(AMDGPU::VCC, RegState::Kill);
265     }
266 
267     // Keep the old instruction around to avoid breaking iterators, but
268     // replace it with a dummy instruction to remove uses.
269     //
270     // FIXME: We should not have to invert how this pass looks at operands in
271     // order to avoid this; it should track the set of foldable movs instead of
272     // searching for uses while already visiting a use.
273     Dst0.setReg(NewReg0);
274     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
275       MI->RemoveOperand(I);
276     MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
277 
278     if (Fold.isCommuted())
279       TII.commuteInstruction(*Inst32, false);
280     return true;
281   }
282 
283   assert(!Fold.needsShrink() && "not handled");
284 
285   if (Fold.isImm()) {
286     Old.ChangeToImmediate(Fold.ImmToFold);
287     return true;
288   }
289 
290   if (Fold.isGlobal()) {
291     Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
292                    Fold.OpToFold->getTargetFlags());
293     return true;
294   }
295 
296   if (Fold.isFI()) {
297     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
298     return true;
299   }
300 
301   MachineOperand *New = Fold.OpToFold;
302   Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
303   Old.setIsUndef(New->isUndef());
304   return true;
305 }
306 
307 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
308                               const MachineInstr *MI) {
309   for (const auto &Candidate : FoldList) {
310     if (Candidate.UseMI == MI)
311       return true;
312   }
313   return false;
314 }
315 
316 static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
317                                 MachineInstr *MI, unsigned OpNo,
318                                 MachineOperand *FoldOp, bool Commuted = false,
319                                 int ShrinkOp = -1) {
320   // Skip additional folding on the same operand.
321   for (FoldCandidate &Fold : FoldList)
322     if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
323       return;
324   LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
325                     << " operand " << OpNo << "\n  " << *MI << '\n');
326   FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
327 }
328 
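// Try to record folding OpToFold into operand OpNo of MI. If the operand is
// not legal as-is, attempt instruction-specific rewrites (v_mac -> v_mad,
// s_setreg_b32 -> s_setreg_imm32_b32) and, failing that, try commuting the
// instruction to make the fold possible.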
329 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
330                              MachineInstr *MI, unsigned OpNo,
331                              MachineOperand *OpToFold,
332                              const SIInstrInfo *TII) {
333   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
334     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
335     unsigned Opc = MI->getOpcode();
336     if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
337          Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
338         (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
339       bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
340                    Opc == AMDGPU::V_FMAC_F16_e64;
341       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
342                    Opc == AMDGPU::V_FMAC_F32_e64;
343       unsigned NewOpc = IsFMA ?
344         (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
345         (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
346 
347       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
348       // to fold the operand.
349       MI->setDesc(TII->get(NewOpc));
350       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
351       if (FoldAsMAD) {
352         MI->untieRegOperand(OpNo);
353         return true;
354       }
355       MI->setDesc(TII->get(Opc));
356     }
357 
358     // Special case for s_setreg_b32
359     if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
360       MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
361       appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
362       return true;
363     }
364 
365     // If we are already folding into another operand of MI, then
366     // we can't commute the instruction, otherwise we risk making the
367     // other fold illegal.
368     if (isUseMIInFoldList(FoldList, MI))
369       return false;
370 
371     unsigned CommuteOpNo = OpNo;
372 
373     // Operand is not legal, so try to commute the instruction to
374     // see if this makes it possible to fold.
375     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
376     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
377     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
378 
379     if (CanCommute) {
380       if (CommuteIdx0 == OpNo)
381         CommuteOpNo = CommuteIdx1;
382       else if (CommuteIdx1 == OpNo)
383         CommuteOpNo = CommuteIdx0;
384     }
385 
386 
387     // One of the operands might be an immediate operand, and OpNo may refer to
388     // it after the call to commuteInstruction() below. Such situations are
389     // avoided here explicitly, as OpNo must be a register operand to be a
390     // candidate for memory folding.
391     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
392                        !MI->getOperand(CommuteIdx1).isReg()))
393       return false;
394 
395     if (!CanCommute ||
396         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
397       return false;
398 
399     if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
400       if ((Opc == AMDGPU::V_ADD_I32_e64 ||
401            Opc == AMDGPU::V_SUB_I32_e64 ||
402            Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
403           (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
404         MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
405 
406         // Verify the other operand is a VGPR, otherwise we would violate the
407         // constant bus restriction.
408         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
409         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
410         if (!OtherOp.isReg() ||
411             !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
412           return false;
413 
414         assert(MI->getOperand(1).isDef());
415 
416         // Make sure to get the 32-bit version of the commuted opcode.
417         unsigned MaybeCommutedOpc = MI->getOpcode();
418         int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
419 
420         appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
421         return true;
422       }
423 
424       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
425       return false;
426     }
427 
428     appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
429     return true;
430   }
431 
432   appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
433   return true;
434 }
435 
436 // If the use operand doesn't care about the value, this may be an operand only
437 // used for register indexing, in which case it is unsafe to fold.
438 static bool isUseSafeToFold(const SIInstrInfo *TII,
439                             const MachineInstr &MI,
440                             const MachineOperand &UseMO) {
441   return !UseMO.isUndef() && !TII->isSDWA(MI);
442   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
443 }
444 
445 // Find the def of UseReg, check whether it is a reg_sequence, and find the
446 // initializer for each subreg, tracing each back to a foldable inline
447 // immediate where possible. Returns true on success.
448 static bool getRegSeqInit(
449     SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
450     Register UseReg, uint8_t OpTy,
451     const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
452   MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
453   if (!Def || !Def->isRegSequence())
454     return false;
455 
456   for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
457     MachineOperand *Sub = &Def->getOperand(I);
458     assert(Sub->isReg());
459 
460     for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
461          SubDef && Sub->isReg() && !Sub->getSubReg() &&
462          TII->isFoldableCopy(*SubDef);
463          SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
464       MachineOperand *Op = &SubDef->getOperand(1);
465       if (Op->isImm()) {
466         if (TII->isInlineConstant(*Op, OpTy))
467           Sub = Op;
468         break;
469       }
470       if (!Op->isReg())
471         break;
472       Sub = Op;
473     }
474 
475     Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
476   }
477 
478   return true;
479 }
480 
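// Try to fold an inline immediate into an operand of UseMI whose type is in
// the OPERAND_REG_INLINE_AC range, either directly or by looking through a
// REG_SEQUENCE whose subregister initializers are all the same (splat) inline
// constant.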
481 static bool tryToFoldACImm(const SIInstrInfo *TII,
482                            const MachineOperand &OpToFold,
483                            MachineInstr *UseMI,
484                            unsigned UseOpIdx,
485                            SmallVectorImpl<FoldCandidate> &FoldList) {
486   const MCInstrDesc &Desc = UseMI->getDesc();
487   const MCOperandInfo *OpInfo = Desc.OpInfo;
488   if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
489     return false;
490 
491   uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
492   if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
493       OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
494     return false;
495 
496   if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
497       TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
498     UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
499     return true;
500   }
501 
502   if (!OpToFold.isReg())
503     return false;
504 
505   Register UseReg = OpToFold.getReg();
506   if (!Register::isVirtualRegister(UseReg))
507     return false;
508 
509   if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
510         return FC.UseMI == UseMI; }) != FoldList.end())
511     return false;
512 
513   MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
514   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
515   if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
516     return false;
517 
518   int32_t Imm;
519   for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
520     const MachineOperand *Op = Defs[I].first;
521     if (!Op->isImm())
522       return false;
523 
524     auto SubImm = Op->getImm();
525     if (!I) {
526       Imm = SubImm;
527       if (!TII->isInlineConstant(*Op, OpTy) ||
528           !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
529         return false;
530 
531       continue;
532     }
533     if (Imm != SubImm)
534       return false; // Can only fold splat constants
535   }
536 
537   appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
538   return true;
539 }
540 
541 void SIFoldOperands::foldOperand(
542   MachineOperand &OpToFold,
543   MachineInstr *UseMI,
544   int UseOpIdx,
545   SmallVectorImpl<FoldCandidate> &FoldList,
546   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
547   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
548 
549   if (!isUseSafeToFold(TII, *UseMI, UseOp))
550     return;
551 
552   // FIXME: Fold operands with subregs.
553   if (UseOp.isReg() && OpToFold.isReg()) {
554     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
555       return;
556   }
557 
558   // Special case for REG_SEQUENCE: We can't fold literals into
559   // REG_SEQUENCE instructions, so we have to fold them into the
560   // uses of REG_SEQUENCE.
561   if (UseMI->isRegSequence()) {
562     Register RegSeqDstReg = UseMI->getOperand(0).getReg();
563     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
564 
565     MachineRegisterInfo::use_iterator Next;
566     for (MachineRegisterInfo::use_iterator
567            RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
568          RSUse != RSE; RSUse = Next) {
569       Next = std::next(RSUse);
570 
571       MachineInstr *RSUseMI = RSUse->getParent();
572 
573       if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
574                          RSUse.getOperandNo(), FoldList))
575         continue;
576 
577       if (RSUse->getSubReg() != RegSeqDstSubReg)
578         continue;
579 
580       foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
581                   CopiesToReplace);
582     }
583 
584     return;
585   }
586 
587   if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
588     return;
589 
590   if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
591     // Sanity check that this is a stack access.
592     // FIXME: Should probably use stack pseudos before frame lowering.
593     MachineOperand *SOff = TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
594     if (!SOff->isReg() || (SOff->getReg() != MFI->getScratchWaveOffsetReg() &&
595                            SOff->getReg() != MFI->getStackPtrOffsetReg()))
596       return;
597 
598     if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
599         MFI->getScratchRSrcReg())
600       return;
601 
602     // A frame index will resolve to a positive constant, so it should always be
603     // safe to fold the addressing mode, even pre-GFX9.
604     UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
605     SOff->setReg(MFI->getStackPtrOffsetReg());
606     return;
607   }
608 
609   bool FoldingImmLike =
610       OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
611 
612   if (FoldingImmLike && UseMI->isCopy()) {
613     Register DestReg = UseMI->getOperand(0).getReg();
614 
615     // Don't fold into a copy to a physical register. Doing so would interfere
616     // with the register coalescer's logic, which avoids redundant
617     // initializations.
618     if (DestReg.isPhysical())
619       return;
620 
621     const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);
622 
623     Register SrcReg = UseMI->getOperand(1).getReg();
624     if (SrcReg.isVirtual()) { // XXX - This can be an assert?
625       const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
626       if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
627         MachineRegisterInfo::use_iterator NextUse;
628         SmallVector<FoldCandidate, 4> CopyUses;
629         for (MachineRegisterInfo::use_iterator
630           Use = MRI->use_begin(DestReg), E = MRI->use_end();
631           Use != E; Use = NextUse) {
632           NextUse = std::next(Use);
633           FoldCandidate FC = FoldCandidate(Use->getParent(),
634            Use.getOperandNo(), &UseMI->getOperand(1));
635           CopyUses.push_back(FC);
636         }
637         for (auto &F : CopyUses) {
638           foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
639            FoldList, CopiesToReplace);
640         }
641       }
642     }
643 
644     if (DestRC == &AMDGPU::AGPR_32RegClass &&
645         TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
646       UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
647       UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
648       CopiesToReplace.push_back(UseMI);
649       return;
650     }
651 
652     // In order to fold immediates into copies, we need to change the
653     // copy to a MOV.
654 
655     unsigned MovOp = TII->getMovOpcode(DestRC);
656     if (MovOp == AMDGPU::COPY)
657       return;
658 
659     UseMI->setDesc(TII->get(MovOp));
660     MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
661     MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
662     while (ImpOpI != ImpOpE) {
663       MachineInstr::mop_iterator Tmp = ImpOpI;
664       ImpOpI++;
665       UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
666     }
667     CopiesToReplace.push_back(UseMI);
668   } else {
669     if (UseMI->isCopy() && OpToFold.isReg() &&
670         UseMI->getOperand(0).getReg().isVirtual() &&
671         TRI->isVectorRegister(*MRI, UseMI->getOperand(0).getReg()) &&
672         !UseMI->getOperand(1).getSubReg()) {
673       LLVM_DEBUG(dbgs() << "Folding " << OpToFold
674                         << "\n into " << *UseMI << '\n');
675       unsigned Size = TII->getOpSize(*UseMI, 1);
676       Register UseReg = OpToFold.getReg();
677       UseMI->getOperand(1).setReg(UseReg);
678       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
679       UseMI->getOperand(1).setIsKill(false);
680       CopiesToReplace.push_back(UseMI);
681       OpToFold.setIsKill(false);
682 
683       // It is tricky to store a value into an AGPR: v_accvgpr_write_b32 can
684       // only accept a VGPR or an inline immediate. Recreate the reg_sequence
685       // with its initializers right here, so that immediates are rematerialized
686       // and copies through different register classes are avoided.
687       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
688       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
689           getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
690                         *MRI)) {
691         const DebugLoc &DL = UseMI->getDebugLoc();
692         MachineBasicBlock &MBB = *UseMI->getParent();
693 
694         UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
695         for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
696           UseMI->RemoveOperand(I);
697 
698         MachineInstrBuilder B(*MBB.getParent(), UseMI);
699         DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
700         SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
701         for (unsigned I = 0; I < Size / 4; ++I) {
702           MachineOperand *Def = Defs[I].first;
703           TargetInstrInfo::RegSubRegPair CopyToVGPR;
704           if (Def->isImm() &&
705               TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
706             int64_t Imm = Def->getImm();
707 
708             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
709             BuildMI(MBB, UseMI, DL,
710                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
711             B.addReg(Tmp);
712           } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
713             auto Src = getRegSubRegPair(*Def);
714             Def->setIsKill(false);
715             if (!SeenAGPRs.insert(Src)) {
716               // We cannot build a reg_sequence out of the same registers; they
717               // must be copied. Better to do it here, before copyPhysReg() creates
718               // several reads to do the AGPR->VGPR->AGPR copy.
719               CopyToVGPR = Src;
720             } else {
721               B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
722                        Src.SubReg);
723             }
724           } else {
725             assert(Def->isReg());
726             Def->setIsKill(false);
727             auto Src = getRegSubRegPair(*Def);
728 
729             // A direct copy from SGPR to AGPR is not possible. To avoid
730             // copyPhysReg() later expanding it into SGPR->VGPR->AGPR copies,
731             // create the copy here and track whether we already have one.
732             if (TRI->isSGPRReg(*MRI, Src.Reg)) {
733               CopyToVGPR = Src;
734             } else {
735               auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
736               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
737               B.addReg(Tmp);
738             }
739           }
740 
741           if (CopyToVGPR.Reg) {
742             Register Vgpr;
743             if (VGPRCopies.count(CopyToVGPR)) {
744               Vgpr = VGPRCopies[CopyToVGPR];
745             } else {
746               Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
747               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
748               VGPRCopies[CopyToVGPR] = Vgpr;
749             }
750             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
751             BuildMI(MBB, UseMI, DL,
752                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
753             B.addReg(Tmp);
754           }
755 
756           B.addImm(Defs[I].second);
757         }
758         LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
759         return;
760       }
761 
762       if (Size != 4)
763         return;
764       if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
765           TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
766         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
767       else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
768                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
769         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
770       return;
771     }
772 
773     unsigned UseOpc = UseMI->getOpcode();
774     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
775         (UseOpc == AMDGPU::V_READLANE_B32 &&
776          (int)UseOpIdx ==
777          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
778       // %vgpr = V_MOV_B32 imm
779       // %sgpr = V_READFIRSTLANE_B32 %vgpr
780       // =>
781       // %sgpr = S_MOV_B32 imm
782       if (FoldingImmLike) {
783         if (execMayBeModifiedBeforeUse(*MRI,
784                                        UseMI->getOperand(UseOpIdx).getReg(),
785                                        *OpToFold.getParent(),
786                                        *UseMI))
787           return;
788 
789         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
790 
791         // FIXME: ChangeToImmediate should clear subreg
792         UseMI->getOperand(1).setSubReg(0);
793         if (OpToFold.isImm())
794           UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
795         else
796           UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
797         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
798         return;
799       }
800 
801       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
802         if (execMayBeModifiedBeforeUse(*MRI,
803                                        UseMI->getOperand(UseOpIdx).getReg(),
804                                        *OpToFold.getParent(),
805                                        *UseMI))
806           return;
807 
808         // %vgpr = COPY %sgpr0
809         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
810         // =>
811         // %sgpr1 = COPY %sgpr0
812         UseMI->setDesc(TII->get(AMDGPU::COPY));
813         UseMI->getOperand(1).setReg(OpToFold.getReg());
814         UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
815         UseMI->getOperand(1).setIsKill(false);
816         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
817         return;
818       }
819     }
820 
821     const MCInstrDesc &UseDesc = UseMI->getDesc();
822 
823     // Don't fold into target-independent nodes. Target-independent opcodes
824     // don't have defined register classes.
825     if (UseDesc.isVariadic() ||
826         UseOp.isImplicit() ||
827         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
828       return;
829   }
830 
831   if (!FoldingImmLike) {
832     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
833 
834     // FIXME: We could try to change the instruction from 64-bit to 32-bit
835     // to enable more folding opportunities. The shrink operands pass
836     // already does this.
837     return;
838   }
839 
840 
841   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
842   const TargetRegisterClass *FoldRC =
843     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
844 
845   // Split 64-bit constants into 32-bits for folding.
846   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
847     Register UseReg = UseOp.getReg();
848     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
849 
850     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
851       return;
852 
853     APInt Imm(64, OpToFold.getImm());
854     if (UseOp.getSubReg() == AMDGPU::sub0) {
855       Imm = Imm.getLoBits(32);
856     } else {
857       assert(UseOp.getSubReg() == AMDGPU::sub1);
858       Imm = Imm.getHiBits(32);
859     }
860 
861     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
862     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
863     return;
864   }
865 
866 
867 
868   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
869 }
870 
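// Constant-fold a 32-bit bitwise or shift operation whose two source values
// are both known. Returns false for opcodes this does not know how to
// evaluate.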
871 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
872                                   uint32_t LHS, uint32_t RHS) {
873   switch (Opcode) {
874   case AMDGPU::V_AND_B32_e64:
875   case AMDGPU::V_AND_B32_e32:
876   case AMDGPU::S_AND_B32:
877     Result = LHS & RHS;
878     return true;
879   case AMDGPU::V_OR_B32_e64:
880   case AMDGPU::V_OR_B32_e32:
881   case AMDGPU::S_OR_B32:
882     Result = LHS | RHS;
883     return true;
884   case AMDGPU::V_XOR_B32_e64:
885   case AMDGPU::V_XOR_B32_e32:
886   case AMDGPU::S_XOR_B32:
887     Result = LHS ^ RHS;
888     return true;
889   case AMDGPU::V_LSHL_B32_e64:
890   case AMDGPU::V_LSHL_B32_e32:
891   case AMDGPU::S_LSHL_B32:
892     // The instruction ignores the high bits for out of bounds shifts.
893     Result = LHS << (RHS & 31);
894     return true;
895   case AMDGPU::V_LSHLREV_B32_e64:
896   case AMDGPU::V_LSHLREV_B32_e32:
897     Result = RHS << (LHS & 31);
898     return true;
899   case AMDGPU::V_LSHR_B32_e64:
900   case AMDGPU::V_LSHR_B32_e32:
901   case AMDGPU::S_LSHR_B32:
902     Result = LHS >> (RHS & 31);
903     return true;
904   case AMDGPU::V_LSHRREV_B32_e64:
905   case AMDGPU::V_LSHRREV_B32_e32:
906     Result = RHS >> (LHS & 31);
907     return true;
908   case AMDGPU::V_ASHR_I32_e64:
909   case AMDGPU::V_ASHR_I32_e32:
910   case AMDGPU::S_ASHR_I32:
911     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
912     return true;
913   case AMDGPU::V_ASHRREV_I32_e64:
914   case AMDGPU::V_ASHRREV_I32_e32:
915     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
916     return true;
917   default:
918     return false;
919   }
920 }
921 
922 static unsigned getMovOpc(bool IsScalar) {
923   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
924 }
925 
926 /// Remove any leftover implicit operands from mutating the instruction, e.g.
927 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
928 /// anymore.
929 static void stripExtraCopyOperands(MachineInstr &MI) {
930   const MCInstrDesc &Desc = MI.getDesc();
931   unsigned NumOps = Desc.getNumOperands() +
932                     Desc.getNumImplicitUses() +
933                     Desc.getNumImplicitDefs();
934 
935   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
936     MI.RemoveOperand(I);
937 }
938 
939 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
940   MI.setDesc(NewDesc);
941   stripExtraCopyOperands(MI);
942 }
943 
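// If Op is a virtual register (with no subregister) defined by a
// move-immediate, return that immediate source operand instead; otherwise
// return Op itself.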
944 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
945                                                MachineOperand &Op) {
946   if (Op.isReg()) {
947     // If this has a subregister, it obviously is a register source.
948     if (Op.getSubReg() != AMDGPU::NoSubRegister ||
949         !Register::isVirtualRegister(Op.getReg()))
950       return &Op;
951 
952     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
953     if (Def && Def->isMoveImmediate()) {
954       MachineOperand &ImmSrc = Def->getOperand(1);
955       if (ImmSrc.isImm())
956         return &ImmSrc;
957     }
958   }
959 
960   return &Op;
961 }
962 
963 // Try to simplify operations with a constant that may appear after instruction
964 // selection.
965 // TODO: See if a frame index with a fixed offset can fold.
966 static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
967                               const SIInstrInfo *TII,
968                               MachineInstr *MI,
969                               MachineOperand *ImmOp) {
970   unsigned Opc = MI->getOpcode();
971   if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
972       Opc == AMDGPU::S_NOT_B32) {
973     MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
974     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
975     return true;
976   }
977 
978   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
979   if (Src1Idx == -1)
980     return false;
981 
982   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
983   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
984   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
985 
986   if (!Src0->isImm() && !Src1->isImm())
987     return false;
988 
989   if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32) {
990     if (Src0->isImm() && Src0->getImm() == 0) {
991       // v_lshl_or_b32 0, X, Y -> copy Y
992       // v_lshl_or_b32 0, X, K -> v_mov_b32 K
993       bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
994       MI->RemoveOperand(Src1Idx);
995       MI->RemoveOperand(Src0Idx);
996 
997       MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
998       return true;
999     }
1000   }
1001 
1002   // and k0, k1 -> v_mov_b32 (k0 & k1)
1003   // or k0, k1 -> v_mov_b32 (k0 | k1)
1004   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
1005   if (Src0->isImm() && Src1->isImm()) {
1006     int32_t NewImm;
1007     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
1008       return false;
1009 
1010     const SIRegisterInfo &TRI = TII->getRegisterInfo();
1011     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
1012 
1013     // Be careful to change the right operand; src0 may belong to a different
1014     // instruction.
1015     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
1016     MI->RemoveOperand(Src1Idx);
1017     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
1018     return true;
1019   }
1020 
1021   if (!MI->isCommutable())
1022     return false;
1023 
1024   if (Src0->isImm() && !Src1->isImm()) {
1025     std::swap(Src0, Src1);
1026     std::swap(Src0Idx, Src1Idx);
1027   }
1028 
1029   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
1030   if (Opc == AMDGPU::V_OR_B32_e64 ||
1031       Opc == AMDGPU::V_OR_B32_e32 ||
1032       Opc == AMDGPU::S_OR_B32) {
1033     if (Src1Val == 0) {
1034       // y = or x, 0 => y = copy x
1035       MI->RemoveOperand(Src1Idx);
1036       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1037     } else if (Src1Val == -1) {
1038       // y = or x, -1 => y = v_mov_b32 -1
1039       MI->RemoveOperand(Src1Idx);
1040       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
1041     } else
1042       return false;
1043 
1044     return true;
1045   }
1046 
1047   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
1048       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
1049       MI->getOpcode() == AMDGPU::S_AND_B32) {
1050     if (Src1Val == 0) {
1051       // y = and x, 0 => y = v_mov_b32 0
1052       MI->RemoveOperand(Src0Idx);
1053       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
1054     } else if (Src1Val == -1) {
1055       // y = and x, -1 => y = copy x
1056       MI->RemoveOperand(Src1Idx);
1057       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1058       stripExtraCopyOperands(*MI);
1059     } else
1060       return false;
1061 
1062     return true;
1063   }
1064 
1065   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
1066       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
1067       MI->getOpcode() == AMDGPU::S_XOR_B32) {
1068     if (Src1Val == 0) {
1069       // y = xor x, 0 => y = copy x
1070       MI->RemoveOperand(Src1Idx);
1071       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1072       return true;
1073     }
1074   }
1075 
1076   return false;
1077 }
1078 
1079 // Try to fold an instruction into a simpler one.
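// Currently this handles V_CNDMASK_* whose two sources are identical and
// unmodified, which degenerates into a copy (or a mov of the immediate).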
1080 static bool tryFoldInst(const SIInstrInfo *TII,
1081                         MachineInstr *MI) {
1082   unsigned Opc = MI->getOpcode();
1083 
1084   if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
1085       Opc == AMDGPU::V_CNDMASK_B32_e64    ||
1086       Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
1087     const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
1088     const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
1089     int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1090     int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1091     if (Src1->isIdenticalTo(*Src0) &&
1092         (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
1093         (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
1094       LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
1095       auto &NewDesc =
1096           TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
1097       int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
1098       if (Src2Idx != -1)
1099         MI->RemoveOperand(Src2Idx);
1100       MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
1101       if (Src1ModIdx != -1)
1102         MI->RemoveOperand(Src1ModIdx);
1103       if (Src0ModIdx != -1)
1104         MI->RemoveOperand(Src0ModIdx);
1105       mutateCopyOp(*MI, NewDesc);
1106       LLVM_DEBUG(dbgs() << *MI << '\n');
1107       return true;
1108     }
1109   }
1110 
1111   return false;
1112 }
1113 
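// Fold OpToFold, the source operand of the foldable copy or mov MI, into the
// uses of MI's destination register, constant-folding users or rewriting them
// to simpler opcodes where that becomes possible.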
1114 void SIFoldOperands::foldInstOperand(MachineInstr &MI,
1115                                      MachineOperand &OpToFold) const {
1116   // We need to mutate the operands of new mov instructions to add implicit
1117   // uses of EXEC, but adding them invalidates the use_iterator, so defer
1118   // this.
1119   SmallVector<MachineInstr *, 4> CopiesToReplace;
1120   SmallVector<FoldCandidate, 4> FoldList;
1121   MachineOperand &Dst = MI.getOperand(0);
1122 
1123   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1124   if (FoldingImm) {
1125     unsigned NumLiteralUses = 0;
1126     MachineOperand *NonInlineUse = nullptr;
1127     int NonInlineUseOpNo = -1;
1128 
1129     MachineRegisterInfo::use_iterator NextUse;
1130     for (MachineRegisterInfo::use_iterator
1131            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
1132          Use != E; Use = NextUse) {
1133       NextUse = std::next(Use);
1134       MachineInstr *UseMI = Use->getParent();
1135       unsigned OpNo = Use.getOperandNo();
1136 
1137       // Folding the immediate may reveal operations that can be constant
1138       // folded or replaced with a copy. This can happen for example after
1139       // frame indices are lowered to constants or from splitting 64-bit
1140       // constants.
1141       //
1142       // We may also encounter cases where one or both operands are
1143       // immediates materialized into a register, which would ordinarily not
1144       // be folded due to multiple uses or operand constraints.
1145 
1146       if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
1147         LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');
1148 
1149         // Some constant folding cases change the same immediate's use to a new
1150         // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user; the
1151         // same constant-folded instruction could also have a second use
1152         // operand.
1153         NextUse = MRI->use_begin(Dst.getReg());
1154         FoldList.clear();
1155         continue;
1156       }
1157 
1158       // Try to fold any inline immediate uses, and then only fold other
1159       // constants if they have one use.
1160       //
1161       // The legality of the inline immediate must be checked based on the use
1162       // operand, not the defining instruction, because 32-bit instructions
1163       // with 32-bit inline immediate sources may be used to materialize
1164       // constants used in 16-bit operands.
1165       //
1166       // e.g. it is unsafe to fold:
1167       //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
1168       //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
1169 
1170       // Folding immediates with more than one use will increase program size.
1171       // FIXME: This will also reduce register usage, which may be better
1172       // in some cases. A better heuristic is needed.
1173       if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
1174         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
1175       } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
1176         foldOperand(OpToFold, UseMI, OpNo, FoldList,
1177                     CopiesToReplace);
1178       } else {
1179         if (++NumLiteralUses == 1) {
1180           NonInlineUse = &*Use;
1181           NonInlineUseOpNo = OpNo;
1182         }
1183       }
1184     }
1185 
1186     if (NumLiteralUses == 1) {
1187       MachineInstr *UseMI = NonInlineUse->getParent();
1188       foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
1189     }
1190   } else {
1191     // Folding a register.
1192     SmallVector<MachineRegisterInfo::use_iterator, 4> UsesToProcess;
1193     for (MachineRegisterInfo::use_iterator
1194            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
1195          Use != E; ++Use) {
1196       UsesToProcess.push_back(Use);
1197     }
1198     for (auto U : UsesToProcess) {
1199       MachineInstr *UseMI = U->getParent();
1200 
1201       foldOperand(OpToFold, UseMI, U.getOperandNo(),
1202         FoldList, CopiesToReplace);
1203     }
1204   }
1205 
1206   MachineFunction *MF = MI.getParent()->getParent();
1207   // Make sure we add EXEC uses to any new v_mov instructions created.
1208   for (MachineInstr *Copy : CopiesToReplace)
1209     Copy->addImplicitDefUseOperands(*MF);
1210 
1211   for (FoldCandidate &Fold : FoldList) {
1212     if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
1213       Register Reg = Fold.OpToFold->getReg();
1214       MachineInstr *DefMI = Fold.OpToFold->getParent();
1215       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
1216           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
1217         continue;
1218     }
1219     if (updateOperand(Fold, *TII, *TRI, *ST)) {
1220       // Clear kill flags.
1221       if (Fold.isReg()) {
1222         assert(Fold.OpToFold && Fold.OpToFold->isReg());
1223         // FIXME: Probably shouldn't bother trying to fold if not an
1224         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
1225         // copies.
1226         MRI->clearKillFlags(Fold.OpToFold->getReg());
1227       }
1228       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
1229                         << static_cast<int>(Fold.UseOpNo) << " of "
1230                         << *Fold.UseMI << '\n');
1231       tryFoldInst(TII, Fold.UseMI);
1232     } else if (Fold.isCommuted()) {
1233       // Restoring instruction's original operand order if fold has failed.
1234       TII->commuteInstruction(*Fold.UseMI, false);
1235     }
1236   }
1237 }
1238 
1239 // Clamp patterns are canonically selected to v_max_* instructions, so only
1240 // handle them.
1241 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
1242   unsigned Op = MI.getOpcode();
1243   switch (Op) {
1244   case AMDGPU::V_MAX_F32_e64:
1245   case AMDGPU::V_MAX_F16_e64:
1246   case AMDGPU::V_MAX_F64:
1247   case AMDGPU::V_PK_MAX_F16: {
1248     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
1249       return nullptr;
1250 
1251     // Make sure sources are identical.
1252     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1253     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1254     if (!Src0->isReg() || !Src1->isReg() ||
1255         Src0->getReg() != Src1->getReg() ||
1256         Src0->getSubReg() != Src1->getSubReg() ||
1257         Src0->getSubReg() != AMDGPU::NoSubRegister)
1258       return nullptr;
1259 
1260     // Can't fold up if we have modifiers.
1261     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1262       return nullptr;
1263 
1264     unsigned Src0Mods
1265       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
1266     unsigned Src1Mods
1267       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
1268 
1269     // Having a 0 op_sel_hi would require swizzling the output in the source
1270     // instruction, which we can't do.
1271     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
1272                                                       : 0u;
1273     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
1274       return nullptr;
1275     return Src0;
1276   }
1277   default:
1278     return nullptr;
1279   }
1280 }
1281 
1282 // A clamp candidate obviously has multiple operand uses, since the register is
1283 // used twice in the same instruction, so count the using instructions instead.
1284 static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
1285   int Count = 0;
1286   for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
1287        I != E; ++I) {
1288     if (++Count > 1)
1289       return false;
1290   }
1291 
1292   return true;
1293 }
1294 
1295 // FIXME: Clamp for v_mad_mixhi_f16 is handled during isel.
1296 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
1297   const MachineOperand *ClampSrc = isClamp(MI);
1298   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
1299     return false;
1300 
1301   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
1302 
1303   // The type of clamp must be compatible.
1304   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
1305     return false;
1306 
1307   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1308   if (!DefClamp)
1309     return false;
1310 
1311   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
1312                     << '\n');
1313 
1314   // Clamp is applied after omod, so it is OK if omod is set.
1315   DefClamp->setImm(1);
1316   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1317   MI.eraseFromParent();
1318   return true;
1319 }
1320 
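// Map a multiply-by-constant to the corresponding output modifier (omod)
// encoding: 0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4, anything else -> NONE.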
1321 static int getOModValue(unsigned Opc, int64_t Val) {
1322   switch (Opc) {
1323   case AMDGPU::V_MUL_F32_e64: {
1324     switch (static_cast<uint32_t>(Val)) {
1325     case 0x3f000000: // 0.5
1326       return SIOutMods::DIV2;
1327     case 0x40000000: // 2.0
1328       return SIOutMods::MUL2;
1329     case 0x40800000: // 4.0
1330       return SIOutMods::MUL4;
1331     default:
1332       return SIOutMods::NONE;
1333     }
1334   }
1335   case AMDGPU::V_MUL_F16_e64: {
1336     switch (static_cast<uint16_t>(Val)) {
1337     case 0x3800: // 0.5
1338       return SIOutMods::DIV2;
1339     case 0x4000: // 2.0
1340       return SIOutMods::MUL2;
1341     case 0x4400: // 4.0
1342       return SIOutMods::MUL4;
1343     default:
1344       return SIOutMods::NONE;
1345     }
1346   }
1347   default:
1348     llvm_unreachable("invalid mul opcode");
1349   }
1350 }
1351 
1352 // FIXME: Does this really not support denormals with f16?
1353 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
1354 // handled, so will anything other than that break?
1355 std::pair<const MachineOperand *, int>
1356 SIFoldOperands::isOMod(const MachineInstr &MI) const {
1357   unsigned Op = MI.getOpcode();
1358   switch (Op) {
1359   case AMDGPU::V_MUL_F32_e64:
1360   case AMDGPU::V_MUL_F16_e64: {
1361     // If output denormals are enabled, omod is ignored.
1362     if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
1363         (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
1364       return std::make_pair(nullptr, SIOutMods::NONE);
1365 
1366     const MachineOperand *RegOp = nullptr;
1367     const MachineOperand *ImmOp = nullptr;
1368     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1369     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1370     if (Src0->isImm()) {
1371       ImmOp = Src0;
1372       RegOp = Src1;
1373     } else if (Src1->isImm()) {
1374       ImmOp = Src1;
1375       RegOp = Src0;
1376     } else
1377       return std::make_pair(nullptr, SIOutMods::NONE);
1378 
1379     int OMod = getOModValue(Op, ImmOp->getImm());
1380     if (OMod == SIOutMods::NONE ||
1381         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1382         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1383         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1384         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1385       return std::make_pair(nullptr, SIOutMods::NONE);
1386 
1387     return std::make_pair(RegOp, OMod);
1388   }
1389   case AMDGPU::V_ADD_F32_e64:
1390   case AMDGPU::V_ADD_F16_e64: {
1391     // If output denormals are enabled, omod is ignored.
1392     if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
1393         (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
1394       return std::make_pair(nullptr, SIOutMods::NONE);
1395 
1396     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
1397     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1398     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1399 
1400     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
1401         Src0->getSubReg() == Src1->getSubReg() &&
1402         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1403         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1404         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1405         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1406       return std::make_pair(Src0, SIOutMods::MUL2);
1407 
1408     return std::make_pair(nullptr, SIOutMods::NONE);
1409   }
1410   default:
1411     return std::make_pair(nullptr, SIOutMods::NONE);
1412   }
1413 }
1414 
1415 // FIXME: Does this need to check IEEE bit on function?
1416 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
1417   const MachineOperand *RegOp;
1418   int OMod;
1419   std::tie(RegOp, OMod) = isOMod(MI);
1420   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
1421       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1422       !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
1423     return false;
1424 
1425   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
1426   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1427   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
1428     return false;
1429 
1430   // Clamp is applied after omod. If the source already has clamp set, don't
1431   // fold it.
1432   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1433     return false;
1434 
1435   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
1436 
1437   DefOMod->setImm(OMod);
1438   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1439   MI.eraseFromParent();
1440   return true;
1441 }
1442 
1443 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
1444   if (skipFunction(MF.getFunction()))
1445     return false;
1446 
1447   MRI = &MF.getRegInfo();
1448   ST = &MF.getSubtarget<GCNSubtarget>();
1449   TII = ST->getInstrInfo();
1450   TRI = &TII->getRegisterInfo();
1451   MFI = MF.getInfo<SIMachineFunctionInfo>();
1452 
1453   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
1454   // correctly handle signed zeros.
1455   //
1456   // FIXME: Also need to check strictfp
1457   bool IsIEEEMode = MFI->getMode().IEEE;
1458   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
1459 
1460   for (MachineBasicBlock *MBB : depth_first(&MF)) {
1461     MachineBasicBlock::iterator I, Next;
1462 
1463     MachineOperand *CurrentKnownM0Val = nullptr;
1464     for (I = MBB->begin(); I != MBB->end(); I = Next) {
1465       Next = std::next(I);
1466       MachineInstr &MI = *I;
1467 
1468       tryFoldInst(TII, &MI);
1469 
1470       if (!TII->isFoldableCopy(MI)) {
1471         // Saw an unknown clobber of m0, so we no longer know what it is.
1472         if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
1473           CurrentKnownM0Val = nullptr;
1474 
1475         // TODO: Omod might be OK if there is NSZ only on the source
1476         // instruction, and not the omod multiply.
1477         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
1478             !tryFoldOMod(MI))
1479           tryFoldClamp(MI);
1480 
1481         continue;
1482       }
1483 
1484       // Specially track simple redefs of m0 to the same value in a block, so we
1485       // can erase the later ones.
1486       if (MI.getOperand(0).getReg() == AMDGPU::M0) {
1487         MachineOperand &NewM0Val = MI.getOperand(1);
1488         if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1489           MI.eraseFromParent();
1490           continue;
1491         }
1492 
1493         // We aren't tracking other physical registers
1494         CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
1495           nullptr : &NewM0Val;
1496         continue;
1497       }
1498 
1499       MachineOperand &OpToFold = MI.getOperand(1);
1500       bool FoldingImm =
1501           OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1502 
1503       // FIXME: We could also be folding things like TargetIndexes.
1504       if (!FoldingImm && !OpToFold.isReg())
1505         continue;
1506 
1507       if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
1508         continue;
1509       // Prevent folding operands backwards in the function. For example, the
1510       // COPY below must not be replaced by 1:
1511       // the COPY opcode must not be replaced by 1 in this example:
1512       //
1513       //    %3 = COPY %vgpr0; VGPR_32:%3
1514       //    ...
1515       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1516       MachineOperand &Dst = MI.getOperand(0);
1517       if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
1518         continue;
1519 
1520       foldInstOperand(MI, OpToFold);
1521     }
1522   }
1523   return true;
1524 }
1525