xref: /llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp (revision 68fab44acfc7ce7fecd86ad784fb207f088c5366)
1 //===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
8 /// Fold operands (immediates, frame indexes, globals, copies) into their uses.
9 //===----------------------------------------------------------------------===//
10 
11 #include "AMDGPU.h"
12 #include "AMDGPUSubtarget.h"
13 #include "SIInstrInfo.h"
14 #include "SIMachineFunctionInfo.h"
15 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
16 #include "llvm/ADT/DepthFirstIterator.h"
17 #include "llvm/ADT/SetVector.h"
18 #include "llvm/CodeGen/MachineFunctionPass.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
23 #include "llvm/Target/TargetMachine.h"
24 
25 #define DEBUG_TYPE "si-fold-operands"
26 using namespace llvm;
27 
28 namespace {
29 
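// A single deferred fold: operand UseOpNo of UseMI is to be replaced by an
// immediate, frame index, global address or register operand, possibly after
// commuting the instruction or shrinking it to the VOP2 form ShrinkOpcode.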
30 struct FoldCandidate {
31   MachineInstr *UseMI;
32   union {
33     MachineOperand *OpToFold;
34     uint64_t ImmToFold;
35     int FrameIndexToFold;
36   };
37   int ShrinkOpcode;
38   unsigned UseOpNo;
39   MachineOperand::MachineOperandType Kind;
40   bool Commuted;
41 
42   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
43                 bool Commuted_ = false,
44                 int ShrinkOp = -1) :
45     UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
46     Kind(FoldOp->getType()),
47     Commuted(Commuted_) {
48     if (FoldOp->isImm()) {
49       ImmToFold = FoldOp->getImm();
50     } else if (FoldOp->isFI()) {
51       FrameIndexToFold = FoldOp->getIndex();
52     } else {
53       assert(FoldOp->isReg() || FoldOp->isGlobal());
54       OpToFold = FoldOp;
55     }
56   }
57 
58   bool isFI() const {
59     return Kind == MachineOperand::MO_FrameIndex;
60   }
61 
62   bool isImm() const {
63     return Kind == MachineOperand::MO_Immediate;
64   }
65 
66   bool isReg() const {
67     return Kind == MachineOperand::MO_Register;
68   }
69 
70   bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }
71 
72   bool isCommuted() const {
73     return Commuted;
74   }
75 
76   bool needsShrink() const {
77     return ShrinkOpcode != -1;
78   }
79 
80   int getShrinkOpcode() const {
81     return ShrinkOpcode;
82   }
83 };
84 
85 class SIFoldOperands : public MachineFunctionPass {
86 public:
87   static char ID;
88   MachineRegisterInfo *MRI;
89   const SIInstrInfo *TII;
90   const SIRegisterInfo *TRI;
91   const GCNSubtarget *ST;
92   const SIMachineFunctionInfo *MFI;
93 
94   void foldOperand(MachineOperand &OpToFold,
95                    MachineInstr *UseMI,
96                    int UseOpIdx,
97                    SmallVectorImpl<FoldCandidate> &FoldList,
98                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
99 
100   void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
101 
102   const MachineOperand *isClamp(const MachineInstr &MI) const;
103   bool tryFoldClamp(MachineInstr &MI);
104 
105   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
106   bool tryFoldOMod(MachineInstr &MI);
107 
108 public:
109   SIFoldOperands() : MachineFunctionPass(ID) {
110     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
111   }
112 
113   bool runOnMachineFunction(MachineFunction &MF) override;
114 
115   StringRef getPassName() const override { return "SI Fold Operands"; }
116 
117   void getAnalysisUsage(AnalysisUsage &AU) const override {
118     AU.setPreservesCFG();
119     MachineFunctionPass::getAnalysisUsage(AU);
120   }
121 };
122 
123 } // End anonymous namespace.
124 
125 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
126                 "SI Fold Operands", false, false)
127 
128 char SIFoldOperands::ID = 0;
129 
130 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
131 
132 // Wrapper around isInlineConstant that understands special cases when
133 // instruction types are replaced during operand folding.
134 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
135                                      const MachineInstr &UseMI,
136                                      unsigned OpNo,
137                                      const MachineOperand &OpToFold) {
138   if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
139     return true;
140 
141   unsigned Opc = UseMI.getOpcode();
142   switch (Opc) {
143   case AMDGPU::V_MAC_F32_e64:
144   case AMDGPU::V_MAC_F16_e64:
145   case AMDGPU::V_FMAC_F32_e64:
146   case AMDGPU::V_FMAC_F16_e64: {
147     // Special case for mac. Since this is replaced with mad when folded into
148     // src2, we need to check the legality for the final instruction.
149     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
150     if (static_cast<int>(OpNo) == Src2Idx) {
151       bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
152                    Opc == AMDGPU::V_FMAC_F16_e64;
153       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
154                    Opc == AMDGPU::V_FMAC_F32_e64;
155 
156       unsigned Opc = IsFMA ?
157         (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
158         (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
159       const MCInstrDesc &MadDesc = TII->get(Opc);
160       return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
161     }
162     return false;
163   }
164   default:
165     return false;
166   }
167 }
168 
169 // TODO: Add a heuristic for the case where the frame index might not fit in
170 // the addressing mode immediate offset, to avoid materializing it in loops.
171 static bool frameIndexMayFold(const SIInstrInfo *TII,
172                               const MachineInstr &UseMI,
173                               int OpNo,
174                               const MachineOperand &OpToFold) {
175   return OpToFold.isFI() &&
176     (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
177     OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
178 }
179 
180 FunctionPass *llvm::createSIFoldOperandsPass() {
181   return new SIFoldOperands();
182 }
183 
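// Apply a previously recorded fold, rewriting the use operand in place.
// Returns false if the fold turns out to be unsafe, e.g. op_sel is already
// set on a packed operand, or VCC is not dead where a shrunk carry-out
// instruction would need it.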
184 static bool updateOperand(FoldCandidate &Fold,
185                           const SIInstrInfo &TII,
186                           const TargetRegisterInfo &TRI,
187                           const GCNSubtarget &ST) {
188   MachineInstr *MI = Fold.UseMI;
189   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
190   assert(Old.isReg());
191 
192   if (Fold.isImm()) {
193     if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
194         !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
195         AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
196                                        ST.hasInv2PiInlineImm())) {
197       // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
198       // already set.
199       unsigned Opcode = MI->getOpcode();
200       int OpNo = MI->getOperandNo(&Old);
201       int ModIdx = -1;
202       if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
203         ModIdx = AMDGPU::OpName::src0_modifiers;
204       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
205         ModIdx = AMDGPU::OpName::src1_modifiers;
206       else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
207         ModIdx = AMDGPU::OpName::src2_modifiers;
208       assert(ModIdx != -1);
209       ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
210       MachineOperand &Mod = MI->getOperand(ModIdx);
211       unsigned Val = Mod.getImm();
212       if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
213         return false;
214       // Only apply the following transformation if that operand requires
215       // a packed immediate.
216       switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
217       case AMDGPU::OPERAND_REG_IMM_V2FP16:
218       case AMDGPU::OPERAND_REG_IMM_V2INT16:
219       case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
220       case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
221         // If upper part is all zero we do not need op_sel_hi.
222         if (!isUInt<16>(Fold.ImmToFold)) {
223           if (!(Fold.ImmToFold & 0xffff)) {
224             Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
225             Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
226             Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
227             return true;
228           }
229           Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
230           Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
231           return true;
232         }
233         break;
234       default:
235         break;
236       }
237     }
238   }
239 
240   if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
241     MachineBasicBlock *MBB = MI->getParent();
242     auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
243     if (Liveness != MachineBasicBlock::LQR_Dead) {
244       LLVM_DEBUG(dbgs() << "Not shrinking " << *MI << " due to vcc liveness\n");
245       return false;
246     }
247 
248     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
249     int Op32 = Fold.getShrinkOpcode();
250     MachineOperand &Dst0 = MI->getOperand(0);
251     MachineOperand &Dst1 = MI->getOperand(1);
252     assert(Dst0.isDef() && Dst1.isDef());
253 
254     bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());
255 
256     const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
257     Register NewReg0 = MRI.createVirtualRegister(Dst0RC);
258 
259     MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);
260 
261     if (HaveNonDbgCarryUse) {
262       BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
263         .addReg(AMDGPU::VCC, RegState::Kill);
264     }
265 
266     // Keep the old instruction around to avoid breaking iterators, but
267     // replace it with a dummy instruction to remove uses.
268     //
269     // FIXME: We should not invert how this pass looks at operands to avoid
270     // this. Should track set of foldable movs instead of looking for uses
271     // when looking at a use.
272     Dst0.setReg(NewReg0);
273     for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
274       MI->RemoveOperand(I);
275     MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));
276 
277     if (Fold.isCommuted())
278       TII.commuteInstruction(*Inst32, false);
279     return true;
280   }
281 
282   assert(!Fold.needsShrink() && "not handled");
283 
284   if (Fold.isImm()) {
285     // FIXME: ChangeToImmediate should probably clear the subreg flags. The
286     // subreg index is otherwise reinterpreted as TargetFlags.
287     Old.setSubReg(0);
288     Old.ChangeToImmediate(Fold.ImmToFold);
289     return true;
290   }
291 
292   if (Fold.isGlobal()) {
293     Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
294                    Fold.OpToFold->getTargetFlags());
295     return true;
296   }
297 
298   if (Fold.isFI()) {
299     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
300     return true;
301   }
302 
303   MachineOperand *New = Fold.OpToFold;
304   Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
305   Old.setIsUndef(New->isUndef());
306   return true;
307 }
308 
309 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
310                               const MachineInstr *MI) {
311   for (auto Candidate : FoldList) {
312     if (Candidate.UseMI == MI)
313       return true;
314   }
315   return false;
316 }
317 
318 static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
319                                 MachineInstr *MI, unsigned OpNo,
320                                 MachineOperand *FoldOp, bool Commuted = false,
321                                 int ShrinkOp = -1) {
322   // Skip additional folding on the same operand.
323   for (FoldCandidate &Fold : FoldList)
324     if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
325       return;
326   LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
327                     << " operand " << OpNo << "\n  " << *MI << '\n');
328   FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
329 }
330 
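// Try to record a fold of OpToFold into operand OpNo of MI. If the operand is
// not directly legal, this may rewrite a mac/fmac into the equivalent mad/fma,
// switch s_setreg_b32 to its immediate form, or commute the instruction to
// make the fold possible.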
331 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
332                              MachineInstr *MI, unsigned OpNo,
333                              MachineOperand *OpToFold,
334                              const SIInstrInfo *TII) {
335   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
336     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
337     unsigned Opc = MI->getOpcode();
338     if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
339          Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
340         (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
341       bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
342                    Opc == AMDGPU::V_FMAC_F16_e64;
343       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
344                    Opc == AMDGPU::V_FMAC_F32_e64;
345       unsigned NewOpc = IsFMA ?
346         (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
347         (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
348 
349       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
350       // to fold the operand.
351       MI->setDesc(TII->get(NewOpc));
352       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
353       if (FoldAsMAD) {
354         MI->untieRegOperand(OpNo);
355         return true;
356       }
357       MI->setDesc(TII->get(Opc));
358     }
359 
360     // Special case for s_setreg_b32
361     if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
362       MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
363       appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
364       return true;
365     }
366 
367     // If we are already folding into another operand of MI, then
368     // we can't commute the instruction, otherwise we risk making the
369     // other fold illegal.
370     if (isUseMIInFoldList(FoldList, MI))
371       return false;
372 
373     unsigned CommuteOpNo = OpNo;
374 
375     // Operand is not legal, so try to commute the instruction to
376     // see if this makes it possible to fold.
377     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
378     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
379     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
380 
381     if (CanCommute) {
382       if (CommuteIdx0 == OpNo)
383         CommuteOpNo = CommuteIdx1;
384       else if (CommuteIdx1 == OpNo)
385         CommuteOpNo = CommuteIdx0;
386     }
387 
388 
389     // One of the operands might be an immediate, and OpNo may refer to it after
390     // the call to commuteInstruction() below. Such situations are avoided here
391     // explicitly, as OpNo must be a register operand to be a candidate for
392     // folding.
393     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
394                        !MI->getOperand(CommuteIdx1).isReg()))
395       return false;
396 
397     if (!CanCommute ||
398         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
399       return false;
400 
401     if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
402       if ((Opc == AMDGPU::V_ADD_CO_U32_e64 ||
403            Opc == AMDGPU::V_SUB_CO_U32_e64 ||
404            Opc == AMDGPU::V_SUBREV_CO_U32_e64) && // FIXME
405           (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
406         MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
407 
408         // Verify the other operand is a VGPR, otherwise we would violate the
409         // constant bus restriction.
410         unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
411         MachineOperand &OtherOp = MI->getOperand(OtherIdx);
412         if (!OtherOp.isReg() ||
413             !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
414           return false;
415 
416         assert(MI->getOperand(1).isDef());
417 
418         // Make sure to get the 32-bit version of the commuted opcode.
419         unsigned MaybeCommutedOpc = MI->getOpcode();
420         int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);
421 
422         appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
423         return true;
424       }
425 
426       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
427       return false;
428     }
429 
430     appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
431     return true;
432   }
433 
434   // Check the case where we might introduce a second constant operand to a
435   // scalar instruction
436   if (TII->isSALU(MI->getOpcode())) {
437     const MCInstrDesc &InstDesc = MI->getDesc();
438     const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
439     const SIRegisterInfo &SRI = TII->getRegisterInfo();
440 
441     // Fine if the operand can be encoded as an inline constant
442     if (OpToFold->isImm()) {
443       if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
444           !TII->isInlineConstant(*OpToFold, OpInfo)) {
445         // Otherwise check for another constant
446         for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
447           auto &Op = MI->getOperand(i);
448           if (OpNo != i &&
449               TII->isLiteralConstantLike(Op, OpInfo)) {
450             return false;
451           }
452         }
453       }
454     }
455   }
456 
457   appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
458   return true;
459 }
460 
461 // If the use operand doesn't care about the value, this may be an operand only
462 // used for register indexing, in which case it is unsafe to fold.
463 static bool isUseSafeToFold(const SIInstrInfo *TII,
464                             const MachineInstr &MI,
465                             const MachineOperand &UseMO) {
466   if (UseMO.isUndef() || TII->isSDWA(MI))
467     return false;
468 
469   switch (MI.getOpcode()) {
470   case AMDGPU::V_MOV_B32_e32:
471   case AMDGPU::V_MOV_B32_e64:
472   case AMDGPU::V_MOV_B64_PSEUDO:
473     // Do not fold into an indirect mov.
474     return !MI.hasRegisterImplicitUseOperand(AMDGPU::M0);
475   }
476 
477   return true;
478   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
479 }
480 
481 // Find the def of UseReg, check whether it is a reg_sequence, and find the
482 // initializer for each subreg, tracking each back to a foldable inline
483 // immediate if possible. Returns true on success.
484 static bool getRegSeqInit(
485     SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
486     Register UseReg, uint8_t OpTy,
487     const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
488   MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
489   if (!Def || !Def->isRegSequence())
490     return false;
491 
492   for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
493     MachineOperand *Sub = &Def->getOperand(I);
494     assert (Sub->isReg());
495 
496     for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
497          SubDef && Sub->isReg() && !Sub->getSubReg() &&
498          TII->isFoldableCopy(*SubDef);
499          SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
500       MachineOperand *Op = &SubDef->getOperand(1);
501       if (Op->isImm()) {
502         if (TII->isInlineConstant(*Op, OpTy))
503           Sub = Op;
504         break;
505       }
506       if (!Op->isReg())
507         break;
508       Sub = Op;
509     }
510 
511     Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
512   }
513 
514   return true;
515 }
516 
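// Try to fold an immediate into an accumulator (AGPR) operand: either the
// immediate itself if it is an inline constant, or a REG_SEQUENCE whose
// subregisters are all initialized with the same splat inline constant.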
517 static bool tryToFoldACImm(const SIInstrInfo *TII,
518                            const MachineOperand &OpToFold,
519                            MachineInstr *UseMI,
520                            unsigned UseOpIdx,
521                            SmallVectorImpl<FoldCandidate> &FoldList) {
522   const MCInstrDesc &Desc = UseMI->getDesc();
523   const MCOperandInfo *OpInfo = Desc.OpInfo;
524   if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
525     return false;
526 
527   uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
528   if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
529       OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
530     return false;
531 
532   if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
533       TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
534     UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
535     return true;
536   }
537 
538   if (!OpToFold.isReg())
539     return false;
540 
541   Register UseReg = OpToFold.getReg();
542   if (!Register::isVirtualRegister(UseReg))
543     return false;
544 
545   if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
546         return FC.UseMI == UseMI; }) != FoldList.end())
547     return false;
548 
549   MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
550   SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
551   if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
552     return false;
553 
554   int32_t Imm;
555   for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
556     const MachineOperand *Op = Defs[I].first;
557     if (!Op->isImm())
558       return false;
559 
560     auto SubImm = Op->getImm();
561     if (!I) {
562       Imm = SubImm;
563       if (!TII->isInlineConstant(*Op, OpTy) ||
564           !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
565         return false;
566 
567       continue;
568     }
569     if (Imm != SubImm)
570       return false; // Can only fold splat constants
571   }
572 
573   appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
574   return true;
575 }
576 
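// Try to fold OpToFold into operand UseOpIdx of UseMI, recording folds in
// FoldList and copies that were rewritten to mov instructions in
// CopiesToReplace. Handles REG_SEQUENCE uses, frame index addressing operands,
// copies into AGPRs, and v_readfirstlane/v_readlane of immediates or SGPRs as
// special cases.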
577 void SIFoldOperands::foldOperand(
578   MachineOperand &OpToFold,
579   MachineInstr *UseMI,
580   int UseOpIdx,
581   SmallVectorImpl<FoldCandidate> &FoldList,
582   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
583   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
584 
585   if (!isUseSafeToFold(TII, *UseMI, UseOp))
586     return;
587 
588   // FIXME: Fold operands with subregs.
589   if (UseOp.isReg() && OpToFold.isReg()) {
590     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
591       return;
592   }
593 
594   // Special case for REG_SEQUENCE: We can't fold literals into
595   // REG_SEQUENCE instructions, so we have to fold them into the
596   // uses of REG_SEQUENCE.
597   if (UseMI->isRegSequence()) {
598     Register RegSeqDstReg = UseMI->getOperand(0).getReg();
599     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
600 
601     MachineRegisterInfo::use_iterator Next;
602     for (MachineRegisterInfo::use_iterator
603            RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
604          RSUse != RSE; RSUse = Next) {
605       Next = std::next(RSUse);
606 
607       MachineInstr *RSUseMI = RSUse->getParent();
608 
609       if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
610                          RSUse.getOperandNo(), FoldList))
611         continue;
612 
613       if (RSUse->getSubReg() != RegSeqDstSubReg)
614         continue;
615 
616       foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
617                   CopiesToReplace);
618     }
619 
620     return;
621   }
622 
623   if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
624     return;
625 
626   if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
627     // Sanity check that this is a stack access.
628     // FIXME: Should probably use stack pseudos before frame lowering.
629 
630     if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
631         MFI->getScratchRSrcReg())
632       return;
633 
634     // Ensure this is either relative to the current frame or the current wave.
635     MachineOperand &SOff =
636         *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
637     if ((!SOff.isReg() || SOff.getReg() != MFI->getStackPtrOffsetReg()) &&
638         (!SOff.isImm() || SOff.getImm() != 0))
639       return;
640 
641     // A frame index will resolve to a positive constant, so it should always be
642     // safe to fold it into the addressing mode, even pre-GFX9.
643     UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());
644 
645     // If this is relative to the current wave, update it to be relative to the
646     // current frame.
647     if (SOff.isImm())
648       SOff.ChangeToRegister(MFI->getStackPtrOffsetReg(), false);
649     return;
650   }
651 
652   bool FoldingImmLike =
653       OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
654 
655   if (FoldingImmLike && UseMI->isCopy()) {
656     Register DestReg = UseMI->getOperand(0).getReg();
657     Register SrcReg = UseMI->getOperand(1).getReg();
658     assert(SrcReg.isVirtual());
659 
660     const TargetRegisterClass *SrcRC = MRI->getRegClass(SrcReg);
661 
662     // Don't fold into a copy to a physical register with the same class. Doing
663     // so would interfere with the register coalescer's logic, which would avoid
664     // redundant initializations.
665     if (DestReg.isPhysical() && SrcRC->contains(DestReg))
666       return;
667 
668     const TargetRegisterClass *DestRC = TRI->getRegClassForReg(*MRI, DestReg);
669     if (!DestReg.isPhysical()) {
670       if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
671         MachineRegisterInfo::use_iterator NextUse;
672         SmallVector<FoldCandidate, 4> CopyUses;
673         for (MachineRegisterInfo::use_iterator Use = MRI->use_begin(DestReg),
674                E = MRI->use_end();
675              Use != E; Use = NextUse) {
676           NextUse = std::next(Use);
677           // There's no point trying to fold into an implicit operand.
678           if (Use->isImplicit())
679             continue;
680 
681           FoldCandidate FC = FoldCandidate(Use->getParent(), Use.getOperandNo(),
682                                            &UseMI->getOperand(1));
683           CopyUses.push_back(FC);
684         }
685         for (auto &F : CopyUses) {
686           foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo, FoldList, CopiesToReplace);
687         }
688       }
689 
690       if (DestRC == &AMDGPU::AGPR_32RegClass &&
691           TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
692         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
693         UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
694         CopiesToReplace.push_back(UseMI);
695         return;
696       }
697     }
698 
699     // In order to fold immediates into copies, we need to change the
700     // copy to a MOV.
701 
702     unsigned MovOp = TII->getMovOpcode(DestRC);
703     if (MovOp == AMDGPU::COPY)
704       return;
705 
706     UseMI->setDesc(TII->get(MovOp));
707     MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
708     MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
709     while (ImpOpI != ImpOpE) {
710       MachineInstr::mop_iterator Tmp = ImpOpI;
711       ImpOpI++;
712       UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
713     }
714     CopiesToReplace.push_back(UseMI);
715   } else {
716     if (UseMI->isCopy() && OpToFold.isReg() &&
717         UseMI->getOperand(0).getReg().isVirtual() &&
718         !UseMI->getOperand(1).getSubReg()) {
719       LLVM_DEBUG(dbgs() << "Folding " << OpToFold
720                         << "\n into " << *UseMI << '\n');
721       unsigned Size = TII->getOpSize(*UseMI, 1);
722       Register UseReg = OpToFold.getReg();
723       UseMI->getOperand(1).setReg(UseReg);
724       UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
725       UseMI->getOperand(1).setIsKill(false);
726       CopiesToReplace.push_back(UseMI);
727       OpToFold.setIsKill(false);
728 
729       // It is tricky to store a value into an AGPR: v_accvgpr_write_b32 can
730       // only accept a VGPR or an inline immediate. Recreate the reg_sequence
731       // with its initializers right here, so that immediates are rematerialized
732       // and copies through different register classes are avoided.
733       SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
734       if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
735           getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
736                         *MRI)) {
737         const DebugLoc &DL = UseMI->getDebugLoc();
738         MachineBasicBlock &MBB = *UseMI->getParent();
739 
740         UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
741         for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
742           UseMI->RemoveOperand(I);
743 
744         MachineInstrBuilder B(*MBB.getParent(), UseMI);
745         DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
746         SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
747         for (unsigned I = 0; I < Size / 4; ++I) {
748           MachineOperand *Def = Defs[I].first;
749           TargetInstrInfo::RegSubRegPair CopyToVGPR;
750           if (Def->isImm() &&
751               TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
752             int64_t Imm = Def->getImm();
753 
754             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
755             BuildMI(MBB, UseMI, DL,
756                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
757             B.addReg(Tmp);
758           } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
759             auto Src = getRegSubRegPair(*Def);
760             Def->setIsKill(false);
761             if (!SeenAGPRs.insert(Src)) {
762               // We cannot build a reg_sequence out of the same registers; they
763               // must be copied. Better to do it here, before copyPhysReg() later
764               // creates several reads to do the AGPR->VGPR->AGPR copy.
765               CopyToVGPR = Src;
766             } else {
767               B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
768                        Src.SubReg);
769             }
770           } else {
771             assert(Def->isReg());
772             Def->setIsKill(false);
773             auto Src = getRegSubRegPair(*Def);
774 
775             // A direct copy from SGPR to AGPR is not possible. To avoid creating
776             // exploded SGPR->VGPR->AGPR copies in copyPhysReg() later, create a
777             // copy here and track whether we already have such a copy.
778             if (TRI->isSGPRReg(*MRI, Src.Reg)) {
779               CopyToVGPR = Src;
780             } else {
781               auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
782               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
783               B.addReg(Tmp);
784             }
785           }
786 
787           if (CopyToVGPR.Reg) {
788             Register Vgpr;
789             if (VGPRCopies.count(CopyToVGPR)) {
790               Vgpr = VGPRCopies[CopyToVGPR];
791             } else {
792               Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
793               BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
794               VGPRCopies[CopyToVGPR] = Vgpr;
795             }
796             auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
797             BuildMI(MBB, UseMI, DL,
798                     TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
799             B.addReg(Tmp);
800           }
801 
802           B.addImm(Defs[I].second);
803         }
804         LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
805         return;
806       }
807 
808       if (Size != 4)
809         return;
810       if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
811           TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
812         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
813       else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
814                TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
815         UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
816       return;
817     }
818 
819     unsigned UseOpc = UseMI->getOpcode();
820     if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
821         (UseOpc == AMDGPU::V_READLANE_B32 &&
822          (int)UseOpIdx ==
823          AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
824       // %vgpr = V_MOV_B32 imm
825       // %sgpr = V_READFIRSTLANE_B32 %vgpr
826       // =>
827       // %sgpr = S_MOV_B32 imm
828       if (FoldingImmLike) {
829         if (execMayBeModifiedBeforeUse(*MRI,
830                                        UseMI->getOperand(UseOpIdx).getReg(),
831                                        *OpToFold.getParent(),
832                                        *UseMI))
833           return;
834 
835         UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));
836 
837         // FIXME: ChangeToImmediate should clear subreg
838         UseMI->getOperand(1).setSubReg(0);
839         if (OpToFold.isImm())
840           UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
841         else
842           UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
843         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
844         return;
845       }
846 
847       if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
848         if (execMayBeModifiedBeforeUse(*MRI,
849                                        UseMI->getOperand(UseOpIdx).getReg(),
850                                        *OpToFold.getParent(),
851                                        *UseMI))
852           return;
853 
854         // %vgpr = COPY %sgpr0
855         // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
856         // =>
857         // %sgpr1 = COPY %sgpr0
858         UseMI->setDesc(TII->get(AMDGPU::COPY));
859         UseMI->getOperand(1).setReg(OpToFold.getReg());
860         UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
861         UseMI->getOperand(1).setIsKill(false);
862         UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
863         return;
864       }
865     }
866 
867     const MCInstrDesc &UseDesc = UseMI->getDesc();
868 
869     // Don't fold into target independent nodes.  Target independent opcodes
870     // don't have defined register classes.
871     if (UseDesc.isVariadic() ||
872         UseOp.isImplicit() ||
873         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
874       return;
875   }
876 
877   if (!FoldingImmLike) {
878     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
879 
880     // FIXME: We could try to change the instruction from 64-bit to 32-bit
881     // to enable more folding opportunities. The shrink operands pass
882     // already does this.
883     return;
884   }
885 
886 
887   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
888   const TargetRegisterClass *FoldRC =
889     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
890 
891   // Split 64-bit constants into 32-bits for folding.
892   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
893     Register UseReg = UseOp.getReg();
894     const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
895 
896     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
897       return;
898 
899     APInt Imm(64, OpToFold.getImm());
900     if (UseOp.getSubReg() == AMDGPU::sub0) {
901       Imm = Imm.getLoBits(32);
902     } else {
903       assert(UseOp.getSubReg() == AMDGPU::sub1);
904       Imm = Imm.getHiBits(32);
905     }
906 
907     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
908     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
909     return;
910   }
911 
912 
913 
914   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
915 }
916 
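// Constant fold a 32-bit bitwise or shift instruction given both source
// values. Returns false if the opcode is not handled.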
917 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
918                                   uint32_t LHS, uint32_t RHS) {
919   switch (Opcode) {
920   case AMDGPU::V_AND_B32_e64:
921   case AMDGPU::V_AND_B32_e32:
922   case AMDGPU::S_AND_B32:
923     Result = LHS & RHS;
924     return true;
925   case AMDGPU::V_OR_B32_e64:
926   case AMDGPU::V_OR_B32_e32:
927   case AMDGPU::S_OR_B32:
928     Result = LHS | RHS;
929     return true;
930   case AMDGPU::V_XOR_B32_e64:
931   case AMDGPU::V_XOR_B32_e32:
932   case AMDGPU::S_XOR_B32:
933     Result = LHS ^ RHS;
934     return true;
935   case AMDGPU::S_XNOR_B32:
936     Result = ~(LHS ^ RHS);
937     return true;
938   case AMDGPU::S_NAND_B32:
939     Result = ~(LHS & RHS);
940     return true;
941   case AMDGPU::S_NOR_B32:
942     Result = ~(LHS | RHS);
943     return true;
944   case AMDGPU::S_ANDN2_B32:
945     Result = LHS & ~RHS;
946     return true;
947   case AMDGPU::S_ORN2_B32:
948     Result = LHS | ~RHS;
949     return true;
950   case AMDGPU::V_LSHL_B32_e64:
951   case AMDGPU::V_LSHL_B32_e32:
952   case AMDGPU::S_LSHL_B32:
953     // The instruction ignores the high bits for out of bounds shifts.
954     Result = LHS << (RHS & 31);
955     return true;
956   case AMDGPU::V_LSHLREV_B32_e64:
957   case AMDGPU::V_LSHLREV_B32_e32:
958     Result = RHS << (LHS & 31);
959     return true;
960   case AMDGPU::V_LSHR_B32_e64:
961   case AMDGPU::V_LSHR_B32_e32:
962   case AMDGPU::S_LSHR_B32:
963     Result = LHS >> (RHS & 31);
964     return true;
965   case AMDGPU::V_LSHRREV_B32_e64:
966   case AMDGPU::V_LSHRREV_B32_e32:
967     Result = RHS >> (LHS & 31);
968     return true;
969   case AMDGPU::V_ASHR_I32_e64:
970   case AMDGPU::V_ASHR_I32_e32:
971   case AMDGPU::S_ASHR_I32:
972     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
973     return true;
974   case AMDGPU::V_ASHRREV_I32_e64:
975   case AMDGPU::V_ASHRREV_I32_e32:
976     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
977     return true;
978   default:
979     return false;
980   }
981 }
982 
983 static unsigned getMovOpc(bool IsScalar) {
984   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
985 }
986 
987 /// Remove any leftover implicit operands from mutating the instruction. e.g.
988 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
989 /// anymore.
990 static void stripExtraCopyOperands(MachineInstr &MI) {
991   const MCInstrDesc &Desc = MI.getDesc();
992   unsigned NumOps = Desc.getNumOperands() +
993                     Desc.getNumImplicitUses() +
994                     Desc.getNumImplicitDefs();
995 
996   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
997     MI.RemoveOperand(I);
998 }
999 
1000 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
1001   MI.setDesc(NewDesc);
1002   stripExtraCopyOperands(MI);
1003 }
1004 
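// If Op is a virtual register (with no subregister) defined by a
// move-immediate, return the immediate source operand of that def; otherwise
// return Op itself.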
1005 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
1006                                                MachineOperand &Op) {
1007   if (Op.isReg()) {
1008     // If this has a subregister, it obviously is a register source.
1009     if (Op.getSubReg() != AMDGPU::NoSubRegister ||
1010         !Register::isVirtualRegister(Op.getReg()))
1011       return &Op;
1012 
1013     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
1014     if (Def && Def->isMoveImmediate()) {
1015       MachineOperand &ImmSrc = Def->getOperand(1);
1016       if (ImmSrc.isImm())
1017         return &ImmSrc;
1018     }
1019   }
1020 
1021   return &Op;
1022 }
1023 
1024 // Try to simplify operations with a constant that may appear after instruction
1025 // selection.
1026 // TODO: See if a frame index with a fixed offset can fold.
1027 static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
1028                               const SIInstrInfo *TII,
1029                               MachineInstr *MI,
1030                               MachineOperand *ImmOp) {
1031   unsigned Opc = MI->getOpcode();
1032   if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
1033       Opc == AMDGPU::S_NOT_B32) {
1034     MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
1035     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
1036     return true;
1037   }
1038 
1039   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1040   if (Src1Idx == -1)
1041     return false;
1042 
1043   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1044   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
1045   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
1046 
1047   if (!Src0->isImm() && !Src1->isImm())
1048     return false;
1049 
1050   if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32 ||
1051       MI->getOpcode() == AMDGPU::V_LSHL_ADD_U32 ||
1052       MI->getOpcode() == AMDGPU::V_AND_OR_B32) {
1053     if (Src0->isImm() && Src0->getImm() == 0) {
1054       // v_lshl_or_b32 0, X, Y -> copy Y
1055       // v_lshl_or_b32 0, X, K -> v_mov_b32 K
1056       // v_lshl_add_u32 0, X, Y -> copy Y
1057       // v_lshl_add_u32 0, X, K -> v_mov_b32 K
1058       // v_and_or_b32 0, X, Y -> copy Y
1059       // v_and_or_b32 0, X, K -> v_mov_b32 K
1060       bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
1061       MI->RemoveOperand(Src1Idx);
1062       MI->RemoveOperand(Src0Idx);
1063 
1064       MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
1065       return true;
1066     }
1067   }
1068 
1069   // and k0, k1 -> v_mov_b32 (k0 & k1)
1070   // or k0, k1 -> v_mov_b32 (k0 | k1)
1071   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
1072   if (Src0->isImm() && Src1->isImm()) {
1073     int32_t NewImm;
1074     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
1075       return false;
1076 
1077     const SIRegisterInfo &TRI = TII->getRegisterInfo();
1078     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
1079 
1080     // Be careful to change the right operand, src0 may belong to a different
1081     // instruction.
1082     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
1083     MI->RemoveOperand(Src1Idx);
1084     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
1085     return true;
1086   }
1087 
1088   if (!MI->isCommutable())
1089     return false;
1090 
1091   if (Src0->isImm() && !Src1->isImm()) {
1092     std::swap(Src0, Src1);
1093     std::swap(Src0Idx, Src1Idx);
1094   }
1095 
1096   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
1097   if (Opc == AMDGPU::V_OR_B32_e64 ||
1098       Opc == AMDGPU::V_OR_B32_e32 ||
1099       Opc == AMDGPU::S_OR_B32) {
1100     if (Src1Val == 0) {
1101       // y = or x, 0 => y = copy x
1102       MI->RemoveOperand(Src1Idx);
1103       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1104     } else if (Src1Val == -1) {
1105       // y = or x, -1 => y = v_mov_b32 -1
1106       MI->RemoveOperand(Src1Idx);
1107       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
1108     } else
1109       return false;
1110 
1111     return true;
1112   }
1113 
1114   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
1115       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
1116       MI->getOpcode() == AMDGPU::S_AND_B32) {
1117     if (Src1Val == 0) {
1118       // y = and x, 0 => y = v_mov_b32 0
1119       MI->RemoveOperand(Src0Idx);
1120       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
1121     } else if (Src1Val == -1) {
1122       // y = and x, -1 => y = copy x
1123       MI->RemoveOperand(Src1Idx);
1124       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1125       stripExtraCopyOperands(*MI);
1126     } else
1127       return false;
1128 
1129     return true;
1130   }
1131 
1132   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
1133       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
1134       MI->getOpcode() == AMDGPU::S_XOR_B32) {
1135     if (Src1Val == 0) {
1136       // y = xor x, 0 => y = copy x
1137       MI->RemoveOperand(Src1Idx);
1138       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
1139       return true;
1140     }
1141   }
1142 
1143   return false;
1144 }
1145 
1146 // Try to fold an instruction into a simpler one
1147 static bool tryFoldInst(const SIInstrInfo *TII,
1148                         MachineInstr *MI) {
1149   unsigned Opc = MI->getOpcode();
1150 
1151   if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
1152       Opc == AMDGPU::V_CNDMASK_B32_e64    ||
1153       Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
1154     const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
1155     const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
1156     int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
1157     int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
1158     if (Src1->isIdenticalTo(*Src0) &&
1159         (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
1160         (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
1161       LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
1162       auto &NewDesc =
1163           TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
1164       int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
1165       if (Src2Idx != -1)
1166         MI->RemoveOperand(Src2Idx);
1167       MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
1168       if (Src1ModIdx != -1)
1169         MI->RemoveOperand(Src1ModIdx);
1170       if (Src0ModIdx != -1)
1171         MI->RemoveOperand(Src0ModIdx);
1172       mutateCopyOp(*MI, NewDesc);
1173       LLVM_DEBUG(dbgs() << *MI << '\n');
1174       return true;
1175     }
1176   }
1177 
1178   return false;
1179 }
1180 
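// Fold the source operand OpToFold of the foldable instruction MI into all
// uses of MI's destination register, then apply the collected fold
// candidates, undoing the commute if a commuted fold fails.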
1181 void SIFoldOperands::foldInstOperand(MachineInstr &MI,
1182                                      MachineOperand &OpToFold) const {
1183   // We need to mutate the operands of new mov instructions to add implicit
1184   // uses of EXEC, but adding them invalidates the use_iterator, so defer
1185   // this.
1186   SmallVector<MachineInstr *, 4> CopiesToReplace;
1187   SmallVector<FoldCandidate, 4> FoldList;
1188   MachineOperand &Dst = MI.getOperand(0);
1189 
1190   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1191   if (FoldingImm) {
1192     unsigned NumLiteralUses = 0;
1193     MachineOperand *NonInlineUse = nullptr;
1194     int NonInlineUseOpNo = -1;
1195 
1196     MachineRegisterInfo::use_iterator NextUse;
1197     for (MachineRegisterInfo::use_iterator
1198            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
1199          Use != E; Use = NextUse) {
1200       NextUse = std::next(Use);
1201       MachineInstr *UseMI = Use->getParent();
1202       unsigned OpNo = Use.getOperandNo();
1203 
1204       // Folding the immediate may reveal operations that can be constant
1205       // folded or replaced with a copy. This can happen for example after
1206       // frame indices are lowered to constants or from splitting 64-bit
1207       // constants.
1208       //
1209       // We may also encounter cases where one or both operands are
1210       // immediates materialized into a register, which would ordinarily not
1211       // be folded due to multiple uses or operand constraints.
1212 
1213       if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
1214         LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');
1215 
1216         // Some constant folding cases change the same immediate's use to a new
1217         // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
1218         // again. The same constant folded instruction could also have a second
1219         // use operand.
1220         NextUse = MRI->use_begin(Dst.getReg());
1221         FoldList.clear();
1222         continue;
1223       }
1224 
1225       // Try to fold any inline immediate uses, and then only fold other
1226       // constants if they have one use.
1227       //
1228       // The legality of the inline immediate must be checked based on the use
1229       // operand, not the defining instruction, because 32-bit instructions
1230       // with 32-bit inline immediate sources may be used to materialize
1231       // constants used in 16-bit operands.
1232       //
1233       // e.g. it is unsafe to fold:
1234       //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
1235       //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
1236 
1237       // Folding immediates with more than one use will increase program size.
1238       // FIXME: This will also reduce register usage, which may be better
1239       // in some cases. A better heuristic is needed.
1240       if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
1241         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
1242       } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
1243         foldOperand(OpToFold, UseMI, OpNo, FoldList,
1244                     CopiesToReplace);
1245       } else {
1246         if (++NumLiteralUses == 1) {
1247           NonInlineUse = &*Use;
1248           NonInlineUseOpNo = OpNo;
1249         }
1250       }
1251     }
1252 
1253     if (NumLiteralUses == 1) {
1254       MachineInstr *UseMI = NonInlineUse->getParent();
1255       foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
1256     }
1257   } else {
1258     // Folding register.
1259     SmallVector <MachineRegisterInfo::use_iterator, 4> UsesToProcess;
1260     for (MachineRegisterInfo::use_iterator
1261            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
1262          Use != E; ++Use) {
1263       UsesToProcess.push_back(Use);
1264     }
1265     for (auto U : UsesToProcess) {
1266       MachineInstr *UseMI = U->getParent();
1267 
1268       foldOperand(OpToFold, UseMI, U.getOperandNo(),
1269         FoldList, CopiesToReplace);
1270     }
1271   }
1272 
1273   MachineFunction *MF = MI.getParent()->getParent();
1274   // Make sure we add EXEC uses to any new v_mov instructions created.
1275   for (MachineInstr *Copy : CopiesToReplace)
1276     Copy->addImplicitDefUseOperands(*MF);
1277 
1278   for (FoldCandidate &Fold : FoldList) {
1279     assert(!Fold.isReg() || Fold.OpToFold);
1280     if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
1281       Register Reg = Fold.OpToFold->getReg();
1282       MachineInstr *DefMI = Fold.OpToFold->getParent();
1283       if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
1284           execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
1285         continue;
1286     }
1287     if (updateOperand(Fold, *TII, *TRI, *ST)) {
1288       // Clear kill flags.
1289       if (Fold.isReg()) {
1290         assert(Fold.OpToFold && Fold.OpToFold->isReg());
1291         // FIXME: Probably shouldn't bother trying to fold if not an
1292         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
1293         // copies.
1294         MRI->clearKillFlags(Fold.OpToFold->getReg());
1295       }
1296       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
1297                         << static_cast<int>(Fold.UseOpNo) << " of "
1298                         << *Fold.UseMI << '\n');
1299       tryFoldInst(TII, Fold.UseMI);
1300     } else if (Fold.isCommuted()) {
1301       // Restoring instruction's original operand order if fold has failed.
1302       TII->commuteInstruction(*Fold.UseMI, false);
1303     }
1304   }
1305 }
1306 
1307 // Clamp patterns are canonically selected to v_max_* instructions, so only
1308 // handle them.
1309 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
1310   unsigned Op = MI.getOpcode();
1311   switch (Op) {
1312   case AMDGPU::V_MAX_F32_e64:
1313   case AMDGPU::V_MAX_F16_e64:
1314   case AMDGPU::V_MAX_F64:
1315   case AMDGPU::V_PK_MAX_F16: {
1316     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
1317       return nullptr;
1318 
1319     // Make sure sources are identical.
1320     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1321     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1322     if (!Src0->isReg() || !Src1->isReg() ||
1323         Src0->getReg() != Src1->getReg() ||
1324         Src0->getSubReg() != Src1->getSubReg() ||
1325         Src0->getSubReg() != AMDGPU::NoSubRegister)
1326       return nullptr;
1327 
1328     // Can't fold up if we have modifiers.
1329     if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1330       return nullptr;
1331 
1332     unsigned Src0Mods
1333       = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
1334     unsigned Src1Mods
1335       = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();
1336 
1337     // Having a 0 op_sel_hi would require swizzling the output in the source
1338     // instruction, which we can't do.
1339     unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
1340                                                       : 0u;
1341     if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
1342       return nullptr;
1343     return Src0;
1344   }
1345   default:
1346     return nullptr;
1347   }
1348 }
1349 
1350 // We obviously have multiple uses in a clamp since the register is used twice
1351 // in the same instruction.
1352 static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
1353   int Count = 0;
1354   for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
1355        I != E; ++I) {
1356     if (++Count > 1)
1357       return false;
1358   }
1359 
1360   return true;
1361 }
1362 
1363 // FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
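// Schematically (modifier operands omitted), the clamp is pushed onto the
// defining instruction and the max is erased:
//   %1 = V_ADD_F32_e64 %a, %b
//   %2 = V_MAX_F32_e64 %1, %1, clamp
// -->
//   %1 = V_ADD_F32_e64 %a, %b, clamp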
1364 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
1365   const MachineOperand *ClampSrc = isClamp(MI);
1366   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
1367     return false;
1368 
1369   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
1370 
1371   // The type of clamp must be compatible.
1372   if (TII->getClampMask(*Def) != TII->getClampMask(MI))
1373     return false;
1374 
1375   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
1376   if (!DefClamp)
1377     return false;
1378 
1379   LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
1380                     << '\n');
1381 
1382   // Clamp is applied after omod, so it is OK if omod is set.
1383   DefClamp->setImm(1);
1384   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1385   MI.eraseFromParent();
1386   return true;
1387 }
1388 
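// Map a multiplier constant (0.5, 2.0 or 4.0, in the bit pattern of the
// multiply's type) to the corresponding output modifier, or SIOutMods::NONE.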
1389 static int getOModValue(unsigned Opc, int64_t Val) {
1390   switch (Opc) {
1391   case AMDGPU::V_MUL_F32_e64: {
1392     switch (static_cast<uint32_t>(Val)) {
1393     case 0x3f000000: // 0.5
1394       return SIOutMods::DIV2;
1395     case 0x40000000: // 2.0
1396       return SIOutMods::MUL2;
1397     case 0x40800000: // 4.0
1398       return SIOutMods::MUL4;
1399     default:
1400       return SIOutMods::NONE;
1401     }
1402   }
1403   case AMDGPU::V_MUL_F16_e64: {
1404     switch (static_cast<uint16_t>(Val)) {
1405     case 0x3800: // 0.5
1406       return SIOutMods::DIV2;
1407     case 0x4000: // 2.0
1408       return SIOutMods::MUL2;
1409     case 0x4400: // 4.0
1410       return SIOutMods::MUL4;
1411     default:
1412       return SIOutMods::NONE;
1413     }
1414   }
1415   default:
1416     llvm_unreachable("invalid mul opcode");
1417   }
1418 }
1419 
1420 // FIXME: Does this really not support denormals with f16?
1421 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
1422 // handled, so will anything other than that break?
1423 std::pair<const MachineOperand *, int>
1424 SIFoldOperands::isOMod(const MachineInstr &MI) const {
1425   unsigned Op = MI.getOpcode();
1426   switch (Op) {
1427   case AMDGPU::V_MUL_F32_e64:
1428   case AMDGPU::V_MUL_F16_e64: {
1429     // If output denormals are enabled, omod is ignored.
1430     if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1431         (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
1432       return std::make_pair(nullptr, SIOutMods::NONE);
1433 
1434     const MachineOperand *RegOp = nullptr;
1435     const MachineOperand *ImmOp = nullptr;
1436     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1437     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1438     if (Src0->isImm()) {
1439       ImmOp = Src0;
1440       RegOp = Src1;
1441     } else if (Src1->isImm()) {
1442       ImmOp = Src1;
1443       RegOp = Src0;
1444     } else
1445       return std::make_pair(nullptr, SIOutMods::NONE);
1446 
1447     int OMod = getOModValue(Op, ImmOp->getImm());
1448     if (OMod == SIOutMods::NONE ||
1449         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
1450         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
1451         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
1452         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
1453       return std::make_pair(nullptr, SIOutMods::NONE);
1454 
1455     return std::make_pair(RegOp, OMod);
1456   }
1457   case AMDGPU::V_ADD_F32_e64:
1458   case AMDGPU::V_ADD_F16_e64: {
1459     // If output denormals are enabled, omod is ignored.
1460     if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
1461         (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
1462       return std::make_pair(nullptr, SIOutMods::NONE);
1463 
1464     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
1465     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
1466     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
1467 
1468     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
1469         Src0->getSubReg() == Src1->getSubReg() &&
1470         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
1471         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
1472         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
1473         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
1474       return std::make_pair(Src0, SIOutMods::MUL2);
1475 
1476     return std::make_pair(nullptr, SIOutMods::NONE);
1477   }
1478   default:
1479     return std::make_pair(nullptr, SIOutMods::NONE);
1480   }
1481 }
1482 
1483 // FIXME: Does this need to check IEEE bit on function?
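// Schematically (modifier operands omitted), the multiply becomes an output
// modifier on the defining instruction and is erased:
//   %1 = V_ADD_F32_e64 %a, %b
//   %2 = V_MUL_F32_e64 2.0, %1
// -->
//   %1 = V_ADD_F32_e64 %a, %b, omod:MUL2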
1484 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
1485   const MachineOperand *RegOp;
1486   int OMod;
1487   std::tie(RegOp, OMod) = isOMod(MI);
1488   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
1489       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
1490       !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
1491     return false;
1492 
1493   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
1494   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
1495   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
1496     return false;
1497 
1498   // Clamp is applied after omod. If the source already has clamp set, don't
1499   // fold it.
1500   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
1501     return false;
1502 
1503   LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
1504 
1505   DefOMod->setImm(OMod);
1506   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
1507   MI.eraseFromParent();
1508   return true;
1509 }
1510 
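// Visit blocks in depth-first order. Every instruction is first given a
// chance to simplify via tryFoldInst; the source of each foldable copy is
// then folded into all of its uses, omod and clamp folding is attempted on
// other instructions, and redundant rewrites of m0 to the same value within a
// block are erased.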
1511 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
1512   if (skipFunction(MF.getFunction()))
1513     return false;
1514 
1515   MRI = &MF.getRegInfo();
1516   ST = &MF.getSubtarget<GCNSubtarget>();
1517   TII = ST->getInstrInfo();
1518   TRI = &TII->getRegisterInfo();
1519   MFI = MF.getInfo<SIMachineFunctionInfo>();
1520 
1521   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
1522   // correctly handle signed zeros.
1523   //
1524   // FIXME: Also need to check strictfp
1525   bool IsIEEEMode = MFI->getMode().IEEE;
1526   bool HasNSZ = MFI->hasNoSignedZerosFPMath();
1527 
1528   for (MachineBasicBlock *MBB : depth_first(&MF)) {
1529     MachineBasicBlock::iterator I, Next;
1530 
1531     MachineOperand *CurrentKnownM0Val = nullptr;
1532     for (I = MBB->begin(); I != MBB->end(); I = Next) {
1533       Next = std::next(I);
1534       MachineInstr &MI = *I;
1535 
1536       tryFoldInst(TII, &MI);
1537 
1538       if (!TII->isFoldableCopy(MI)) {
1539         // Saw an unknown clobber of m0, so we no longer know what it is.
1540         if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
1541           CurrentKnownM0Val = nullptr;
1542 
1543         // TODO: Omod might be OK if there is NSZ only on the source
1544         // instruction, and not the omod multiply.
1545         if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
1546             !tryFoldOMod(MI))
1547           tryFoldClamp(MI);
1548 
1549         continue;
1550       }
1551 
1552       // Specially track simple redefs of m0 to the same value in a block, so we
1553       // can erase the later ones.
1554       if (MI.getOperand(0).getReg() == AMDGPU::M0) {
1555         MachineOperand &NewM0Val = MI.getOperand(1);
1556         if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
1557           MI.eraseFromParent();
1558           continue;
1559         }
1560 
1561         // We aren't tracking other physical registers
1562         CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
1563           nullptr : &NewM0Val;
1564         continue;
1565       }
1566 
1567       MachineOperand &OpToFold = MI.getOperand(1);
1568       bool FoldingImm =
1569           OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
1570 
1571       // FIXME: We could also be folding things like TargetIndexes.
1572       if (!FoldingImm && !OpToFold.isReg())
1573         continue;
1574 
1575       if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
1576         continue;
1577 
1578       // Prevent folding operands backwards in the function. For example,
1579       // the COPY opcode must not be replaced by 1 in this example:
1580       //
1581       //    %3 = COPY %vgpr0; VGPR_32:%3
1582       //    ...
1583       //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
1584       MachineOperand &Dst = MI.getOperand(0);
1585       if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
1586         continue;
1587 
1588       foldInstOperand(MI, OpToFold);
1589     }
1590   }
1591   return true;
1592 }
1593