1 //===-- SIFoldOperands.cpp - Fold operands --------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 /// \file
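/// SI Fold Operands: fold the source operands of foldable copies (immediates,
/// frame indexes and virtual registers) into their uses, and fold the clamp
/// and output modifier (omod) patterns into the instructions defining their
/// inputs.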
9 //===----------------------------------------------------------------------===//
10 //
11 
12 #include "AMDGPU.h"
13 #include "AMDGPUSubtarget.h"
14 #include "SIInstrInfo.h"
15 #include "SIMachineFunctionInfo.h"
16 #include "llvm/ADT/DepthFirstIterator.h"
17 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
18 #include "llvm/CodeGen/MachineFunctionPass.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
23 #include "llvm/Target/TargetMachine.h"
24 
25 #define DEBUG_TYPE "si-fold-operands"
26 using namespace llvm;
27 
28 namespace {
29 
30 struct FoldCandidate {
31   MachineInstr *UseMI;
32   union {
33     MachineOperand *OpToFold;
34     uint64_t ImmToFold;
35     int FrameIndexToFold;
36   };
37   unsigned char UseOpNo;
38   MachineOperand::MachineOperandType Kind;
39   bool Commuted;
40 
41   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
42                 bool Commuted_ = false) :
43     UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()),
44     Commuted(Commuted_) {
45     if (FoldOp->isImm()) {
46       ImmToFold = FoldOp->getImm();
47     } else if (FoldOp->isFI()) {
48       FrameIndexToFold = FoldOp->getIndex();
49     } else {
50       assert(FoldOp->isReg());
51       OpToFold = FoldOp;
52     }
53   }
54 
55   bool isFI() const {
56     return Kind == MachineOperand::MO_FrameIndex;
57   }
58 
59   bool isImm() const {
60     return Kind == MachineOperand::MO_Immediate;
61   }
62 
63   bool isReg() const {
64     return Kind == MachineOperand::MO_Register;
65   }
66 
67   bool isCommuted() const {
68     return Commuted;
69   }
70 };
71 
72 class SIFoldOperands : public MachineFunctionPass {
73 public:
74   static char ID;
75   MachineRegisterInfo *MRI;
76   const SIInstrInfo *TII;
77   const SIRegisterInfo *TRI;
78   const SISubtarget *ST;
79 
80   void foldOperand(MachineOperand &OpToFold,
81                    MachineInstr *UseMI,
82                    unsigned UseOpIdx,
83                    SmallVectorImpl<FoldCandidate> &FoldList,
84                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
85 
86   void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
87 
88   const MachineOperand *isClamp(const MachineInstr &MI) const;
89   bool tryFoldClamp(MachineInstr &MI);
90 
91   std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
92   bool tryFoldOMod(MachineInstr &MI);
93 
94 public:
95   SIFoldOperands() : MachineFunctionPass(ID) {
96     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
97   }
98 
99   bool runOnMachineFunction(MachineFunction &MF) override;
100 
101   StringRef getPassName() const override { return "SI Fold Operands"; }
102 
103   void getAnalysisUsage(AnalysisUsage &AU) const override {
104     AU.setPreservesCFG();
105     MachineFunctionPass::getAnalysisUsage(AU);
106   }
107 };
108 
109 } // End anonymous namespace.
110 
111 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
112                 "SI Fold Operands", false, false)
113 
114 char SIFoldOperands::ID = 0;
115 
116 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
117 
118 // Wrapper around isInlineConstant that understands special cases when
119 // instruction types are replaced during operand folding.
120 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
121                                      const MachineInstr &UseMI,
122                                      unsigned OpNo,
123                                      const MachineOperand &OpToFold) {
124   if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
125     return true;
126 
127   unsigned Opc = UseMI.getOpcode();
128   switch (Opc) {
129   case AMDGPU::V_MAC_F32_e64:
130   case AMDGPU::V_MAC_F16_e64: {
131     // Special case for mac. Since this is replaced with mad when folded into
132     // src2, we need to check the legality for the final instruction.
133     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
134     if (static_cast<int>(OpNo) == Src2Idx) {
135       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
136       const MCInstrDesc &MadDesc
137         = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
138       return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
139     }
140   }
    LLVM_FALLTHROUGH;
141   default:
142     return false;
143   }
144 }
145 
146 FunctionPass *llvm::createSIFoldOperandsPass() {
147   return new SIFoldOperands();
148 }
149 
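/// Apply a fold candidate: rewrite the use operand of Fold.UseMI to the
/// recorded immediate, frame index or virtual register. Returns false if the
/// rewrite is not handled (e.g. folding a physical register).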
150 static bool updateOperand(FoldCandidate &Fold,
151                           const TargetRegisterInfo &TRI) {
152   MachineInstr *MI = Fold.UseMI;
153   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
154   assert(Old.isReg());
155 
156   if (Fold.isImm()) {
157     Old.ChangeToImmediate(Fold.ImmToFold);
158     return true;
159   }
160 
161   if (Fold.isFI()) {
162     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
163     return true;
164   }
165 
166   MachineOperand *New = Fold.OpToFold;
167   if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
168       TargetRegisterInfo::isVirtualRegister(New->getReg())) {
169     Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
170 
171     Old.setIsUndef(New->isUndef());
172     return true;
173   }
174 
175   // FIXME: Handle physical registers.
176 
177   return false;
178 }
179 
180 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
181                               const MachineInstr *MI) {
182   for (auto Candidate : FoldList) {
183     if (Candidate.UseMI == MI)
184       return true;
185   }
186   return false;
187 }
188 
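/// Try to record folding OpToFold into operand OpNo of MI. If the operand is
/// not legal as-is, this attempts to make it legal by converting a v_mac into
/// a v_mad, by using the immediate form of s_setreg, or by commuting MI.
/// Returns true if a candidate was appended to FoldList.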
189 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
190                              MachineInstr *MI, unsigned OpNo,
191                              MachineOperand *OpToFold,
192                              const SIInstrInfo *TII) {
193   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
194 
195     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
196     unsigned Opc = MI->getOpcode();
197     if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
198         (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
199       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
200 
201       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
202       // to fold the operand.
203       MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
204       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
205       if (FoldAsMAD) {
206         MI->untieRegOperand(OpNo);
207         return true;
208       }
209       MI->setDesc(TII->get(Opc));
210     }
211 
212     // Special case for s_setreg_b32
213     if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
214       MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
215       FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
216       return true;
217     }
218 
219     // If we are already folding into another operand of MI, then
220     // we can't commute the instruction, otherwise we risk making the
221     // other fold illegal.
222     if (isUseMIInFoldList(FoldList, MI))
223       return false;
224 
225     // Operand is not legal, so try to commute the instruction to
226     // see if this makes it possible to fold.
227     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
228     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
229     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
230 
231     if (CanCommute) {
232       if (CommuteIdx0 == OpNo)
233         OpNo = CommuteIdx1;
234       else if (CommuteIdx1 == OpNo)
235         OpNo = CommuteIdx0;
236     }
237 
238     // One of the operands might be an immediate, and OpNo may refer to it
239     // after the call to commuteInstruction() below. Such situations are
240     // avoided here explicitly as OpNo must be a register operand to be a
241     // candidate for folding.
242     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
243                        !MI->getOperand(CommuteIdx1).isReg()))
244       return false;
245 
246     if (!CanCommute ||
247         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
248       return false;
249 
250     if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
251       TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
252       return false;
253     }
254 
255     FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold, true));
256     return true;
257   }
258 
259   FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
260   return true;
261 }
262 
263 // If the use operand doesn't care about the value, this may be an operand only
264 // used for register indexing, in which case it is unsafe to fold. SDWA uses
// are not handled here either, so don't fold into SDWA instructions.
265 static bool isUseSafeToFold(const SIInstrInfo *TII,
266                             const MachineInstr &MI,
267                             const MachineOperand &UseMO) {
268   return !UseMO.isUndef() && !TII->isSDWA(MI);
269   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
270 }
271 
272 void SIFoldOperands::foldOperand(
273   MachineOperand &OpToFold,
274   MachineInstr *UseMI,
275   unsigned UseOpIdx,
276   SmallVectorImpl<FoldCandidate> &FoldList,
277   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
278   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
279 
280   if (!isUseSafeToFold(TII, *UseMI, UseOp))
281     return;
282 
283   // FIXME: Fold operands with subregs.
284   if (UseOp.isReg() && OpToFold.isReg()) {
285     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
286       return;
287 
288     // Don't fold subregister extracts into tied operands; only fold full
289     // copies, since a subregister use tied to a full register def doesn't
290     // really make sense. e.g. don't fold:
291     //
292     // %vreg1 = COPY %vreg0:sub1
293     // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
294     //
295     //  into
296     // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
297     if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
298       return;
299   }
300 
301   // Special case for REG_SEQUENCE: We can't fold literals into
302   // REG_SEQUENCE instructions, so we have to fold them into the
303   // uses of REG_SEQUENCE.
304   if (UseMI->isRegSequence()) {
305     unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
306     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
307 
308     for (MachineRegisterInfo::use_iterator
309            RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
310          RSUse != RSE; ++RSUse) {
311 
312       MachineInstr *RSUseMI = RSUse->getParent();
313       if (RSUse->getSubReg() != RegSeqDstSubReg)
314         continue;
315 
316       foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
317                   CopiesToReplace);
318     }
319 
320     return;
321   }
322 
323 
324   bool FoldingImm = OpToFold.isImm();
325 
326   // In order to fold immediates into copies, we need to change the
327   // copy to a MOV.
328   if (FoldingImm && UseMI->isCopy()) {
329     unsigned DestReg = UseMI->getOperand(0).getReg();
330     const TargetRegisterClass *DestRC
331       = TargetRegisterInfo::isVirtualRegister(DestReg) ?
332       MRI->getRegClass(DestReg) :
333       TRI->getPhysRegClass(DestReg);
334 
335     unsigned MovOp = TII->getMovOpcode(DestRC);
336     if (MovOp == AMDGPU::COPY)
337       return;
338 
339     UseMI->setDesc(TII->get(MovOp));
340     CopiesToReplace.push_back(UseMI);
341   } else {
342     const MCInstrDesc &UseDesc = UseMI->getDesc();
343 
344     // Don't fold into target independent nodes.  Target independent opcodes
345     // don't have defined register classes.
346     if (UseDesc.isVariadic() ||
347         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
348       return;
349   }
350 
351   if (!FoldingImm) {
352     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
353 
354     // FIXME: We could try to change the instruction from 64-bit to 32-bit
355     // to enable more folding opportunities. The shrink operands pass
356     // already does this.
357     return;
358   }
359 
360 
361   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
362   const TargetRegisterClass *FoldRC =
363     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
364 
365 
366   // Split 64-bit constants into 32-bits for folding.
367   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
368     unsigned UseReg = UseOp.getReg();
369     const TargetRegisterClass *UseRC
370       = TargetRegisterInfo::isVirtualRegister(UseReg) ?
371       MRI->getRegClass(UseReg) :
372       TRI->getPhysRegClass(UseReg);
373 
374     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
375       return;
376 
377     APInt Imm(64, OpToFold.getImm());
378     if (UseOp.getSubReg() == AMDGPU::sub0) {
379       Imm = Imm.getLoBits(32);
380     } else {
381       assert(UseOp.getSubReg() == AMDGPU::sub1);
382       Imm = Imm.getHiBits(32);
383     }
384 
385     MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
386     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
387     return;
388   }
389 
390 
391 
392   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
393 }
394 
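/// Constant-fold a two-source 32-bit ALU opcode, writing the value to Result.
/// Returns false for opcodes this pass does not know how to evaluate.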
395 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
396                                   uint32_t LHS, uint32_t RHS) {
397   switch (Opcode) {
398   case AMDGPU::V_AND_B32_e64:
399   case AMDGPU::V_AND_B32_e32:
400   case AMDGPU::S_AND_B32:
401     Result = LHS & RHS;
402     return true;
403   case AMDGPU::V_OR_B32_e64:
404   case AMDGPU::V_OR_B32_e32:
405   case AMDGPU::S_OR_B32:
406     Result = LHS | RHS;
407     return true;
408   case AMDGPU::V_XOR_B32_e64:
409   case AMDGPU::V_XOR_B32_e32:
410   case AMDGPU::S_XOR_B32:
411     Result = LHS ^ RHS;
412     return true;
413   case AMDGPU::V_LSHL_B32_e64:
414   case AMDGPU::V_LSHL_B32_e32:
415   case AMDGPU::S_LSHL_B32:
416     // The instruction ignores the high bits for out of bounds shifts.
417     Result = LHS << (RHS & 31);
418     return true;
419   case AMDGPU::V_LSHLREV_B32_e64:
420   case AMDGPU::V_LSHLREV_B32_e32:
421     Result = RHS << (LHS & 31);
422     return true;
423   case AMDGPU::V_LSHR_B32_e64:
424   case AMDGPU::V_LSHR_B32_e32:
425   case AMDGPU::S_LSHR_B32:
426     Result = LHS >> (RHS & 31);
427     return true;
428   case AMDGPU::V_LSHRREV_B32_e64:
429   case AMDGPU::V_LSHRREV_B32_e32:
430     Result = RHS >> (LHS & 31);
431     return true;
432   case AMDGPU::V_ASHR_I32_e64:
433   case AMDGPU::V_ASHR_I32_e32:
434   case AMDGPU::S_ASHR_I32:
435     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
436     return true;
437   case AMDGPU::V_ASHRREV_I32_e64:
438   case AMDGPU::V_ASHRREV_I32_e32:
439     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
440     return true;
441   default:
442     return false;
443   }
444 }
445 
446 static unsigned getMovOpc(bool IsScalar) {
447   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
448 }
449 
450 /// Remove any leftover implicit operands from mutating the instruction. e.g.
451 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
452 /// anymore.
453 static void stripExtraCopyOperands(MachineInstr &MI) {
454   const MCInstrDesc &Desc = MI.getDesc();
455   unsigned NumOps = Desc.getNumOperands() +
456                     Desc.getNumImplicitUses() +
457                     Desc.getNumImplicitDefs();
458 
459   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
460     MI.RemoveOperand(I);
461 }
462 
463 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
464   MI.setDesc(NewDesc);
465   stripExtraCopyOperands(MI);
466 }
467 
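/// If Op is a register (with no subregister) defined by a move-immediate,
/// return the immediate source operand of that def; otherwise return Op
/// itself.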
468 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
469                                                MachineOperand &Op) {
470   if (Op.isReg()) {
471     // If this has a subregister, it obviously is a register source.
472     if (Op.getSubReg() != AMDGPU::NoSubRegister)
473       return &Op;
474 
475     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
476     if (Def && Def->isMoveImmediate()) {
477       MachineOperand &ImmSrc = Def->getOperand(1);
478       if (ImmSrc.isImm())
479         return &ImmSrc;
480     }
481   }
482 
483   return &Op;
484 }
485 
486 // Try to simplify operations with a constant that may appear after instruction
487 // selection.
488 // TODO: See if a frame index with a fixed offset can fold.
489 static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
490                               const SIInstrInfo *TII,
491                               MachineInstr *MI,
492                               MachineOperand *ImmOp) {
493   unsigned Opc = MI->getOpcode();
494   if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
495       Opc == AMDGPU::S_NOT_B32) {
496     MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
497     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
498     return true;
499   }
500 
501   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
502   if (Src1Idx == -1)
503     return false;
504 
505   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
506   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
507   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
508 
509   if (!Src0->isImm() && !Src1->isImm())
510     return false;
511 
512   // and k0, k1 -> v_mov_b32 (k0 & k1)
513   // or k0, k1 -> v_mov_b32 (k0 | k1)
514   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
515   if (Src0->isImm() && Src1->isImm()) {
516     int32_t NewImm;
517     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
518       return false;
519 
520     const SIRegisterInfo &TRI = TII->getRegisterInfo();
521     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
522 
523     // Be careful to change the right operand; src0 may belong to a different
524     // instruction if getImmOrMaterializedImm looked through a materializing def.
525     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
526     MI->RemoveOperand(Src1Idx);
527     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
528     return true;
529   }
530 
531   if (!MI->isCommutable())
532     return false;
533 
534   if (Src0->isImm() && !Src1->isImm()) {
535     std::swap(Src0, Src1);
536     std::swap(Src0Idx, Src1Idx);
537   }
538 
539   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
540   if (Opc == AMDGPU::V_OR_B32_e64 ||
541       Opc == AMDGPU::V_OR_B32_e32 ||
542       Opc == AMDGPU::S_OR_B32) {
543     if (Src1Val == 0) {
544       // y = or x, 0 => y = copy x
545       MI->RemoveOperand(Src1Idx);
546       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
547     } else if (Src1Val == -1) {
548       // y = or x, -1 => y = v_mov_b32 -1
549       MI->RemoveOperand(Src1Idx);
550       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
551     } else
552       return false;
553 
554     return true;
555   }
556 
557   if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
558       MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
559       MI->getOpcode() == AMDGPU::S_AND_B32) {
560     if (Src1Val == 0) {
561       // y = and x, 0 => y = v_mov_b32 0
562       MI->RemoveOperand(Src0Idx);
563       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
564     } else if (Src1Val == -1) {
565       // y = and x, -1 => y = copy x
566       MI->RemoveOperand(Src1Idx);
567       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
568       stripExtraCopyOperands(*MI);
569     } else
570       return false;
571 
572     return true;
573   }
574 
575   if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
576       MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
577       MI->getOpcode() == AMDGPU::S_XOR_B32) {
578     if (Src1Val == 0) {
579       // y = xor x, 0 => y = copy x
580       MI->RemoveOperand(Src1Idx);
581       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
582       return true;
583     }
584   }
585 
586   return false;
587 }
588 
589 // Try to fold an instruction into a simpler one, e.g. a v_cndmask with
// identical sources becomes a copy or mov of that source.
590 static bool tryFoldInst(const SIInstrInfo *TII,
591                         MachineInstr *MI) {
592   unsigned Opc = MI->getOpcode();
593 
594   if (Opc == AMDGPU::V_CNDMASK_B32_e32    ||
595       Opc == AMDGPU::V_CNDMASK_B32_e64    ||
596       Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
597     const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
598     const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
599     if (Src1->isIdenticalTo(*Src0)) {
600       DEBUG(dbgs() << "Folded " << *MI << " into ");
601       int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
602       if (Src2Idx != -1)
603         MI->RemoveOperand(Src2Idx);
604       MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
605       mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
606                                                : getMovOpc(false)));
607       DEBUG(dbgs() << *MI << '\n');
608       return true;
609     }
610   }
611 
612   return false;
613 }
614 
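/// Fold OpToFold, the source operand of the foldable copy MI, into the uses of
/// the copy's destination register, constant-folding and simplifying the users
/// where possible.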
615 void SIFoldOperands::foldInstOperand(MachineInstr &MI,
616                                      MachineOperand &OpToFold) const {
617   // We need to mutate the operands of new mov instructions to add implicit
618   // uses of EXEC, but adding them invalidates the use_iterator, so defer
619   // this.
620   SmallVector<MachineInstr *, 4> CopiesToReplace;
621   SmallVector<FoldCandidate, 4> FoldList;
622   MachineOperand &Dst = MI.getOperand(0);
623 
624   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
625   if (FoldingImm) {
626     unsigned NumLiteralUses = 0;
627     MachineOperand *NonInlineUse = nullptr;
628     int NonInlineUseOpNo = -1;
629 
630     MachineRegisterInfo::use_iterator NextUse, NextInstUse;
631     for (MachineRegisterInfo::use_iterator
632            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
633          Use != E; Use = NextUse) {
634       NextUse = std::next(Use);
635       MachineInstr *UseMI = Use->getParent();
636       unsigned OpNo = Use.getOperandNo();
637 
638       // Folding the immediate may reveal operations that can be constant
639       // folded or replaced with a copy. This can happen for example after
640       // frame indices are lowered to constants or from splitting 64-bit
641       // constants.
642       //
643       // We may also encounter cases where one or both operands are
644       // immediates materialized into a register, which would ordinarily not
645       // be folded due to multiple uses or operand constraints.
646 
647       if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
648         DEBUG(dbgs() << "Constant folded " << *UseMI <<'\n');
649 
650         // Some constant folding cases change the same immediate's use to a new
651         // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the users,
652         // since the same constant folded instruction could also have a second
653         // use operand.
654         NextUse = MRI->use_begin(Dst.getReg());
655         continue;
656       }
657 
658       // Try to fold any inline immediate uses, and then only fold other
659       // constants if they have one use.
660       //
661       // The legality of the inline immediate must be checked based on the use
662       // operand, not the defining instruction, because 32-bit instructions
663       // with 32-bit inline immediate sources may be used to materialize
664       // constants used in 16-bit operands.
665       //
666       // e.g. it is unsafe to fold:
667       //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
668       //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
669 
670       // Folding immediates with more than one use will increase program size.
671       // FIXME: This will also reduce register usage, which may be better
672       // in some cases. A better heuristic is needed.
673       if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
674         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
675       } else {
676         if (++NumLiteralUses == 1) {
677           NonInlineUse = &*Use;
678           NonInlineUseOpNo = OpNo;
679         }
680       }
681     }
682 
683     if (NumLiteralUses == 1) {
684       MachineInstr *UseMI = NonInlineUse->getParent();
685       foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
686     }
687   } else {
688     // Folding register.
689     for (MachineRegisterInfo::use_iterator
690            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
691          Use != E; ++Use) {
692       MachineInstr *UseMI = Use->getParent();
693 
694       foldOperand(OpToFold, UseMI, Use.getOperandNo(),
695                   FoldList, CopiesToReplace);
696     }
697   }
698 
699   MachineFunction *MF = MI.getParent()->getParent();
700   // Make sure we add EXEC uses to any new v_mov instructions created.
701   for (MachineInstr *Copy : CopiesToReplace)
702     Copy->addImplicitDefUseOperands(*MF);
703 
704   for (FoldCandidate &Fold : FoldList) {
705     if (updateOperand(Fold, *TRI)) {
706       // Clear kill flags.
707       if (Fold.isReg()) {
708         assert(Fold.OpToFold && Fold.OpToFold->isReg());
709         // FIXME: Probably shouldn't bother trying to fold if not an
710         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
711         // copies.
712         MRI->clearKillFlags(Fold.OpToFold->getReg());
713       }
714       DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
715             static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
716       tryFoldInst(TII, Fold.UseMI);
717     } else if (Fold.isCommuted()) {
718       // Restore the instruction's original operand order since the fold failed.
719       TII->commuteInstruction(*Fold.UseMI, false);
720     }
721   }
722 }
723 
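/// Return the common source operand if MI is a clamping max (a v_max of a
/// register with itself that has the clamp bit set and no other modifiers);
/// otherwise return nullptr.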
724 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
725   unsigned Op = MI.getOpcode();
726   switch (Op) {
727   case AMDGPU::V_MAX_F32_e64:
728   case AMDGPU::V_MAX_F16_e64:
729   case AMDGPU::V_MAX_F64: {
730     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
731       return nullptr;
732 
733     // Make sure sources are identical.
734     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
735     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
736     if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
737         Src0->getSubReg() != Src1->getSubReg() ||
738         Src0->getSubReg() != AMDGPU::NoSubRegister)
739       return nullptr;
740 
741     // Can't fold the clamp if there are source modifiers or omod.
742     if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
743         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
744         TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
745       return nullptr;
746     return Src0;
747   }
748   default:
749     return nullptr;
750   }
751 }
752 
753 // A clamp uses its register twice in the same instruction, so count using
754 // instructions (ignoring debug uses) rather than operand uses.
755 static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
756   int Count = 0;
757   for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
758        I != E; ++I) {
759     if (++Count > 1)
760       return false;
761   }
762 
763   return true;
764 }
765 
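/// If MI is a clamp pattern and its source is produced by a single-use
/// instruction that supports FP clamp, set the clamp bit on that defining
/// instruction and erase MI.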
766 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
767   const MachineOperand *ClampSrc = isClamp(MI);
768   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
769     return false;
770 
771   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
772   if (!TII->hasFPClamp(*Def))
773     return false;
774   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
775   if (!DefClamp)
776     return false;
777 
778   DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');
779 
780   // Clamp is applied after omod, so it is OK if omod is set.
781   DefClamp->setImm(1);
782   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
783   MI.eraseFromParent();
784   return true;
785 }
786 
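/// Map a multiplier constant to the corresponding output modifier encoding:
/// 0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4, anything else -> NONE.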
787 static int getOModValue(unsigned Opc, int64_t Val) {
788   switch (Opc) {
789   case AMDGPU::V_MUL_F32_e64: {
790     switch (static_cast<uint32_t>(Val)) {
791     case 0x3f000000: // 0.5
792       return SIOutMods::DIV2;
793     case 0x40000000: // 2.0
794       return SIOutMods::MUL2;
795     case 0x40800000: // 4.0
796       return SIOutMods::MUL4;
797     default:
798       return SIOutMods::NONE;
799     }
800   }
801   case AMDGPU::V_MUL_F16_e64: {
802     switch (static_cast<uint16_t>(Val)) {
803     case 0x3800: // 0.5
804       return SIOutMods::DIV2;
805     case 0x4000: // 2.0
806       return SIOutMods::MUL2;
807     case 0x4400: // 4.0
808       return SIOutMods::MUL4;
809     default:
810       return SIOutMods::NONE;
811     }
812   }
813   default:
814     llvm_unreachable("invalid mul opcode");
815   }
816 }
817 
818 // FIXME: Does this really not support denormals with f16?
819 // FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
820 // handled, so will anything other than that break?
821 std::pair<const MachineOperand *, int>
822 SIFoldOperands::isOMod(const MachineInstr &MI) const {
823   unsigned Op = MI.getOpcode();
824   switch (Op) {
825   case AMDGPU::V_MUL_F32_e64:
826   case AMDGPU::V_MUL_F16_e64: {
827     // If output denormals are enabled, omod is ignored.
828     if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
829         (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
830       return std::make_pair(nullptr, SIOutMods::NONE);
831 
832     const MachineOperand *RegOp = nullptr;
833     const MachineOperand *ImmOp = nullptr;
834     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
835     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
836     if (Src0->isImm()) {
837       ImmOp = Src0;
838       RegOp = Src1;
839     } else if (Src1->isImm()) {
840       ImmOp = Src1;
841       RegOp = Src0;
842     } else
843       return std::make_pair(nullptr, SIOutMods::NONE);
844 
845     int OMod = getOModValue(Op, ImmOp->getImm());
846     if (OMod == SIOutMods::NONE ||
847         TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
848         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
849         TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
850         TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
851       return std::make_pair(nullptr, SIOutMods::NONE);
852 
853     return std::make_pair(RegOp, OMod);
854   }
855   case AMDGPU::V_ADD_F32_e64:
856   case AMDGPU::V_ADD_F16_e64: {
857     // If output denormals are enabled, omod is ignored.
858     if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
859         (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
860       return std::make_pair(nullptr, SIOutMods::NONE);
861 
862     // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
863     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
864     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
865 
866     if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
867         Src0->getSubReg() == Src1->getSubReg() &&
868         !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
869         !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
870         !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
871         !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
872       return std::make_pair(Src0, SIOutMods::MUL2);
873 
874     return std::make_pair(nullptr, SIOutMods::NONE);
875   }
876   default:
877     return std::make_pair(nullptr, SIOutMods::NONE);
878   }
879 }
880 
881 // FIXME: Does this need to check IEEE bit on function?
882 bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
883   const MachineOperand *RegOp;
884   int OMod;
885   std::tie(RegOp, OMod) = isOMod(MI);
886   if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
887       RegOp->getSubReg() != AMDGPU::NoSubRegister ||
888       !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
889     return false;
890 
891   MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
892   MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
893   if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
894     return false;
895 
896   // Clamp is applied after omod. If the source already has clamp set, don't
897   // fold it.
898   if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
899     return false;
900 
901   DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');
902 
903   DefOMod->setImm(OMod);
904   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
905   MI.eraseFromParent();
906   return true;
907 }
908 
909 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
910   if (skipFunction(*MF.getFunction()))
911     return false;
912 
913   MRI = &MF.getRegInfo();
914   ST = &MF.getSubtarget<SISubtarget>();
915   TII = ST->getInstrInfo();
916   TRI = &TII->getRegisterInfo();
917 
918   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
919 
920   // omod is ignored by hardware if IEEE bit is enabled. omod also does not
921   // correctly handle signed zeros.
922   //
923   // TODO: Check nsz on instructions when fast math flags are preserved to MI
924   // level.
925   bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();
926 
927   for (MachineBasicBlock *MBB : depth_first(&MF)) {
928     MachineBasicBlock::iterator I, Next;
929     for (I = MBB->begin(); I != MBB->end(); I = Next) {
930       Next = std::next(I);
931       MachineInstr &MI = *I;
932 
933       tryFoldInst(TII, &MI);
934 
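      // If this is not a foldable copy, the only remaining opportunities are
      // folding a clamp or omod pattern into the instruction defining its
      // input. omod folding is skipped in IEEE mode (see IsIEEEMode above).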
935       if (!TII->isFoldableCopy(MI)) {
936         if (IsIEEEMode || !tryFoldOMod(MI))
937           tryFoldClamp(MI);
938         continue;
939       }
940 
941       MachineOperand &OpToFold = MI.getOperand(1);
942       bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
943 
944       // FIXME: We could also be folding things like TargetIndexes.
945       if (!FoldingImm && !OpToFold.isReg())
946         continue;
947 
948       if (OpToFold.isReg() &&
949           !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
950         continue;
951 
952       // Prevent folding operands backwards in the function. For example,
953       // the COPY opcode must not be replaced by 1 in this example:
954       //
955       //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
956       //    ...
957       //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
958       MachineOperand &Dst = MI.getOperand(0);
959       if (Dst.isReg() &&
960           !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
961         continue;
962 
963       foldInstOperand(MI, OpToFold);
964     }
965   }
966   return false;
967 }
968