xref: /llvm-project/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp (revision 79a45db7f5752be9fd1f4f718c7ac90a70972662)
1 //===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 /// \file
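/// This pass folds the source operands of mov and copy instructions into
/// their uses, and folds clamping v_max instructions into the clamp bit of
/// their source's defining instruction.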
9 //===----------------------------------------------------------------------===//
10 //
11 
12 #include "AMDGPU.h"
13 #include "AMDGPUSubtarget.h"
14 #include "SIInstrInfo.h"
15 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
16 #include "llvm/CodeGen/MachineFunctionPass.h"
17 #include "llvm/CodeGen/MachineInstrBuilder.h"
18 #include "llvm/CodeGen/MachineRegisterInfo.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Support/raw_ostream.h"
21 #include "llvm/Target/TargetMachine.h"
22 
23 #define DEBUG_TYPE "si-fold-operands"
24 using namespace llvm;
25 
26 namespace {
27 
28 struct FoldCandidate {
29   MachineInstr *UseMI;
30   union {
31     MachineOperand *OpToFold;
32     uint64_t ImmToFold;
33     int FrameIndexToFold;
34   };
35   unsigned char UseOpNo;
36   MachineOperand::MachineOperandType Kind;
37 
38   FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
39     UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
40     if (FoldOp->isImm()) {
41       ImmToFold = FoldOp->getImm();
42     } else if (FoldOp->isFI()) {
43       FrameIndexToFold = FoldOp->getIndex();
44     } else {
45       assert(FoldOp->isReg());
46       OpToFold = FoldOp;
47     }
48   }
49 
50   bool isFI() const {
51     return Kind == MachineOperand::MO_FrameIndex;
52   }
53 
54   bool isImm() const {
55     return Kind == MachineOperand::MO_Immediate;
56   }
57 
58   bool isReg() const {
59     return Kind == MachineOperand::MO_Register;
60   }
61 };
62 
63 class SIFoldOperands : public MachineFunctionPass {
64 public:
65   static char ID;
66   MachineRegisterInfo *MRI;
67   const SIInstrInfo *TII;
68   const SIRegisterInfo *TRI;
69   const SISubtarget *ST;
70 
71   void foldOperand(MachineOperand &OpToFold,
72                    MachineInstr *UseMI,
73                    unsigned UseOpIdx,
74                    SmallVectorImpl<FoldCandidate> &FoldList,
75                    SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;
76 
77   void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;
78 
79   const MachineOperand *isClamp(const MachineInstr &MI) const;
80   bool tryFoldClamp(MachineInstr &MI);
81 
82 public:
83   SIFoldOperands() : MachineFunctionPass(ID) {
84     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
85   }
86 
87   bool runOnMachineFunction(MachineFunction &MF) override;
88 
89   StringRef getPassName() const override { return "SI Fold Operands"; }
90 
91   void getAnalysisUsage(AnalysisUsage &AU) const override {
92     AU.setPreservesCFG();
93     MachineFunctionPass::getAnalysisUsage(AU);
94   }
95 };
96 
97 } // End anonymous namespace.
98 
99 INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
100                 "SI Fold Operands", false, false)
101 
102 char SIFoldOperands::ID = 0;
103 
104 char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
105 
106 // Wrapper around isInlineConstant that understands special cases when
107 // instruction types are replaced during operand folding.
108 static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
109                                      const MachineInstr &UseMI,
110                                      unsigned OpNo,
111                                      const MachineOperand &OpToFold) {
112   if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
113     return true;
114 
115   unsigned Opc = UseMI.getOpcode();
116   switch (Opc) {
117   case AMDGPU::V_MAC_F32_e64:
118   case AMDGPU::V_MAC_F16_e64: {
119     // Special case for mac. Since this is replaced with mad when folded into
120     // src2, we need to check the legality for the final instruction.
121     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
122     if (static_cast<int>(OpNo) == Src2Idx) {
123       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
124       const MCInstrDesc &MadDesc
125         = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
126       return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
127     }
128   }
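  // Not folding into src2 of the mac; fall through to the default result.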
129   default:
130     return false;
131   }
132 }
133 
134 FunctionPass *llvm::createSIFoldOperandsPass() {
135   return new SIFoldOperands();
136 }
137 
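// Only plain full-register moves and copies are safe sources for folding; for
// the V_MOV variants, extra implicit operands indicate possible register
// indexing and disqualify the instruction.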
138 static bool isSafeToFold(const MachineInstr &MI) {
139   switch (MI.getOpcode()) {
140   case AMDGPU::V_MOV_B32_e32:
141   case AMDGPU::V_MOV_B32_e64:
142   case AMDGPU::V_MOV_B64_PSEUDO: {
143     // If there are additional implicit register operands, this may be used for
144     // register indexing, so the source register operand isn't simply copied.
145     unsigned NumOps = MI.getDesc().getNumOperands() +
146       MI.getDesc().getNumImplicitUses();
147 
148     return MI.getNumOperands() == NumOps;
149   }
150   case AMDGPU::S_MOV_B32:
151   case AMDGPU::S_MOV_B64:
152   case AMDGPU::COPY:
153     return true;
154   default:
155     return false;
156   }
157 }
158 
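// Apply a queued fold to its use operand: immediates and frame indexes
// overwrite the operand in place, and virtual register sources are
// substituted with substVirtReg. Physical register sources are not handled.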
159 static bool updateOperand(FoldCandidate &Fold,
160                           const TargetRegisterInfo &TRI) {
161   MachineInstr *MI = Fold.UseMI;
162   MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
163   assert(Old.isReg());
164 
165   if (Fold.isImm()) {
166     Old.ChangeToImmediate(Fold.ImmToFold);
167     return true;
168   }
169 
170   if (Fold.isFI()) {
171     Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
172     return true;
173   }
174 
175   MachineOperand *New = Fold.OpToFold;
176   if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
177       TargetRegisterInfo::isVirtualRegister(New->getReg())) {
178     Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
179     return true;
180   }
181 
182   // FIXME: Handle physical registers.
183 
184   return false;
185 }
186 
187 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
188                               const MachineInstr *MI) {
189   for (auto Candidate : FoldList) {
190     if (Candidate.UseMI == MI)
191       return true;
192   }
193   return false;
194 }
195 
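// Try to append a fold of OpToFold into operand OpNo of MI to FoldList. If the
// operand is not legal as-is, this tries rewriting v_mac as v_mad, rewriting
// s_setreg_b32 to its immediate form, or commuting MI before giving up.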
196 static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
197                              MachineInstr *MI, unsigned OpNo,
198                              MachineOperand *OpToFold,
199                              const SIInstrInfo *TII) {
200   if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
201 
202     // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
203     unsigned Opc = MI->getOpcode();
204     if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
205         (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
206       bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
207 
208       // Check if changing this to a v_mad_{f16, f32} instruction will allow us
209       // to fold the operand.
210       MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
211       bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
212       if (FoldAsMAD) {
213         MI->untieRegOperand(OpNo);
214         return true;
215       }
216       MI->setDesc(TII->get(Opc));
217     }
218 
219     // Special case for s_setreg_b32
220     if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
221       MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
222       FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
223       return true;
224     }
225 
226     // If we are already folding into another operand of MI, then
227     // we can't commute the instruction; otherwise we risk making the
228     // other fold illegal.
229     if (isUseMIInFoldList(FoldList, MI))
230       return false;
231 
232     // Operand is not legal, so try to commute the instruction to
233     // see if this makes it possible to fold.
234     unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
235     unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
236     bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);
237 
238     if (CanCommute) {
239       if (CommuteIdx0 == OpNo)
240         OpNo = CommuteIdx1;
241       else if (CommuteIdx1 == OpNo)
242         OpNo = CommuteIdx0;
243     }
244 
245     // One of the operands might be an immediate, and OpNo may refer to it
246     // after the call to commuteInstruction() below. Such situations are
247     // avoided here explicitly, as OpNo must be a register operand to be a
248     // candidate for folding.
249     if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
250                        !MI->getOperand(CommuteIdx1).isReg()))
251       return false;
252 
253     if (!CanCommute ||
254         !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
255       return false;
256 
257     if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
258       return false;
259   }
260 
261   FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
262   return true;
263 }
264 
265 // If the use operand doesn't care about the value, this may be an operand only
266 // used for register indexing, in which case it is unsafe to fold.
267 static bool isUseSafeToFold(const MachineInstr &MI,
268                             const MachineOperand &UseMO) {
269   return !UseMO.isUndef();
270   //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
271 }
272 
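// Consider folding OpToFold into the use at operand UseOpIdx of UseMI. Legal
// folds are appended to FoldList; copies that become movs when an immediate is
// folded into them are recorded in CopiesToReplace so their implicit operands
// can be added later.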
273 void SIFoldOperands::foldOperand(
274   MachineOperand &OpToFold,
275   MachineInstr *UseMI,
276   unsigned UseOpIdx,
277   SmallVectorImpl<FoldCandidate> &FoldList,
278   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
279   const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
280 
281   if (!isUseSafeToFold(*UseMI, UseOp))
282     return;
283 
284   // FIXME: Fold operands with subregs.
285   if (UseOp.isReg() && OpToFold.isReg()) {
286     if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
287       return;
288 
289     // Don't fold subregister extracts into tied operands; only allow a full
290     // copy, since a subregister use tied to a full register def doesn't really
291     // make sense. e.g. don't fold:
292     //
293     // %vreg1 = COPY %vreg0:sub1
294     // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
295     //
296     //  into
297     // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
298     if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
299       return;
300   }
301 
302   // Special case for REG_SEQUENCE: We can't fold literals into
303   // REG_SEQUENCE instructions, so we have to fold them into the
304   // uses of REG_SEQUENCE.
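  //
  // For example (schematic MIR): given
  //   %vreg2 = REG_SEQUENCE %vreg0, sub0, %vreg1, sub1
  //   %vreg3 = FOO %vreg2:sub0, ...
  // an immediate defining %vreg0 is offered to the FOO use of sub0 rather
  // than to the REG_SEQUENCE operand. FOO is a placeholder opcode.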
305   if (UseMI->isRegSequence()) {
306     unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
307     unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();
308 
309     for (MachineRegisterInfo::use_iterator
310            RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
311          RSUse != RSE; ++RSUse) {
312 
313       MachineInstr *RSUseMI = RSUse->getParent();
314       if (RSUse->getSubReg() != RegSeqDstSubReg)
315         continue;
316 
317       foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
318                   CopiesToReplace);
319     }
320 
321     return;
322   }
323 
324 
325   bool FoldingImm = OpToFold.isImm();
326 
327   // In order to fold immediates into copies, we need to change the
328   // copy to a MOV.
329   if (FoldingImm && UseMI->isCopy()) {
330     unsigned DestReg = UseMI->getOperand(0).getReg();
331     const TargetRegisterClass *DestRC
332       = TargetRegisterInfo::isVirtualRegister(DestReg) ?
333       MRI->getRegClass(DestReg) :
334       TRI->getPhysRegClass(DestReg);
335 
336     unsigned MovOp = TII->getMovOpcode(DestRC);
337     if (MovOp == AMDGPU::COPY)
338       return;
339 
340     UseMI->setDesc(TII->get(MovOp));
341     CopiesToReplace.push_back(UseMI);
342   } else {
343     const MCInstrDesc &UseDesc = UseMI->getDesc();
344 
345     // Don't fold into target independent nodes.  Target independent opcodes
346     // don't have defined register classes.
347     if (UseDesc.isVariadic() ||
348         UseDesc.OpInfo[UseOpIdx].RegClass == -1)
349       return;
350   }
351 
352   if (!FoldingImm) {
353     tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
354 
355     // FIXME: We could try to change the instruction from 64-bit to 32-bit
356     // to enable more folding opportunities.  The shrink operands pass
357     // already does this.
358     return;
359   }
360 
361 
362   const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
363   const TargetRegisterClass *FoldRC =
364     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
365 
366   APInt Imm(TII->operandBitWidth(FoldDesc.OpInfo[1].OperandType),
367             OpToFold.getImm());
368 
369   // Split 64-bit constants into 32-bits for folding.
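  // e.g. for a 64-bit materialization of 0x0000000100000002, a use of sub0
  // sees the immediate 0x00000002 and a use of sub1 sees 0x00000001.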
370   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
371     unsigned UseReg = UseOp.getReg();
372     const TargetRegisterClass *UseRC
373       = TargetRegisterInfo::isVirtualRegister(UseReg) ?
374       MRI->getRegClass(UseReg) :
375       TRI->getPhysRegClass(UseReg);
376 
377     assert(Imm.getBitWidth() == 64);
378 
379     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
380       return;
381 
382     if (UseOp.getSubReg() == AMDGPU::sub0) {
383       Imm = Imm.getLoBits(32);
384     } else {
385       assert(UseOp.getSubReg() == AMDGPU::sub1);
386       Imm = Imm.getHiBits(32);
387     }
388   }
389 
390   MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
391   tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
392 }
393 
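// Evaluate a 32-bit bitwise or shift instruction on two known constant
// operands. Returns false for opcodes this helper does not understand.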
394 static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
395                                   uint32_t LHS, uint32_t RHS) {
396   switch (Opcode) {
397   case AMDGPU::V_AND_B32_e64:
398   case AMDGPU::V_AND_B32_e32:
399   case AMDGPU::S_AND_B32:
400     Result = LHS & RHS;
401     return true;
402   case AMDGPU::V_OR_B32_e64:
403   case AMDGPU::V_OR_B32_e32:
404   case AMDGPU::S_OR_B32:
405     Result = LHS | RHS;
406     return true;
407   case AMDGPU::V_XOR_B32_e64:
408   case AMDGPU::V_XOR_B32_e32:
409   case AMDGPU::S_XOR_B32:
410     Result = LHS ^ RHS;
411     return true;
412   case AMDGPU::V_LSHL_B32_e64:
413   case AMDGPU::V_LSHL_B32_e32:
414   case AMDGPU::S_LSHL_B32:
415     // The instruction ignores the high bits for out of bounds shifts.
416     Result = LHS << (RHS & 31);
417     return true;
418   case AMDGPU::V_LSHLREV_B32_e64:
419   case AMDGPU::V_LSHLREV_B32_e32:
420     Result = RHS << (LHS & 31);
421     return true;
422   case AMDGPU::V_LSHR_B32_e64:
423   case AMDGPU::V_LSHR_B32_e32:
424   case AMDGPU::S_LSHR_B32:
425     Result = LHS >> (RHS & 31);
426     return true;
427   case AMDGPU::V_LSHRREV_B32_e64:
428   case AMDGPU::V_LSHRREV_B32_e32:
429     Result = RHS >> (LHS & 31);
430     return true;
431   case AMDGPU::V_ASHR_I32_e64:
432   case AMDGPU::V_ASHR_I32_e32:
433   case AMDGPU::S_ASHR_I32:
434     Result = static_cast<int32_t>(LHS) >> (RHS & 31);
435     return true;
436   case AMDGPU::V_ASHRREV_I32_e64:
437   case AMDGPU::V_ASHRREV_I32_e32:
438     Result = static_cast<int32_t>(RHS) >> (LHS & 31);
439     return true;
440   default:
441     return false;
442   }
443 }
444 
445 static unsigned getMovOpc(bool IsScalar) {
446   return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
447 }
448 
449 /// Remove any leftover implicit operands from mutating the instruction. e.g.
450 /// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
451 /// anymore.
452 static void stripExtraCopyOperands(MachineInstr &MI) {
453   const MCInstrDesc &Desc = MI.getDesc();
454   unsigned NumOps = Desc.getNumOperands() +
455                     Desc.getNumImplicitUses() +
456                     Desc.getNumImplicitDefs();
457 
458   for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
459     MI.RemoveOperand(I);
460 }
461 
462 static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
463   MI.setDesc(NewDesc);
464   stripExtraCopyOperands(MI);
465 }
466 
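// If Op is a full register whose def is a move-immediate, return that def's
// immediate operand; otherwise return Op itself.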
467 static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
468                                                MachineOperand &Op) {
469   if (Op.isReg()) {
470     // If this has a subregister, it obviously is a register source.
471     if (Op.getSubReg() != AMDGPU::NoSubRegister)
472       return &Op;
473 
474     MachineInstr *Def = MRI.getVRegDef(Op.getReg());
475     if (Def->isMoveImmediate()) {
476       MachineOperand &ImmSrc = Def->getOperand(1);
477       if (ImmSrc.isImm())
478         return &ImmSrc;
479     }
480   }
481 
482   return &Op;
483 }
484 
485 // Try to simplify operations with a constant that may appear after instruction
486 // selection.
487 // TODO: See if a frame index with a fixed offset can fold.
488 static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
489                               const SIInstrInfo *TII,
490                               MachineInstr *MI,
491                               MachineOperand *ImmOp) {
492   unsigned Opc = MI->getOpcode();
493   if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
494       Opc == AMDGPU::S_NOT_B32) {
495     MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
496     mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
497     return true;
498   }
499 
500   int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
501   if (Src1Idx == -1)
502     return false;
503 
504   int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
505   MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
506   MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));
507 
508   if (!Src0->isImm() && !Src1->isImm())
509     return false;
510 
511   // and k0, k1 -> v_mov_b32 (k0 & k1)
512   // or k0, k1 -> v_mov_b32 (k0 | k1)
513   // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
514   if (Src0->isImm() && Src1->isImm()) {
515     int32_t NewImm;
516     if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
517       return false;
518 
519     const SIRegisterInfo &TRI = TII->getRegisterInfo();
520     bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());
521 
522     // Be careful to change the right operand; src0 may belong to a different
523     // instruction.
524     MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
525     MI->RemoveOperand(Src1Idx);
526     mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
527     return true;
528   }
529 
530   if (!MI->isCommutable())
531     return false;
532 
533   if (Src0->isImm() && !Src1->isImm()) {
534     std::swap(Src0, Src1);
535     std::swap(Src0Idx, Src1Idx);
536   }
537 
538   int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
539   if (Opc == AMDGPU::V_OR_B32_e64 ||
540       Opc == AMDGPU::V_OR_B32_e32 ||
541       Opc == AMDGPU::S_OR_B32) {
542     if (Src1Val == 0) {
543       // y = or x, 0 => y = copy x
544       MI->RemoveOperand(Src1Idx);
545       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
546     } else if (Src1Val == -1) {
547       // y = or x, -1 => y = v_mov_b32 -1
548       MI->RemoveOperand(Src1Idx);
549       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
550     } else
551       return false;
552 
553     return true;
554   }
555 
556   if (Opc == AMDGPU::V_AND_B32_e64 ||
557       Opc == AMDGPU::V_AND_B32_e32 ||
558       Opc == AMDGPU::S_AND_B32) {
559     if (Src1Val == 0) {
560       // y = and x, 0 => y = v_mov_b32 0
561       MI->RemoveOperand(Src0Idx);
562       mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
563     } else if (Src1Val == -1) {
564       // y = and x, -1 => y = copy x
565       MI->RemoveOperand(Src1Idx);
566       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
567       stripExtraCopyOperands(*MI);
568     } else
569       return false;
570 
571     return true;
572   }
573 
574   if (Opc == AMDGPU::V_XOR_B32_e64 ||
575       Opc == AMDGPU::V_XOR_B32_e32 ||
576       Opc == AMDGPU::S_XOR_B32) {
577     if (Src1Val == 0) {
578       // y = xor x, 0 => y = copy x
579       MI->RemoveOperand(Src1Idx);
580       mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
581       return true;
582     }
583   }
584 
585   return false;
586 }
587 
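// Fold the source operand of the mov/copy MI into the uses of MI's destination
// register. Immediates that stay inline constants may be folded into every
// use; other constants are only folded when there is a single literal use.
// Register sources are offered to every use.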
588 void SIFoldOperands::foldInstOperand(MachineInstr &MI,
589                                      MachineOperand &OpToFold) const {
590   // We need to mutate the operands of new mov instructions to add implicit
591   // uses of EXEC, but adding them invalidates the use_iterator, so defer
592   // this.
593   SmallVector<MachineInstr *, 4> CopiesToReplace;
594   SmallVector<FoldCandidate, 4> FoldList;
595   MachineOperand &Dst = MI.getOperand(0);
596 
597   bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
598   if (FoldingImm) {
599     unsigned NumLiteralUses = 0;
600     MachineOperand *NonInlineUse = nullptr;
601     int NonInlineUseOpNo = -1;
602 
603     MachineRegisterInfo::use_iterator NextUse, NextInstUse;
604     for (MachineRegisterInfo::use_iterator
605            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
606          Use != E; Use = NextUse) {
607       NextUse = std::next(Use);
608       MachineInstr *UseMI = Use->getParent();
609       unsigned OpNo = Use.getOperandNo();
610 
611       // Folding the immediate may reveal operations that can be constant
612       // folded or replaced with a copy. This can happen for example after
613       // frame indices are lowered to constants or from splitting 64-bit
614       // constants.
615       //
616       // We may also encounter cases where one or both operands are
617       // immediates materialized into a register, which would ordinarily not
618       // be folded due to multiple uses or operand constraints.
619 
620       if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
621         DEBUG(dbgs() << "Constant folded " << *UseMI <<'\n');
622 
623         // Some constant folding cases replace the user with a new instruction,
624         // e.g. and x, 0 -> 0, so re-scan the uses from the beginning. The same
625         // constant-folded instruction could also have a second use operand of
626         // this value.
627         NextUse = MRI->use_begin(Dst.getReg());
628         continue;
629       }
630 
631       // Try to fold any inline immediate uses, and then only fold other
632       // constants if they have one use.
633       //
634       // The legality of the inline immediate must be checked based on the use
635       // operand, not the defining instruction, because 32-bit instructions
636       // with 32-bit inline immediate sources may be used to materialize
637       // constants used in 16-bit operands.
638       //
639       // e.g. it is unsafe to fold:
640       //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
641       //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
642 
643       // Folding immediates with more than one use will increase program size.
644       // FIXME: This will also reduce register usage, which may be better
645       // in some cases. A better heuristic is needed.
646       if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
647         foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
648       } else {
649         if (++NumLiteralUses == 1) {
650           NonInlineUse = &*Use;
651           NonInlineUseOpNo = OpNo;
652         }
653       }
654     }
655 
656     if (NumLiteralUses == 1) {
657       MachineInstr *UseMI = NonInlineUse->getParent();
658       foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
659     }
660   } else {
661     // Folding register.
662     for (MachineRegisterInfo::use_iterator
663            Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
664          Use != E; ++Use) {
665       MachineInstr *UseMI = Use->getParent();
666 
667       foldOperand(OpToFold, UseMI, Use.getOperandNo(),
668                   FoldList, CopiesToReplace);
669     }
670   }
671 
672   MachineFunction *MF = MI.getParent()->getParent();
673   // Make sure we add EXEC uses to any new v_mov instructions created.
674   for (MachineInstr *Copy : CopiesToReplace)
675     Copy->addImplicitDefUseOperands(*MF);
676 
677   for (FoldCandidate &Fold : FoldList) {
678     if (updateOperand(Fold, *TRI)) {
679       // Clear kill flags.
680       if (Fold.isReg()) {
681         assert(Fold.OpToFold && Fold.OpToFold->isReg());
682         // FIXME: Probably shouldn't bother trying to fold if not an
683         // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
684         // copies.
685         MRI->clearKillFlags(Fold.OpToFold->getReg());
686       }
687       DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
688             static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
689     }
690   }
691 }
692 
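// Recognize a clamping v_max: a max with the clamp bit set whose two sources
// are the same full register and which has no source or output modifiers.
// Returns the repeated source operand, or nullptr if MI doesn't match.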
693 const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
694   unsigned Op = MI.getOpcode();
695   switch (Op) {
696   case AMDGPU::V_MAX_F32_e64:
697   case AMDGPU::V_MAX_F16_e64:
698   case AMDGPU::V_MAX_F64: {
699     if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
700       return nullptr;
701 
702     // Make sure sources are identical.
703     const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
704     const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
705     if (!Src0->isReg() || Src0->getSubReg() != Src1->getSubReg() ||
706         Src0->getSubReg() != AMDGPU::NoSubRegister)
707       return nullptr;
708 
709     // Can't fold up if we have modifiers.
710     if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
711         TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
712         TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
713       return nullptr;
714     return Src0;
715   }
716   default:
717     return nullptr;
718   }
719 }
720 
721 // A clamp trivially has multiple operand uses of the register, since it is
722 // used twice in the same instruction, so count distinct using instructions.
723 static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
724   int Count = 0;
725   for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
726        I != E; ++I) {
727     if (++Count > 1)
728       return false;
729   }
730 
731   return true;
732 }
733 
734 // FIXME: Does this need to check IEEE bit on function?
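// Fold the clamp of a clamping v_max into the instruction that defines its
// source, e.g. (schematic MIR):
//   %vreg1 = V_ADD_F32_e64 ...            ; clamp = 0
//   %vreg2 = V_MAX_F32_e64 %vreg1, %vreg1 ; clamp = 1
// becomes a single V_ADD_F32_e64 with clamp = 1, and %vreg2 is replaced by
// %vreg1.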
735 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
736   const MachineOperand *ClampSrc = isClamp(MI);
737   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
738     return false;
739 
740   MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
741   if (!TII->hasFPClamp(*Def))
742     return false;
743   MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
744   if (!DefClamp)
745     return false;
746 
747   DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');
748 
749   // Clamp is applied after omod, so it is OK if omod is set.
750   DefClamp->setImm(1);
751   MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
752   MI.eraseFromParent();
753   return true;
754 }
755 
756 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
757   if (skipFunction(*MF.getFunction()))
758     return false;
759 
760   MRI = &MF.getRegInfo();
761   ST = &MF.getSubtarget<SISubtarget>();
762   TII = ST->getInstrInfo();
763   TRI = &TII->getRegisterInfo();
764 
765   for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
766        BI != BE; ++BI) {
767 
768     MachineBasicBlock &MBB = *BI;
769     MachineBasicBlock::iterator I, Next;
770     for (I = MBB.begin(); I != MBB.end(); I = Next) {
771       Next = std::next(I);
772       MachineInstr &MI = *I;
773 
774       if (!isSafeToFold(MI)) {
775         // TODO: Try omod also.
776         tryFoldClamp(MI);
777         continue;
778       }
779 
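      // isSafeToFold only accepts movs and copies, so operand 0 is the
      // destination and operand 1 is the value to try to propagate.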
780       MachineOperand &OpToFold = MI.getOperand(1);
781       bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
782 
783       // FIXME: We could also be folding things like TargetIndexes.
784       if (!FoldingImm && !OpToFold.isReg())
785         continue;
786 
787       if (OpToFold.isReg() &&
788           !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
789         continue;
790 
791       // Prevent folding operands backwards in the function. For example,
792       // the COPY below must not be replaced by a mov of 1:
793       //
794       //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
795       //    ...
796       //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
797       MachineOperand &Dst = MI.getOperand(0);
798       if (Dst.isReg() &&
799           !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
800         continue;
801 
802       foldInstOperand(MI, OpToFold);
803     }
804   }
805   return false;
806 }
807