//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/DiagnosticInfo.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage &CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  // The verifier is oblivious to s1 being a valid value for wavesize registers.
  if (Reg.isPhysical())
    return false;

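  // Before selection a virtual register carries either a register class or a
  // register bank, never both; handle whichever annotation is present.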
  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
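  // These intrinsics behave like copies with an implicit exec read; rewrite
  // the instruction in place, dropping the intrinsic ID operand.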
  MI.setDesc(TII.get(NewOpc));
  MI.RemoveOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      Optional<ValueAndVReg> ConstVal =
          getConstantVRegValWithLookThrough(SrcReg, *MRI, true, true);
      if (ConstVal) {
        unsigned MovOpc =
            STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
        BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
            .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
      } else {
        Register MaskedReg = MRI->createVirtualRegister(SrcRC);

        // We can't trust the high bits at this point, so clear them.

        // TODO: Skip masking high bits if def is known boolean.

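        // (and x, 1) followed by a compare against zero materializes the low
        // bit as a full lane mask, which is the form a VCC-bank bool takes.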
        unsigned AndOpc =
            TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
        BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
            .addImm(1)
            .addReg(SrcReg);
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
            .addImm(0)
            .addReg(MaskedReg);
      }

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (MO.getReg().isPhysical())
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }
  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

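  // Produce the requested 32-bit half of a 64-bit operand: registers become a
  // copy from the matching subregister, immediates are split arithmetically.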
  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
258     llvm_unreachable("do not know to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
      DstRB->getID() != AMDGPU::VCCRegBankID)
    return false;

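  // VCC-bank booleans are lane masks, so they need the wave-mask-wide opcode
  // (64-bit in wave64) even though the IR type is only s1.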
  bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
                            STI.isWave64());
  I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));

  // Dead implicit-def of scc
  I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                         true, // isImp
                                         false, // isKill
                                         true)); // isDead
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

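  // No single instruction handles a 64-bit add: emit a low add that defines a
  // carry and a high add that consumes it, then recombine the two halves with
  // a REG_SEQUENCE.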
  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    unsigned NoCarryOpc =
        IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

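  // Scalar path: the carry flows through SCC, so copy a carry-in into SCC
  // before the op and copy the resulting carry-out back out of SCC after.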
  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

// TODO: We should probably legalize these to only using 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC)
    return false;
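  // A 32-bit-aligned extract is just a copy from the matching subregister.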
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

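  // Merge by building a REG_SEQUENCE whose subregister indices tile the
  // destination in source-sized pieces.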
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, 0, SubRegs[I]);

    // Make sure the subregister index is valid for the source register.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
    if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
      return false;

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
  MachineInstr &MI) const {
  if (selectImpl(MI, *CoverageInfo))
    return true;

  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != V2S16)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() != AMDGPU::SGPRRegBankID)
    return false;

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  if (MRI->getType(Src0) != S32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  auto ConstSrc1 =
      getConstantVRegValWithLookThrough(Src1, *MRI, true, true, true);
  if (ConstSrc1) {
    auto ConstSrc0 =
        getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true);
    if (ConstSrc0) {
      const int64_t K0 = ConstSrc0->Value.getSExtValue();
      const int64_t K1 = ConstSrc1->Value.getSExtValue();
      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;

      BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
        .addImm(Lo16 | (Hi16 << 16));
      MI.eraseFromParent();
      return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
    }
  }

  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.RemoveOperand(2);
    return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
           RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  }

  Register ShiftSrc0;
  Register ShiftSrc1;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  bool Shift0 = mi_match(
      Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));

  bool Shift1 = mi_match(
      Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
    // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
    auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
      .addReg(ShiftSrc0)
      .addImm(16);

    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  // Currently not handled by getSubRegFromChannel.
  if (InsSize > 128)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
    TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
  const TargetRegisterClass *Src1RC =
    TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
  // With a constant bus limit of at least 2, there's no issue.
  if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register VDst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(2).getReg();
  Register LaneSelect = MI.getOperand(3).getReg();
  Register VDstIn = MI.getOperand(4).getReg();

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

  Optional<ValueAndVReg> ConstSelect =
    getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
  if (ConstSelect) {
    // The selector has to be an inline immediate, so we can use whatever for
    // the other operands.
    MIB.addReg(Val);
    MIB.addImm(ConstSelect->Value.getSExtValue() &
               maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
  } else {
    Optional<ValueAndVReg> ConstVal =
      getConstantVRegValWithLookThrough(Val, *MRI, true, true);

    // If the value written is an inline immediate, we can get away without a
    // copy to m0.
    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
                                                 STI.hasInv2PiInlineImm())) {
      MIB.addImm(ConstVal->Value.getSExtValue());
      MIB.addReg(LaneSelect);
    } else {
      MIB.addReg(Val);

      // If the lane selector was originally in a VGPR and copied with
      // readfirstlane, there's a hazard to read the same SGPR from the
      // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);

      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
        .addReg(LaneSelect);
      MIB.addReg(AMDGPU::M0);
    }
  }

  MIB.addReg(VDstIn);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32_e64;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64_e64;
  else
    return false;

  // TODO: Match source modifiers.

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();

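  // V_DIV_SCALE expects src0 to coincide with src1 (the denominator) or src2
  // (the numerator); the intrinsic's final immediate selects which of the two
  // is the value being scaled.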
  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addImm(0)     // $src0_modifiers
    .addUse(Src0)  // $src0
    .addImm(0)     // $src1_modifiers
    .addUse(Denom) // $src1
    .addImm(0)     // $src2_modifiers
    .addUse(Numer) // $src2
    .addImm(0)     // $clamp
    .addImm(0);    // $omod

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_strict_wwm:
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
  case Intrinsic::amdgcn_strict_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
  case Intrinsic::amdgcn_writelane:
    return selectWritelane(I);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
    return selectIntrinsicIcmp(I);
  case Intrinsic::amdgcn_ballot:
    return selectBallot(I);
  case Intrinsic::amdgcn_reloc_constant:
    return selectRelocConstant(I);
  case Intrinsic::amdgcn_groupstaticsize:
    return selectGroupStaticSize(I);
  case Intrinsic::returnaddress:
    return selectReturnAddress(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

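// Scalar comparisons set SCC rather than producing a lane mask; 64-bit scalar
// compares only exist for equality, and only on subtargets that have them.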
int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

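  // An SGPR result needs a scalar compare plus a copy out of SCC; a VCC-bank
  // result can use the VALU compare directly, which already yields a lane mask.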
  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
                               *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register DstReg = I.getOperand(0).getReg();
  const unsigned Size = MRI->getType(DstReg).getSizeInBits();
  const bool Is64 = Size == 64;

  if (Size != STI.getWavefrontSize())
    return false;

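  // Ballot of a constant folds: 0 gives a zero mask and true gives exec. A
  // non-constant source is already a lane mask here, so it is simply copied.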
  Optional<ValueAndVReg> Arg =
      getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);

  if (Arg.hasValue()) {
    const int64_t Value = Arg.getValue().Value.getSExtValue();
    if (Value == 0) {
      unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
      BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
    } else if (Value == -1) { // all ones
      Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
    } else
      return false;
  } else {
    Register SrcReg = I.getOperand(2).getReg();
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;

  Module *M = MF->getFunction().getParent();
  const MDNode *Metadata = I.getOperand(2).getMetadata();
  auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
  auto RelocSymbol = cast<GlobalVariable>(
    M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));

  MachineBasicBlock *BB = I.getParent();
  BuildMI(*BB, &I, I.getDebugLoc(),
          TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
    .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
  Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
    AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;

  MachineBasicBlock *MBB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);

  if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    MIB.addImm(MFI->getLDSSize());
  } else {
    Module *M = MF->getFunction().getParent();
    const GlobalValue *GV
      = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
    MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
  MachineBasicBlock *MBB = I.getParent();
  MachineFunction &MF = *MBB->getParent();
  const DebugLoc &DL = I.getDebugLoc();

  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  unsigned Depth = I.getOperand(2).getImm();

  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;

  // Check for kernel and shader functions
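  // Entry functions have no caller, and nonzero depths are not supported, so
  // those cases fold to a null return address.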
  if (Depth != 0 ||
      MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(0);
    I.eraseFromParent();
    return true;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  // Get the return address reg and mark it as an implicit live-in
  Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
  Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                             AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(LiveIn);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
  return true;
}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

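  // Pack the control fields into the DS offset encoding: offset0 carries the
  // ordered-count index, offset1 the release/done bits, shader type and
  // instruction kind (plus the dword count on GFX10+).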
1265   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1266   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1267 
1268   unsigned Offset0 = OrderedCountIndex << 2;
1269   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1270                      (Instruction << 4);
1271 
1272   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1273     Offset1 |= (CountDw - 1) << 6;
1274 
1275   unsigned Offset = Offset0 | (Offset1 << 8);
1276 
1277   Register M0Val = MI.getOperand(2).getReg();
1278   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1279     .addReg(M0Val);
1280 
1281   Register DstReg = MI.getOperand(0).getReg();
1282   Register ValReg = MI.getOperand(3).getReg();
1283   MachineInstrBuilder DS =
1284     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1285       .addReg(ValReg)
1286       .addImm(Offset)
1287       .cloneMemRefs(MI);
1288 
1289   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1290     return false;
1291 
1292   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1293   MI.eraseFromParent();
1294   return Ret;
1295 }
1296 
gwsIntrinToOpcode(unsigned IntrID)1297 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1298   switch (IntrID) {
1299   case Intrinsic::amdgcn_ds_gws_init:
1300     return AMDGPU::DS_GWS_INIT;
1301   case Intrinsic::amdgcn_ds_gws_barrier:
1302     return AMDGPU::DS_GWS_BARRIER;
1303   case Intrinsic::amdgcn_ds_gws_sema_v:
1304     return AMDGPU::DS_GWS_SEMA_V;
1305   case Intrinsic::amdgcn_ds_gws_sema_br:
1306     return AMDGPU::DS_GWS_SEMA_BR;
1307   case Intrinsic::amdgcn_ds_gws_sema_p:
1308     return AMDGPU::DS_GWS_SEMA_P;
1309   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1310     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1311   default:
1312     llvm_unreachable("not a gws intrinsic");
1313   }
1314 }
1315 
selectDSGWSIntrinsic(MachineInstr & MI,Intrinsic::ID IID) const1316 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1317                                                      Intrinsic::ID IID) const {
1318   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1319       !STI.hasGWSSemaReleaseAll())
1320     return false;
1321 
1322   // intrinsic ID, vsrc, offset
1323   const bool HasVSrc = MI.getNumOperands() == 3;
1324   assert(HasVSrc || MI.getNumOperands() == 2);
1325 
1326   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1327   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1328   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1329     return false;
1330 
1331   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1332   assert(OffsetDef);
1333 
1334   unsigned ImmOffset;
1335 
1336   MachineBasicBlock *MBB = MI.getParent();
1337   const DebugLoc &DL = MI.getDebugLoc();
1338 
1339   MachineInstr *Readfirstlane = nullptr;
1340 
1341   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1342   // incoming offset, in case there's an add of a constant. We'll have to put it
1343   // back later.
1344   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1345     Readfirstlane = OffsetDef;
1346     BaseOffset = OffsetDef->getOperand(1).getReg();
1347     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1348   }
1349 
1350   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1351     // If we have a constant offset, try to use the 0 in m0 as the base.
1352     // TODO: Look into changing the default m0 initialization value. If the
1353     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1354     // the immediate offset.
1355 
1356     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1357     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1358       .addImm(0);
1359   } else {
1360     std::tie(BaseOffset, ImmOffset) =
1361         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1362 
1363     if (Readfirstlane) {
1364       // We have the constant offset now, so put the readfirstlane back on the
1365       // variable component.
1366       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1367         return false;
1368 
1369       Readfirstlane->getOperand(1).setReg(BaseOffset);
1370       BaseOffset = Readfirstlane->getOperand(0).getReg();
1371     } else {
1372       if (!RBI.constrainGenericRegister(BaseOffset,
1373                                         AMDGPU::SReg_32RegClass, *MRI))
1374         return false;
1375     }
1376 
1377     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1378     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1379       .addReg(BaseOffset)
1380       .addImm(16);
1381 
1382     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1383       .addReg(M0Base);
1384   }
1385 
1386   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1387   // offset field) % 64. Some versions of the programming guide omit the m0
1388   // part, or claim it's from offset 0.
1389   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1390 
1391   if (HasVSrc) {
1392     Register VSrc = MI.getOperand(1).getReg();
1393     MIB.addReg(VSrc);
1394     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1395       return false;
1396   }
1397 
1398   MIB.addImm(ImmOffset)
1399      .cloneMemRefs(MI);
1400 
1401   MI.eraseFromParent();
1402   return true;
1403 }
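// A minimal sketch of the selection above, assuming a hypothetical
// ds_gws_barrier whose offset operand is %n + 3 (%n and %vsrc are
// illustrative virtual registers, not taken from this file):
//   %m0base = S_LSHL_B32 %n, 16    ; variable part into m0[21:16]
//   $m0     = COPY %m0base
//   DS_GWS_BARRIER %vsrc, 3        ; constant part in the offset field
// The hardware then uses (<isa opaque base> + m0[21:16] + 3) % 64 as the
// resource id.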
1404 
1405 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1406                                                       bool IsAppend) const {
1407   Register PtrBase = MI.getOperand(2).getReg();
1408   LLT PtrTy = MRI->getType(PtrBase);
1409   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1410 
1411   unsigned Offset;
1412   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1413 
1414   // TODO: Should this try to look through readfirstlane like GWS?
1415   if (!isDSOffsetLegal(PtrBase, Offset)) {
1416     PtrBase = MI.getOperand(2).getReg();
1417     Offset = 0;
1418   }
1419 
1420   MachineBasicBlock *MBB = MI.getParent();
1421   const DebugLoc &DL = MI.getDebugLoc();
1422   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1423 
1424   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1425     .addReg(PtrBase);
1426   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1427     return false;
1428 
1429   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1430     .addImm(Offset)
1431     .addImm(IsGDS ? -1 : 0)
1432     .cloneMemRefs(MI);
1433   MI.eraseFromParent();
1434   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1435 }
1436 
1437 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1438   if (TM.getOptLevel() > CodeGenOpt::None) {
1439     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1440     if (WGSize <= STI.getWavefrontSize()) {
1441       MachineBasicBlock *MBB = MI.getParent();
1442       const DebugLoc &DL = MI.getDebugLoc();
1443       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1444       MI.eraseFromParent();
1445       return true;
1446     }
1447   }
1448   return selectImpl(MI, *CoverageInfo);
1449 }
1450 
1451 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1452                          bool &IsTexFail) {
1453   if (TexFailCtrl)
1454     IsTexFail = true;
1455 
1456   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1457   TexFailCtrl &= ~(uint64_t)0x1;
1458   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1459   TexFailCtrl &= ~(uint64_t)0x2;
1460 
1461   return TexFailCtrl == 0;
1462 }
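// For example, TexFailCtrl = 3 decodes as TFE = 1 and LWE = 1 and returns
// true, while any bit set above bit 1 survives the stripping, leaves the
// control word nonzero, and makes the function return false so selection
// fails.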
1463 
1464 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1465   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1466   MachineBasicBlock *MBB = MI.getParent();
1467   const DebugLoc &DL = MI.getDebugLoc();
1468 
1469   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1470     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1471 
1472   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1473   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1474       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1475   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1476       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1477   unsigned IntrOpcode = Intr->BaseOpcode;
1478   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1479 
1480   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1481 
1482   Register VDataIn, VDataOut;
1483   LLT VDataTy;
1484   int NumVDataDwords = -1;
1485   bool IsD16 = false;
1486 
1487   bool Unorm;
1488   if (!BaseOpcode->Sampler)
1489     Unorm = true;
1490   else
1491     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1492 
1493   bool TFE;
1494   bool LWE;
1495   bool IsTexFail = false;
1496   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1497                     TFE, LWE, IsTexFail))
1498     return false;
1499 
1500   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1501   const bool IsA16 = (Flags & 1) != 0;
1502   const bool IsG16 = (Flags & 2) != 0;
1503 
1504   // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1505   if (IsA16 && !STI.hasG16() && !IsG16)
1506     return false;
1507 
1508   unsigned DMask = 0;
1509   unsigned DMaskLanes = 0;
1510 
1511   if (BaseOpcode->Atomic) {
1512     VDataOut = MI.getOperand(0).getReg();
1513     VDataIn = MI.getOperand(2).getReg();
1514     LLT Ty = MRI->getType(VDataIn);
1515 
1516     // Be careful to allow atomic swap on 16-bit element vectors.
1517     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1518       Ty.getSizeInBits() == 128 :
1519       Ty.getSizeInBits() == 64;
1520 
1521     if (BaseOpcode->AtomicX2) {
1522       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1523 
1524       DMask = Is64Bit ? 0xf : 0x3;
1525       NumVDataDwords = Is64Bit ? 4 : 2;
1526     } else {
1527       DMask = Is64Bit ? 0x3 : 0x1;
1528       NumVDataDwords = Is64Bit ? 2 : 1;
1529     }
1530   } else {
1531     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1532     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1533 
1534     // One memoperand is mandatory, except for getresinfo.
1535     // FIXME: Check this in verifier.
1536     if (!MI.memoperands_empty()) {
1537       const MachineMemOperand *MMO = *MI.memoperands_begin();
1538 
1539       // Infer d16 from the memory size, as the register type will be mangled by
1540       // unpacked subtargets, or by TFE.
1541       IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1542     }
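    // For example, with DMaskLanes = 4 and an 8-byte memory operand this
    // computes (8 * 8) / 4 = 16 bits per enabled lane, which is < 32, so the
    // access is treated as d16.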
1543 
1544     if (BaseOpcode->Store) {
1545       VDataIn = MI.getOperand(1).getReg();
1546       VDataTy = MRI->getType(VDataIn);
1547       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1548     } else {
1549       VDataOut = MI.getOperand(0).getReg();
1550       VDataTy = MRI->getType(VDataOut);
1551       NumVDataDwords = DMaskLanes;
1552 
1553       if (IsD16 && !STI.hasUnpackedD16VMem())
1554         NumVDataDwords = (DMaskLanes + 1) / 2;
1555     }
1556   }
1557 
1558   // Optimize _L to _LZ when _L is zero
1559   if (LZMappingInfo) {
1560     // The legalizer replaced the register with an immediate 0 if we need to
1561     // change the opcode.
1562     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1563     if (Lod.isImm()) {
1564       assert(Lod.getImm() == 0);
1565       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1566     }
1567   }
1568 
1569   // Optimize _mip away, when 'lod' is zero
1570   if (MIPMappingInfo) {
1571     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1572     if (Lod.isImm()) {
1573       assert(Lod.getImm() == 0);
1574       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1575     }
1576   }
1577 
1578   // Set G16 opcode
1579   if (IsG16 && !IsA16) {
1580     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1581         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1582     assert(G16MappingInfo);
1583     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1584   }
1585 
1586   // TODO: Check this in verifier.
1587   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1588 
1589   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1590   if (BaseOpcode->Atomic)
1591     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1592   if (CPol & ~AMDGPU::CPol::ALL)
1593     return false;
1594 
1595   int NumVAddrRegs = 0;
1596   int NumVAddrDwords = 0;
1597   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1598     // Skip the $noregs and 0s inserted during legalization.
1599     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1600     if (!AddrOp.isReg())
1601       continue; // XXX - Break?
1602 
1603     Register Addr = AddrOp.getReg();
1604     if (!Addr)
1605       break;
1606 
1607     ++NumVAddrRegs;
1608     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1609   }
1610 
1611   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1612   // NSA, these should have been packed into a single value in the first
1613   // address register.
1614   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1615   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1616     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1617     return false;
1618   }
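  // For example, three unpacked 32-bit address registers give NumVAddrRegs ==
  // NumVAddrDwords == 3 and select the NSA encoding, while a single packed
  // 96-bit address register gives NumVAddrRegs == 1 and keeps the default
  // encoding.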
1619 
1620   if (IsTexFail)
1621     ++NumVDataDwords;
1622 
1623   int Opcode = -1;
1624   if (IsGFX10Plus) {
1625     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1626                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1627                                           : AMDGPU::MIMGEncGfx10Default,
1628                                    NumVDataDwords, NumVAddrDwords);
1629   } else {
1630     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1631       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1632                                      NumVDataDwords, NumVAddrDwords);
1633     if (Opcode == -1)
1634       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1635                                      NumVDataDwords, NumVAddrDwords);
1636   }
1637   assert(Opcode != -1);
1638 
1639   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1640     .cloneMemRefs(MI);
1641 
1642   if (VDataOut) {
1643     if (BaseOpcode->AtomicX2) {
1644       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1645 
1646       Register TmpReg = MRI->createVirtualRegister(
1647         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1648       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1649 
1650       MIB.addDef(TmpReg);
1651       if (!MRI->use_empty(VDataOut)) {
1652         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1653             .addReg(TmpReg, RegState::Kill, SubReg);
1654       }
1655 
1656     } else {
1657       MIB.addDef(VDataOut); // vdata output
1658     }
1659   }
1660 
1661   if (VDataIn)
1662     MIB.addReg(VDataIn); // vdata input
1663 
1664   for (int I = 0; I != NumVAddrRegs; ++I) {
1665     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1666     if (SrcOp.isReg()) {
1667       assert(SrcOp.getReg() != 0);
1668       MIB.addReg(SrcOp.getReg());
1669     }
1670   }
1671 
1672   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1673   if (BaseOpcode->Sampler)
1674     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1675 
1676   MIB.addImm(DMask); // dmask
1677 
1678   if (IsGFX10Plus)
1679     MIB.addImm(DimInfo->Encoding);
1680   MIB.addImm(Unorm);
1681 
1682   MIB.addImm(CPol);
1683   MIB.addImm(IsA16 &&  // a16 or r128
1684              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1685   if (IsGFX10Plus)
1686     MIB.addImm(IsA16 ? -1 : 0);
1687 
1688   MIB.addImm(TFE); // tfe
1689   MIB.addImm(LWE); // lwe
1690   if (!IsGFX10Plus)
1691     MIB.addImm(DimInfo->DA ? -1 : 0);
1692   if (BaseOpcode->HasD16)
1693     MIB.addImm(IsD16 ? -1 : 0);
1694 
1695   if (IsTexFail) {
1696     // An image load instruction with TFE/LWE only conditionally writes to its
1697     // result registers. Initialize them to zero so that we always get well
1698     // defined result values.
1699     assert(VDataOut && !VDataIn);
1700     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1701     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1702     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1703       .addImm(0);
1704     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1705     if (STI.usePRTStrictNull()) {
1706       // With enable-prt-strict-null enabled, initialize all result registers to
1707       // zero.
1708       auto RegSeq =
1709           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1710       for (auto Sub : Parts)
1711         RegSeq.addReg(Zero).addImm(Sub);
1712     } else {
1713       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1714       // result register.
1715       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1716       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1717       auto RegSeq =
1718           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1719       for (auto Sub : Parts.drop_back(1))
1720         RegSeq.addReg(Undef).addImm(Sub);
1721       RegSeq.addReg(Zero).addImm(Parts.back());
1722     }
1723     MIB.addReg(Tied, RegState::Implicit);
1724     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1725   }
1726 
1727   MI.eraseFromParent();
1728   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1729 }
1730 
1731 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1732     MachineInstr &I) const {
1733   unsigned IntrinsicID = I.getIntrinsicID();
1734   switch (IntrinsicID) {
1735   case Intrinsic::amdgcn_end_cf:
1736     return selectEndCfIntrinsic(I);
1737   case Intrinsic::amdgcn_ds_ordered_add:
1738   case Intrinsic::amdgcn_ds_ordered_swap:
1739     return selectDSOrderedIntrinsic(I, IntrinsicID);
1740   case Intrinsic::amdgcn_ds_gws_init:
1741   case Intrinsic::amdgcn_ds_gws_barrier:
1742   case Intrinsic::amdgcn_ds_gws_sema_v:
1743   case Intrinsic::amdgcn_ds_gws_sema_br:
1744   case Intrinsic::amdgcn_ds_gws_sema_p:
1745   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1746     return selectDSGWSIntrinsic(I, IntrinsicID);
1747   case Intrinsic::amdgcn_ds_append:
1748     return selectDSAppendConsume(I, true);
1749   case Intrinsic::amdgcn_ds_consume:
1750     return selectDSAppendConsume(I, false);
1751   case Intrinsic::amdgcn_s_barrier:
1752     return selectSBarrier(I);
1753   case Intrinsic::amdgcn_global_atomic_fadd:
1754     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1755   default: {
1756     return selectImpl(I, *CoverageInfo);
1757   }
1758   }
1759 }
1760 
1761 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1762   if (selectImpl(I, *CoverageInfo))
1763     return true;
1764 
1765   MachineBasicBlock *BB = I.getParent();
1766   const DebugLoc &DL = I.getDebugLoc();
1767 
1768   Register DstReg = I.getOperand(0).getReg();
1769   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1770   assert(Size <= 32 || Size == 64);
1771   const MachineOperand &CCOp = I.getOperand(1);
1772   Register CCReg = CCOp.getReg();
1773   if (!isVCC(CCReg, *MRI)) {
1774     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1775                                          AMDGPU::S_CSELECT_B32;
1776     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1777             .addReg(CCReg);
1778 
1779     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1780     // bank, because it does not cover the register class we use to represent
1781     // it. So we need to manually set the register class here.
1782     if (!MRI->getRegClassOrNull(CCReg))
1783         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1784     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1785             .add(I.getOperand(2))
1786             .add(I.getOperand(3));
1787 
1788     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1789                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1790     I.eraseFromParent();
1791     return Ret;
1792   }
1793 
1794   // Wide VGPR select should have been split in RegBankSelect.
1795   if (Size > 32)
1796     return false;
1797 
1798   MachineInstr *Select =
1799       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1800               .addImm(0)
1801               .add(I.getOperand(3))
1802               .addImm(0)
1803               .add(I.getOperand(2))
1804               .add(I.getOperand(1));
1805 
1806   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1807   I.eraseFromParent();
1808   return Ret;
1809 }
1810 
1811 static int sizeToSubRegIndex(unsigned Size) {
1812   switch (Size) {
1813   case 32:
1814     return AMDGPU::sub0;
1815   case 64:
1816     return AMDGPU::sub0_sub1;
1817   case 96:
1818     return AMDGPU::sub0_sub1_sub2;
1819   case 128:
1820     return AMDGPU::sub0_sub1_sub2_sub3;
1821   case 256:
1822     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1823   default:
1824     if (Size < 32)
1825       return AMDGPU::sub0;
1826     if (Size > 256)
1827       return -1;
1828     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1829   }
1830 }
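// For example, Size = 64 maps to sub0_sub1, an odd size such as 48 rounds up
// to the 64-bit index, and any size over 256 reports failure with -1.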
1831 
1832 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1833   Register DstReg = I.getOperand(0).getReg();
1834   Register SrcReg = I.getOperand(1).getReg();
1835   const LLT DstTy = MRI->getType(DstReg);
1836   const LLT SrcTy = MRI->getType(SrcReg);
1837   const LLT S1 = LLT::scalar(1);
1838 
1839   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1840   const RegisterBank *DstRB;
1841   if (DstTy == S1) {
1842     // This is a special case. We don't treat s1 for legalization artifacts as
1843     // vcc booleans.
1844     DstRB = SrcRB;
1845   } else {
1846     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1847     if (SrcRB != DstRB)
1848       return false;
1849   }
1850 
1851   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1852 
1853   unsigned DstSize = DstTy.getSizeInBits();
1854   unsigned SrcSize = SrcTy.getSizeInBits();
1855 
1856   const TargetRegisterClass *SrcRC
1857     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1858   const TargetRegisterClass *DstRC
1859     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1860   if (!SrcRC || !DstRC)
1861     return false;
1862 
1863   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1864       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1865     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1866     return false;
1867   }
1868 
1869   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1870     MachineBasicBlock *MBB = I.getParent();
1871     const DebugLoc &DL = I.getDebugLoc();
1872 
1873     Register LoReg = MRI->createVirtualRegister(DstRC);
1874     Register HiReg = MRI->createVirtualRegister(DstRC);
1875     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1876       .addReg(SrcReg, 0, AMDGPU::sub0);
1877     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1878       .addReg(SrcReg, 0, AMDGPU::sub1);
1879 
1880     if (IsVALU && STI.hasSDWA()) {
1881       // Write the low 16-bits of the high element into the high 16-bits of the
1882       // low element.
1883       MachineInstr *MovSDWA =
1884         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1885         .addImm(0)                             // $src0_modifiers
1886         .addReg(HiReg)                         // $src0
1887         .addImm(0)                             // $clamp
1888         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1889         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1890         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1891         .addReg(LoReg, RegState::Implicit);
1892       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1893     } else {
1894       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1895       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1896       Register ImmReg = MRI->createVirtualRegister(DstRC);
1897       if (IsVALU) {
1898         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1899           .addImm(16)
1900           .addReg(HiReg);
1901       } else {
1902         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1903           .addReg(HiReg)
1904           .addImm(16);
1905       }
1906 
1907       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1908       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1909       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1910 
1911       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1912         .addImm(0xffff);
1913       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1914         .addReg(LoReg)
1915         .addReg(ImmReg);
1916       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1917         .addReg(TmpReg0)
1918         .addReg(TmpReg1);
1919     }
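    // Both paths above produce (HiReg << 16) | (LoReg & 0xffff): the low 16
    // bits of each source dword packed into a single 32-bit result.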
1920 
1921     I.eraseFromParent();
1922     return true;
1923   }
1924 
1925   if (!DstTy.isScalar())
1926     return false;
1927 
1928   if (SrcSize > 32) {
1929     int SubRegIdx = sizeToSubRegIndex(DstSize);
1930     if (SubRegIdx == -1)
1931       return false;
1932 
1933     // Deal with weird cases where the class only partially supports the subreg
1934     // index.
1935     const TargetRegisterClass *SrcWithSubRC
1936       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1937     if (!SrcWithSubRC)
1938       return false;
1939 
1940     if (SrcWithSubRC != SrcRC) {
1941       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1942         return false;
1943     }
1944 
1945     I.getOperand(1).setSubReg(SubRegIdx);
1946   }
1947 
1948   I.setDesc(TII.get(TargetOpcode::COPY));
1949   return true;
1950 }
1951 
1952 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1953 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1954   Mask = maskTrailingOnes<unsigned>(Size);
1955   int SignedMask = static_cast<int>(Mask);
1956   return SignedMask >= -16 && SignedMask <= 64;
1957 }
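// For example, Size = 4 gives Mask = 0xf (15), which fits the inline
// immediate range [-16, 64], so an AND is preferred; Size = 8 gives Mask =
// 0xff (255), which does not fit, so the BFE form is used instead.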
1958 
1959 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1960 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1961   Register Reg, const MachineRegisterInfo &MRI,
1962   const TargetRegisterInfo &TRI) const {
1963   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1964   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1965     return RB;
1966 
1967   // Ignore the type, since we don't use vcc in artifacts.
1968   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1969     return &RBI.getRegBankFromRegClass(*RC, LLT());
1970   return nullptr;
1971 }
1972 
1973 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1974   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1975   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1976   const DebugLoc &DL = I.getDebugLoc();
1977   MachineBasicBlock &MBB = *I.getParent();
1978   const Register DstReg = I.getOperand(0).getReg();
1979   const Register SrcReg = I.getOperand(1).getReg();
1980 
1981   const LLT DstTy = MRI->getType(DstReg);
1982   const LLT SrcTy = MRI->getType(SrcReg);
1983   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1984     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1985   const unsigned DstSize = DstTy.getSizeInBits();
1986   if (!DstTy.isScalar())
1987     return false;
1988 
1989   // Artifact casts should never use vcc.
1990   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1991 
1992   // FIXME: This should probably be illegal and split earlier.
1993   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
1994     if (DstSize <= 32)
1995       return selectCOPY(I);
1996 
1997     const TargetRegisterClass *SrcRC =
1998         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
1999     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2000     const TargetRegisterClass *DstRC =
2001         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
2002 
2003     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2004     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2005     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2006       .addReg(SrcReg)
2007       .addImm(AMDGPU::sub0)
2008       .addReg(UndefReg)
2009       .addImm(AMDGPU::sub1);
2010     I.eraseFromParent();
2011 
2012     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2013            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2014   }
2015 
2016   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2017     // 64-bit should have been split up in RegBankSelect
2018 
2019     // Try to use an and with a mask if it will save code size.
2020     unsigned Mask;
2021     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2022       MachineInstr *ExtI =
2023       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2024         .addImm(Mask)
2025         .addReg(SrcReg);
2026       I.eraseFromParent();
2027       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2028     }
2029 
2030     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2031     MachineInstr *ExtI =
2032       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2033       .addReg(SrcReg)
2034       .addImm(0) // Offset
2035       .addImm(SrcSize); // Width
2036     I.eraseFromParent();
2037     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2038   }
2039 
2040   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2041     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2042       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2043     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2044       return false;
2045 
2046     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2047       const unsigned SextOpc = SrcSize == 8 ?
2048         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2049       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2050         .addReg(SrcReg);
2051       I.eraseFromParent();
2052       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2053     }
2054 
2055     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2056     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2057 
2058     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
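    // For example, a sign extend from SrcSize = 8 with offset 0 encodes the
    // second source operand as (8 << 16) = 0x80000, matching the
    // .addImm(SrcSize << 16) calls below.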
2059     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2060       // We need a 64-bit register source, but the high bits don't matter.
2061       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2062       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2063       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2064 
2065       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2066       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2067         .addReg(SrcReg, 0, SubReg)
2068         .addImm(AMDGPU::sub0)
2069         .addReg(UndefReg)
2070         .addImm(AMDGPU::sub1);
2071 
2072       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2073         .addReg(ExtReg)
2074         .addImm(SrcSize << 16);
2075 
2076       I.eraseFromParent();
2077       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2078     }
2079 
2080     unsigned Mask;
2081     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2082       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2083         .addReg(SrcReg)
2084         .addImm(Mask);
2085     } else {
2086       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2087         .addReg(SrcReg)
2088         .addImm(SrcSize << 16);
2089     }
2090 
2091     I.eraseFromParent();
2092     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2093   }
2094 
2095   return false;
2096 }
2097 
2098 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2099   MachineBasicBlock *BB = I.getParent();
2100   MachineOperand &ImmOp = I.getOperand(1);
2101   Register DstReg = I.getOperand(0).getReg();
2102   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2103 
2104   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2105   if (ImmOp.isFPImm()) {
2106     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2107     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2108   } else if (ImmOp.isCImm()) {
2109     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2110   } else {
2111     llvm_unreachable("Not supported by g_constants");
2112   }
2113 
2114   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2115   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2116 
2117   unsigned Opcode;
2118   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2119     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2120   } else {
2121     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2122 
2123     // We should never produce s1 values on banks other than VCC. If the user of
2124     // this already constrained the register, we may incorrectly think it's VCC
2125     // if it wasn't originally.
2126     if (Size == 1)
2127       return false;
2128   }
2129 
2130   if (Size != 64) {
2131     I.setDesc(TII.get(Opcode));
2132     I.addImplicitDefUseOperands(*MF);
2133     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2134   }
2135 
2136   const DebugLoc &DL = I.getDebugLoc();
2137 
2138   APInt Imm(Size, I.getOperand(1).getImm());
2139 
2140   MachineInstr *ResInst;
2141   if (IsSgpr && TII.isInlineConstant(Imm)) {
2142     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2143       .addImm(I.getOperand(1).getImm());
2144   } else {
2145     const TargetRegisterClass *RC = IsSgpr ?
2146       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2147     Register LoReg = MRI->createVirtualRegister(RC);
2148     Register HiReg = MRI->createVirtualRegister(RC);
2149 
2150     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2151       .addImm(Imm.trunc(32).getZExtValue());
2152 
2153     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2154       .addImm(Imm.ashr(32).getZExtValue());
2155 
2156     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2157       .addReg(LoReg)
2158       .addImm(AMDGPU::sub0)
2159       .addReg(HiReg)
2160       .addImm(AMDGPU::sub1);
2161   }
2162 
2163   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2164   // work for target independent opcodes
2165   I.eraseFromParent();
2166   const TargetRegisterClass *DstRC =
2167     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2168   if (!DstRC)
2169     return true;
2170   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2171 }
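// A sketch of the non-inline 64-bit SGPR case above: a constant such as
// 0x1234567800000000 is split by Imm.trunc(32) and Imm.ashr(32) into two
// S_MOV_B32s, then reassembled with a REG_SEQUENCE over sub0/sub1.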
2172 
2173 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2174   // Only manually handle the f64 SGPR case.
2175   //
2176   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2177   // the bit ops theoretically have a second result due to the implicit def of
2178   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2179   // that is easy by disabling the check. The result works, but uses a
2180   // nonsensical sreg32orlds_and_sreg_1 regclass.
2181   //
2182   // The DAG emitter is more problematic, and incorrectly adds both results of
2183   // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2184 
2185   Register Dst = MI.getOperand(0).getReg();
2186   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2187   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2188       MRI->getType(Dst) != LLT::scalar(64))
2189     return false;
2190 
2191   Register Src = MI.getOperand(1).getReg();
2192   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2193   if (Fabs)
2194     Src = Fabs->getOperand(1).getReg();
2195 
2196   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2197       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2198     return false;
2199 
2200   MachineBasicBlock *BB = MI.getParent();
2201   const DebugLoc &DL = MI.getDebugLoc();
2202   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2203   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2204   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2205   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2206 
2207   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2208     .addReg(Src, 0, AMDGPU::sub0);
2209   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2210     .addReg(Src, 0, AMDGPU::sub1);
2211   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2212     .addImm(0x80000000);
2213 
2214   // Set or toggle sign bit.
2215   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2216   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2217     .addReg(HiReg)
2218     .addReg(ConstReg);
2219   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2220     .addReg(LoReg)
2221     .addImm(AMDGPU::sub0)
2222     .addReg(OpReg)
2223     .addImm(AMDGPU::sub1);
2224   MI.eraseFromParent();
2225   return true;
2226 }
2227 
2228 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2229 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2230   Register Dst = MI.getOperand(0).getReg();
2231   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2232   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2233       MRI->getType(Dst) != LLT::scalar(64))
2234     return false;
2235 
2236   Register Src = MI.getOperand(1).getReg();
2237   MachineBasicBlock *BB = MI.getParent();
2238   const DebugLoc &DL = MI.getDebugLoc();
2239   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2240   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2241   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2242   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2243 
2244   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2245       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2246     return false;
2247 
2248   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2249     .addReg(Src, 0, AMDGPU::sub0);
2250   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2251     .addReg(Src, 0, AMDGPU::sub1);
2252   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2253     .addImm(0x7fffffff);
2254 
2255   // Clear sign bit.
2256   // TODO: Should this use S_BITSET0_*?
2257   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2258     .addReg(HiReg)
2259     .addReg(ConstReg);
2260   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2261     .addReg(LoReg)
2262     .addImm(AMDGPU::sub0)
2263     .addReg(OpReg)
2264     .addImm(AMDGPU::sub1);
2265 
2266   MI.eraseFromParent();
2267   return true;
2268 }
2269 
2270 static bool isConstant(const MachineInstr &MI) {
2271   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2272 }
2273 
2274 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2275     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2276 
2277   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2278 
2279   assert(PtrMI);
2280 
2281   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2282     return;
2283 
2284   GEPInfo GEPInfo(*PtrMI);
2285 
2286   for (unsigned i = 1; i != 3; ++i) {
2287     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2288     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2289     assert(OpDef);
2290     if (i == 2 && isConstant(*OpDef)) {
2291       // TODO: Could handle constant base + variable offset, but a combine
2292       // probably should have commuted it.
2293       assert(GEPInfo.Imm == 0);
2294       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2295       continue;
2296     }
2297     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2298     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2299       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2300     else
2301       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2302   }
2303 
2304   AddrInfo.push_back(GEPInfo);
2305   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2306 }
2307 
2308 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2309   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2310 }
2311 
2312 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2313   if (!MI.hasOneMemOperand())
2314     return false;
2315 
2316   const MachineMemOperand *MMO = *MI.memoperands_begin();
2317   const Value *Ptr = MMO->getValue();
2318 
2319   // UndefValue means this is a load of a kernel input.  These are uniform.
2320   // Sometimes LDS instructions have constant pointers.
2321   // If Ptr is null, then that means this mem operand contains a
2322   // PseudoSourceValue like GOT.
2323   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2324       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2325     return true;
2326 
2327   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2328     return true;
2329 
2330   const Instruction *I = dyn_cast<Instruction>(Ptr);
2331   return I && I->getMetadata("amdgpu.uniform");
2332 }
2333 
2334 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2335   for (const GEPInfo &GEPInfo : AddrInfo) {
2336     if (!GEPInfo.VgprParts.empty())
2337       return true;
2338   }
2339   return false;
2340 }
2341 
2342 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2343   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2344   unsigned AS = PtrTy.getAddressSpace();
2345   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2346       STI.ldsRequiresM0Init()) {
2347     MachineBasicBlock *BB = I.getParent();
2348 
2349     // If DS instructions require M0 initialization, insert it before selecting.
2350     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2351       .addImm(-1);
2352   }
2353 }
2354 
2355 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2356   MachineInstr &I) const {
2357   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2358     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2359     unsigned AS = PtrTy.getAddressSpace();
2360     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2361       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2362   }
2363 
2364   initM0(I);
2365   return selectImpl(I, *CoverageInfo);
2366 }
2367 
2368 // TODO: No rtn optimization.
2369 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2370   MachineInstr &MI) const {
2371   Register PtrReg = MI.getOperand(1).getReg();
2372   const LLT PtrTy = MRI->getType(PtrReg);
2373   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2374       STI.useFlatForGlobal())
2375     return selectImpl(MI, *CoverageInfo);
2376 
2377   Register DstReg = MI.getOperand(0).getReg();
2378   const LLT Ty = MRI->getType(DstReg);
2379   const bool Is64 = Ty.getSizeInBits() == 64;
2380   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2381   Register TmpReg = MRI->createVirtualRegister(
2382     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2383 
2384   const DebugLoc &DL = MI.getDebugLoc();
2385   MachineBasicBlock *BB = MI.getParent();
2386 
2387   Register VAddr, RSrcReg, SOffset;
2388   int64_t Offset = 0;
2389 
2390   unsigned Opcode;
2391   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2392     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2393                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2394   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2395                                    RSrcReg, SOffset, Offset)) {
2396     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2397                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2398   } else
2399     return selectImpl(MI, *CoverageInfo);
2400 
2401   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2402     .addReg(MI.getOperand(2).getReg());
2403 
2404   if (VAddr)
2405     MIB.addReg(VAddr);
2406 
2407   MIB.addReg(RSrcReg);
2408   if (SOffset)
2409     MIB.addReg(SOffset);
2410   else
2411     MIB.addImm(0);
2412 
2413   MIB.addImm(Offset);
2414   MIB.addImm(AMDGPU::CPol::GLC);
2415   MIB.cloneMemRefs(MI);
2416 
2417   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2418     .addReg(TmpReg, RegState::Kill, SubReg);
2419 
2420   MI.eraseFromParent();
2421 
2422   MRI->setRegClass(
2423     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2424   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2425 }
2426 
2427 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2428   MachineBasicBlock *BB = I.getParent();
2429   MachineOperand &CondOp = I.getOperand(0);
2430   Register CondReg = CondOp.getReg();
2431   const DebugLoc &DL = I.getDebugLoc();
2432 
2433   unsigned BrOpcode;
2434   Register CondPhysReg;
2435   const TargetRegisterClass *ConstrainRC;
2436 
2437   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2438   // whether the branch is uniform when selecting the instruction. In
2439   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2440   // RegBankSelect knows what it's doing if the branch condition is scc, even
2441   // though it currently does not.
2442   if (!isVCC(CondReg, *MRI)) {
2443     if (MRI->getType(CondReg) != LLT::scalar(32))
2444       return false;
2445 
2446     CondPhysReg = AMDGPU::SCC;
2447     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2448     ConstrainRC = &AMDGPU::SReg_32RegClass;
2449   } else {
2450     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2451     // Based on the register bank, we sort of know that a VCC producer ands
2452     // inactive lanes with 0. What if there was a logical operation with vcc
2453     // producers in different blocks/with different exec masks?
2454     // FIXME: Should scc->vcc copies and with exec?
2455     CondPhysReg = TRI.getVCC();
2456     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2457     ConstrainRC = TRI.getBoolRC();
2458   }
2459 
2460   if (!MRI->getRegClassOrNull(CondReg))
2461     MRI->setRegClass(CondReg, ConstrainRC);
2462 
2463   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2464     .addReg(CondReg);
2465   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2466     .addMBB(I.getOperand(1).getMBB());
2467 
2468   I.eraseFromParent();
2469   return true;
2470 }
2471 
2472 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2473   MachineInstr &I) const {
2474   Register DstReg = I.getOperand(0).getReg();
2475   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2476   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2477   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2478   if (IsVGPR)
2479     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2480 
2481   return RBI.constrainGenericRegister(
2482     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2483 }
2484 
2485 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2486   Register DstReg = I.getOperand(0).getReg();
2487   Register SrcReg = I.getOperand(1).getReg();
2488   Register MaskReg = I.getOperand(2).getReg();
2489   LLT Ty = MRI->getType(DstReg);
2490   LLT MaskTy = MRI->getType(MaskReg);
2491 
2492   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2493   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2494   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2495   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2496   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2497     return false;
2498 
2499   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2500   const TargetRegisterClass &RegRC
2501     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2502 
2503   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2504                                                                   *MRI);
2505   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2506                                                                   *MRI);
2507   const TargetRegisterClass *MaskRC =
2508       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2509 
2510   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2511       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2512       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2513     return false;
2514 
2515   MachineBasicBlock *BB = I.getParent();
2516   const DebugLoc &DL = I.getDebugLoc();
2517   if (Ty.getSizeInBits() == 32) {
2518     assert(MaskTy.getSizeInBits() == 32 &&
2519            "ptrmask should have been narrowed during legalize");
2520 
2521     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2522       .addReg(SrcReg)
2523       .addReg(MaskReg);
2524     I.eraseFromParent();
2525     return true;
2526   }
2527 
2528   Register HiReg = MRI->createVirtualRegister(&RegRC);
2529   Register LoReg = MRI->createVirtualRegister(&RegRC);
2530 
2531   // Extract the subregisters from the source pointer.
2532   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2533     .addReg(SrcReg, 0, AMDGPU::sub0);
2534   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2535     .addReg(SrcReg, 0, AMDGPU::sub1);
2536 
2537   Register MaskedLo, MaskedHi;
2538 
2539   // Try to avoid emitting a bit operation when we only need to touch half of
2540   // the 64-bit pointer.
2541   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2542 
2543   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2544   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
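  // For example, a mask known to be 0xffffffff00000000 turns the high half
  // into a plain copy and emits the 32-bit AND only for the low half.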
2545   if ((MaskOnes & MaskLo32) == MaskLo32) {
2546     // If all the bits in the low half are 1, we only need a copy for it.
2547     MaskedLo = LoReg;
2548   } else {
2549     // Extract the mask subregister and apply the and.
2550     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2551     MaskedLo = MRI->createVirtualRegister(&RegRC);
2552 
2553     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2554       .addReg(MaskReg, 0, AMDGPU::sub0);
2555     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2556       .addReg(LoReg)
2557       .addReg(MaskLo);
2558   }
2559 
2560   if ((MaskOnes & MaskHi32) == MaskHi32) {
2561     // If all the bits in the high half are 1, we only need a copy for it.
2562     MaskedHi = HiReg;
2563   } else {
2564     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2565     MaskedHi = MRI->createVirtualRegister(&RegRC);
2566 
2567     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2568       .addReg(MaskReg, 0, AMDGPU::sub1);
2569     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2570       .addReg(HiReg)
2571       .addReg(MaskHi);
2572   }
2573 
2574   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2575     .addReg(MaskedLo)
2576     .addImm(AMDGPU::sub0)
2577     .addReg(MaskedHi)
2578     .addImm(AMDGPU::sub1);
2579   I.eraseFromParent();
2580   return true;
2581 }
2582 
2583 /// Return the register to use for the index value, and the subregister to use
2584 /// for the indirectly accessed register.
2585 static std::pair<Register, unsigned>
2586 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2587                         const SIRegisterInfo &TRI,
2588                         const TargetRegisterClass *SuperRC,
2589                         Register IdxReg,
2590                         unsigned EltSize) {
2591   Register IdxBaseReg;
2592   int Offset;
2593 
2594   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2595   if (IdxBaseReg == AMDGPU::NoRegister) {
2596     // This will happen if the index is a known constant. This should ordinarily
2597     // be legalized out, but handle it as a register just in case.
2598     assert(Offset == 0);
2599     IdxBaseReg = IdxReg;
2600   }
2601 
2602   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2603 
2604   // Skip out of bounds offsets, or else we would end up using an undefined
2605   // register.
2606   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2607     return std::make_pair(IdxReg, SubRegs[0]);
2608   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2609 }
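// For example, splitting a 128-bit super-register with EltSize = 4 yields
// {sub0, sub1, sub2, sub3}; an index of %base + 2 (with %base a hypothetical
// variable index register) then returns the pair (%base, sub2).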
2610 
2611 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2612   MachineInstr &MI) const {
2613   Register DstReg = MI.getOperand(0).getReg();
2614   Register SrcReg = MI.getOperand(1).getReg();
2615   Register IdxReg = MI.getOperand(2).getReg();
2616 
2617   LLT DstTy = MRI->getType(DstReg);
2618   LLT SrcTy = MRI->getType(SrcReg);
2619 
2620   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2621   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2622   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2623 
2624   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2625   // into a waterfall loop.
2626   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2627     return false;
2628 
2629   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2630                                                                   *MRI);
2631   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2632                                                                   *MRI);
2633   if (!SrcRC || !DstRC)
2634     return false;
2635   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2636       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2637       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2638     return false;
2639 
2640   MachineBasicBlock *BB = MI.getParent();
2641   const DebugLoc &DL = MI.getDebugLoc();
2642   const bool Is64 = DstTy.getSizeInBits() == 64;
2643 
2644   unsigned SubReg;
2645   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2646                                                      DstTy.getSizeInBits() / 8);
2647 
2648   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2649     if (DstTy.getSizeInBits() != 32 && !Is64)
2650       return false;
2651 
2652     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2653       .addReg(IdxReg);
2654 
2655     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2656     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2657       .addReg(SrcReg, 0, SubReg)
2658       .addReg(SrcReg, RegState::Implicit);
2659     MI.eraseFromParent();
2660     return true;
2661   }
2662 
2663   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2664     return false;
2665 
2666   if (!STI.useVGPRIndexMode()) {
2667     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2668       .addReg(IdxReg);
2669     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2670       .addReg(SrcReg, 0, SubReg)
2671       .addReg(SrcReg, RegState::Implicit);
2672     MI.eraseFromParent();
2673     return true;
2674   }
2675 
2676   const MCInstrDesc &GPRIDXDesc =
2677       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2678   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2679       .addReg(SrcReg)
2680       .addReg(IdxReg)
2681       .addImm(SubReg);
2682 
2683   MI.eraseFromParent();
2684   return true;
2685 }
2686 
2687 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2688 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2689   MachineInstr &MI) const {
2690   Register DstReg = MI.getOperand(0).getReg();
2691   Register VecReg = MI.getOperand(1).getReg();
2692   Register ValReg = MI.getOperand(2).getReg();
2693   Register IdxReg = MI.getOperand(3).getReg();
2694 
2695   LLT VecTy = MRI->getType(DstReg);
2696   LLT ValTy = MRI->getType(ValReg);
2697   unsigned VecSize = VecTy.getSizeInBits();
2698   unsigned ValSize = ValTy.getSizeInBits();
2699 
2700   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2701   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2702   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2703 
2704   assert(VecTy.getElementType() == ValTy);
2705 
2706   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2707   // into a waterfall loop.
2708   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2709     return false;
2710 
2711   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2712                                                                   *MRI);
2713   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2714                                                                   *MRI);
2715 
2716   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2717       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2718       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2719       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2720     return false;
2721 
2722   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2723     return false;
2724 
2725   unsigned SubReg;
2726   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2727                                                      ValSize / 8);
2728 
2729   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2730                          STI.useVGPRIndexMode();
2731 
2732   MachineBasicBlock *BB = MI.getParent();
2733   const DebugLoc &DL = MI.getDebugLoc();
2734 
2735   if (!IndexMode) {
2736     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2737       .addReg(IdxReg);
2738 
2739     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2740         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2741     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2742         .addReg(VecReg)
2743         .addReg(ValReg)
2744         .addImm(SubReg);
2745     MI.eraseFromParent();
2746     return true;
2747   }
2748 
2749   const MCInstrDesc &GPRIDXDesc =
2750       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2751   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2752       .addReg(VecReg)
2753       .addReg(ValReg)
2754       .addReg(IdxReg)
2755       .addImm(SubReg);
2756 
2757   MI.eraseFromParent();
2758   return true;
2759 }
2760 
2761 static bool isZeroOrUndef(int X) {
2762   return X == 0 || X == -1;
2763 }
2764 
2765 static bool isOneOrUndef(int X) {
2766   return X == 1 || X == -1;
2767 }
2768 
2769 static bool isZeroOrOneOrUndef(int X) {
2770   return X == 0 || X == 1 || X == -1;
2771 }
2772 
2773 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2774 // 32-bit register.
2775 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2776                                    ArrayRef<int> Mask) {
2777   NewMask[0] = Mask[0];
2778   NewMask[1] = Mask[1];
2779   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2780     return Src0;
2781 
2782   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2783   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2784 
2785   // Shift the mask inputs to be 0/1.
2786   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2787   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2788   return Src1;
2789 }
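// Worked example (illustrative): Mask = <3, 2> reads only Src1, so it is
// rebased to NewMask = <1, 0> and Src1 is returned; Mask = <0, -1> already
// refers to Src0 and is returned as-is.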
2790 
2791 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2792 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2793   MachineInstr &MI) const {
2794   Register DstReg = MI.getOperand(0).getReg();
2795   Register Src0Reg = MI.getOperand(1).getReg();
2796   Register Src1Reg = MI.getOperand(2).getReg();
2797   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2798 
2799   const LLT V2S16 = LLT::vector(2, 16);
2800   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2801     return false;
2802 
2803   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2804     return false;
2805 
2806   assert(ShufMask.size() == 2);
2807   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2808 
2809   MachineBasicBlock *MBB = MI.getParent();
2810   const DebugLoc &DL = MI.getDebugLoc();
2811 
2812   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2813   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2814   const TargetRegisterClass &RC = IsVALU ?
2815     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2816 
2817   // Handle the degenerate case which should have been folded out.
2818   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2819     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2820 
2821     MI.eraseFromParent();
2822     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2823   }
2824 
2825   // A legal VOP3P mask only reads one of the sources.
2826   int Mask[2];
2827   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2828 
2829   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2830       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2831     return false;
2832 
2833   // TODO: This also should have been folded out
2834   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2835     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2836       .addReg(SrcVec);
2837 
2838     MI.eraseFromParent();
2839     return true;
2840   }
2841 
2842   if (Mask[0] == 1 && Mask[1] == -1) {
2843     if (IsVALU) {
2844       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2845         .addImm(16)
2846         .addReg(SrcVec);
2847     } else {
2848       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2849         .addReg(SrcVec)
2850         .addImm(16);
2851     }
2852   } else if (Mask[0] == -1 && Mask[1] == 0) {
2853     if (IsVALU) {
2854       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2855         .addImm(16)
2856         .addReg(SrcVec);
2857     } else {
2858       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2859         .addReg(SrcVec)
2860         .addImm(16);
2861     }
2862   } else if (Mask[0] == 0 && Mask[1] == 0) {
2863     if (IsVALU) {
2864       // Write low half of the register into the high half.
2865       MachineInstr *MovSDWA =
2866         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2867         .addImm(0)                             // $src0_modifiers
2868         .addReg(SrcVec)                        // $src0
2869         .addImm(0)                             // $clamp
2870         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2871         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2872         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2873         .addReg(SrcVec, RegState::Implicit);
2874       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2875     } else {
2876       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2877         .addReg(SrcVec)
2878         .addReg(SrcVec);
2879     }
2880   } else if (Mask[0] == 1 && Mask[1] == 1) {
2881     if (IsVALU) {
2882       // Write high half of the register into the low half.
2883       MachineInstr *MovSDWA =
2884         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2885         .addImm(0)                             // $src0_modifiers
2886         .addReg(SrcVec)                        // $src0
2887         .addImm(0)                             // $clamp
2888         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2889         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2890         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2891         .addReg(SrcVec, RegState::Implicit);
2892       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2893     } else {
2894       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2895         .addReg(SrcVec)
2896         .addReg(SrcVec);
2897     }
2898   } else if (Mask[0] == 1 && Mask[1] == 0) {
2899     if (IsVALU) {
2900       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2901         .addReg(SrcVec)
2902         .addReg(SrcVec)
2903         .addImm(16);
2904     } else {
2905       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2906       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2907         .addReg(SrcVec)
2908         .addImm(16);
2909       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2910         .addReg(TmpReg)
2911         .addReg(SrcVec);
2912     }
2913   } else
2914     llvm_unreachable("all shuffle masks should be handled");
2915 
2916   MI.eraseFromParent();
2917   return true;
2918 }
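// Summary of the v2s16 masks handled above (VALU forms; illustrative):
//   <1, -1> -> V_LSHRREV_B32 16, src       (high half to low)
//   <-1, 0> -> V_LSHLREV_B32 16, src       (low half to high)
//   <0, 0>  -> V_MOV_B32_sdwa              (broadcast low half)
//   <1, 1>  -> V_MOV_B32_sdwa              (broadcast high half)
//   <1, 0>  -> V_ALIGNBIT_B32 src, src, 16 (swap halves)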
2919 
2920 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2921   MachineInstr &MI) const {
2922   if (STI.hasGFX90AInsts())
2923     return selectImpl(MI, *CoverageInfo);
2924 
2925   MachineBasicBlock *MBB = MI.getParent();
2926   const DebugLoc &DL = MI.getDebugLoc();
2927 
2928   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2929     Function &F = MBB->getParent()->getFunction();
2930     DiagnosticInfoUnsupported
2931       NoFpRet(F, "return versions of fp atomics not supported",
2932               MI.getDebugLoc(), DS_Error);
2933     F.getContext().diagnose(NoFpRet);
2934     return false;
2935   }
2936 
2937   // FIXME: This is only needed because tablegen requires the number of dst
2938   // operands in the match and replace patterns to be the same. Otherwise these
2939   // patterns could be exported from the SDag path.
2940   MachineOperand &VDataIn = MI.getOperand(1);
2941   MachineOperand &VIndex = MI.getOperand(3);
2942   MachineOperand &VOffset = MI.getOperand(4);
2943   MachineOperand &SOffset = MI.getOperand(5);
2944   int16_t Offset = MI.getOperand(6).getImm();
2945 
2946   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2947   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2948 
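  // Addressing-mode summary (illustrative): vindex and voffset both present
  // -> BOTHEN, only voffset -> OFFEN, only vindex -> IDXEN, neither -> OFFSET.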
2949   unsigned Opcode;
2950   if (HasVOffset) {
2951     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2952                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2953   } else {
2954     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2955                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2956   }
2957 
2958   if (MRI->getType(VDataIn.getReg()).isVector()) {
2959     switch (Opcode) {
2960     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2961       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2962       break;
2963     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2964       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2965       break;
2966     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
2967       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
2968       break;
2969     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
2970       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
2971       break;
2972     }
2973   }
2974 
2975   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
2976   I.add(VDataIn);
2977 
2978   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
2979       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
2980     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
2981     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
2982       .addReg(VIndex.getReg())
2983       .addImm(AMDGPU::sub0)
2984       .addReg(VOffset.getReg())
2985       .addImm(AMDGPU::sub1);
2986 
2987     I.addReg(IdxReg);
2988   } else if (HasVIndex) {
2989     I.add(VIndex);
2990   } else if (HasVOffset) {
2991     I.add(VOffset);
2992   }
2993 
2994   I.add(MI.getOperand(2)); // rsrc
2995   I.add(SOffset);
2996   I.addImm(Offset);
2997   I.addImm(MI.getOperand(7).getImm()); // cpol
2998   I.cloneMemRefs(MI);
2999 
3000   MI.eraseFromParent();
3001 
3002   return true;
3003 }
3004 
3005 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3006   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3007 
3008   if (STI.hasGFX90AInsts()) {
3009     // gfx90a adds return versions of the global atomic fadd instructions,
3010     // so no special handling is required.
3011     return selectImpl(MI, *CoverageInfo);
3012   }
3013 
3014   MachineBasicBlock *MBB = MI.getParent();
3015   const DebugLoc &DL = MI.getDebugLoc();
3016 
3017   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3018     Function &F = MBB->getParent()->getFunction();
3019     DiagnosticInfoUnsupported
3020       NoFpRet(F, "return versions of fp atomics not supported",
3021               MI.getDebugLoc(), DS_Error);
3022     F.getContext().diagnose(NoFpRet);
3023     return false;
3024   }
3025 
3026   // FIXME: This is only needed because tablegen requires the number of dst
3027   // operands in the match and replace patterns to be the same. Otherwise these
3028   // patterns could be exported from the SDag path.
3029   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3030 
3031   Register Data = DataOp.getReg();
3032   const unsigned Opc = MRI->getType(Data).isVector() ?
3033     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3034   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3035     .addReg(Addr.first)
3036     .addReg(Data)
3037     .addImm(Addr.second)
3038     .addImm(0) // cpol
3039     .cloneMemRefs(MI);
3040 
3041   MI.eraseFromParent();
3042   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3043 }
3044 
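// Note: the BVH intersect-ray pseudo carries the concrete target opcode as
// an immediate in operand 1; selection below just installs that descriptor
// and drops the operand.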
3045 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3046   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3047   MI.RemoveOperand(1);
3048   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3049   return true;
3050 }
3051 
3052 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3053   if (I.isPHI())
3054     return selectPHI(I);
3055 
3056   if (!I.isPreISelOpcode()) {
3057     if (I.isCopy())
3058       return selectCOPY(I);
3059     return true;
3060   }
3061 
3062   switch (I.getOpcode()) {
3063   case TargetOpcode::G_AND:
3064   case TargetOpcode::G_OR:
3065   case TargetOpcode::G_XOR:
3066     if (selectImpl(I, *CoverageInfo))
3067       return true;
3068     return selectG_AND_OR_XOR(I);
3069   case TargetOpcode::G_ADD:
3070   case TargetOpcode::G_SUB:
3071     if (selectImpl(I, *CoverageInfo))
3072       return true;
3073     return selectG_ADD_SUB(I);
3074   case TargetOpcode::G_UADDO:
3075   case TargetOpcode::G_USUBO:
3076   case TargetOpcode::G_UADDE:
3077   case TargetOpcode::G_USUBE:
3078     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3079   case TargetOpcode::G_INTTOPTR:
3080   case TargetOpcode::G_BITCAST:
3081   case TargetOpcode::G_PTRTOINT:
3082     return selectCOPY(I);
3083   case TargetOpcode::G_CONSTANT:
3084   case TargetOpcode::G_FCONSTANT:
3085     return selectG_CONSTANT(I);
3086   case TargetOpcode::G_FNEG:
3087     if (selectImpl(I, *CoverageInfo))
3088       return true;
3089     return selectG_FNEG(I);
3090   case TargetOpcode::G_FABS:
3091     if (selectImpl(I, *CoverageInfo))
3092       return true;
3093     return selectG_FABS(I);
3094   case TargetOpcode::G_EXTRACT:
3095     return selectG_EXTRACT(I);
3096   case TargetOpcode::G_MERGE_VALUES:
3097   case TargetOpcode::G_BUILD_VECTOR:
3098   case TargetOpcode::G_CONCAT_VECTORS:
3099     return selectG_MERGE_VALUES(I);
3100   case TargetOpcode::G_UNMERGE_VALUES:
3101     return selectG_UNMERGE_VALUES(I);
3102   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3103     return selectG_BUILD_VECTOR_TRUNC(I);
3104   case TargetOpcode::G_PTR_ADD:
3105     return selectG_PTR_ADD(I);
3106   case TargetOpcode::G_IMPLICIT_DEF:
3107     return selectG_IMPLICIT_DEF(I);
3108   case TargetOpcode::G_FREEZE:
3109     return selectCOPY(I);
3110   case TargetOpcode::G_INSERT:
3111     return selectG_INSERT(I);
3112   case TargetOpcode::G_INTRINSIC:
3113     return selectG_INTRINSIC(I);
3114   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3115     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3116   case TargetOpcode::G_ICMP:
3117     if (selectG_ICMP(I))
3118       return true;
3119     return selectImpl(I, *CoverageInfo);
3120   case TargetOpcode::G_LOAD:
3121   case TargetOpcode::G_STORE:
3122   case TargetOpcode::G_ATOMIC_CMPXCHG:
3123   case TargetOpcode::G_ATOMICRMW_XCHG:
3124   case TargetOpcode::G_ATOMICRMW_ADD:
3125   case TargetOpcode::G_ATOMICRMW_SUB:
3126   case TargetOpcode::G_ATOMICRMW_AND:
3127   case TargetOpcode::G_ATOMICRMW_OR:
3128   case TargetOpcode::G_ATOMICRMW_XOR:
3129   case TargetOpcode::G_ATOMICRMW_MIN:
3130   case TargetOpcode::G_ATOMICRMW_MAX:
3131   case TargetOpcode::G_ATOMICRMW_UMIN:
3132   case TargetOpcode::G_ATOMICRMW_UMAX:
3133   case TargetOpcode::G_ATOMICRMW_FADD:
3134   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3135   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3136   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3137   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3138     return selectG_LOAD_STORE_ATOMICRMW(I);
3139   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3140     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3141   case TargetOpcode::G_SELECT:
3142     return selectG_SELECT(I);
3143   case TargetOpcode::G_TRUNC:
3144     return selectG_TRUNC(I);
3145   case TargetOpcode::G_SEXT:
3146   case TargetOpcode::G_ZEXT:
3147   case TargetOpcode::G_ANYEXT:
3148   case TargetOpcode::G_SEXT_INREG:
3149     if (selectImpl(I, *CoverageInfo))
3150       return true;
3151     return selectG_SZA_EXT(I);
3152   case TargetOpcode::G_BRCOND:
3153     return selectG_BRCOND(I);
3154   case TargetOpcode::G_GLOBAL_VALUE:
3155     return selectG_GLOBAL_VALUE(I);
3156   case TargetOpcode::G_PTRMASK:
3157     return selectG_PTRMASK(I);
3158   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3159     return selectG_EXTRACT_VECTOR_ELT(I);
3160   case TargetOpcode::G_INSERT_VECTOR_ELT:
3161     return selectG_INSERT_VECTOR_ELT(I);
3162   case TargetOpcode::G_SHUFFLE_VECTOR:
3163     return selectG_SHUFFLE_VECTOR(I);
3164   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3165   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3166     const AMDGPU::ImageDimIntrinsicInfo *Intr
3167       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3168     assert(Intr && "not an image intrinsic with image pseudo");
3169     return selectImageIntrinsic(I, Intr);
3170   }
3171   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3172     return selectBVHIntrinsic(I);
3173   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3174     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3175   default:
3176     return selectImpl(I, *CoverageInfo);
3177   }
3178   return false;
3179 }
3180 
3181 InstructionSelector::ComplexRendererFns
3182 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3183   return {{
3184       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3185   }};
3186 
3187 }
3188 
3189 std::pair<Register, unsigned>
3190 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3191                                               bool AllowAbs) const {
3192   Register Src = Root.getReg();
3193   Register OrigSrc = Src;
3194   unsigned Mods = 0;
3195   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3196 
3197   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3198     Src = MI->getOperand(1).getReg();
3199     Mods |= SISrcMods::NEG;
3200     MI = getDefIgnoringCopies(Src, *MRI);
3201   }
3202 
3203   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3204     Src = MI->getOperand(1).getReg();
3205     Mods |= SISrcMods::ABS;
3206   }
3207 
3208   if (Mods != 0 &&
3209       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3210     MachineInstr *UseMI = Root.getParent();
3211 
3212     // If we looked through copies to find source modifiers on an SGPR operand,
3213     // we now have an SGPR register source. To avoid potentially violating the
3214     // constant bus restriction, we need to insert a copy to a VGPR.
3215     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3216     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3217             TII.get(AMDGPU::COPY), VGPRSrc)
3218       .addReg(Src);
3219     Src = VGPRSrc;
3220   }
3221 
3222   return std::make_pair(Src, Mods);
3223 }
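// Illustrative example: selecting %n = G_FNEG %x as a VOP3 source yields
// (Src = %x, Mods = NEG); with AllowAbs, %n = G_FNEG (G_FABS %x) folds to
// (Src = %x, Mods = NEG | ABS).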
3224 
3225 ///
3226 /// This will select either an SGPR or VGPR operand and will save us from
3227 /// having to write an extra tablegen pattern.
3228 InstructionSelector::ComplexRendererFns
3229 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3230   return {{
3231       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3232   }};
3233 }
3234 
3235 InstructionSelector::ComplexRendererFns
3236 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3237   Register Src;
3238   unsigned Mods;
3239   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3240 
3241   return {{
3242       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3243       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3244       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3245       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3246   }};
3247 }
3248 
3249 InstructionSelector::ComplexRendererFns
3250 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3251   Register Src;
3252   unsigned Mods;
3253   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3254 
3255   return {{
3256       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3257       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3258       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3259       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3260   }};
3261 }
3262 
3263 InstructionSelector::ComplexRendererFns
3264 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3265   return {{
3266       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3267       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3268       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3269   }};
3270 }
3271 
3272 InstructionSelector::ComplexRendererFns
3273 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3274   Register Src;
3275   unsigned Mods;
3276   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3277 
3278   return {{
3279       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3280       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3281   }};
3282 }
3283 
3284 InstructionSelector::ComplexRendererFns
3285 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3286   Register Src;
3287   unsigned Mods;
3288   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3289 
3290   return {{
3291       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3292       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3293   }};
3294 }
3295 
3296 InstructionSelector::ComplexRendererFns
3297 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3298   Register Reg = Root.getReg();
3299   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3300   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3301               Def->getOpcode() == AMDGPU::G_FABS))
3302     return {};
3303   return {{
3304       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3305   }};
3306 }
3307 
3308 std::pair<Register, unsigned>
3309 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3310   Register Src, const MachineRegisterInfo &MRI) const {
3311   unsigned Mods = 0;
3312   MachineInstr *MI = MRI.getVRegDef(Src);
3313 
3314   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3315       // It's possible to see an f32 fneg here, but unlikely.
3316       // TODO: Treat f32 fneg as only high bit.
3317       MRI.getType(Src) == LLT::vector(2, 16)) {
3318     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3319     Src = MI->getOperand(1).getReg();
3320     MI = MRI.getVRegDef(Src);
3321   }
3322 
3323   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3324 
3325   // Packed instructions do not have abs modifiers.
3326   Mods |= SISrcMods::OP_SEL_1;
3327 
3328   return std::make_pair(Src, Mods);
3329 }
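// Illustrative example: a <2 x s16> G_FNEG source folds to
// NEG | NEG_HI | OP_SEL_1; any other source just gets the default OP_SEL_1.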
3330 
3331 InstructionSelector::ComplexRendererFns
3332 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3333   MachineRegisterInfo &MRI
3334     = Root.getParent()->getParent()->getParent()->getRegInfo();
3335 
3336   Register Src;
3337   unsigned Mods;
3338   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3339 
3340   return {{
3341       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3342       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3343   }};
3344 }
3345 
3346 InstructionSelector::ComplexRendererFns
3347 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3348   Register Src;
3349   unsigned Mods;
3350   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3351   if (!isKnownNeverNaN(Src, *MRI))
3352     return None;
3353 
3354   return {{
3355       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3356       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3357   }};
3358 }
3359 
3360 InstructionSelector::ComplexRendererFns
3361 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3362   // FIXME: Handle op_sel
3363   return {{
3364       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3365       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3366   }};
3367 }
3368 
3369 InstructionSelector::ComplexRendererFns
3370 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3371   SmallVector<GEPInfo, 4> AddrInfo;
3372   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3373 
3374   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3375     return None;
3376 
3377   const GEPInfo &GEPInfo = AddrInfo[0];
3378   Optional<int64_t> EncodedImm =
3379       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3380   if (!EncodedImm)
3381     return None;
3382 
3383   unsigned PtrReg = GEPInfo.SgprParts[0];
3384   return {{
3385     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3386     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3387   }};
3388 }
3389 
3390 InstructionSelector::ComplexRendererFns
3391 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3392   SmallVector<GEPInfo, 4> AddrInfo;
3393   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3394 
3395   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3396     return None;
3397 
3398   const GEPInfo &GEPInfo = AddrInfo[0];
3399   Register PtrReg = GEPInfo.SgprParts[0];
3400   Optional<int64_t> EncodedImm =
3401       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3402   if (!EncodedImm)
3403     return None;
3404 
3405   return {{
3406     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3407     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3408   }};
3409 }
3410 
3411 InstructionSelector::ComplexRendererFns
3412 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3413   MachineInstr *MI = Root.getParent();
3414   MachineBasicBlock *MBB = MI->getParent();
3415 
3416   SmallVector<GEPInfo, 4> AddrInfo;
3417   getAddrModeInfo(*MI, *MRI, AddrInfo);
3418 
3419   // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
3420   // then we can select all ptr + 32-bit offsets, not just immediate offsets.
3421   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3422     return None;
3423 
3424   const GEPInfo &GEPInfo = AddrInfo[0];
3425   // SGPR offset is unsigned.
3426   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3427     return None;
3428 
3429   // If we make it this far we have a load with a 32-bit immediate offset.
3430   // It is OK to select this using a sgpr offset, because we have already
3431   // failed trying to select this load into one of the _IMM variants since
3432   // the _IMM Patterns are considered before the _SGPR patterns.
3433   Register PtrReg = GEPInfo.SgprParts[0];
3434   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3435   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3436           .addImm(GEPInfo.Imm);
3437   return {{
3438     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3439     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3440   }};
3441 }
3442 
3443 std::pair<Register, int>
3444 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3445                                                 uint64_t FlatVariant) const {
3446   MachineInstr *MI = Root.getParent();
3447 
3448   auto Default = std::make_pair(Root.getReg(), 0);
3449 
3450   if (!STI.hasFlatInstOffsets())
3451     return Default;
3452 
3453   Register PtrBase;
3454   int64_t ConstOffset;
3455   std::tie(PtrBase, ConstOffset) =
3456       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3457   if (ConstOffset == 0)
3458     return Default;
3459 
3460   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3461   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3462     return Default;
3463 
3464   return std::make_pair(PtrBase, ConstOffset);
3465 }
3466 
3467 InstructionSelector::ComplexRendererFns
3468 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3469   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3470 
3471   return {{
3472       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3473       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3474     }};
3475 }
3476 
3477 InstructionSelector::ComplexRendererFns
3478 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3479   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3480 
3481   return {{
3482       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3483       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3484   }};
3485 }
3486 
3487 InstructionSelector::ComplexRendererFns
3488 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3489   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3490 
3491   return {{
3492       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3493       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3494     }};
3495 }
3496 
3497 /// Match a zero extend from a 32-bit value to 64 bits.
3498 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3499   Register ZExtSrc;
3500   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3501     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3502 
3503   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3504   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3505   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3506     return Register();
3507 
3508   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3509     return Def->getOperand(1).getReg();
3510   }
3511 
3512   return Register();
3513 }
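// e.g. both %z:_(s64) = G_ZEXT %x:_(s32) and the legalized form
// %z:_(s64) = G_MERGE_VALUES %x:_(s32), 0 return %x here (illustrative).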
3514 
3515 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3516 InstructionSelector::ComplexRendererFns
3517 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3518   Register Addr = Root.getReg();
3519   Register PtrBase;
3520   int64_t ConstOffset;
3521   int64_t ImmOffset = 0;
3522 
3523   // Match the immediate offset first, which canonically is moved as low as
3524   // possible.
3525   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3526 
3527   if (ConstOffset != 0) {
3528     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3529                               SIInstrFlags::FlatGlobal)) {
3530       Addr = PtrBase;
3531       ImmOffset = ConstOffset;
3532     } else {
3533       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3534       if (!PtrBaseDef)
3535         return None;
3536 
3537       if (isSGPR(PtrBaseDef->Reg)) {
3538         if (ConstOffset > 0) {
3539           // Offset is too large.
3540           //
3541           // saddr + large_offset -> saddr +
3542           //                         (voffset = large_offset & ~MaxOffset) +
3543           //                         (large_offset & MaxOffset);
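          //
          // e.g. (illustrative, assuming a 12-bit immediate field so
          // MaxOffset = 0xfff): ConstOffset = 0x11234 keeps 0x234 as the
          // immediate and materializes 0x11000 into a VGPR for voffset.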
3544           int64_t SplitImmOffset, RemainderOffset;
3545           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3546               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3547 
3548           if (isUInt<32>(RemainderOffset)) {
3549             MachineInstr *MI = Root.getParent();
3550             MachineBasicBlock *MBB = MI->getParent();
3551             Register HighBits =
3552                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3553 
3554             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3555                     HighBits)
3556                 .addImm(RemainderOffset);
3557 
3558             return {{
3559                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3560                 [=](MachineInstrBuilder &MIB) {
3561                   MIB.addReg(HighBits);
3562                 }, // voffset
3563                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3564             }};
3565           }
3566         }
3567 
3568         // We are adding a 64-bit SGPR and a constant. If the constant bus
3569         // limit is 1, we would need 1 or 2 extra moves for each half of the
3570         // constant, so it is better to do a scalar add and then issue a single
3571         // VALU instruction to materialize zero. Otherwise it takes fewer
3572         // instructions to perform VALU adds with immediates or inline literals.
3573         unsigned NumLiterals =
3574             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3575             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3576         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3577           return None;
3578       }
3579     }
3580   }
3581 
3582   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3583   if (!AddrDef)
3584     return None;
3585 
3586   // Match the variable offset.
3587   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3588     // Look through the SGPR->VGPR copy.
3589     Register SAddr =
3590         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3591 
3592     if (SAddr && isSGPR(SAddr)) {
3593       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3594 
3595       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3596       // inserted later.
3597       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3598         return {{[=](MachineInstrBuilder &MIB) { // saddr
3599                    MIB.addReg(SAddr);
3600                  },
3601                  [=](MachineInstrBuilder &MIB) { // voffset
3602                    MIB.addReg(VOffset);
3603                  },
3604                  [=](MachineInstrBuilder &MIB) { // offset
3605                    MIB.addImm(ImmOffset);
3606                  }}};
3607       }
3608     }
3609   }
3610 
3611   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3612   // drop this.
3613   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3614       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3615     return None;
3616 
3617   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3618   // moves required to copy a 64-bit SGPR to VGPR.
3619   MachineInstr *MI = Root.getParent();
3620   MachineBasicBlock *MBB = MI->getParent();
3621   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3622 
3623   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3624       .addImm(0);
3625 
3626   return {{
3627       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3628       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3629       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3630   }};
3631 }
3632 
3633 InstructionSelector::ComplexRendererFns
3634 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3635   Register Addr = Root.getReg();
3636   Register PtrBase;
3637   int64_t ConstOffset;
3638   int64_t ImmOffset = 0;
3639 
3640   // Match the immediate offset first, which canonically is moved as low as
3641   // possible.
3642   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3643 
3644   if (ConstOffset != 0 &&
3645       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3646                             SIInstrFlags::FlatScratch)) {
3647     Addr = PtrBase;
3648     ImmOffset = ConstOffset;
3649   }
3650 
3651   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3652   if (!AddrDef)
3653     return None;
3654 
3655   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3656     int FI = AddrDef->MI->getOperand(1).getIndex();
3657     return {{
3658         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3659         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3660     }};
3661   }
3662 
3663   Register SAddr = AddrDef->Reg;
3664 
3665   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3666     Register LHS = AddrDef->MI->getOperand(1).getReg();
3667     Register RHS = AddrDef->MI->getOperand(2).getReg();
3668     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3669     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3670 
3671     if (LHSDef && RHSDef &&
3672         LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3673         isSGPR(RHSDef->Reg)) {
3674       int FI = LHSDef->MI->getOperand(1).getIndex();
3675       MachineInstr &I = *Root.getParent();
3676       MachineBasicBlock *BB = I.getParent();
3677       const DebugLoc &DL = I.getDebugLoc();
3678       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3679 
3680       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), SAddr)
3681         .addFrameIndex(FI)
3682         .addReg(RHSDef->Reg);
3683     }
3684   }
3685 
3686   if (!isSGPR(SAddr))
3687     return None;
3688 
3689   return {{
3690       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3691       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3692   }};
3693 }
3694 
3695 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
3696   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
3697   return PSV && PSV->isStack();
3698 }
3699 
3700 InstructionSelector::ComplexRendererFns
3701 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3702   MachineInstr *MI = Root.getParent();
3703   MachineBasicBlock *MBB = MI->getParent();
3704   MachineFunction *MF = MBB->getParent();
3705   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3706 
3707   int64_t Offset = 0;
3708   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3709       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3710     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3711 
3712     // TODO: Should this be inside the render function? The iterator seems to
3713     // move.
3714     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3715             HighBits)
3716       .addImm(Offset & ~4095);
3717 
3718     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3719                MIB.addReg(Info->getScratchRSrcReg());
3720              },
3721              [=](MachineInstrBuilder &MIB) { // vaddr
3722                MIB.addReg(HighBits);
3723              },
3724              [=](MachineInstrBuilder &MIB) { // soffset
3725                // Use constant zero for soffset and rely on eliminateFrameIndex
3726                // to choose the appropriate frame register if need be.
3727                MIB.addImm(0);
3728              },
3729              [=](MachineInstrBuilder &MIB) { // offset
3730                MIB.addImm(Offset & 4095);
3731              }}};
3732   }
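  // e.g. (illustrative) the branch above turns a constant private address of
  // 0x1234 into V_MOV_B32 0x1000 for vaddr plus 0x234 in the 12-bit
  // immediate offset field.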
3733 
3734   assert(Offset == 0 || Offset == -1);
3735 
3736   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3737   // offsets.
3738   Optional<int> FI;
3739   Register VAddr = Root.getReg();
3740   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3741     Register PtrBase;
3742     int64_t ConstOffset;
3743     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
3744     if (ConstOffset != 0) {
3745       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
3746           (!STI.privateMemoryResourceIsRangeChecked() ||
3747            KnownBits->signBitIsZero(PtrBase))) {
3748         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
3749         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3750           FI = PtrBaseDef->getOperand(1).getIndex();
3751         else
3752           VAddr = PtrBase;
3753         Offset = ConstOffset;
3754       }
3755     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3756       FI = RootDef->getOperand(1).getIndex();
3757     }
3758   }
3759 
3760   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3761              MIB.addReg(Info->getScratchRSrcReg());
3762            },
3763            [=](MachineInstrBuilder &MIB) { // vaddr
3764              if (FI.hasValue())
3765                MIB.addFrameIndex(FI.getValue());
3766              else
3767                MIB.addReg(VAddr);
3768            },
3769            [=](MachineInstrBuilder &MIB) { // soffset
3770              // Use constant zero for soffset and rely on eliminateFrameIndex
3771              // to choose the appropriate frame register if need be.
3772              MIB.addImm(0);
3773            },
3774            [=](MachineInstrBuilder &MIB) { // offset
3775              MIB.addImm(Offset);
3776            }}};
3777 }
3778 
3779 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3780                                                 int64_t Offset) const {
3781   if (!isUInt<16>(Offset))
3782     return false;
3783 
3784   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3785     return true;
3786 
3787   // On Southern Islands, instructions with a negative base value and an
3788   // offset don't seem to work.
3789   return KnownBits->signBitIsZero(Base);
3790 }
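// e.g. a byte offset of 65535 is representable, while 65536 fails isUInt<16>
// and must remain in the address computation (illustrative).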
3791 
3792 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3793                                                  int64_t Offset1,
3794                                                  unsigned Size) const {
3795   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3796     return false;
3797   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3798     return false;
3799 
3800   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3801     return true;
3802 
3803   // On Southern Islands, instructions with a negative base value and an
3804   // offset don't seem to work.
3805   return KnownBits->signBitIsZero(Base);
3806 }
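// e.g. for Size == 4, byte offsets 8 and 12 encode as offset0 = 2 and
// offset1 = 3; offsets not a multiple of 4, or above 255 * 4 = 1020, are
// rejected (illustrative).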
3807 
3808 InstructionSelector::ComplexRendererFns
3809 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3810     MachineOperand &Root) const {
3811   MachineInstr *MI = Root.getParent();
3812   MachineBasicBlock *MBB = MI->getParent();
3813 
3814   int64_t Offset = 0;
3815   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3816       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3817     return {};
3818 
3819   const MachineFunction *MF = MBB->getParent();
3820   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3821   const MachineMemOperand *MMO = *MI->memoperands_begin();
3822   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3823 
3824   return {{
3825       [=](MachineInstrBuilder &MIB) { // rsrc
3826         MIB.addReg(Info->getScratchRSrcReg());
3827       },
3828       [=](MachineInstrBuilder &MIB) { // soffset
3829         if (isStackPtrRelative(PtrInfo))
3830           MIB.addReg(Info->getStackPtrOffsetReg());
3831         else
3832           MIB.addImm(0);
3833       },
3834       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3835   }};
3836 }
3837 
3838 std::pair<Register, unsigned>
3839 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3840   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3841   if (!RootDef)
3842     return std::make_pair(Root.getReg(), 0);
3843 
3844   int64_t ConstAddr = 0;
3845 
3846   Register PtrBase;
3847   int64_t Offset;
3848   std::tie(PtrBase, Offset) =
3849     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3850 
3851   if (Offset) {
3852     if (isDSOffsetLegal(PtrBase, Offset)) {
3853       // (add n0, c0)
3854       return std::make_pair(PtrBase, Offset);
3855     }
3856   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3857     // TODO
3858 
3859 
3860   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3861     // TODO
3862 
3863   }
3864 
3865   return std::make_pair(Root.getReg(), 0);
3866 }
3867 
3868 InstructionSelector::ComplexRendererFns
3869 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3870   Register Reg;
3871   unsigned Offset;
3872   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3873   return {{
3874       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3875       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3876     }};
3877 }
3878 
3879 InstructionSelector::ComplexRendererFns
3880 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3881   return selectDSReadWrite2(Root, 4);
3882 }
3883 
3884 InstructionSelector::ComplexRendererFns
3885 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3886   return selectDSReadWrite2(Root, 8);
3887 }
3888 
3889 InstructionSelector::ComplexRendererFns
3890 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3891                                               unsigned Size) const {
3892   Register Reg;
3893   unsigned Offset;
3894   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
3895   return {{
3896       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3897       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3898       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3899     }};
3900 }
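// e.g. (illustrative) with Size == 4, an address of base+8 renders
// offset0 = 2 and offset1 = 3, i.e. the two dwords at byte offsets 8 and 12.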
3901 
3902 std::pair<Register, unsigned>
3903 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3904                                                   unsigned Size) const {
3905   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3906   if (!RootDef)
3907     return std::make_pair(Root.getReg(), 0);
3908 
3909   int64_t ConstAddr = 0;
3910 
3911   Register PtrBase;
3912   int64_t Offset;
3913   std::tie(PtrBase, Offset) =
3914     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3915 
3916   if (Offset) {
3917     int64_t OffsetValue0 = Offset;
3918     int64_t OffsetValue1 = Offset + Size;
3919     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
3920       // (add n0, c0)
3921       return std::make_pair(PtrBase, OffsetValue0 / Size);
3922     }
3923   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3924     // TODO
3925 
3926   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3927     // TODO
3928 
3929   }
3930 
3931   return std::make_pair(Root.getReg(), 0);
3932 }
3933 
3934 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3935 /// the base value with the constant offset. There may be intervening copies
3936 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3937 /// not match the pattern.
3938 std::pair<Register, int64_t>
3939 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3940   Register Root, const MachineRegisterInfo &MRI) const {
3941   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
3942   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3943     return {Root, 0};
3944 
3945   MachineOperand &RHS = RootI->getOperand(2);
3946   Optional<ValueAndVReg> MaybeOffset
3947     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3948   if (!MaybeOffset)
3949     return {Root, 0};
3950   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
3951 }
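// e.g. %p = G_PTR_ADD %base, 16 (possibly through intervening copies)
// returns {%base, 16}; anything else returns {Root, 0} (illustrative).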
3952 
3953 static void addZeroImm(MachineInstrBuilder &MIB) {
3954   MIB.addImm(0);
3955 }
3956 
3957 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3958 /// BasePtr is not valid, a null base pointer will be used.
3959 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3960                           uint32_t FormatLo, uint32_t FormatHi,
3961                           Register BasePtr) {
3962   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3963   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3964   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3965   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3966 
3967   B.buildInstr(AMDGPU::S_MOV_B32)
3968     .addDef(RSrc2)
3969     .addImm(FormatLo);
3970   B.buildInstr(AMDGPU::S_MOV_B32)
3971     .addDef(RSrc3)
3972     .addImm(FormatHi);
3973 
3974   // Build the half of the subregister with the constants before building the
3975   // full 128-bit register. If we are building multiple resource descriptors,
3976   // this will allow CSEing of the 2-component register.
3977   B.buildInstr(AMDGPU::REG_SEQUENCE)
3978     .addDef(RSrcHi)
3979     .addReg(RSrc2)
3980     .addImm(AMDGPU::sub0)
3981     .addReg(RSrc3)
3982     .addImm(AMDGPU::sub1);
3983 
3984   Register RSrcLo = BasePtr;
3985   if (!BasePtr) {
3986     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3987     B.buildInstr(AMDGPU::S_MOV_B64)
3988       .addDef(RSrcLo)
3989       .addImm(0);
3990   }
3991 
3992   B.buildInstr(AMDGPU::REG_SEQUENCE)
3993     .addDef(RSrc)
3994     .addReg(RSrcLo)
3995     .addImm(AMDGPU::sub0_sub1)
3996     .addReg(RSrcHi)
3997     .addImm(AMDGPU::sub2_sub3);
3998 
3999   return RSrc;
4000 }
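// The SGPR_128 descriptor built above is laid out as
// { sub0_sub1 = base pointer (or 0), sub2 = FormatLo, sub3 = FormatHi }.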
4001 
4002 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4003                                 const SIInstrInfo &TII, Register BasePtr) {
4004   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4005 
4006   // FIXME: Why are half the "default" bits ignored based on the addressing
4007   // mode?
4008   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4009 }
4010 
4011 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4012                                const SIInstrInfo &TII, Register BasePtr) {
4013   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4014 
4015   // FIXME: Why are half the "default" bits ignored based on the addressing
4016   // mode?
4017   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4018 }
4019 
4020 AMDGPUInstructionSelector::MUBUFAddressData
4021 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4022   MUBUFAddressData Data;
4023   Data.N0 = Src;
4024 
4025   Register PtrBase;
4026   int64_t Offset;
4027 
4028   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4029   if (isUInt<32>(Offset)) {
4030     Data.N0 = PtrBase;
4031     Data.Offset = Offset;
4032   }
4033 
4034   if (MachineInstr *InputAdd
4035       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4036     Data.N2 = InputAdd->getOperand(1).getReg();
4037     Data.N3 = InputAdd->getOperand(2).getReg();
4038 
4039     // FIXME: Need to fix the extra SGPR->VGPR copies that get inserted.
4040     // FIXME: We don't actually know that this was defined by operand 0.
4041     //
4042     // TODO: Remove this when we have copy folding optimizations after
4043     // RegBankSelect.
4044     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4045     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4046   }
4047 
4048   return Data;
4049 }
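// Illustrative decomposition: %a = G_PTR_ADD (G_PTR_ADD %n2, %n3), C with a
// 32-bit C parses as { N0 = inner ptr_add, N2 = %n2, N3 = %n3, Offset = C }.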
4050 
4051 /// Return true if the addr64 MUBUF mode should be used for the given address.
4052 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4053   // (ptr_add N2, N3) -> addr64, or
4054   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4055   if (Addr.N2)
4056     return true;
4057 
4058   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4059   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4060 }
4061 
4062 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4063 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4064 /// component.
4065 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4066   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4067   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4068     return;
4069 
4070   // Illegal offset, store it in soffset.
4071   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4072   B.buildInstr(AMDGPU::S_MOV_B32)
4073     .addDef(SOffset)
4074     .addImm(ImmOffset);
4075   ImmOffset = 0;
4076 }
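// e.g. (illustrative) an offset of 0x12345 exceeds the legal MUBUF
// immediate, so it is moved into soffset via S_MOV_B32 and the immediate
// becomes 0; a small offset such as 0x234 is left untouched.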
4077 
4078 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4079   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4080   Register &SOffset, int64_t &Offset) const {
4081   // FIXME: Predicates should stop this from reaching here.
4082   // addr64 bit was removed for volcanic islands.
4083   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4084     return false;
4085 
4086   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4087   if (!shouldUseAddr64(AddrData))
4088     return false;
4089 
4090   Register N0 = AddrData.N0;
4091   Register N2 = AddrData.N2;
4092   Register N3 = AddrData.N3;
4093   Offset = AddrData.Offset;
4094 
4095   // Base pointer for the SRD.
4096   Register SRDPtr;
4097 
4098   if (N2) {
4099     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4100       assert(N3);
4101       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4102         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4103         // addr64, and construct the default resource from a 0 address.
4104         VAddr = N0;
4105       } else {
4106         SRDPtr = N3;
4107         VAddr = N2;
4108       }
4109     } else {
4110       // N2 is not divergent.
4111       SRDPtr = N2;
4112       VAddr = N3;
4113     }
4114   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4115     // Use the default null pointer in the resource
4116     VAddr = N0;
4117   } else {
4118     // N0 -> offset, or
4119     // (N0 + C1) -> offset
4120     SRDPtr = N0;
4121   }
4122 
4123   MachineIRBuilder B(*Root.getParent());
4124   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4125   splitIllegalMUBUFOffset(B, SOffset, Offset);
4126   return true;
4127 }
4128 
4129 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4130   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4131   int64_t &Offset) const {
4132 
4133   // FIXME: Pattern should not reach here.
4134   if (STI.useFlatForGlobal())
4135     return false;
4136 
4137   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4138   if (shouldUseAddr64(AddrData))
4139     return false;
4140 
4141   // N0 -> offset, or
4142   // (N0 + C1) -> offset
4143   Register SRDPtr = AddrData.N0;
4144   Offset = AddrData.Offset;
4145 
4146   // TODO: Look through extensions for 32-bit soffset.
4147   MachineIRBuilder B(*Root.getParent());
4148 
4149   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4150   splitIllegalMUBUFOffset(B, SOffset, Offset);
4151   return true;
4152 }
4153 
4154 InstructionSelector::ComplexRendererFns
4155 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4156   Register VAddr;
4157   Register RSrcReg;
4158   Register SOffset;
4159   int64_t Offset = 0;
4160 
4161   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4162     return {};
4163 
4164   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4165   // pattern.
4166   return {{
4167       [=](MachineInstrBuilder &MIB) {  // rsrc
4168         MIB.addReg(RSrcReg);
4169       },
4170       [=](MachineInstrBuilder &MIB) { // vaddr
4171         MIB.addReg(VAddr);
4172       },
4173       [=](MachineInstrBuilder &MIB) { // soffset
4174         if (SOffset)
4175           MIB.addReg(SOffset);
4176         else
4177           MIB.addImm(0);
4178       },
4179       [=](MachineInstrBuilder &MIB) { // offset
4180         MIB.addImm(Offset);
4181       },
4182       addZeroImm, //  cpol
4183       addZeroImm, //  tfe
4184       addZeroImm  //  swz
4185     }};
4186 }
4187 
4188 InstructionSelector::ComplexRendererFns
4189 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4190   Register RSrcReg;
4191   Register SOffset;
4192   int64_t Offset = 0;
4193 
4194   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4195     return {};
4196 
4197   return {{
4198       [=](MachineInstrBuilder &MIB) {  // rsrc
4199         MIB.addReg(RSrcReg);
4200       },
4201       [=](MachineInstrBuilder &MIB) { // soffset
4202         if (SOffset)
4203           MIB.addReg(SOffset);
4204         else
4205           MIB.addImm(0);
4206       },
4207       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4208       addZeroImm, //  cpol
4209       addZeroImm, //  tfe
4210       addZeroImm, //  swz
4211     }};
4212 }
4213 
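/// Atomic variant of the addr64 selection. The only difference from the
/// non-atomic path is the cache policy: GLC is set so that a returning
/// atomic reads back the pre-op memory value rather than discarding it.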
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      [=](MachineInstrBuilder &MIB) { // cpol
        MIB.addImm(AMDGPU::CPol::GLC);
      }
    }};
}

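/// Offset-form atomic selection; as with the addr64 atomic above, cpol is
/// forced to GLC so the atomic returns the loaded value.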
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
    }};
}

/// Get an immediate that must fit in 32 bits, treated as zero extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  // getConstantVRegSExtVal sign extends any value, so check that the result
  // still fits in 32 bits.
  Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return None;
  return Lo_32(*OffsetVal);
}

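/// Select an immediate offset for an SMRD buffer load. The value is first
/// normalized to a zero-extended 32-bit constant and then re-encoded for the
/// subtarget; the units and range of the SMRD offset field differ across
/// generations (dword units on the oldest targets, byte units later), which
/// getSMRDEncodedOffset abstracts away.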
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }}};
}

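/// Sea Islands only: SMRD instructions there accept a full 32-bit literal
/// offset, hence the separate encoding helper and the subtarget assert
/// below.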
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }}};
}

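// The render* methods below are custom operand renderers referenced by name
// from the TableGen patterns (via GICustomOperandRenderer). An OpIdx of -1
// by convention means the renderer applies to the matched instruction as a
// whole rather than to a specific operand, which is why most of these assert
// OpIdx == -1 and then read operand 1 of a G_CONSTANT directly.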
void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

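/// Render an FP constant as its raw bit pattern, e.g. a G_FCONSTANT float
/// 1.0 becomes the immediate 0x3f800000; integer G_CONSTANTs pass through
/// unchanged.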
void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

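/// Render the population count of a constant instead of the constant itself,
/// e.g. a G_CONSTANT of 0xF0 is emitted as the immediate 4.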
void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}

/// This only really exists to satisfy the DAG type checking machinery, so it
/// is a no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

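// The next two renderers unpack the combined auxiliary/cache-policy operand
// of the buffer intrinsics: the CPol bits (glc, slc, and friends, masked by
// CPol::ALL) are extracted here, and bit 3 carries the swizzle flag handled
// by renderExtractSWZ.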
void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

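// Inline immediates are operands the hardware encodes directly in the
// instruction rather than as a separate literal dword: small integers
// (roughly -16 to 64) and a handful of FP constants such as 0.5, 1.0, and
// 2.0, plus 1/(2*pi) on subtargets where hasInv2PiInlineImm() is true.
// These predicates back the inline-immediate patterns in TableGen.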
bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}