1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 #include <optional>
30
31 #define DEBUG_TYPE "amdgpu-isel"
32
33 using namespace llvm;
34 using namespace MIPatternMatch;
35
36 static cl::opt<bool> AllowRiskySelect(
37 "amdgpu-global-isel-risky-select",
38 cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
39 cl::init(false),
40 cl::ReallyHidden);
41
42 #define GET_GLOBALISEL_IMPL
43 #define AMDGPUSubtarget GCNSubtarget
44 #include "AMDGPUGenGlobalISel.inc"
45 #undef GET_GLOBALISEL_IMPL
46 #undef AMDGPUSubtarget
47
AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
51 : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
52 STI(STI),
53 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
54 #define GET_GLOBALISEL_PREDICATES_INIT
55 #include "AMDGPUGenGlobalISel.inc"
56 #undef GET_GLOBALISEL_PREDICATES_INIT
57 #define GET_GLOBALISEL_TEMPORARIES_INIT
58 #include "AMDGPUGenGlobalISel.inc"
59 #undef GET_GLOBALISEL_TEMPORARIES_INIT
60 {
61 }
62
const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
64
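// Cache the per-function MachineRegisterInfo and subtarget before handing
// off to the generic InstructionSelector setup.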
void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage &CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {
69 MRI = &MF.getRegInfo();
70 Subtarget = &MF.getSubtarget<GCNSubtarget>();
71 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
72 }
73
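// Return true if Reg should be treated as a wave-wide condition register:
// either it is constrained to the boolean register class (and is not the s1
// result of a G_TRUNC), or it has been assigned to the VCC register bank.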
bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
76 // The verifier is oblivious to s1 being a valid value for wavesize registers.
77 if (Reg.isPhysical())
78 return false;
79
80 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
81 const TargetRegisterClass *RC =
82 RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
83 if (RC) {
84 const LLT Ty = MRI.getType(Reg);
85 if (!Ty.isValid() || Ty.getSizeInBits() != 1)
86 return false;
87 // G_TRUNC s1 result is never vcc.
88 return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
89 RC->hasSuperClassEq(TRI.getBoolRC());
90 }
91
92 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
93 return RB->getID() == AMDGPU::VCCRegBankID;
94 }
95
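// Rewrite a copy-like intrinsic (wqm, softwqm, strict_wwm, strict_wqm) to
// NewOpc: drop the intrinsic ID operand, add an implicit EXEC use, and
// constrain source and destination to a common register class.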
bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
98 MI.setDesc(TII.get(NewOpc));
99 MI.removeOperand(1); // Remove intrinsic ID.
100 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
101
102 MachineOperand &Dst = MI.getOperand(0);
103 MachineOperand &Src = MI.getOperand(1);
104
105 // TODO: This should be legalized to s32 if needed
106 if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
107 return false;
108
109 const TargetRegisterClass *DstRC
110 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
111 const TargetRegisterClass *SrcRC
112 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
113 if (!DstRC || DstRC != SrcRC)
114 return false;
115
116 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
117 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
118 }
119
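// Select a generic COPY. Copies producing a VCC-bank boolean need special
// handling: a constant source becomes an s_mov of the full wave mask (or
// zero), and any other non-boolean source is masked to bit 0 and compared
// against zero so each lane carries a legal wave-wide boolean.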
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
121 const DebugLoc &DL = I.getDebugLoc();
122 MachineBasicBlock *BB = I.getParent();
123 I.setDesc(TII.get(TargetOpcode::COPY));
124
125 const MachineOperand &Src = I.getOperand(1);
126 MachineOperand &Dst = I.getOperand(0);
127 Register DstReg = Dst.getReg();
128 Register SrcReg = Src.getReg();
129
130 if (isVCC(DstReg, *MRI)) {
131 if (SrcReg == AMDGPU::SCC) {
132 const TargetRegisterClass *RC
133 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
134 if (!RC)
135 return true;
136 return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
137 }
138
139 if (!isVCC(SrcReg, *MRI)) {
140 // TODO: Should probably leave the copy and let copyPhysReg expand it.
141 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
142 return false;
143
144 const TargetRegisterClass *SrcRC
145 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
146
147 std::optional<ValueAndVReg> ConstVal =
148 getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
149 if (ConstVal) {
150 unsigned MovOpc =
151 STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
152 BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
153 .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
154 } else {
155 Register MaskedReg = MRI->createVirtualRegister(SrcRC);
156
157 // We can't trust the high bits at this point, so clear them.
158
159 // TODO: Skip masking high bits if def is known boolean.
160
161 unsigned AndOpc =
162 TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
163 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
164 .addImm(1)
165 .addReg(SrcReg);
166 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
167 .addImm(0)
168 .addReg(MaskedReg);
169 }
170
171 if (!MRI->getRegClassOrNull(SrcReg))
172 MRI->setRegClass(SrcReg, SrcRC);
173 I.eraseFromParent();
174 return true;
175 }
176
177 const TargetRegisterClass *RC =
178 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
179 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
180 return false;
181
182 return true;
183 }
184
185 for (const MachineOperand &MO : I.operands()) {
186 if (MO.getReg().isPhysical())
187 continue;
188
189 const TargetRegisterClass *RC =
190 TRI.getConstrainedRegClassForOperand(MO, *MRI);
191 if (!RC)
192 continue;
193 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
194 }
195 return true;
196 }
197
bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
199 const Register DefReg = I.getOperand(0).getReg();
200 const LLT DefTy = MRI->getType(DefReg);
201 if (DefTy == LLT::scalar(1)) {
202 if (!AllowRiskySelect) {
203 LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
204 return false;
205 }
206
207 LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
208 }
209
210 // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
211
212 const RegClassOrRegBank &RegClassOrBank =
213 MRI->getRegClassOrRegBank(DefReg);
214
215 const TargetRegisterClass *DefRC
216 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
217 if (!DefRC) {
218 if (!DefTy.isValid()) {
219 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
220 return false;
221 }
222
223 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
224 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
225 if (!DefRC) {
226 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
227 return false;
228 }
229 }
230
231 // TODO: Verify that all registers have the same bank
232 I.setDesc(TII.get(TargetOpcode::PHI));
233 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
234 }
235
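// Return a 32-bit operand for the low or high half (selected by SubIdx) of a
// 64-bit register or immediate operand, emitting a subregister COPY for the
// register case.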
236 MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {
240
241 MachineInstr *MI = MO.getParent();
242 MachineBasicBlock *BB = MO.getParent()->getParent();
243 Register DstReg = MRI->createVirtualRegister(&SubRC);
244
245 if (MO.isReg()) {
246 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
247 Register Reg = MO.getReg();
248 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
249 .addReg(Reg, 0, ComposedSubIdx);
250
251 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
252 MO.isKill(), MO.isDead(), MO.isUndef(),
253 MO.isEarlyClobber(), 0, MO.isDebug(),
254 MO.isInternalRead());
255 }
256
257 assert(MO.isImm());
258
259 APInt Imm(64, MO.getImm());
260
261 switch (SubIdx) {
262 default:
263 llvm_unreachable("do not know to split immediate with this sub index.");
264 case AMDGPU::sub0:
265 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
266 case AMDGPU::sub1:
267 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
268 }
269 }
270
static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
272 switch (Opc) {
273 case AMDGPU::G_AND:
274 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
275 case AMDGPU::G_OR:
276 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
277 case AMDGPU::G_XOR:
278 return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
279 default:
280 llvm_unreachable("not a bit op");
281 }
282 }
283
bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
285 Register DstReg = I.getOperand(0).getReg();
286 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
287
288 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
289 if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
290 DstRB->getID() != AMDGPU::VCCRegBankID)
291 return false;
292
293 bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
294 STI.isWave64());
295 I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
296
297 // Dead implicit-def of scc
298 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
299 true, // isImp
300 false, // isKill
301 true)); // isDead
302 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
303 }
304
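// Select 32-bit add/sub directly to a SALU or VALU opcode. 64-bit adds are
// split into a low half that produces a carry and a high half that consumes
// it (e.g. S_ADD_U32 + S_ADDC_U32 on the SALU path), with the two halves
// recombined by a REG_SEQUENCE.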
bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
306 MachineBasicBlock *BB = I.getParent();
307 MachineFunction *MF = BB->getParent();
308 Register DstReg = I.getOperand(0).getReg();
309 const DebugLoc &DL = I.getDebugLoc();
310 LLT Ty = MRI->getType(DstReg);
311 if (Ty.isVector())
312 return false;
313
314 unsigned Size = Ty.getSizeInBits();
315 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
316 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
317 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
318
319 if (Size == 32) {
320 if (IsSALU) {
321 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
322 MachineInstr *Add =
323 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
324 .add(I.getOperand(1))
325 .add(I.getOperand(2));
326 I.eraseFromParent();
327 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
328 }
329
330 if (STI.hasAddNoCarry()) {
331 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
332 I.setDesc(TII.get(Opc));
333 I.addOperand(*MF, MachineOperand::CreateImm(0));
334 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
335 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
336 }
337
338 const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
339
340 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
341 MachineInstr *Add
342 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
343 .addDef(UnusedCarry, RegState::Dead)
344 .add(I.getOperand(1))
345 .add(I.getOperand(2))
346 .addImm(0);
347 I.eraseFromParent();
348 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
349 }
350
351 assert(!Sub && "illegal sub should not reach here");
352
353 const TargetRegisterClass &RC
354 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
355 const TargetRegisterClass &HalfRC
356 = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
357
358 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
359 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
360 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
361 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
362
363 Register DstLo = MRI->createVirtualRegister(&HalfRC);
364 Register DstHi = MRI->createVirtualRegister(&HalfRC);
365
366 if (IsSALU) {
367 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
368 .add(Lo1)
369 .add(Lo2);
370 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
371 .add(Hi1)
372 .add(Hi2);
373 } else {
374 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
375 Register CarryReg = MRI->createVirtualRegister(CarryRC);
376 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
377 .addDef(CarryReg)
378 .add(Lo1)
379 .add(Lo2)
380 .addImm(0);
381 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
382 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
383 .add(Hi1)
384 .add(Hi2)
385 .addReg(CarryReg, RegState::Kill)
386 .addImm(0);
387
388 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
389 return false;
390 }
391
392 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
393 .addReg(DstLo)
394 .addImm(AMDGPU::sub0)
395 .addReg(DstHi)
396 .addImm(AMDGPU::sub1);
397
398
399 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
400 return false;
401
402 I.eraseFromParent();
403 return true;
404 }
405
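// Carry/borrow ops: when the carry-out lives in VCC these map directly onto
// the VALU carry variants; on the SALU path the carry is modeled by copying
// in and out of SCC around S_ADD*/S_SUB*.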
bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
    MachineInstr &I) const {
408 MachineBasicBlock *BB = I.getParent();
409 MachineFunction *MF = BB->getParent();
410 const DebugLoc &DL = I.getDebugLoc();
411 Register Dst0Reg = I.getOperand(0).getReg();
412 Register Dst1Reg = I.getOperand(1).getReg();
413 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
414 I.getOpcode() == AMDGPU::G_UADDE;
415 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
416 I.getOpcode() == AMDGPU::G_USUBE;
417
418 if (isVCC(Dst1Reg, *MRI)) {
419 unsigned NoCarryOpc =
420 IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
421 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
422 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
423 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
424 I.addOperand(*MF, MachineOperand::CreateImm(0));
425 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
426 }
427
428 Register Src0Reg = I.getOperand(2).getReg();
429 Register Src1Reg = I.getOperand(3).getReg();
430
431 if (HasCarryIn) {
432 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
433 .addReg(I.getOperand(4).getReg());
434 }
435
436 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
437 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
438
439 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
440 .add(I.getOperand(2))
441 .add(I.getOperand(3));
442 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
443 .addReg(AMDGPU::SCC);
444
445 if (!MRI->getRegClassOrNull(Dst1Reg))
446 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
447
448 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
450 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
451 return false;
452
453 if (HasCarryIn &&
454 !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
455 AMDGPU::SReg_32RegClass, *MRI))
456 return false;
457
458 I.eraseFromParent();
459 return true;
460 }
461
bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
    MachineInstr &I) const {
464 MachineBasicBlock *BB = I.getParent();
465 MachineFunction *MF = BB->getParent();
466 const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
467
468 unsigned Opc;
469 if (Subtarget->hasMADIntraFwdBug())
470 Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
471 : AMDGPU::V_MAD_I64_I32_gfx11_e64;
472 else
473 Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
474 I.setDesc(TII.get(Opc));
475 I.addOperand(*MF, MachineOperand::CreateImm(0));
476 I.addImplicitDefUseOperands(*MF);
477 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
478 }
479
480 // TODO: We should probably legalize these to only using 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
482 MachineBasicBlock *BB = I.getParent();
483 Register DstReg = I.getOperand(0).getReg();
484 Register SrcReg = I.getOperand(1).getReg();
485 LLT DstTy = MRI->getType(DstReg);
486 LLT SrcTy = MRI->getType(SrcReg);
487 const unsigned SrcSize = SrcTy.getSizeInBits();
488 unsigned DstSize = DstTy.getSizeInBits();
489
490 // TODO: Should handle any multiple of 32 offset.
491 unsigned Offset = I.getOperand(2).getImm();
492 if (Offset % 32 != 0 || DstSize > 128)
493 return false;
494
495 // 16-bit operations really use 32-bit registers.
496 // FIXME: Probably should not allow 16-bit G_EXTRACT results.
497 if (DstSize == 16)
498 DstSize = 32;
499
500 const TargetRegisterClass *DstRC =
501 TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
502 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
503 return false;
504
505 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
506 const TargetRegisterClass *SrcRC =
507 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
508 if (!SrcRC)
509 return false;
510 unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
511 DstSize / 32);
512 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
513 if (!SrcRC)
514 return false;
515
516 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
517 *SrcRC, I.getOperand(1));
518 const DebugLoc &DL = I.getDebugLoc();
519 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
520 .addReg(SrcReg, 0, SubReg);
521
522 I.eraseFromParent();
523 return true;
524 }
525
bool AMDGPUInstructionSelector::selectG_FMA_FMAD(MachineInstr &I) const {
527 assert(I.getOpcode() == AMDGPU::G_FMA || I.getOpcode() == AMDGPU::G_FMAD);
528
529 // Try to manually select MAD_MIX/FMA_MIX.
530 Register Dst = I.getOperand(0).getReg();
531 LLT ResultTy = MRI->getType(Dst);
532 bool IsFMA = I.getOpcode() == AMDGPU::G_FMA;
533 if (ResultTy != LLT::scalar(32) ||
534 (IsFMA ? !Subtarget->hasFmaMixInsts() : !Subtarget->hasMadMixInsts()))
535 return false;
536
537 // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
538 // using the conversion from f16.
539 bool MatchedSrc0, MatchedSrc1, MatchedSrc2;
540 auto [Src0, Src0Mods] =
541 selectVOP3PMadMixModsImpl(I.getOperand(1), MatchedSrc0);
542 auto [Src1, Src1Mods] =
543 selectVOP3PMadMixModsImpl(I.getOperand(2), MatchedSrc1);
544 auto [Src2, Src2Mods] =
545 selectVOP3PMadMixModsImpl(I.getOperand(3), MatchedSrc2);
546
547 #ifndef NDEBUG
548 const SIMachineFunctionInfo *MFI =
549 I.getMF()->getInfo<SIMachineFunctionInfo>();
550 AMDGPU::SIModeRegisterDefaults Mode = MFI->getMode();
551 assert((IsFMA || !Mode.allFP32Denormals()) &&
552 "fmad selected with denormals enabled");
553 #endif
554
555 // TODO: We can select this with f32 denormals enabled if all the sources are
556 // converted from f16 (in which case fmad isn't legal).
557 if (!MatchedSrc0 && !MatchedSrc1 && !MatchedSrc2)
558 return false;
559
560 const unsigned OpC = IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32;
561 MachineInstr *MixInst =
562 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpC), Dst)
563 .addImm(Src0Mods)
564 .addReg(copyToVGPRIfSrcFolded(Src0, Src0Mods, I.getOperand(1), &I))
565 .addImm(Src1Mods)
566 .addReg(copyToVGPRIfSrcFolded(Src1, Src1Mods, I.getOperand(2), &I))
567 .addImm(Src2Mods)
568 .addReg(copyToVGPRIfSrcFolded(Src2, Src2Mods, I.getOperand(3), &I))
569 .addImm(0)
570 .addImm(0)
571 .addImm(0);
572
573 if (!constrainSelectedInstRegOperands(*MixInst, TII, TRI, RBI))
574 return false;
575
576 I.eraseFromParent();
577 return true;
578 }
579
bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
581 MachineBasicBlock *BB = MI.getParent();
582 Register DstReg = MI.getOperand(0).getReg();
583 LLT DstTy = MRI->getType(DstReg);
584 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
585
586 const unsigned SrcSize = SrcTy.getSizeInBits();
587 if (SrcSize < 32)
588 return selectImpl(MI, *CoverageInfo);
589
590 const DebugLoc &DL = MI.getDebugLoc();
591 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
592 const unsigned DstSize = DstTy.getSizeInBits();
593 const TargetRegisterClass *DstRC =
594 TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
595 if (!DstRC)
596 return false;
597
598 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
599 MachineInstrBuilder MIB =
600 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
601 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
602 MachineOperand &Src = MI.getOperand(I + 1);
603 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
604 MIB.addImm(SubRegs[I]);
605
606 const TargetRegisterClass *SrcRC
607 = TRI.getConstrainedRegClassForOperand(Src, *MRI);
608 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
609 return false;
610 }
611
612 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
613 return false;
614
615 MI.eraseFromParent();
616 return true;
617 }
618
bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
620 MachineBasicBlock *BB = MI.getParent();
621 const int NumDst = MI.getNumOperands() - 1;
622
623 MachineOperand &Src = MI.getOperand(NumDst);
624
625 Register SrcReg = Src.getReg();
626 Register DstReg0 = MI.getOperand(0).getReg();
627 LLT DstTy = MRI->getType(DstReg0);
628 LLT SrcTy = MRI->getType(SrcReg);
629
630 const unsigned DstSize = DstTy.getSizeInBits();
631 const unsigned SrcSize = SrcTy.getSizeInBits();
632 const DebugLoc &DL = MI.getDebugLoc();
633 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
634
635 const TargetRegisterClass *SrcRC =
636 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
637 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
638 return false;
639
640 // Note we could have mixed SGPR and VGPR destination banks for an SGPR
641 // source, and this relies on the fact that the same subregister indices are
642 // used for both.
643 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
644 for (int I = 0, E = NumDst; I != E; ++I) {
645 MachineOperand &Dst = MI.getOperand(I);
646 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
647 .addReg(SrcReg, 0, SubRegs[I]);
648
649 // Make sure the subregister index is valid for the source register.
650 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
651 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
652 return false;
653
654 const TargetRegisterClass *DstRC =
655 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
656 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
657 return false;
658 }
659
660 MI.eraseFromParent();
661 return true;
662 }
663
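// Select v2s16 G_BUILD_VECTOR / G_BUILD_VECTOR_TRUNC. Two constant sources
// fold into a single move of the packed immediate; otherwise fall back to
// TableGen patterns, a V_AND + V_LSHL_OR sequence on the VALU, or one of the
// S_PACK_* forms on the SALU.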
bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {
665 assert(MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC ||
666 MI.getOpcode() == AMDGPU::G_BUILD_VECTOR);
667
668 Register Src0 = MI.getOperand(1).getReg();
669 Register Src1 = MI.getOperand(2).getReg();
670 LLT SrcTy = MRI->getType(Src0);
671 const unsigned SrcSize = SrcTy.getSizeInBits();
672
673 // BUILD_VECTOR with >=32 bits source is handled by MERGE_VALUE.
674 if (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR && SrcSize >= 32) {
675 return selectG_MERGE_VALUES(MI);
676 }
677
678 // Selection logic below is for V2S16 only.
679 // For G_BUILD_VECTOR_TRUNC, additionally check that the operands are s32.
680 Register Dst = MI.getOperand(0).getReg();
681 if (MRI->getType(Dst) != LLT::fixed_vector(2, 16) ||
682 (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC &&
683 SrcTy != LLT::scalar(32)))
684 return selectImpl(MI, *CoverageInfo);
685
686 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
687 if (DstBank->getID() == AMDGPU::AGPRRegBankID)
688 return false;
689
690 assert(DstBank->getID() == AMDGPU::SGPRRegBankID ||
691 DstBank->getID() == AMDGPU::VGPRRegBankID);
692 const bool IsVector = DstBank->getID() == AMDGPU::VGPRRegBankID;
693
694 const DebugLoc &DL = MI.getDebugLoc();
695 MachineBasicBlock *BB = MI.getParent();
696
697 // First, before trying TableGen patterns, check if both sources are
698 // constants. In those cases, we can trivially compute the final constant
699 // and emit a simple move.
700 auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
701 if (ConstSrc1) {
702 auto ConstSrc0 =
703 getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
704 if (ConstSrc0) {
705 const int64_t K0 = ConstSrc0->Value.getSExtValue();
706 const int64_t K1 = ConstSrc1->Value.getSExtValue();
707 uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
708 uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
709 uint32_t Imm = Lo16 | (Hi16 << 16);
710
711 // VALU
712 if (IsVector) {
713 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), Dst).addImm(Imm);
714 MI.eraseFromParent();
715 return RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI);
716 }
717
718 // SALU
719 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst).addImm(Imm);
720 MI.eraseFromParent();
721 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
722 }
723 }
724
725 // Now try TableGen patterns.
726 if (selectImpl(MI, *CoverageInfo))
727 return true;
728
729 // TODO: This should probably be a combine somewhere
730 // (build_vector $src0, undef) -> copy $src0
731 MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
732 if (Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
733 MI.setDesc(TII.get(AMDGPU::COPY));
734 MI.removeOperand(2);
735 const auto &RC =
736 IsVector ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
737 return RBI.constrainGenericRegister(Dst, RC, *MRI) &&
738 RBI.constrainGenericRegister(Src0, RC, *MRI);
739 }
740
741 // TODO: Can be improved?
742 if (IsVector) {
743 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
744 auto MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
745 .addImm(0xFFFF)
746 .addReg(Src0);
747 if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
748 return false;
749
750 MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), Dst)
751 .addReg(Src1)
752 .addImm(16)
753 .addReg(TmpReg);
754 if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
755 return false;
756
757 MI.eraseFromParent();
758 return true;
759 }
760
761 Register ShiftSrc0;
762 Register ShiftSrc1;
763
764 // With multiple uses of the shift, this will duplicate the shift and
765 // increase register pressure.
766 //
767 // (build_vector (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
768 // => (S_PACK_HH_B32_B16 $src0, $src1)
769 // (build_vector (lshr_oneuse SReg_32:$src0, 16), $src1)
770 // => (S_PACK_HL_B32_B16 $src0, $src1)
771 // (build_vector $src0, (lshr_oneuse SReg_32:$src1, 16))
772 // => (S_PACK_LH_B32_B16 $src0, $src1)
773 // (build_vector $src0, $src1)
774 // => (S_PACK_LL_B32_B16 $src0, $src1)
775
776 bool Shift0 = mi_match(
777 Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
778
779 bool Shift1 = mi_match(
780 Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
781
782 unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
783 if (Shift0 && Shift1) {
784 Opc = AMDGPU::S_PACK_HH_B32_B16;
785 MI.getOperand(1).setReg(ShiftSrc0);
786 MI.getOperand(2).setReg(ShiftSrc1);
787 } else if (Shift1) {
788 Opc = AMDGPU::S_PACK_LH_B32_B16;
789 MI.getOperand(2).setReg(ShiftSrc1);
790 } else if (Shift0) {
791 auto ConstSrc1 =
792 getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
793 if (ConstSrc1 && ConstSrc1->Value == 0) {
794 // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
795 auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
796 .addReg(ShiftSrc0)
797 .addImm(16);
798
799 MI.eraseFromParent();
800 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
801 }
802 if (STI.hasSPackHL()) {
803 Opc = AMDGPU::S_PACK_HL_B32_B16;
804 MI.getOperand(1).setReg(ShiftSrc0);
805 }
806 }
807
808 MI.setDesc(TII.get(Opc));
809 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
810 }
811
bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
813 return selectG_ADD_SUB(I);
814 }
815
bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
817 const MachineOperand &MO = I.getOperand(0);
818
819 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
820 // regbank check here is to know why getConstrainedRegClassForOperand failed.
821 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
822 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
823 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
824 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
825 return true;
826 }
827
828 return false;
829 }
830
bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
832 MachineBasicBlock *BB = I.getParent();
833
834 Register DstReg = I.getOperand(0).getReg();
835 Register Src0Reg = I.getOperand(1).getReg();
836 Register Src1Reg = I.getOperand(2).getReg();
837 LLT Src1Ty = MRI->getType(Src1Reg);
838
839 unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
840 unsigned InsSize = Src1Ty.getSizeInBits();
841
842 int64_t Offset = I.getOperand(3).getImm();
843
844 // FIXME: These cases should have been illegal and unnecessary to check here.
845 if (Offset % 32 != 0 || InsSize % 32 != 0)
846 return false;
847
848 // Currently not handled by getSubRegFromChannel.
849 if (InsSize > 128)
850 return false;
851
852 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
853 if (SubReg == AMDGPU::NoSubRegister)
854 return false;
855
856 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
857 const TargetRegisterClass *DstRC =
858 TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
859 if (!DstRC)
860 return false;
861
862 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
863 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
864 const TargetRegisterClass *Src0RC =
865 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
866 const TargetRegisterClass *Src1RC =
867 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
868
869 // Deal with weird cases where the class only partially supports the subreg
870 // index.
871 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
872 if (!Src0RC || !Src1RC)
873 return false;
874
875 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
876 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
877 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
878 return false;
879
880 const DebugLoc &DL = I.getDebugLoc();
881 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
882 .addReg(Src0Reg)
883 .addReg(Src1Reg)
884 .addImm(SubReg);
885
886 I.eraseFromParent();
887 return true;
888 }
889
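// Vector bitfield extract. Scalar and 64-bit forms are expanded earlier in
// regbankselect, so only the 32-bit VALU case (V_BFE_I32/V_BFE_U32) reaches
// this point.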
bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
891 Register DstReg = MI.getOperand(0).getReg();
892 Register SrcReg = MI.getOperand(1).getReg();
893 Register OffsetReg = MI.getOperand(2).getReg();
894 Register WidthReg = MI.getOperand(3).getReg();
895
896 assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
897 "scalar BFX instructions are expanded in regbankselect");
898 assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
899 "64-bit vector BFX instructions are expanded in regbankselect");
900
901 const DebugLoc &DL = MI.getDebugLoc();
902 MachineBasicBlock *MBB = MI.getParent();
903
904 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
905 unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
906 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
907 .addReg(SrcReg)
908 .addReg(OffsetReg)
909 .addReg(WidthReg);
910 MI.eraseFromParent();
911 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
912 }
913
bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
915 if (STI.getLDSBankCount() != 16)
916 return selectImpl(MI, *CoverageInfo);
917
918 Register Dst = MI.getOperand(0).getReg();
919 Register Src0 = MI.getOperand(2).getReg();
920 Register M0Val = MI.getOperand(6).getReg();
921 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
922 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
923 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
924 return false;
925
926 // This requires 2 instructions. It is possible to write a pattern to support
927 // this, but the generated isel emitter doesn't correctly deal with multiple
928 // output instructions using the same physical register input. The copy to m0
929 // is incorrectly placed before the second instruction.
930 //
931 // TODO: Match source modifiers.
932
933 Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
934 const DebugLoc &DL = MI.getDebugLoc();
935 MachineBasicBlock *MBB = MI.getParent();
936
937 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
938 .addReg(M0Val);
939 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
940 .addImm(2)
941 .addImm(MI.getOperand(4).getImm()) // $attr
942 .addImm(MI.getOperand(3).getImm()); // $attrchan
943
944 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
945 .addImm(0) // $src0_modifiers
946 .addReg(Src0) // $src0
947 .addImm(MI.getOperand(4).getImm()) // $attr
948 .addImm(MI.getOperand(3).getImm()) // $attrchan
949 .addImm(0) // $src2_modifiers
950 .addReg(InterpMov) // $src2 - 2 f16 values selected by high
951 .addImm(MI.getOperand(5).getImm()) // $high
952 .addImm(0) // $clamp
953 .addImm(0); // $omod
954
955 MI.eraseFromParent();
956 return true;
957 }
958
959 // Writelane is special in that it can use SGPR and M0 (which would normally
960 // count as using the constant bus twice - but in this case it is allowed since
961 // the lane selector doesn't count as a use of the constant bus). However, it is
962 // still required to abide by the 1 SGPR rule. Fix this up if we might have
963 // multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
965 // With a constant bus limit of at least 2, there's no issue.
966 if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
967 return selectImpl(MI, *CoverageInfo);
968
969 MachineBasicBlock *MBB = MI.getParent();
970 const DebugLoc &DL = MI.getDebugLoc();
971 Register VDst = MI.getOperand(0).getReg();
972 Register Val = MI.getOperand(2).getReg();
973 Register LaneSelect = MI.getOperand(3).getReg();
974 Register VDstIn = MI.getOperand(4).getReg();
975
976 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
977
978 std::optional<ValueAndVReg> ConstSelect =
979 getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
980 if (ConstSelect) {
981 // The selector has to be an inline immediate, so we can use whatever for
982 // the other operands.
983 MIB.addReg(Val);
984 MIB.addImm(ConstSelect->Value.getSExtValue() &
985 maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
986 } else {
987 std::optional<ValueAndVReg> ConstVal =
988 getIConstantVRegValWithLookThrough(Val, *MRI);
989
990 // If the value written is an inline immediate, we can get away without a
991 // copy to m0.
992 if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
993 STI.hasInv2PiInlineImm())) {
994 MIB.addImm(ConstVal->Value.getSExtValue());
995 MIB.addReg(LaneSelect);
996 } else {
997 MIB.addReg(Val);
998
999 // If the lane selector was originally in a VGPR and copied with
1000 // readfirstlane, there's a hazard to read the same SGPR from the
1001 // VALU. Constrain to a different SGPR to help avoid needing a nop later.
1002 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
1003
1004 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1005 .addReg(LaneSelect);
1006 MIB.addReg(AMDGPU::M0);
1007 }
1008 }
1009
1010 MIB.addReg(VDstIn);
1011
1012 MI.eraseFromParent();
1013 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1014 }
1015
1016 // We need to handle this here because tablegen doesn't support matching
1017 // instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
1019 Register Dst0 = MI.getOperand(0).getReg();
1020 Register Dst1 = MI.getOperand(1).getReg();
1021
1022 LLT Ty = MRI->getType(Dst0);
1023 unsigned Opc;
1024 if (Ty == LLT::scalar(32))
1025 Opc = AMDGPU::V_DIV_SCALE_F32_e64;
1026 else if (Ty == LLT::scalar(64))
1027 Opc = AMDGPU::V_DIV_SCALE_F64_e64;
1028 else
1029 return false;
1030
1031 // TODO: Match source modifiers.
1032
1033 const DebugLoc &DL = MI.getDebugLoc();
1034 MachineBasicBlock *MBB = MI.getParent();
1035
1036 Register Numer = MI.getOperand(3).getReg();
1037 Register Denom = MI.getOperand(4).getReg();
1038 unsigned ChooseDenom = MI.getOperand(5).getImm();
1039
1040 Register Src0 = ChooseDenom != 0 ? Numer : Denom;
1041
1042 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
1043 .addDef(Dst1)
1044 .addImm(0) // $src0_modifiers
1045 .addUse(Src0) // $src0
1046 .addImm(0) // $src1_modifiers
1047 .addUse(Denom) // $src1
1048 .addImm(0) // $src2_modifiers
1049 .addUse(Numer) // $src2
1050 .addImm(0) // $clamp
1051 .addImm(0); // $omod
1052
1053 MI.eraseFromParent();
1054 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1055 }
1056
bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
1058 unsigned IntrinsicID = I.getIntrinsicID();
1059 switch (IntrinsicID) {
1060 case Intrinsic::amdgcn_if_break: {
1061 MachineBasicBlock *BB = I.getParent();
1062
1063 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1064 // SelectionDAG uses for wave32 vs wave64.
1065 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
1066 .add(I.getOperand(0))
1067 .add(I.getOperand(2))
1068 .add(I.getOperand(3));
1069
1070 Register DstReg = I.getOperand(0).getReg();
1071 Register Src0Reg = I.getOperand(2).getReg();
1072 Register Src1Reg = I.getOperand(3).getReg();
1073
1074 I.eraseFromParent();
1075
1076 for (Register Reg : { DstReg, Src0Reg, Src1Reg })
1077 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1078
1079 return true;
1080 }
1081 case Intrinsic::amdgcn_interp_p1_f16:
1082 return selectInterpP1F16(I);
1083 case Intrinsic::amdgcn_wqm:
1084 return constrainCopyLikeIntrin(I, AMDGPU::WQM);
1085 case Intrinsic::amdgcn_softwqm:
1086 return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
1087 case Intrinsic::amdgcn_strict_wwm:
1088 case Intrinsic::amdgcn_wwm:
1089 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
1090 case Intrinsic::amdgcn_strict_wqm:
1091 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
1092 case Intrinsic::amdgcn_writelane:
1093 return selectWritelane(I);
1094 case Intrinsic::amdgcn_div_scale:
1095 return selectDivScale(I);
1096 case Intrinsic::amdgcn_icmp:
1097 case Intrinsic::amdgcn_fcmp:
1098 if (selectImpl(I, *CoverageInfo))
1099 return true;
1100 return selectIntrinsicCmp(I);
1101 case Intrinsic::amdgcn_ballot:
1102 return selectBallot(I);
1103 case Intrinsic::amdgcn_reloc_constant:
1104 return selectRelocConstant(I);
1105 case Intrinsic::amdgcn_groupstaticsize:
1106 return selectGroupStaticSize(I);
1107 case Intrinsic::returnaddress:
1108 return selectReturnAddress(I);
1109 case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
1110 case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
1111 case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
1112 case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
1113 case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
1114 case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
1115 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
1116 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
1117 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
1118 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
1119 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
1120 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
1121 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
1122 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
1123 return selectSMFMACIntrin(I);
1124 default:
1125 return selectImpl(I, *CoverageInfo);
1126 }
1127 }
1128
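// Map an integer or FP predicate to the VALU compare opcode for a 16-, 32-
// or 64-bit comparison, preferring the true16 encodings when available.
// Returns -1 if there is no suitable compare.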
static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size,
                          const GCNSubtarget &ST) {
1131 if (Size != 16 && Size != 32 && Size != 64)
1132 return -1;
1133
1134 if (Size == 16 && !ST.has16BitInsts())
1135 return -1;
1136
1137 const auto Select = [&](unsigned S16Opc, unsigned TrueS16Opc, unsigned S32Opc,
1138 unsigned S64Opc) {
1139 if (Size == 16)
1140 return ST.hasTrue16BitInsts() ? TrueS16Opc : S16Opc;
1141 if (Size == 32)
1142 return S32Opc;
1143 return S64Opc;
1144 };
1145
1146 switch (P) {
1147 default:
1148 llvm_unreachable("Unknown condition code!");
1149 case CmpInst::ICMP_NE:
1150 return Select(AMDGPU::V_CMP_NE_U16_e64, AMDGPU::V_CMP_NE_U16_t16_e64,
1151 AMDGPU::V_CMP_NE_U32_e64, AMDGPU::V_CMP_NE_U64_e64);
1152 case CmpInst::ICMP_EQ:
1153 return Select(AMDGPU::V_CMP_EQ_U16_e64, AMDGPU::V_CMP_EQ_U16_t16_e64,
1154 AMDGPU::V_CMP_EQ_U32_e64, AMDGPU::V_CMP_EQ_U64_e64);
1155 case CmpInst::ICMP_SGT:
1156 return Select(AMDGPU::V_CMP_GT_I16_e64, AMDGPU::V_CMP_GT_I16_t16_e64,
1157 AMDGPU::V_CMP_GT_I32_e64, AMDGPU::V_CMP_GT_I64_e64);
1158 case CmpInst::ICMP_SGE:
1159 return Select(AMDGPU::V_CMP_GE_I16_e64, AMDGPU::V_CMP_GE_I16_t16_e64,
1160 AMDGPU::V_CMP_GE_I32_e64, AMDGPU::V_CMP_GE_I64_e64);
1161 case CmpInst::ICMP_SLT:
1162 return Select(AMDGPU::V_CMP_LT_I16_e64, AMDGPU::V_CMP_LT_I16_t16_e64,
1163 AMDGPU::V_CMP_LT_I32_e64, AMDGPU::V_CMP_LT_I64_e64);
1164 case CmpInst::ICMP_SLE:
1165 return Select(AMDGPU::V_CMP_LE_I16_e64, AMDGPU::V_CMP_LE_I16_t16_e64,
1166 AMDGPU::V_CMP_LE_I32_e64, AMDGPU::V_CMP_LE_I64_e64);
1167 case CmpInst::ICMP_UGT:
1168 return Select(AMDGPU::V_CMP_GT_U16_e64, AMDGPU::V_CMP_GT_U16_t16_e64,
1169 AMDGPU::V_CMP_GT_U32_e64, AMDGPU::V_CMP_GT_U64_e64);
1170 case CmpInst::ICMP_UGE:
1171 return Select(AMDGPU::V_CMP_GE_U16_e64, AMDGPU::V_CMP_GE_U16_t16_e64,
1172 AMDGPU::V_CMP_GE_U32_e64, AMDGPU::V_CMP_GE_U64_e64);
1173 case CmpInst::ICMP_ULT:
1174 return Select(AMDGPU::V_CMP_LT_U16_e64, AMDGPU::V_CMP_LT_U16_t16_e64,
1175 AMDGPU::V_CMP_LT_U32_e64, AMDGPU::V_CMP_LT_U64_e64);
1176 case CmpInst::ICMP_ULE:
1177 return Select(AMDGPU::V_CMP_LE_U16_e64, AMDGPU::V_CMP_LE_U16_t16_e64,
1178 AMDGPU::V_CMP_LE_U32_e64, AMDGPU::V_CMP_LE_U64_e64);
1179
1180 case CmpInst::FCMP_OEQ:
1181 return Select(AMDGPU::V_CMP_EQ_F16_e64, AMDGPU::V_CMP_EQ_F16_t16_e64,
1182 AMDGPU::V_CMP_EQ_F32_e64, AMDGPU::V_CMP_EQ_F64_e64);
1183 case CmpInst::FCMP_OGT:
1184 return Select(AMDGPU::V_CMP_GT_F16_e64, AMDGPU::V_CMP_GT_F16_t16_e64,
1185 AMDGPU::V_CMP_GT_F32_e64, AMDGPU::V_CMP_GT_F64_e64);
1186 case CmpInst::FCMP_OGE:
1187 return Select(AMDGPU::V_CMP_GE_F16_e64, AMDGPU::V_CMP_GE_F16_t16_e64,
1188 AMDGPU::V_CMP_GE_F32_e64, AMDGPU::V_CMP_GE_F64_e64);
1189 case CmpInst::FCMP_OLT:
1190 return Select(AMDGPU::V_CMP_LT_F16_e64, AMDGPU::V_CMP_LT_F16_t16_e64,
1191 AMDGPU::V_CMP_LT_F32_e64, AMDGPU::V_CMP_LT_F64_e64);
1192 case CmpInst::FCMP_OLE:
1193 return Select(AMDGPU::V_CMP_LE_F16_e64, AMDGPU::V_CMP_LE_F16_t16_e64,
1194 AMDGPU::V_CMP_LE_F32_e64, AMDGPU::V_CMP_LE_F64_e64);
1195 case CmpInst::FCMP_ONE:
1196 return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
1197 AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
1198 case CmpInst::FCMP_ORD:
1199 return Select(AMDGPU::V_CMP_O_F16_e64, AMDGPU::V_CMP_O_F16_t16_e64,
1200 AMDGPU::V_CMP_O_F32_e64, AMDGPU::V_CMP_O_F64_e64);
1201 case CmpInst::FCMP_UNO:
1202 return Select(AMDGPU::V_CMP_U_F16_e64, AMDGPU::V_CMP_U_F16_t16_e64,
1203 AMDGPU::V_CMP_U_F32_e64, AMDGPU::V_CMP_U_F64_e64);
1204 case CmpInst::FCMP_UEQ:
1205 return Select(AMDGPU::V_CMP_NLG_F16_e64, AMDGPU::V_CMP_NLG_F16_t16_e64,
1206 AMDGPU::V_CMP_NLG_F32_e64, AMDGPU::V_CMP_NLG_F64_e64);
1207 case CmpInst::FCMP_UGT:
1208 return Select(AMDGPU::V_CMP_NLE_F16_e64, AMDGPU::V_CMP_NLE_F16_t16_e64,
1209 AMDGPU::V_CMP_NLE_F32_e64, AMDGPU::V_CMP_NLE_F64_e64);
1210 case CmpInst::FCMP_UGE:
1211 return Select(AMDGPU::V_CMP_NLT_F16_e64, AMDGPU::V_CMP_NLT_F16_t16_e64,
1212 AMDGPU::V_CMP_NLT_F32_e64, AMDGPU::V_CMP_NLT_F64_e64);
1213 case CmpInst::FCMP_ULT:
1214 return Select(AMDGPU::V_CMP_NGE_F16_e64, AMDGPU::V_CMP_NGE_F16_t16_e64,
1215 AMDGPU::V_CMP_NGE_F32_e64, AMDGPU::V_CMP_NGE_F64_e64);
1216 case CmpInst::FCMP_ULE:
1217 return Select(AMDGPU::V_CMP_NGT_F16_e64, AMDGPU::V_CMP_NGT_F16_t16_e64,
1218 AMDGPU::V_CMP_NGT_F32_e64, AMDGPU::V_CMP_NGT_F64_e64);
1219 case CmpInst::FCMP_UNE:
1220 return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
1221 AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
1222 case CmpInst::FCMP_TRUE:
1223 return Select(AMDGPU::V_CMP_TRU_F16_e64, AMDGPU::V_CMP_TRU_F16_t16_e64,
1224 AMDGPU::V_CMP_TRU_F32_e64, AMDGPU::V_CMP_TRU_F64_e64);
1225 case CmpInst::FCMP_FALSE:
1226 return Select(AMDGPU::V_CMP_F_F16_e64, AMDGPU::V_CMP_F_F16_t16_e64,
1227 AMDGPU::V_CMP_F_F32_e64, AMDGPU::V_CMP_F_F64_e64);
1228 }
1229 }
1230
int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
1233 if (Size == 64) {
1234 if (!STI.hasScalarCompareEq64())
1235 return -1;
1236
1237 switch (P) {
1238 case CmpInst::ICMP_NE:
1239 return AMDGPU::S_CMP_LG_U64;
1240 case CmpInst::ICMP_EQ:
1241 return AMDGPU::S_CMP_EQ_U64;
1242 default:
1243 return -1;
1244 }
1245 }
1246
1247 if (Size != 32)
1248 return -1;
1249
1250 switch (P) {
1251 case CmpInst::ICMP_NE:
1252 return AMDGPU::S_CMP_LG_U32;
1253 case CmpInst::ICMP_EQ:
1254 return AMDGPU::S_CMP_EQ_U32;
1255 case CmpInst::ICMP_SGT:
1256 return AMDGPU::S_CMP_GT_I32;
1257 case CmpInst::ICMP_SGE:
1258 return AMDGPU::S_CMP_GE_I32;
1259 case CmpInst::ICMP_SLT:
1260 return AMDGPU::S_CMP_LT_I32;
1261 case CmpInst::ICMP_SLE:
1262 return AMDGPU::S_CMP_LE_I32;
1263 case CmpInst::ICMP_UGT:
1264 return AMDGPU::S_CMP_GT_U32;
1265 case CmpInst::ICMP_UGE:
1266 return AMDGPU::S_CMP_GE_U32;
1267 case CmpInst::ICMP_ULT:
1268 return AMDGPU::S_CMP_LT_U32;
1269 case CmpInst::ICMP_ULE:
1270 return AMDGPU::S_CMP_LE_U32;
1271 default:
1272 llvm_unreachable("Unknown condition code!");
1273 }
1274 }
1275
bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1277 MachineBasicBlock *BB = I.getParent();
1278 const DebugLoc &DL = I.getDebugLoc();
1279
1280 Register SrcReg = I.getOperand(2).getReg();
1281 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1282
1283 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1284
1285 Register CCReg = I.getOperand(0).getReg();
1286 if (!isVCC(CCReg, *MRI)) {
1287 int Opcode = getS_CMPOpcode(Pred, Size);
1288 if (Opcode == -1)
1289 return false;
1290 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1291 .add(I.getOperand(2))
1292 .add(I.getOperand(3));
1293 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1294 .addReg(AMDGPU::SCC);
1295 bool Ret =
1296 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1297 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1298 I.eraseFromParent();
1299 return Ret;
1300 }
1301
1302 int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
1303 if (Opcode == -1)
1304 return false;
1305
1306 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1307 I.getOperand(0).getReg())
1308 .add(I.getOperand(2))
1309 .add(I.getOperand(3));
1310 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1311 *TRI.getBoolRC(), *MRI);
1312 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1313 I.eraseFromParent();
1314 return Ret;
1315 }
1316
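// Select amdgcn.icmp/amdgcn.fcmp with a wave-sized result. FP compares fold
// source modifiers; an invalid predicate degrades to an IMPLICIT_DEF of the
// destination.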
bool AMDGPUInstructionSelector::selectIntrinsicCmp(MachineInstr &I) const {
1318 Register Dst = I.getOperand(0).getReg();
1319 if (isVCC(Dst, *MRI))
1320 return false;
1321
1322 LLT DstTy = MRI->getType(Dst);
1323 if (DstTy.getSizeInBits() != STI.getWavefrontSize())
1324 return false;
1325
1326 MachineBasicBlock *BB = I.getParent();
1327 const DebugLoc &DL = I.getDebugLoc();
1328 Register SrcReg = I.getOperand(2).getReg();
1329 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1330
1331 // i1 inputs are not supported in GlobalISel.
1332 if (Size == 1)
1333 return false;
1334
1335 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1336 if (!CmpInst::isIntPredicate(Pred) && !CmpInst::isFPPredicate(Pred)) {
1337 BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1338 I.eraseFromParent();
1339 return RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
1340 }
1341
1342 const int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
1343 if (Opcode == -1)
1344 return false;
1345
1346 MachineInstr *SelectedMI;
1347 if (CmpInst::isFPPredicate(Pred)) {
1348 MachineOperand &LHS = I.getOperand(2);
1349 MachineOperand &RHS = I.getOperand(3);
1350 auto [Src0, Src0Mods] = selectVOP3ModsImpl(LHS);
1351 auto [Src1, Src1Mods] = selectVOP3ModsImpl(RHS);
1352 Register Src0Reg =
1353 copyToVGPRIfSrcFolded(Src0, Src0Mods, LHS, &I, /*ForceVGPR*/ true);
1354 Register Src1Reg =
1355 copyToVGPRIfSrcFolded(Src1, Src1Mods, RHS, &I, /*ForceVGPR*/ true);
1356 SelectedMI = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1357 .addImm(Src0Mods)
1358 .addReg(Src0Reg)
1359 .addImm(Src1Mods)
1360 .addReg(Src1Reg)
1361 .addImm(0); // clamp
1362 } else {
1363 SelectedMI = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1364 .add(I.getOperand(2))
1365 .add(I.getOperand(3));
1366 }
1367
1368 RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
1369 if (!constrainSelectedInstRegOperands(*SelectedMI, TII, TRI, RBI))
1370 return false;
1371
1372 I.eraseFromParent();
1373 return true;
1374 }
1375
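// amdgcn.ballot: constant zero becomes a zero mask, constant all-ones copies
// EXEC, any other constant is rejected, and a non-constant source is copied
// through directly.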
bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1377 MachineBasicBlock *BB = I.getParent();
1378 const DebugLoc &DL = I.getDebugLoc();
1379 Register DstReg = I.getOperand(0).getReg();
1380 const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1381 const bool Is64 = Size == 64;
1382
1383 if (Size != STI.getWavefrontSize())
1384 return false;
1385
1386 std::optional<ValueAndVReg> Arg =
1387 getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1388
1389 if (Arg) {
1390 const int64_t Value = Arg->Value.getSExtValue();
1391 if (Value == 0) {
1392 unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1393 BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1394 } else if (Value == -1) { // all ones
1395 Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1396 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1397 } else
1398 return false;
1399 } else {
1400 Register SrcReg = I.getOperand(2).getReg();
1401 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1402 }
1403
1404 I.eraseFromParent();
1405 return true;
1406 }
1407
bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1409 Register DstReg = I.getOperand(0).getReg();
1410 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1411 const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1412 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1413 return false;
1414
1415 const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1416
1417 Module *M = MF->getFunction().getParent();
1418 const MDNode *Metadata = I.getOperand(2).getMetadata();
1419 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1420 auto RelocSymbol = cast<GlobalVariable>(
1421 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1422
1423 MachineBasicBlock *BB = I.getParent();
1424 BuildMI(*BB, &I, I.getDebugLoc(),
1425 TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1426 .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1427
1428 I.eraseFromParent();
1429 return true;
1430 }
1431
bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1433 Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1434
1435 Register DstReg = I.getOperand(0).getReg();
1436 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1437 unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1438 AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1439
1440 MachineBasicBlock *MBB = I.getParent();
1441 const DebugLoc &DL = I.getDebugLoc();
1442
1443 auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1444
1445 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1446 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1447 MIB.addImm(MFI->getLDSSize());
1448 } else {
1449 Module *M = MF->getFunction().getParent();
1450 const GlobalValue *GV
1451 = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1452 MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1453 }
1454
1455 I.eraseFromParent();
1456 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1457 }
1458
bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1460 MachineBasicBlock *MBB = I.getParent();
1461 MachineFunction &MF = *MBB->getParent();
1462 const DebugLoc &DL = I.getDebugLoc();
1463
1464 MachineOperand &Dst = I.getOperand(0);
1465 Register DstReg = Dst.getReg();
1466 unsigned Depth = I.getOperand(2).getImm();
1467
1468 const TargetRegisterClass *RC
1469 = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1470 if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1471 !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1472 return false;
1473
1474 // Check for kernel and shader functions
1475 if (Depth != 0 ||
1476 MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1477 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1478 .addImm(0);
1479 I.eraseFromParent();
1480 return true;
1481 }
1482
1483 MachineFrameInfo &MFI = MF.getFrameInfo();
1484 // There is a call to @llvm.returnaddress in this function
1485 MFI.setReturnAddressIsTaken(true);
1486
1487 // Get the return address reg and mark it as an implicit live-in
1488 Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1489 Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1490 AMDGPU::SReg_64RegClass, DL);
1491 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1492 .addReg(LiveIn);
1493 I.eraseFromParent();
1494 return true;
1495 }
1496
bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1498 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1499 // SelectionDAG uses for wave32 vs wave64.
1500 MachineBasicBlock *BB = MI.getParent();
1501 BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1502 .add(MI.getOperand(1));
1503
1504 Register Reg = MI.getOperand(1).getReg();
1505 MI.eraseFromParent();
1506
1507 if (!MRI->getRegClassOrNull(Reg))
1508 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1509 return true;
1510 }
1511
1512 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1513 MachineInstr &MI, Intrinsic::ID IntrID) const {
1514 MachineBasicBlock *MBB = MI.getParent();
1515 MachineFunction *MF = MBB->getParent();
1516 const DebugLoc &DL = MI.getDebugLoc();
1517
1518 unsigned IndexOperand = MI.getOperand(7).getImm();
1519 bool WaveRelease = MI.getOperand(8).getImm() != 0;
1520 bool WaveDone = MI.getOperand(9).getImm() != 0;
1521
1522 if (WaveDone && !WaveRelease)
1523 report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1524
1525 unsigned OrderedCountIndex = IndexOperand & 0x3f;
1526 IndexOperand &= ~0x3f;
1527 unsigned CountDw = 0;
1528
1529 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1530 CountDw = (IndexOperand >> 24) & 0xf;
1531 IndexOperand &= ~(0xf << 24);
1532
1533 if (CountDw < 1 || CountDw > 4) {
1534 report_fatal_error(
1535 "ds_ordered_count: dword count must be between 1 and 4");
1536 }
1537 }
1538
1539 if (IndexOperand)
1540 report_fatal_error("ds_ordered_count: bad index operand");
1541
1542 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1543 unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1544
1545 unsigned Offset0 = OrderedCountIndex << 2;
1546 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
1547
1548 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1549 Offset1 |= (CountDw - 1) << 6;
1550
1551 if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
1552 Offset1 |= ShaderType << 2;
1553
1554 unsigned Offset = Offset0 | (Offset1 << 8);
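  // Summarizing the packing above: bits [7:2] hold the ordered-count index,
  // bit 8 wave_release, bit 9 wave_done, bits [11:10] the shader type
  // (pre-GFX11 only), bit 12 the instruction (0 = add, 1 = swap), and
  // bits [15:14] the dword count minus one (GFX10+ only).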
1555
1556 Register M0Val = MI.getOperand(2).getReg();
1557 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1558 .addReg(M0Val);
1559
1560 Register DstReg = MI.getOperand(0).getReg();
1561 Register ValReg = MI.getOperand(3).getReg();
1562 MachineInstrBuilder DS =
1563 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1564 .addReg(ValReg)
1565 .addImm(Offset)
1566 .cloneMemRefs(MI);
1567
1568 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1569 return false;
1570
1571 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1572 MI.eraseFromParent();
1573 return Ret;
1574 }
1575
1576 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1577 switch (IntrID) {
1578 case Intrinsic::amdgcn_ds_gws_init:
1579 return AMDGPU::DS_GWS_INIT;
1580 case Intrinsic::amdgcn_ds_gws_barrier:
1581 return AMDGPU::DS_GWS_BARRIER;
1582 case Intrinsic::amdgcn_ds_gws_sema_v:
1583 return AMDGPU::DS_GWS_SEMA_V;
1584 case Intrinsic::amdgcn_ds_gws_sema_br:
1585 return AMDGPU::DS_GWS_SEMA_BR;
1586 case Intrinsic::amdgcn_ds_gws_sema_p:
1587 return AMDGPU::DS_GWS_SEMA_P;
1588 case Intrinsic::amdgcn_ds_gws_sema_release_all:
1589 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1590 default:
1591 llvm_unreachable("not a gws intrinsic");
1592 }
1593 }
1594
1595 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1596 Intrinsic::ID IID) const {
1597 if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1598 !STI.hasGWSSemaReleaseAll())
1599 return false;
1600
1601 // intrinsic ID, vsrc, offset
1602 const bool HasVSrc = MI.getNumOperands() == 3;
1603 assert(HasVSrc || MI.getNumOperands() == 2);
1604
1605 Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1606 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1607 if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1608 return false;
1609
1610 MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1611 unsigned ImmOffset;
1612
1613 MachineBasicBlock *MBB = MI.getParent();
1614 const DebugLoc &DL = MI.getDebugLoc();
1615
1616 MachineInstr *Readfirstlane = nullptr;
1617
1618 // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1619 // incoming offset, in case there's an add of a constant. We'll have to put it
1620 // back later.
1621 if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1622 Readfirstlane = OffsetDef;
1623 BaseOffset = OffsetDef->getOperand(1).getReg();
1624 OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1625 }
1626
1627 if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1628 // If we have a constant offset, try to use the 0 in m0 as the base.
1629 // TODO: Look into changing the default m0 initialization value. If the
1630 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1631 // the immediate offset.
1632
1633 ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1634 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1635 .addImm(0);
1636 } else {
1637 std::tie(BaseOffset, ImmOffset) =
1638 AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KnownBits);
1639
1640 if (Readfirstlane) {
1641 // We have the constant offset now, so put the readfirstlane back on the
1642 // variable component.
1643 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1644 return false;
1645
1646 Readfirstlane->getOperand(1).setReg(BaseOffset);
1647 BaseOffset = Readfirstlane->getOperand(0).getReg();
1648 } else {
1649 if (!RBI.constrainGenericRegister(BaseOffset,
1650 AMDGPU::SReg_32RegClass, *MRI))
1651 return false;
1652 }
1653
1654 Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1655 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1656 .addReg(BaseOffset)
1657 .addImm(16);
1658
1659 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1660 .addReg(M0Base);
1661 }
1662
1663 // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1664 // offset field) % 64. Some versions of the programming guide omit the m0
1665 // part, or claim it's from offset 0.
1666 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1667
1668 if (HasVSrc) {
1669 Register VSrc = MI.getOperand(1).getReg();
1670 MIB.addReg(VSrc);
1671
1672 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1673 return false;
1674 }
1675
1676 MIB.addImm(ImmOffset)
1677 .cloneMemRefs(MI);
1678
1679 TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1680
1681 MI.eraseFromParent();
1682 return true;
1683 }
1684
1685 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1686 bool IsAppend) const {
1687 Register PtrBase = MI.getOperand(2).getReg();
1688 LLT PtrTy = MRI->getType(PtrBase);
1689 bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1690
1691 unsigned Offset;
1692 std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1693
1694 // TODO: Should this try to look through readfirstlane like GWS?
1695 if (!isDSOffsetLegal(PtrBase, Offset)) {
1696 PtrBase = MI.getOperand(2).getReg();
1697 Offset = 0;
1698 }
1699
1700 MachineBasicBlock *MBB = MI.getParent();
1701 const DebugLoc &DL = MI.getDebugLoc();
1702 const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1703
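  // DS_APPEND/DS_CONSUME take their base address from m0; any constant
  // component folded off above goes into the 16-bit immediate offset field.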
1704 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1705 .addReg(PtrBase);
1706 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1707 return false;
1708
1709 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1710 .addImm(Offset)
1711 .addImm(IsGDS ? -1 : 0)
1712 .cloneMemRefs(MI);
1713 MI.eraseFromParent();
1714 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1715 }
1716
1717 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
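  // If the whole workgroup fits in a single wave, the barrier is a no-op and a
  // scheduling-only WAVE_BARRIER is sufficient.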
1718 if (TM.getOptLevel() > CodeGenOpt::None) {
1719 unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1720 if (WGSize <= STI.getWavefrontSize()) {
1721 MachineBasicBlock *MBB = MI.getParent();
1722 const DebugLoc &DL = MI.getDebugLoc();
1723 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1724 MI.eraseFromParent();
1725 return true;
1726 }
1727 }
1728 return selectImpl(MI, *CoverageInfo);
1729 }
1730
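// Decode the texfailctrl immediate: bit 0 enables TFE and bit 1 enables LWE.
// Returns false if any unknown bits remain set after stripping those two.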
1731 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1732 bool &IsTexFail) {
1733 if (TexFailCtrl)
1734 IsTexFail = true;
1735
1736 TFE = (TexFailCtrl & 0x1) ? true : false;
1737 TexFailCtrl &= ~(uint64_t)0x1;
1738 LWE = (TexFailCtrl & 0x2) ? true : false;
1739 TexFailCtrl &= ~(uint64_t)0x2;
1740
1741 return TexFailCtrl == 0;
1742 }
1743
1744 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1745 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1746 MachineBasicBlock *MBB = MI.getParent();
1747 const DebugLoc &DL = MI.getDebugLoc();
1748
1749 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1750 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1751
1752 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1753 unsigned IntrOpcode = Intr->BaseOpcode;
1754 const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1755 const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1756
1757 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1758
1759 Register VDataIn, VDataOut;
1760 LLT VDataTy;
1761 int NumVDataDwords = -1;
1762 bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1763 MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1764
1765 bool Unorm;
1766 if (!BaseOpcode->Sampler)
1767 Unorm = true;
1768 else
1769 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1770
1771 bool TFE;
1772 bool LWE;
1773 bool IsTexFail = false;
1774 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1775 TFE, LWE, IsTexFail))
1776 return false;
1777
1778 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1779 const bool IsA16 = (Flags & 1) != 0;
1780 const bool IsG16 = (Flags & 2) != 0;
1781
1782   // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1783 if (IsA16 && !STI.hasG16() && !IsG16)
1784 return false;
1785
1786 unsigned DMask = 0;
1787 unsigned DMaskLanes = 0;
1788
1789 if (BaseOpcode->Atomic) {
1790 VDataOut = MI.getOperand(0).getReg();
1791 VDataIn = MI.getOperand(2).getReg();
1792 LLT Ty = MRI->getType(VDataIn);
1793
1794 // Be careful to allow atomic swap on 16-bit element vectors.
1795 const bool Is64Bit = BaseOpcode->AtomicX2 ?
1796 Ty.getSizeInBits() == 128 :
1797 Ty.getSizeInBits() == 64;
1798
1799 if (BaseOpcode->AtomicX2) {
1800 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1801
1802 DMask = Is64Bit ? 0xf : 0x3;
1803 NumVDataDwords = Is64Bit ? 4 : 2;
1804 } else {
1805 DMask = Is64Bit ? 0x3 : 0x1;
1806 NumVDataDwords = Is64Bit ? 2 : 1;
1807 }
1808 } else {
1809 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1810 DMaskLanes = BaseOpcode->Gather4 ? 4 : llvm::popcount(DMask);
1811
1812 if (BaseOpcode->Store) {
1813 VDataIn = MI.getOperand(1).getReg();
1814 VDataTy = MRI->getType(VDataIn);
1815 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1816 } else {
1817 VDataOut = MI.getOperand(0).getReg();
1818 VDataTy = MRI->getType(VDataOut);
1819 NumVDataDwords = DMaskLanes;
1820
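      // With packed D16, two 16-bit lanes share each result dword.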
1821 if (IsD16 && !STI.hasUnpackedD16VMem())
1822 NumVDataDwords = (DMaskLanes + 1) / 2;
1823 }
1824 }
1825
1826 // Set G16 opcode
1827 if (IsG16 && !IsA16) {
1828 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1829 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1830 assert(G16MappingInfo);
1831 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1832 }
1833
1834 // TODO: Check this in verifier.
1835 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1836
1837 unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1838 if (BaseOpcode->Atomic)
1839 CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1840 if (CPol & ~AMDGPU::CPol::ALL)
1841 return false;
1842
1843 int NumVAddrRegs = 0;
1844 int NumVAddrDwords = 0;
1845 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1846 // Skip the $noregs and 0s inserted during legalization.
1847 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1848 if (!AddrOp.isReg())
1849 continue; // XXX - Break?
1850
1851 Register Addr = AddrOp.getReg();
1852 if (!Addr)
1853 break;
1854
1855 ++NumVAddrRegs;
1856 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1857 }
1858
1859 // The legalizer preprocessed the intrinsic arguments. If we aren't using
1860 // NSA, these should have been packed into a single value in the first
1861   // address register.
1862 const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1863 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1864 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1865 return false;
1866 }
1867
1868 if (IsTexFail)
1869 ++NumVDataDwords;
1870
1871 int Opcode = -1;
1872 if (IsGFX11Plus) {
1873 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1874 UseNSA ? AMDGPU::MIMGEncGfx11NSA
1875 : AMDGPU::MIMGEncGfx11Default,
1876 NumVDataDwords, NumVAddrDwords);
1877 } else if (IsGFX10Plus) {
1878 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1879 UseNSA ? AMDGPU::MIMGEncGfx10NSA
1880 : AMDGPU::MIMGEncGfx10Default,
1881 NumVDataDwords, NumVAddrDwords);
1882 } else {
1883 if (Subtarget->hasGFX90AInsts()) {
1884 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1885 NumVDataDwords, NumVAddrDwords);
1886 if (Opcode == -1) {
1887 LLVM_DEBUG(
1888 dbgs()
1889 << "requested image instruction is not supported on this GPU\n");
1890 return false;
1891 }
1892 }
1893 if (Opcode == -1 &&
1894 STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1895 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1896 NumVDataDwords, NumVAddrDwords);
1897 if (Opcode == -1)
1898 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1899 NumVDataDwords, NumVAddrDwords);
1900 }
1901 assert(Opcode != -1);
1902
1903 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1904 .cloneMemRefs(MI);
1905
1906 if (VDataOut) {
1907 if (BaseOpcode->AtomicX2) {
1908 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1909
1910 Register TmpReg = MRI->createVirtualRegister(
1911 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1912 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1913
1914 MIB.addDef(TmpReg);
1915 if (!MRI->use_empty(VDataOut)) {
1916 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1917 .addReg(TmpReg, RegState::Kill, SubReg);
1918 }
1919
1920 } else {
1921 MIB.addDef(VDataOut); // vdata output
1922 }
1923 }
1924
1925 if (VDataIn)
1926 MIB.addReg(VDataIn); // vdata input
1927
1928 for (int I = 0; I != NumVAddrRegs; ++I) {
1929 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1930 if (SrcOp.isReg()) {
1931 assert(SrcOp.getReg() != 0);
1932 MIB.addReg(SrcOp.getReg());
1933 }
1934 }
1935
1936 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1937 if (BaseOpcode->Sampler)
1938 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1939
1940 MIB.addImm(DMask); // dmask
1941
1942 if (IsGFX10Plus)
1943 MIB.addImm(DimInfo->Encoding);
1944 MIB.addImm(Unorm);
1945
1946 MIB.addImm(CPol);
1947 MIB.addImm(IsA16 && // a16 or r128
1948 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1949 if (IsGFX10Plus)
1950 MIB.addImm(IsA16 ? -1 : 0);
1951
1952 if (!Subtarget->hasGFX90AInsts()) {
1953 MIB.addImm(TFE); // tfe
1954 } else if (TFE) {
1955 LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1956 return false;
1957 }
1958
1959 MIB.addImm(LWE); // lwe
1960 if (!IsGFX10Plus)
1961 MIB.addImm(DimInfo->DA ? -1 : 0);
1962 if (BaseOpcode->HasD16)
1963 MIB.addImm(IsD16 ? -1 : 0);
1964
1965 if (IsTexFail) {
1966 // An image load instruction with TFE/LWE only conditionally writes to its
1967 // result registers. Initialize them to zero so that we always get well
1968 // defined result values.
1969 assert(VDataOut && !VDataIn);
1970 Register Tied = MRI->cloneVirtualRegister(VDataOut);
1971 Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1972 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1973 .addImm(0);
1974 auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1975 if (STI.usePRTStrictNull()) {
1976 // With enable-prt-strict-null enabled, initialize all result registers to
1977 // zero.
1978 auto RegSeq =
1979 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1980 for (auto Sub : Parts)
1981 RegSeq.addReg(Zero).addImm(Sub);
1982 } else {
1983 // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1984 // result register.
1985 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1986 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1987 auto RegSeq =
1988 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1989 for (auto Sub : Parts.drop_back(1))
1990 RegSeq.addReg(Undef).addImm(Sub);
1991 RegSeq.addReg(Zero).addImm(Parts.back());
1992 }
1993 MIB.addReg(Tied, RegState::Implicit);
1994 MIB->tieOperands(0, MIB->getNumOperands() - 1);
1995 }
1996
1997 MI.eraseFromParent();
1998 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1999 TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
2000 return true;
2001 }
2002
2003 // We need to handle this here because tablegen doesn't support matching
2004 // instructions with multiple outputs.
2005 bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic(
2006 MachineInstr &MI) const {
2007 Register Dst0 = MI.getOperand(0).getReg();
2008 Register Dst1 = MI.getOperand(1).getReg();
2009
2010 const DebugLoc &DL = MI.getDebugLoc();
2011 MachineBasicBlock *MBB = MI.getParent();
2012
2013 Register Addr = MI.getOperand(3).getReg();
2014 Register Data0 = MI.getOperand(4).getReg();
2015 Register Data1 = MI.getOperand(5).getReg();
2016 unsigned Offset = MI.getOperand(6).getImm();
2017
2018 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_BVH_STACK_RTN_B32), Dst0)
2019 .addDef(Dst1)
2020 .addUse(Addr)
2021 .addUse(Data0)
2022 .addUse(Data1)
2023 .addImm(Offset)
2024 .cloneMemRefs(MI);
2025
2026 MI.eraseFromParent();
2027 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2028 }
2029
2030 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
2031 MachineInstr &I) const {
2032 unsigned IntrinsicID = I.getIntrinsicID();
2033 switch (IntrinsicID) {
2034 case Intrinsic::amdgcn_end_cf:
2035 return selectEndCfIntrinsic(I);
2036 case Intrinsic::amdgcn_ds_ordered_add:
2037 case Intrinsic::amdgcn_ds_ordered_swap:
2038 return selectDSOrderedIntrinsic(I, IntrinsicID);
2039 case Intrinsic::amdgcn_ds_gws_init:
2040 case Intrinsic::amdgcn_ds_gws_barrier:
2041 case Intrinsic::amdgcn_ds_gws_sema_v:
2042 case Intrinsic::amdgcn_ds_gws_sema_br:
2043 case Intrinsic::amdgcn_ds_gws_sema_p:
2044 case Intrinsic::amdgcn_ds_gws_sema_release_all:
2045 return selectDSGWSIntrinsic(I, IntrinsicID);
2046 case Intrinsic::amdgcn_ds_append:
2047 return selectDSAppendConsume(I, true);
2048 case Intrinsic::amdgcn_ds_consume:
2049 return selectDSAppendConsume(I, false);
2050 case Intrinsic::amdgcn_s_barrier:
2051 return selectSBarrier(I);
2052 case Intrinsic::amdgcn_raw_buffer_load_lds:
2053 case Intrinsic::amdgcn_struct_buffer_load_lds:
2054 return selectBufferLoadLds(I);
2055 case Intrinsic::amdgcn_global_load_lds:
2056 return selectGlobalLoadLds(I);
2057 case Intrinsic::amdgcn_exp_compr:
2058 if (!STI.hasCompressedExport()) {
2059 Function &F = I.getMF()->getFunction();
2060 DiagnosticInfoUnsupported NoFpRet(
2061 F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error);
2062 F.getContext().diagnose(NoFpRet);
2063 return false;
2064 }
2065 break;
2066 case Intrinsic::amdgcn_ds_bvh_stack_rtn:
2067 return selectDSBvhStackIntrinsic(I);
2068 }
2069 return selectImpl(I, *CoverageInfo);
2070 }
2071
2072 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
2073 if (selectImpl(I, *CoverageInfo))
2074 return true;
2075
2076 MachineBasicBlock *BB = I.getParent();
2077 const DebugLoc &DL = I.getDebugLoc();
2078
2079 Register DstReg = I.getOperand(0).getReg();
2080 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
2081 assert(Size <= 32 || Size == 64);
2082 const MachineOperand &CCOp = I.getOperand(1);
2083 Register CCReg = CCOp.getReg();
2084 if (!isVCC(CCReg, *MRI)) {
2085 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
2086 AMDGPU::S_CSELECT_B32;
2087 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
2088 .addReg(CCReg);
2089
2090     // The generic constrainSelectedInstRegOperands doesn't work for the scc
2091     // register bank, because it does not cover the register class we use to
2092     // represent it. So we need to manually set the register class here.
2093 if (!MRI->getRegClassOrNull(CCReg))
2094 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
2095 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
2096 .add(I.getOperand(2))
2097 .add(I.getOperand(3));
2098
2099 bool Ret = false;
2100 Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2101 Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
2102 I.eraseFromParent();
2103 return Ret;
2104 }
2105
2106 // Wide VGPR select should have been split in RegBankSelect.
2107 if (Size > 32)
2108 return false;
2109
2110 MachineInstr *Select =
2111 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
2112 .addImm(0)
2113 .add(I.getOperand(3))
2114 .addImm(0)
2115 .add(I.getOperand(2))
2116 .add(I.getOperand(1));
2117
2118 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2119 I.eraseFromParent();
2120 return Ret;
2121 }
2122
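// Map a scalar size in bits to the subregister index spanning that many low
// bits, e.g. 64 -> sub0_sub1. Sizes below 32 use sub0, other sizes round up to
// the next power of two (e.g. 48 -> sub0_sub1), and sizes above 256 return -1.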
2123 static int sizeToSubRegIndex(unsigned Size) {
2124 switch (Size) {
2125 case 32:
2126 return AMDGPU::sub0;
2127 case 64:
2128 return AMDGPU::sub0_sub1;
2129 case 96:
2130 return AMDGPU::sub0_sub1_sub2;
2131 case 128:
2132 return AMDGPU::sub0_sub1_sub2_sub3;
2133 case 256:
2134 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
2135 default:
2136 if (Size < 32)
2137 return AMDGPU::sub0;
2138 if (Size > 256)
2139 return -1;
2140 return sizeToSubRegIndex(PowerOf2Ceil(Size));
2141 }
2142 }
2143
2144 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
2145 Register DstReg = I.getOperand(0).getReg();
2146 Register SrcReg = I.getOperand(1).getReg();
2147 const LLT DstTy = MRI->getType(DstReg);
2148 const LLT SrcTy = MRI->getType(SrcReg);
2149 const LLT S1 = LLT::scalar(1);
2150
2151 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2152 const RegisterBank *DstRB;
2153 if (DstTy == S1) {
2154 // This is a special case. We don't treat s1 for legalization artifacts as
2155 // vcc booleans.
2156 DstRB = SrcRB;
2157 } else {
2158 DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2159 if (SrcRB != DstRB)
2160 return false;
2161 }
2162
2163 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2164
2165 unsigned DstSize = DstTy.getSizeInBits();
2166 unsigned SrcSize = SrcTy.getSizeInBits();
2167
2168 const TargetRegisterClass *SrcRC =
2169 TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
2170 const TargetRegisterClass *DstRC =
2171 TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
2172 if (!SrcRC || !DstRC)
2173 return false;
2174
2175 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2176 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
2177 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
2178 return false;
2179 }
2180
2181 if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
2182 MachineBasicBlock *MBB = I.getParent();
2183 const DebugLoc &DL = I.getDebugLoc();
2184
2185 Register LoReg = MRI->createVirtualRegister(DstRC);
2186 Register HiReg = MRI->createVirtualRegister(DstRC);
2187 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
2188 .addReg(SrcReg, 0, AMDGPU::sub0);
2189 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
2190 .addReg(SrcReg, 0, AMDGPU::sub1);
2191
2192 if (IsVALU && STI.hasSDWA()) {
2193 // Write the low 16-bits of the high element into the high 16-bits of the
2194 // low element.
2195 MachineInstr *MovSDWA =
2196 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2197 .addImm(0) // $src0_modifiers
2198 .addReg(HiReg) // $src0
2199 .addImm(0) // $clamp
2200 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
2201 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2202 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
2203 .addReg(LoReg, RegState::Implicit);
2204 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2205 } else {
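      // Without SDWA, pack the two halves manually: (Hi << 16) | (Lo & 0xffff).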
2206 Register TmpReg0 = MRI->createVirtualRegister(DstRC);
2207 Register TmpReg1 = MRI->createVirtualRegister(DstRC);
2208 Register ImmReg = MRI->createVirtualRegister(DstRC);
2209 if (IsVALU) {
2210 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
2211 .addImm(16)
2212 .addReg(HiReg);
2213 } else {
2214 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
2215 .addReg(HiReg)
2216 .addImm(16);
2217 }
2218
2219 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2220 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2221 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
2222
2223 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
2224 .addImm(0xffff);
2225 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
2226 .addReg(LoReg)
2227 .addReg(ImmReg);
2228 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
2229 .addReg(TmpReg0)
2230 .addReg(TmpReg1);
2231 }
2232
2233 I.eraseFromParent();
2234 return true;
2235 }
2236
2237 if (!DstTy.isScalar())
2238 return false;
2239
2240 if (SrcSize > 32) {
2241 int SubRegIdx = sizeToSubRegIndex(DstSize);
2242 if (SubRegIdx == -1)
2243 return false;
2244
2245 // Deal with weird cases where the class only partially supports the subreg
2246 // index.
2247 const TargetRegisterClass *SrcWithSubRC
2248 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2249 if (!SrcWithSubRC)
2250 return false;
2251
2252 if (SrcWithSubRC != SrcRC) {
2253 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2254 return false;
2255 }
2256
2257 I.getOperand(1).setSubReg(SubRegIdx);
2258 }
2259
2260 I.setDesc(TII.get(TargetOpcode::COPY));
2261 return true;
2262 }
2263
2264 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
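/// For example, a 4-bit mask is 0xf (15) and a 32-bit mask is -1, both inline
/// immediates, whereas a 16-bit mask (0xffff) is not.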
2265 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2266 Mask = maskTrailingOnes<unsigned>(Size);
2267 int SignedMask = static_cast<int>(Mask);
2268 return SignedMask >= -16 && SignedMask <= 64;
2269 }
2270
2271 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2272 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2273 Register Reg, const MachineRegisterInfo &MRI,
2274 const TargetRegisterInfo &TRI) const {
2275 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2276 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2277 return RB;
2278
2279 // Ignore the type, since we don't use vcc in artifacts.
2280 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2281 return &RBI.getRegBankFromRegClass(*RC, LLT());
2282 return nullptr;
2283 }
2284
2285 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2286 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2287 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2288 const DebugLoc &DL = I.getDebugLoc();
2289 MachineBasicBlock &MBB = *I.getParent();
2290 const Register DstReg = I.getOperand(0).getReg();
2291 const Register SrcReg = I.getOperand(1).getReg();
2292
2293 const LLT DstTy = MRI->getType(DstReg);
2294 const LLT SrcTy = MRI->getType(SrcReg);
2295 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2296 I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2297 const unsigned DstSize = DstTy.getSizeInBits();
2298 if (!DstTy.isScalar())
2299 return false;
2300
2301 // Artifact casts should never use vcc.
2302 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2303
2304 // FIXME: This should probably be illegal and split earlier.
2305 if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2306 if (DstSize <= 32)
2307 return selectCOPY(I);
2308
2309 const TargetRegisterClass *SrcRC =
2310 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2311 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2312 const TargetRegisterClass *DstRC =
2313 TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2314
2315 Register UndefReg = MRI->createVirtualRegister(SrcRC);
2316 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2317 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2318 .addReg(SrcReg)
2319 .addImm(AMDGPU::sub0)
2320 .addReg(UndefReg)
2321 .addImm(AMDGPU::sub1);
2322 I.eraseFromParent();
2323
2324 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2325 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2326 }
2327
2328 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2329 // 64-bit should have been split up in RegBankSelect
2330
2331 // Try to use an and with a mask if it will save code size.
2332 unsigned Mask;
2333 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2334 MachineInstr *ExtI =
2335 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2336 .addImm(Mask)
2337 .addReg(SrcReg);
2338 I.eraseFromParent();
2339 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2340 }
2341
2342 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2343 MachineInstr *ExtI =
2344 BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2345 .addReg(SrcReg)
2346 .addImm(0) // Offset
2347 .addImm(SrcSize); // Width
2348 I.eraseFromParent();
2349 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2350 }
2351
2352 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2353 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2354 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2355 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2356 return false;
2357
2358 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2359 const unsigned SextOpc = SrcSize == 8 ?
2360 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2361 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2362 .addReg(SrcReg);
2363 I.eraseFromParent();
2364 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2365 }
2366
2367 // Using a single 32-bit SALU to calculate the high half is smaller than
2368 // S_BFE with a literal constant operand.
2369 if (DstSize > 32 && SrcSize == 32) {
2370 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2371 unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2372 if (Signed) {
2373 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_ASHR_I32), HiReg)
2374 .addReg(SrcReg, 0, SubReg)
2375 .addImm(31);
2376 } else {
2377 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg)
2378 .addImm(0);
2379 }
2380 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2381 .addReg(SrcReg, 0, SubReg)
2382 .addImm(AMDGPU::sub0)
2383 .addReg(HiReg)
2384 .addImm(AMDGPU::sub1);
2385 I.eraseFromParent();
2386 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass,
2387 *MRI);
2388 }
2389
2390 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2391 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2392
2393 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2394 if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2395 // We need a 64-bit register source, but the high bits don't matter.
2396 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2397 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2398 unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2399
2400 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2401 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2402 .addReg(SrcReg, 0, SubReg)
2403 .addImm(AMDGPU::sub0)
2404 .addReg(UndefReg)
2405 .addImm(AMDGPU::sub1);
2406
2407 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2408 .addReg(ExtReg)
2409 .addImm(SrcSize << 16);
2410
2411 I.eraseFromParent();
2412 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2413 }
2414
2415 unsigned Mask;
2416 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2417 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2418 .addReg(SrcReg)
2419 .addImm(Mask);
2420 } else {
2421 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2422 .addReg(SrcReg)
2423 .addImm(SrcSize << 16);
2424 }
2425
2426 I.eraseFromParent();
2427 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2428 }
2429
2430 return false;
2431 }
2432
2433 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2434 MachineBasicBlock *BB = I.getParent();
2435 MachineOperand &ImmOp = I.getOperand(1);
2436 Register DstReg = I.getOperand(0).getReg();
2437 unsigned Size = MRI->getType(DstReg).getSizeInBits();
2438
2439 // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2440 if (ImmOp.isFPImm()) {
2441 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2442 ImmOp.ChangeToImmediate(Imm.getZExtValue());
2443 } else if (ImmOp.isCImm()) {
2444 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2445 } else {
2446 llvm_unreachable("Not supported by g_constants");
2447 }
2448
2449 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2450 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2451
2452 unsigned Opcode;
2453 if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2454 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2455 } else {
2456 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2457
2458 // We should never produce s1 values on banks other than VCC. If the user of
2459 // this already constrained the register, we may incorrectly think it's VCC
2460 // if it wasn't originally.
2461 if (Size == 1)
2462 return false;
2463 }
2464
2465 if (Size != 64) {
2466 I.setDesc(TII.get(Opcode));
2467 I.addImplicitDefUseOperands(*MF);
2468 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2469 }
2470
2471 const DebugLoc &DL = I.getDebugLoc();
2472
2473 APInt Imm(Size, I.getOperand(1).getImm());
2474
2475 MachineInstr *ResInst;
2476 if (IsSgpr && TII.isInlineConstant(Imm)) {
2477 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2478 .addImm(I.getOperand(1).getImm());
2479 } else {
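    // Materialize the 64-bit immediate as separate low and high 32-bit moves,
    // then recombine them with a REG_SEQUENCE.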
2480 const TargetRegisterClass *RC = IsSgpr ?
2481 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2482 Register LoReg = MRI->createVirtualRegister(RC);
2483 Register HiReg = MRI->createVirtualRegister(RC);
2484
2485 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2486 .addImm(Imm.trunc(32).getZExtValue());
2487
2488 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2489 .addImm(Imm.ashr(32).getZExtValue());
2490
2491 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2492 .addReg(LoReg)
2493 .addImm(AMDGPU::sub0)
2494 .addReg(HiReg)
2495 .addImm(AMDGPU::sub1);
2496 }
2497
2498 // We can't call constrainSelectedInstRegOperands here, because it doesn't
2499 // work for target independent opcodes
2500 I.eraseFromParent();
2501 const TargetRegisterClass *DstRC =
2502 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2503 if (!DstRC)
2504 return true;
2505 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2506 }
2507
2508 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2509 // Only manually handle the f64 SGPR case.
2510 //
2511 // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2512 // the bit ops theoretically have a second result due to the implicit def of
2513 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2514 // that is easy by disabling the check. The result works, but uses a
2515 // nonsensical sreg32orlds_and_sreg_1 regclass.
2516 //
2517 // The DAG emitter is more problematic, and incorrectly adds both results of
2518 // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2519
2520 Register Dst = MI.getOperand(0).getReg();
2521 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2522 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2523 MRI->getType(Dst) != LLT::scalar(64))
2524 return false;
2525
2526 Register Src = MI.getOperand(1).getReg();
2527 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2528 if (Fabs)
2529 Src = Fabs->getOperand(1).getReg();
2530
2531 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2532 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2533 return false;
2534
2535 MachineBasicBlock *BB = MI.getParent();
2536 const DebugLoc &DL = MI.getDebugLoc();
2537 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2538 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2539 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2540 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2541
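  // Only the sign bit in the high half changes: split the 64-bit value, flip
  // (or set, when an fabs is folded in) bit 31 of the high word, and
  // reassemble with a REG_SEQUENCE.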
2542 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2543 .addReg(Src, 0, AMDGPU::sub0);
2544 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2545 .addReg(Src, 0, AMDGPU::sub1);
2546 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2547 .addImm(0x80000000);
2548
2549 // Set or toggle sign bit.
2550 unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2551 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2552 .addReg(HiReg)
2553 .addReg(ConstReg);
2554 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2555 .addReg(LoReg)
2556 .addImm(AMDGPU::sub0)
2557 .addReg(OpReg)
2558 .addImm(AMDGPU::sub1);
2559 MI.eraseFromParent();
2560 return true;
2561 }
2562
2563 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2564 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2565 Register Dst = MI.getOperand(0).getReg();
2566 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2567 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2568 MRI->getType(Dst) != LLT::scalar(64))
2569 return false;
2570
2571 Register Src = MI.getOperand(1).getReg();
2572 MachineBasicBlock *BB = MI.getParent();
2573 const DebugLoc &DL = MI.getDebugLoc();
2574 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2575 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2576 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2577 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2578
2579 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2580 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2581 return false;
2582
2583 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2584 .addReg(Src, 0, AMDGPU::sub0);
2585 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2586 .addReg(Src, 0, AMDGPU::sub1);
2587 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2588 .addImm(0x7fffffff);
2589
2590 // Clear sign bit.
2591   // TODO: Should this use S_BITSET0_*?
2592 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2593 .addReg(HiReg)
2594 .addReg(ConstReg);
2595 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2596 .addReg(LoReg)
2597 .addImm(AMDGPU::sub0)
2598 .addReg(OpReg)
2599 .addImm(AMDGPU::sub1);
2600
2601 MI.eraseFromParent();
2602 return true;
2603 }
2604
2605 static bool isConstant(const MachineInstr &MI) {
2606 return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2607 }
2608
2609 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2610 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2611
2612 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2613
2614 assert(PtrMI);
2615
2616 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2617 return;
2618
2619 GEPInfo GEPInfo;
2620
2621 for (unsigned i = 1; i != 3; ++i) {
2622 const MachineOperand &GEPOp = PtrMI->getOperand(i);
2623 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2624 assert(OpDef);
2625 if (i == 2 && isConstant(*OpDef)) {
2626 // TODO: Could handle constant base + variable offset, but a combine
2627 // probably should have commuted it.
2628 assert(GEPInfo.Imm == 0);
2629 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2630 continue;
2631 }
2632 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2633 if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2634 GEPInfo.SgprParts.push_back(GEPOp.getReg());
2635 else
2636 GEPInfo.VgprParts.push_back(GEPOp.getReg());
2637 }
2638
2639 AddrInfo.push_back(GEPInfo);
2640 getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2641 }
2642
2643 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2644 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2645 }
2646
2647 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2648 if (!MI.hasOneMemOperand())
2649 return false;
2650
2651 const MachineMemOperand *MMO = *MI.memoperands_begin();
2652 const Value *Ptr = MMO->getValue();
2653
2654 // UndefValue means this is a load of a kernel input. These are uniform.
2655 // Sometimes LDS instructions have constant pointers.
2656 // If Ptr is null, then that means this mem operand contains a
2657 // PseudoSourceValue like GOT.
2658 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2659 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2660 return true;
2661
2662 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2663 return true;
2664
2665 const Instruction *I = dyn_cast<Instruction>(Ptr);
2666 return I && I->getMetadata("amdgpu.uniform");
2667 }
2668
2669 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2670 for (const GEPInfo &GEPInfo : AddrInfo) {
2671 if (!GEPInfo.VgprParts.empty())
2672 return true;
2673 }
2674 return false;
2675 }
2676
2677 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2678 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2679 unsigned AS = PtrTy.getAddressSpace();
2680 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2681 STI.ldsRequiresM0Init()) {
2682 MachineBasicBlock *BB = I.getParent();
2683
2684 // If DS instructions require M0 initialization, insert it before selecting.
2685 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2686 .addImm(-1);
2687 }
2688 }
2689
2690 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2691 MachineInstr &I) const {
2692 initM0(I);
2693 return selectImpl(I, *CoverageInfo);
2694 }
2695
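// Return true if \p Reg is known to be the result of a VALU compare
// (G_ICMP/G_FCMP, llvm.amdgcn.class, or a bitwise combination of such values),
// so selectG_BRCOND can skip inserting an AND with exec.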
2696 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2697 if (Reg.isPhysical())
2698 return false;
2699
2700 MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2701 const unsigned Opcode = MI.getOpcode();
2702
2703 if (Opcode == AMDGPU::COPY)
2704 return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2705
2706 if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2707 Opcode == AMDGPU::G_XOR)
2708 return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2709 isVCmpResult(MI.getOperand(2).getReg(), MRI);
2710
2711 if (Opcode == TargetOpcode::G_INTRINSIC)
2712 return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2713
2714 return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2715 }
2716
2717 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2718 MachineBasicBlock *BB = I.getParent();
2719 MachineOperand &CondOp = I.getOperand(0);
2720 Register CondReg = CondOp.getReg();
2721 const DebugLoc &DL = I.getDebugLoc();
2722
2723 unsigned BrOpcode;
2724 Register CondPhysReg;
2725 const TargetRegisterClass *ConstrainRC;
2726
2727 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2728 // whether the branch is uniform when selecting the instruction. In
2729 // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2730 // RegBankSelect knows what it's doing if the branch condition is scc, even
2731 // though it currently does not.
2732 if (!isVCC(CondReg, *MRI)) {
2733 if (MRI->getType(CondReg) != LLT::scalar(32))
2734 return false;
2735
2736 CondPhysReg = AMDGPU::SCC;
2737 BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2738 ConstrainRC = &AMDGPU::SReg_32RegClass;
2739 } else {
2740     // FIXME: Should scc->vcc copies be ANDed with exec?
2741
2742 // Unless the value of CondReg is a result of a V_CMP* instruction then we
2743 // need to insert an and with exec.
2744 if (!isVCmpResult(CondReg, *MRI)) {
2745 const bool Is64 = STI.isWave64();
2746 const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2747 const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2748
2749 Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2750 BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2751 .addReg(CondReg)
2752 .addReg(Exec);
2753 CondReg = TmpReg;
2754 }
2755
2756 CondPhysReg = TRI.getVCC();
2757 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2758 ConstrainRC = TRI.getBoolRC();
2759 }
2760
2761 if (!MRI->getRegClassOrNull(CondReg))
2762 MRI->setRegClass(CondReg, ConstrainRC);
2763
2764 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2765 .addReg(CondReg);
2766 BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2767 .addMBB(I.getOperand(1).getMBB());
2768
2769 I.eraseFromParent();
2770 return true;
2771 }
2772
2773 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2774 MachineInstr &I) const {
2775 Register DstReg = I.getOperand(0).getReg();
2776 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2777 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2778 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2779 if (IsVGPR)
2780 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2781
2782 return RBI.constrainGenericRegister(
2783 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2784 }
2785
2786 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2787 Register DstReg = I.getOperand(0).getReg();
2788 Register SrcReg = I.getOperand(1).getReg();
2789 Register MaskReg = I.getOperand(2).getReg();
2790 LLT Ty = MRI->getType(DstReg);
2791 LLT MaskTy = MRI->getType(MaskReg);
2792 MachineBasicBlock *BB = I.getParent();
2793 const DebugLoc &DL = I.getDebugLoc();
2794
2795 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2796 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2797 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2798 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2799 if (DstRB != SrcRB) // Should only happen for hand written MIR.
2800 return false;
2801
2802 // Try to avoid emitting a bit operation when we only need to touch half of
2803 // the 64-bit pointer.
2804 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2805 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2806 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2807
2808 const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2809 const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2810
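  // For example, a mask that only clears low alignment bits has an all-ones
  // high half, so CanCopyHi32 holds and the high word is simply copied while
  // only the low word needs a real AND.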
2811 if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2812 !CanCopyLow32 && !CanCopyHi32) {
2813 auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2814 .addReg(SrcReg)
2815 .addReg(MaskReg);
2816 I.eraseFromParent();
2817 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2818 }
2819
2820 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2821 const TargetRegisterClass &RegRC
2822 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2823
2824 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2825 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2826 const TargetRegisterClass *MaskRC =
2827 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2828
2829 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2830 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2831 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2832 return false;
2833
2834 if (Ty.getSizeInBits() == 32) {
2835 assert(MaskTy.getSizeInBits() == 32 &&
2836 "ptrmask should have been narrowed during legalize");
2837
2838 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2839 .addReg(SrcReg)
2840 .addReg(MaskReg);
2841 I.eraseFromParent();
2842 return true;
2843 }
2844
2845 Register HiReg = MRI->createVirtualRegister(&RegRC);
2846 Register LoReg = MRI->createVirtualRegister(&RegRC);
2847
2848 // Extract the subregisters from the source pointer.
2849 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2850 .addReg(SrcReg, 0, AMDGPU::sub0);
2851 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2852 .addReg(SrcReg, 0, AMDGPU::sub1);
2853
2854 Register MaskedLo, MaskedHi;
2855
2856 if (CanCopyLow32) {
2857 // If all the bits in the low half are 1, we only need a copy for it.
2858 MaskedLo = LoReg;
2859 } else {
2860 // Extract the mask subregister and apply the and.
2861 Register MaskLo = MRI->createVirtualRegister(&RegRC);
2862 MaskedLo = MRI->createVirtualRegister(&RegRC);
2863
2864 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2865 .addReg(MaskReg, 0, AMDGPU::sub0);
2866 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2867 .addReg(LoReg)
2868 .addReg(MaskLo);
2869 }
2870
2871 if (CanCopyHi32) {
2872 // If all the bits in the high half are 1, we only need a copy for it.
2873 MaskedHi = HiReg;
2874 } else {
2875 Register MaskHi = MRI->createVirtualRegister(&RegRC);
2876 MaskedHi = MRI->createVirtualRegister(&RegRC);
2877
2878 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2879 .addReg(MaskReg, 0, AMDGPU::sub1);
2880 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2881 .addReg(HiReg)
2882 .addReg(MaskHi);
2883 }
2884
2885 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2886 .addReg(MaskedLo)
2887 .addImm(AMDGPU::sub0)
2888 .addReg(MaskedHi)
2889 .addImm(AMDGPU::sub1);
2890 I.eraseFromParent();
2891 return true;
2892 }
2893
2894 /// Return the register to use for the index value, and the subregister to use
2895 /// for the indirectly accessed register.
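/// For example, with 32-bit elements and IdxReg == Base + 3 this returns
/// {Base, sub3}; a constant offset past the end of the register falls back to
/// {IdxReg, first subregister} to avoid using an undefined register.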
2896 static std::pair<Register, unsigned>
2897 computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI,
2898 const TargetRegisterClass *SuperRC, Register IdxReg,
2899 unsigned EltSize, GISelKnownBits &KnownBits) {
2900 Register IdxBaseReg;
2901 int Offset;
2902
2903 std::tie(IdxBaseReg, Offset) =
2904 AMDGPU::getBaseWithConstantOffset(MRI, IdxReg, &KnownBits);
2905 if (IdxBaseReg == AMDGPU::NoRegister) {
2906 // This will happen if the index is a known constant. This should ordinarily
2907 // be legalized out, but handle it as a register just in case.
2908 assert(Offset == 0);
2909 IdxBaseReg = IdxReg;
2910 }
2911
2912 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2913
2914 // Skip out of bounds offsets, or else we would end up using an undefined
2915 // register.
2916 if (static_cast<unsigned>(Offset) >= SubRegs.size())
2917 return std::pair(IdxReg, SubRegs[0]);
2918 return std::pair(IdxBaseReg, SubRegs[Offset]);
2919 }
2920
2921 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2922 MachineInstr &MI) const {
2923 Register DstReg = MI.getOperand(0).getReg();
2924 Register SrcReg = MI.getOperand(1).getReg();
2925 Register IdxReg = MI.getOperand(2).getReg();
2926
2927 LLT DstTy = MRI->getType(DstReg);
2928 LLT SrcTy = MRI->getType(SrcReg);
2929
2930 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2931 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2932 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2933
2934   // The index must be scalar. If it wasn't, RegBankSelect should have moved this
2935 // into a waterfall loop.
2936 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2937 return false;
2938
2939 const TargetRegisterClass *SrcRC =
2940 TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2941 const TargetRegisterClass *DstRC =
2942 TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2943 if (!SrcRC || !DstRC)
2944 return false;
2945 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2946 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2947 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2948 return false;
2949
2950 MachineBasicBlock *BB = MI.getParent();
2951 const DebugLoc &DL = MI.getDebugLoc();
2952 const bool Is64 = DstTy.getSizeInBits() == 64;
2953
2954 unsigned SubReg;
2955 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(
2956 *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KnownBits);
2957
2958 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2959 if (DstTy.getSizeInBits() != 32 && !Is64)
2960 return false;
2961
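    // For an SGPR source the dynamic index is taken from m0; S_MOVRELS reads
    // the source tuple at (SubReg + m0).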
2962 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2963 .addReg(IdxReg);
2964
2965 unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2966 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2967 .addReg(SrcReg, 0, SubReg)
2968 .addReg(SrcReg, RegState::Implicit);
2969 MI.eraseFromParent();
2970 return true;
2971 }
2972
2973 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2974 return false;
2975
2976 if (!STI.useVGPRIndexMode()) {
2977 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2978 .addReg(IdxReg);
2979 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2980 .addReg(SrcReg, 0, SubReg)
2981 .addReg(SrcReg, RegState::Implicit);
2982 MI.eraseFromParent();
2983 return true;
2984 }
2985
2986 const MCInstrDesc &GPRIDXDesc =
2987 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2988 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2989 .addReg(SrcReg)
2990 .addReg(IdxReg)
2991 .addImm(SubReg);
2992
2993 MI.eraseFromParent();
2994 return true;
2995 }
2996
2997 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2998 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2999 MachineInstr &MI) const {
3000 Register DstReg = MI.getOperand(0).getReg();
3001 Register VecReg = MI.getOperand(1).getReg();
3002 Register ValReg = MI.getOperand(2).getReg();
3003 Register IdxReg = MI.getOperand(3).getReg();
3004
3005 LLT VecTy = MRI->getType(DstReg);
3006 LLT ValTy = MRI->getType(ValReg);
3007 unsigned VecSize = VecTy.getSizeInBits();
3008 unsigned ValSize = ValTy.getSizeInBits();
3009
3010 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
3011 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
3012 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
3013
3014 assert(VecTy.getElementType() == ValTy);
3015
3016   // The index must be scalar. If it wasn't, RegBankSelect should have moved this
3017 // into a waterfall loop.
3018 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
3019 return false;
3020
3021 const TargetRegisterClass *VecRC =
3022 TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
3023 const TargetRegisterClass *ValRC =
3024 TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
3025
3026 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
3027 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
3028 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
3029 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
3030 return false;
3031
3032 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
3033 return false;
3034
3035 unsigned SubReg;
3036 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
3037 ValSize / 8, *KnownBits);
3038
3039 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
3040 STI.useVGPRIndexMode();
3041
3042 MachineBasicBlock *BB = MI.getParent();
3043 const DebugLoc &DL = MI.getDebugLoc();
3044
3045 if (!IndexMode) {
3046 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3047 .addReg(IdxReg);
3048
3049 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
3050 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
3051 BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
3052 .addReg(VecReg)
3053 .addReg(ValReg)
3054 .addImm(SubReg);
3055 MI.eraseFromParent();
3056 return true;
3057 }
3058
3059 const MCInstrDesc &GPRIDXDesc =
3060 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
3061 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
3062 .addReg(VecReg)
3063 .addReg(ValReg)
3064 .addReg(IdxReg)
3065 .addImm(SubReg);
3066
3067 MI.eraseFromParent();
3068 return true;
3069 }
3070
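// Select the raw/struct buffer load-to-LDS intrinsics. The LDS pointer
// operand is copied into M0, the BOTHEN/IDXEN/OFFEN/OFFSET opcode variant is
// chosen from whether a vindex and/or non-zero voffset is present, and the
// result carries both a load memory operand (for the buffer read) and an LDS
// store memory operand.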
3071 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3072 unsigned Opc;
3073 unsigned Size = MI.getOperand(3).getImm();
3074
3075 // The struct intrinsic variants add one additional operand over raw.
3076 const bool HasVIndex = MI.getNumOperands() == 9;
3077 Register VIndex;
3078 int OpOffset = 0;
3079 if (HasVIndex) {
3080 VIndex = MI.getOperand(4).getReg();
3081 OpOffset = 1;
3082 }
3083
3084 Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3085 std::optional<ValueAndVReg> MaybeVOffset =
3086 getIConstantVRegValWithLookThrough(VOffset, *MRI);
3087 const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3088
3089 switch (Size) {
3090 default:
3091 return false;
3092 case 1:
3093 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3094 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3095 : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3096 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3097 break;
3098 case 2:
3099 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3100 : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3101 : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3102 : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3103 break;
3104 case 4:
3105 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3106 : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3107 : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3108 : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3109 break;
3110 }
3111
3112 MachineBasicBlock *MBB = MI.getParent();
3113 const DebugLoc &DL = MI.getDebugLoc();
3114 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3115 .add(MI.getOperand(2));
3116
3117 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3118
3119 if (HasVIndex && HasVOffset) {
3120 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3121 BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3122 .addReg(VIndex)
3123 .addImm(AMDGPU::sub0)
3124 .addReg(VOffset)
3125 .addImm(AMDGPU::sub1);
3126
3127 MIB.addReg(IdxReg);
3128 } else if (HasVIndex) {
3129 MIB.addReg(VIndex);
3130 } else if (HasVOffset) {
3131 MIB.addReg(VOffset);
3132 }
3133
3134 MIB.add(MI.getOperand(1)); // rsrc
3135 MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3136 MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3137 unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3138 MIB.addImm(Aux & AMDGPU::CPol::ALL); // cpol
3139 MIB.addImm((Aux >> 3) & 1); // swz
3140
3141 MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3142 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3143 LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3144 MachinePointerInfo StorePtrI = LoadPtrI;
3145 StorePtrI.V = nullptr;
3146 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3147
3148 auto F = LoadMMO->getFlags() &
3149 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3150 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3151 Size, LoadMMO->getBaseAlign());
3152
3153 MachineMemOperand *StoreMMO =
3154 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3155 sizeof(int32_t), LoadMMO->getBaseAlign());
3156
3157 MIB.setMemRefs({LoadMMO, StoreMMO});
3158
3159 MI.eraseFromParent();
3160 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3161 }
3162
3163 /// Match a zero extend from a 32-bit value to 64-bits.
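/// Recognizes either a direct G_ZEXT of an s32 value or the legalized form
/// G_MERGE_VALUES (s32 %x), (s32 0). Roughly, in MIR (a sketch; exact syntax
/// may differ):
///   %z:_(s64) = G_ZEXT %x:_(s32)
///   %z:_(s64) = G_MERGE_VALUES %x:_(s32), %zero:_(s32)
/// Returns the 32-bit source register, or an invalid Register on failure.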
3164 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3165 Register ZExtSrc;
3166 if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3167 return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3168
3169 // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3170 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3171 if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3172 return Register();
3173
3174 assert(Def->getNumOperands() == 3 &&
3175 MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64));
3176 if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3177 return Def->getOperand(1).getReg();
3178 }
3179
3180 return Register();
3181 }
3182
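// Select the global load-to-LDS intrinsic. The LDS pointer operand is copied
// into M0. If the address can be split into an SGPR base plus a zero-extended
// 32-bit offset, the SADDR form is used with that offset (or a materialized
// zero) as voffset.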
3183 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
3184 unsigned Opc;
3185 unsigned Size = MI.getOperand(3).getImm();
3186
3187 switch (Size) {
3188 default:
3189 return false;
3190 case 1:
3191 Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3192 break;
3193 case 2:
3194 Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3195 break;
3196 case 4:
3197 Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3198 break;
3199 }
3200
3201 MachineBasicBlock *MBB = MI.getParent();
3202 const DebugLoc &DL = MI.getDebugLoc();
3203 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3204 .add(MI.getOperand(2));
3205
3206 Register Addr = MI.getOperand(1).getReg();
3207 Register VOffset;
3208 // Try to split SAddr and VOffset. Global and LDS pointers share the same
3209 // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3210 if (!isSGPR(Addr)) {
3211 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3212 if (isSGPR(AddrDef->Reg)) {
3213 Addr = AddrDef->Reg;
3214 } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3215 Register SAddr =
3216 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3217 if (isSGPR(SAddr)) {
3218 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3219 if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3220 Addr = SAddr;
3221 VOffset = Off;
3222 }
3223 }
3224 }
3225 }
3226
3227 if (isSGPR(Addr)) {
3228 Opc = AMDGPU::getGlobalSaddrOp(Opc);
3229 if (!VOffset) {
3230 VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3231 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3232 .addImm(0);
3233 }
3234 }
3235
3236 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3237 .addReg(Addr);
3238
3239 if (isSGPR(Addr))
3240 MIB.addReg(VOffset);
3241
3242 MIB.add(MI.getOperand(4)) // offset
3243 .add(MI.getOperand(5)); // cpol
3244
3245 MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3246 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3247 LoadPtrI.Offset = MI.getOperand(4).getImm();
3248 MachinePointerInfo StorePtrI = LoadPtrI;
3249 LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3250 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3251 auto F = LoadMMO->getFlags() &
3252 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3253 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3254 Size, LoadMMO->getBaseAlign());
3255 MachineMemOperand *StoreMMO =
3256 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3257 sizeof(int32_t), Align(4));
3258
3259 MIB.setMemRefs({LoadMMO, StoreMMO});
3260
3261 MI.eraseFromParent();
3262 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3263 }
3264
3265 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3266 MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3267 MI.removeOperand(1);
3268 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3269 return true;
3270 }
3271
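// Select the sparse MFMA (smfmac) intrinsics: map the intrinsic ID to the
// corresponding V_SMFMAC_*_e64 opcode and move the tied VDst_In operand to
// the end of the operand list.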
3272 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3273 unsigned Opc;
3274 switch (MI.getIntrinsicID()) {
3275 case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3276 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3277 break;
3278 case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3279 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3280 break;
3281 case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3282 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3283 break;
3284 case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3285 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3286 break;
3287 case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3288 Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3289 break;
3290 case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3291 Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3292 break;
3293 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
3294 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_BF8_e64;
3295 break;
3296 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
3297 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_FP8_e64;
3298 break;
3299 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
3300 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_BF8_e64;
3301 break;
3302 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
3303 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_FP8_e64;
3304 break;
3305 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
3306 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_BF8_e64;
3307 break;
3308 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
3309 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_FP8_e64;
3310 break;
3311 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
3312 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_BF8_e64;
3313 break;
3314 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
3315 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_FP8_e64;
3316 break;
3317 default:
3318 llvm_unreachable("unhandled smfmac intrinsic");
3319 }
3320
3321 auto VDst_In = MI.getOperand(4);
3322
3323 MI.setDesc(TII.get(Opc));
3324 MI.removeOperand(4); // VDst_In
3325 MI.removeOperand(1); // Intrinsic ID
3326 MI.addOperand(VDst_In); // Readd VDst_In to the end
3327 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3328 return true;
3329 }
3330
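// Lower G_AMDGPU_WAVE_ADDRESS by shifting the source right by
// log2(wavefront size), using a VALU or SALU shift depending on the
// destination register bank.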
3331 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3332 Register DstReg = MI.getOperand(0).getReg();
3333 Register SrcReg = MI.getOperand(1).getReg();
3334 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3335 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3336 MachineBasicBlock *MBB = MI.getParent();
3337 const DebugLoc &DL = MI.getDebugLoc();
3338
3339 if (IsVALU) {
3340 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3341 .addImm(Subtarget->getWavefrontSizeLog2())
3342 .addReg(SrcReg);
3343 } else {
3344 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3345 .addReg(SrcReg)
3346 .addImm(Subtarget->getWavefrontSizeLog2());
3347 }
3348
3349 const TargetRegisterClass &RC =
3350 IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3351 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3352 return false;
3353
3354 MI.eraseFromParent();
3355 return true;
3356 }
3357
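// Main selection entry point: dispatch on the generic opcode, trying the
// TableGen-erated selectImpl() where appropriate and otherwise falling back
// to the hand-written selectors above.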
3358 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3359 if (I.isPHI())
3360 return selectPHI(I);
3361
3362 if (!I.isPreISelOpcode()) {
3363 if (I.isCopy())
3364 return selectCOPY(I);
3365 return true;
3366 }
3367
3368 switch (I.getOpcode()) {
3369 case TargetOpcode::G_AND:
3370 case TargetOpcode::G_OR:
3371 case TargetOpcode::G_XOR:
3372 if (selectImpl(I, *CoverageInfo))
3373 return true;
3374 return selectG_AND_OR_XOR(I);
3375 case TargetOpcode::G_ADD:
3376 case TargetOpcode::G_SUB:
3377 if (selectImpl(I, *CoverageInfo))
3378 return true;
3379 return selectG_ADD_SUB(I);
3380 case TargetOpcode::G_UADDO:
3381 case TargetOpcode::G_USUBO:
3382 case TargetOpcode::G_UADDE:
3383 case TargetOpcode::G_USUBE:
3384 return selectG_UADDO_USUBO_UADDE_USUBE(I);
3385 case AMDGPU::G_AMDGPU_MAD_U64_U32:
3386 case AMDGPU::G_AMDGPU_MAD_I64_I32:
3387 return selectG_AMDGPU_MAD_64_32(I);
3388 case TargetOpcode::G_INTTOPTR:
3389 case TargetOpcode::G_BITCAST:
3390 case TargetOpcode::G_PTRTOINT:
3391 return selectCOPY(I);
3392 case TargetOpcode::G_CONSTANT:
3393 case TargetOpcode::G_FCONSTANT:
3394 return selectG_CONSTANT(I);
3395 case TargetOpcode::G_FNEG:
3396 if (selectImpl(I, *CoverageInfo))
3397 return true;
3398 return selectG_FNEG(I);
3399 case TargetOpcode::G_FABS:
3400 if (selectImpl(I, *CoverageInfo))
3401 return true;
3402 return selectG_FABS(I);
3403 case TargetOpcode::G_EXTRACT:
3404 return selectG_EXTRACT(I);
3405 case TargetOpcode::G_FMA:
3406 case TargetOpcode::G_FMAD:
3407 if (selectG_FMA_FMAD(I))
3408 return true;
3409 return selectImpl(I, *CoverageInfo);
3410 case TargetOpcode::G_MERGE_VALUES:
3411 case TargetOpcode::G_CONCAT_VECTORS:
3412 return selectG_MERGE_VALUES(I);
3413 case TargetOpcode::G_UNMERGE_VALUES:
3414 return selectG_UNMERGE_VALUES(I);
3415 case TargetOpcode::G_BUILD_VECTOR:
3416 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3417 return selectG_BUILD_VECTOR(I);
3418 case TargetOpcode::G_PTR_ADD:
3419 if (selectImpl(I, *CoverageInfo))
3420 return true;
3421 return selectG_PTR_ADD(I);
3422 case TargetOpcode::G_IMPLICIT_DEF:
3423 return selectG_IMPLICIT_DEF(I);
3424 case TargetOpcode::G_FREEZE:
3425 return selectCOPY(I);
3426 case TargetOpcode::G_INSERT:
3427 return selectG_INSERT(I);
3428 case TargetOpcode::G_INTRINSIC:
3429 return selectG_INTRINSIC(I);
3430 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3431 return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3432 case TargetOpcode::G_ICMP:
3433 if (selectG_ICMP(I))
3434 return true;
3435 return selectImpl(I, *CoverageInfo);
3436 case TargetOpcode::G_LOAD:
3437 case TargetOpcode::G_STORE:
3438 case TargetOpcode::G_ATOMIC_CMPXCHG:
3439 case TargetOpcode::G_ATOMICRMW_XCHG:
3440 case TargetOpcode::G_ATOMICRMW_ADD:
3441 case TargetOpcode::G_ATOMICRMW_SUB:
3442 case TargetOpcode::G_ATOMICRMW_AND:
3443 case TargetOpcode::G_ATOMICRMW_OR:
3444 case TargetOpcode::G_ATOMICRMW_XOR:
3445 case TargetOpcode::G_ATOMICRMW_MIN:
3446 case TargetOpcode::G_ATOMICRMW_MAX:
3447 case TargetOpcode::G_ATOMICRMW_UMIN:
3448 case TargetOpcode::G_ATOMICRMW_UMAX:
3449 case TargetOpcode::G_ATOMICRMW_FADD:
3450 case AMDGPU::G_AMDGPU_ATOMIC_INC:
3451 case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3452 case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3453 case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3454 return selectG_LOAD_STORE_ATOMICRMW(I);
3455 case TargetOpcode::G_SELECT:
3456 return selectG_SELECT(I);
3457 case TargetOpcode::G_TRUNC:
3458 return selectG_TRUNC(I);
3459 case TargetOpcode::G_SEXT:
3460 case TargetOpcode::G_ZEXT:
3461 case TargetOpcode::G_ANYEXT:
3462 case TargetOpcode::G_SEXT_INREG:
3463 if (selectImpl(I, *CoverageInfo))
3464 return true;
3465 return selectG_SZA_EXT(I);
3466 case TargetOpcode::G_BRCOND:
3467 return selectG_BRCOND(I);
3468 case TargetOpcode::G_GLOBAL_VALUE:
3469 return selectG_GLOBAL_VALUE(I);
3470 case TargetOpcode::G_PTRMASK:
3471 return selectG_PTRMASK(I);
3472 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3473 return selectG_EXTRACT_VECTOR_ELT(I);
3474 case TargetOpcode::G_INSERT_VECTOR_ELT:
3475 return selectG_INSERT_VECTOR_ELT(I);
3476 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3477 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3478 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3479 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3480 const AMDGPU::ImageDimIntrinsicInfo *Intr
3481 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3482 assert(Intr && "not an image intrinsic with image pseudo");
3483 return selectImageIntrinsic(I, Intr);
3484 }
3485 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3486 return selectBVHIntrinsic(I);
3487 case AMDGPU::G_SBFX:
3488 case AMDGPU::G_UBFX:
3489 return selectG_SBFX_UBFX(I);
3490 case AMDGPU::G_SI_CALL:
3491 I.setDesc(TII.get(AMDGPU::SI_CALL));
3492 return true;
3493 case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3494 return selectWaveAddress(I);
3495 default:
3496 return selectImpl(I, *CoverageInfo);
3497 }
3498 return false;
3499 }
3500
3501 InstructionSelector::ComplexRendererFns
3502 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3503 return {{
3504 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3505 }};
3506
3507 }
3508
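// Peel G_FNEG and (optionally) G_FABS off the source and translate them into
// SISrcMods bits, returning the stripped source register together with the
// accumulated modifiers.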
3509 std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
3510 MachineOperand &Root, bool AllowAbs, bool OpSel) const {
3511 Register Src = Root.getReg();
3512 unsigned Mods = 0;
3513 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3514
3515 if (MI->getOpcode() == AMDGPU::G_FNEG) {
3516 Src = MI->getOperand(1).getReg();
3517 Mods |= SISrcMods::NEG;
3518 MI = getDefIgnoringCopies(Src, *MRI);
3519 }
3520
3521 if (AllowAbs && MI->getOpcode() == AMDGPU::G_FABS) {
3522 Src = MI->getOperand(1).getReg();
3523 Mods |= SISrcMods::ABS;
3524 }
3525
3526 if (OpSel)
3527 Mods |= SISrcMods::OP_SEL_0;
3528
3529 return std::pair(Src, Mods);
3530 }
3531
3532 Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded(
3533 Register Src, unsigned Mods, MachineOperand Root, MachineInstr *InsertPt,
3534 bool ForceVGPR) const {
3535 if ((Mods != 0 || ForceVGPR) &&
3536 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3537
3538 // If we looked through copies to find source modifiers on an SGPR operand,
3539 // we now have an SGPR register source. To avoid potentially violating the
3540 // constant bus restriction, we need to insert a copy to a VGPR.
3541 Register VGPRSrc = MRI->cloneVirtualRegister(Root.getReg());
3542 BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
3543 TII.get(AMDGPU::COPY), VGPRSrc)
3544 .addReg(Src);
3545 Src = VGPRSrc;
3546 }
3547
3548 return Src;
3549 }
3550
3551 ///
3552 /// This will select either an SGPR or VGPR operand and will save us from
3553 /// having to write an extra tablegen pattern.
3554 InstructionSelector::ComplexRendererFns
3555 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3556 return {{
3557 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3558 }};
3559 }
3560
3561 InstructionSelector::ComplexRendererFns
3562 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3563 Register Src;
3564 unsigned Mods;
3565 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3566
3567 return {{
3568 [=](MachineInstrBuilder &MIB) {
3569 MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3570 },
3571 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3572 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3573 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3574 }};
3575 }
3576
3577 InstructionSelector::ComplexRendererFns
3578 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3579 Register Src;
3580 unsigned Mods;
3581 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3582
3583 return {{
3584 [=](MachineInstrBuilder &MIB) {
3585 MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3586 },
3587 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3588 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3589 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3590 }};
3591 }
3592
3593 InstructionSelector::ComplexRendererFns
3594 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3595 return {{
3596 [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3597 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3598 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod
3599 }};
3600 }
3601
3602 InstructionSelector::ComplexRendererFns
3603 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3604 Register Src;
3605 unsigned Mods;
3606 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3607
3608 return {{
3609 [=](MachineInstrBuilder &MIB) {
3610 MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3611 },
3612 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3613 }};
3614 }
3615
3616 InstructionSelector::ComplexRendererFns
3617 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3618 Register Src;
3619 unsigned Mods;
3620 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3621
3622 return {{
3623 [=](MachineInstrBuilder &MIB) {
3624 MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3625 },
3626 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3627 }};
3628 }
3629
3630 InstructionSelector::ComplexRendererFns
3631 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3632 Register Reg = Root.getReg();
3633 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3634 if (Def->getOpcode() == AMDGPU::G_FNEG || Def->getOpcode() == AMDGPU::G_FABS)
3635 return {};
3636 return {{
3637 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3638 }};
3639 }
3640
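// Compute packed (VOP3P) source modifiers. Only a whole-vector <2 x s16>
// G_FNEG is folded (as NEG | NEG_HI); packed instructions have no abs
// modifier and OP_SEL_1 is always set.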
3641 std::pair<Register, unsigned>
3642 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3643 Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3644 unsigned Mods = 0;
3645 MachineInstr *MI = MRI.getVRegDef(Src);
3646
3647 if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3648 // It's possible to see an f32 fneg here, but unlikely.
3649 // TODO: Treat f32 fneg as only high bit.
3650 MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3651 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3652 Src = MI->getOperand(1).getReg();
3653 MI = MRI.getVRegDef(Src);
3654 }
3655
3656 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3657 (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3658
3659 // Packed instructions do not have abs modifiers.
3660 Mods |= SISrcMods::OP_SEL_1;
3661
3662 return std::pair(Src, Mods);
3663 }
3664
3665 InstructionSelector::ComplexRendererFns
3666 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3667 MachineRegisterInfo &MRI
3668 = Root.getParent()->getParent()->getParent()->getRegInfo();
3669
3670 Register Src;
3671 unsigned Mods;
3672 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3673
3674 return {{
3675 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3676 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3677 }};
3678 }
3679
3680 InstructionSelector::ComplexRendererFns
3681 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3682 MachineRegisterInfo &MRI
3683 = Root.getParent()->getParent()->getParent()->getRegInfo();
3684
3685 Register Src;
3686 unsigned Mods;
3687 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3688
3689 return {{
3690 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3691 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3692 }};
3693 }
3694
3695 InstructionSelector::ComplexRendererFns
3696 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
3697 // A literal i1 value set in the intrinsic represents SrcMods for the next
3698 // operand; it is in the Imm operand as an i1 sign-extended to int64_t.
3699 // 1 (-1 after sign extension) promotes packed values to signed, 0 treats them as unsigned.
3700 assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3701 "expected i1 value");
3702 unsigned Mods = SISrcMods::OP_SEL_1;
3703 if (Root.getImm() == -1)
3704 Mods ^= SISrcMods::NEG;
3705 return {{
3706 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3707 }};
3708 }
3709
3710 InstructionSelector::ComplexRendererFns
3711 AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
3712 MachineOperand &Root) const {
3713 assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3714 "expected i1 value");
3715 unsigned Mods = SISrcMods::OP_SEL_1;
3716 if (Root.getImm() != 0)
3717 Mods |= SISrcMods::OP_SEL_0;
3718
3719 return {{
3720 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3721 }};
3722 }
3723
3724 InstructionSelector::ComplexRendererFns
3725 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3726 Register Src;
3727 unsigned Mods;
3728 std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3729
3730 // FIXME: Handle op_sel
3731 return {{
3732 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3733 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3734 }};
3735 }
3736
3737 InstructionSelector::ComplexRendererFns
3738 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3739 Register Src;
3740 unsigned Mods;
3741 std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3742 /* AllowAbs */ false,
3743 /* OpSel */ false);
3744
3745 return {{
3746 [=](MachineInstrBuilder &MIB) {
3747 MIB.addReg(
3748 copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
3749 },
3750 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3751 }};
3752 }
3753
3754 InstructionSelector::ComplexRendererFns
3755 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3756 Register Src;
3757 unsigned Mods;
3758 std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3759 /* AllowAbs */ false,
3760 /* OpSel */ true);
3761
3762 return {{
3763 [=](MachineInstrBuilder &MIB) {
3764 MIB.addReg(
3765 copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
3766 },
3767 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3768 }};
3769 }
3770
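// Shared helper for the SMRD addressing modes. Depending on which of SOffset
// and Offset are requested, this matches an immediate-only offset, an SGPR
// offset, or the combined SGPR+immediate form.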
3771 bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
3772 Register &Base,
3773 Register *SOffset,
3774 int64_t *Offset) const {
3775 MachineInstr *MI = Root.getParent();
3776 MachineBasicBlock *MBB = MI->getParent();
3777
3778 // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3779 // then we can select all ptr + 32-bit offsets.
3780 SmallVector<GEPInfo, 4> AddrInfo;
3781 getAddrModeInfo(*MI, *MRI, AddrInfo);
3782
3783 if (AddrInfo.empty())
3784 return false;
3785
3786 const GEPInfo &GEPI = AddrInfo[0];
3787 std::optional<int64_t> EncodedImm =
3788 AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);
3789
3790 if (SOffset && Offset) {
3791 if (GEPI.SgprParts.size() == 1 && GEPI.Imm != 0 && EncodedImm &&
3792 AddrInfo.size() > 1) {
3793 const GEPInfo &GEPI2 = AddrInfo[1];
3794 if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) {
3795 if (Register OffsetReg =
3796 matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) {
3797 Base = GEPI2.SgprParts[0];
3798 *SOffset = OffsetReg;
3799 *Offset = *EncodedImm;
3800 return true;
3801 }
3802 }
3803 }
3804 return false;
3805 }
3806
3807 if (Offset && GEPI.SgprParts.size() == 1 && EncodedImm) {
3808 Base = GEPI.SgprParts[0];
3809 *Offset = *EncodedImm;
3810 return true;
3811 }
3812
3813 // SGPR offset is unsigned.
3814 if (SOffset && GEPI.SgprParts.size() == 1 && isUInt<32>(GEPI.Imm) &&
3815 GEPI.Imm != 0) {
3816 // If we make it this far we have a load with a 32-bit immediate offset.
3817 // It is OK to select this using a sgpr offset, because we have already
3818 // failed trying to select this load into one of the _IMM variants since
3819 // the _IMM Patterns are considered before the _SGPR patterns.
3820 Base = GEPI.SgprParts[0];
3821 *SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3822 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), *SOffset)
3823 .addImm(GEPI.Imm);
3824 return true;
3825 }
3826
3827 if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) {
3828 if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) {
3829 Base = GEPI.SgprParts[0];
3830 *SOffset = OffsetReg;
3831 return true;
3832 }
3833 }
3834
3835 return false;
3836 }
3837
3838 InstructionSelector::ComplexRendererFns
3839 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3840 Register Base;
3841 int64_t Offset;
3842 if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset))
3843 return std::nullopt;
3844
3845 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3846 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3847 }
3848
3849 InstructionSelector::ComplexRendererFns
3850 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3851 SmallVector<GEPInfo, 4> AddrInfo;
3852 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3853
3854 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3855 return std::nullopt;
3856
3857 const GEPInfo &GEPInfo = AddrInfo[0];
3858 Register PtrReg = GEPInfo.SgprParts[0];
3859 std::optional<int64_t> EncodedImm =
3860 AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3861 if (!EncodedImm)
3862 return std::nullopt;
3863
3864 return {{
3865 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3866 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3867 }};
3868 }
3869
3870 InstructionSelector::ComplexRendererFns
3871 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3872 Register Base, SOffset;
3873 if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr))
3874 return std::nullopt;
3875
3876 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3877 [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}};
3878 }
3879
3880 InstructionSelector::ComplexRendererFns
3881 AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const {
3882 Register Base, SOffset;
3883 int64_t Offset;
3884 if (!selectSmrdOffset(Root, Base, &SOffset, &Offset))
3885 return std::nullopt;
3886
3887 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3888 [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
3889 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3890 }
3891
3892 std::pair<Register, int>
3893 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3894 uint64_t FlatVariant) const {
3895 MachineInstr *MI = Root.getParent();
3896
3897 auto Default = std::pair(Root.getReg(), 0);
3898
3899 if (!STI.hasFlatInstOffsets())
3900 return Default;
3901
3902 Register PtrBase;
3903 int64_t ConstOffset;
3904 std::tie(PtrBase, ConstOffset) =
3905 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3906 if (ConstOffset == 0)
3907 return Default;
3908
3909 unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3910 if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3911 return Default;
3912
3913 return std::pair(PtrBase, ConstOffset);
3914 }
3915
3916 InstructionSelector::ComplexRendererFns
3917 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3918 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3919
3920 return {{
3921 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3922 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3923 }};
3924 }
3925
3926 InstructionSelector::ComplexRendererFns
3927 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3928 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3929
3930 return {{
3931 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3932 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3933 }};
3934 }
3935
3936 InstructionSelector::ComplexRendererFns
3937 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3938 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3939
3940 return {{
3941 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3942 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3943 }};
3944 }
3945
3946 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
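// Roughly (a sketch; exact MIR syntax may differ):
//   %off64:_(s64) = G_ZEXT %voff:_(s32)
//   %addr:_(p1) = G_PTR_ADD (G_PTR_ADD %sgpr_base, %off64), %const_imm
// selects to saddr = %sgpr_base, voffset = %voff, offset = %const_imm.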
3947 InstructionSelector::ComplexRendererFns
3948 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3949 Register Addr = Root.getReg();
3950 Register PtrBase;
3951 int64_t ConstOffset;
3952 int64_t ImmOffset = 0;
3953
3954 // Match the immediate offset first, which canonically is moved as low as
3955 // possible.
3956 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3957
3958 if (ConstOffset != 0) {
3959 if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3960 SIInstrFlags::FlatGlobal)) {
3961 Addr = PtrBase;
3962 ImmOffset = ConstOffset;
3963 } else {
3964 auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3965 if (isSGPR(PtrBaseDef->Reg)) {
3966 if (ConstOffset > 0) {
3967 // Offset is too large.
3968 //
3969 // saddr + large_offset -> saddr +
3970 // (voffset = large_offset & ~MaxOffset) +
3971 // (large_offset & MaxOffset);
3972 int64_t SplitImmOffset, RemainderOffset;
3973 std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3974 ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3975
3976 if (isUInt<32>(RemainderOffset)) {
3977 MachineInstr *MI = Root.getParent();
3978 MachineBasicBlock *MBB = MI->getParent();
3979 Register HighBits =
3980 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3981
3982 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3983 HighBits)
3984 .addImm(RemainderOffset);
3985
3986 return {{
3987 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3988 [=](MachineInstrBuilder &MIB) {
3989 MIB.addReg(HighBits);
3990 }, // voffset
3991 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3992 }};
3993 }
3994 }
3995
3996 // We are adding a 64 bit SGPR and a constant. If constant bus limit
3997 // is 1 we would need to perform 1 or 2 extra moves for each half of
3998 // the constant and it is better to do a scalar add and then issue a
3999 // single VALU instruction to materialize zero. Otherwise it takes fewer
4000 // instructions to perform VALU adds with immediates or inline literals.
4001 unsigned NumLiterals =
4002 !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
4003 !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
4004 if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
4005 return std::nullopt;
4006 }
4007 }
4008 }
4009
4010 // Match the variable offset.
4011 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4012 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4013 // Look through the SGPR->VGPR copy.
4014 Register SAddr =
4015 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
4016
4017 if (isSGPR(SAddr)) {
4018 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
4019
4020 // It's possible voffset is an SGPR here, but the copy to VGPR will be
4021 // inserted later.
4022 if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
4023 return {{[=](MachineInstrBuilder &MIB) { // saddr
4024 MIB.addReg(SAddr);
4025 },
4026 [=](MachineInstrBuilder &MIB) { // voffset
4027 MIB.addReg(VOffset);
4028 },
4029 [=](MachineInstrBuilder &MIB) { // offset
4030 MIB.addImm(ImmOffset);
4031 }}};
4032 }
4033 }
4034 }
4035
4036 // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
4037 // drop this.
4038 if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
4039 AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
4040 return std::nullopt;
4041
4042 // It's cheaper to materialize a single 32-bit zero for vaddr than the two
4043 // moves required to copy a 64-bit SGPR to VGPR.
4044 MachineInstr *MI = Root.getParent();
4045 MachineBasicBlock *MBB = MI->getParent();
4046 Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4047
4048 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4049 .addImm(0);
4050
4051 return {{
4052 [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4053 [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset
4054 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4055 }};
4056 }
4057
4058 InstructionSelector::ComplexRendererFns
4059 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4060 Register Addr = Root.getReg();
4061 Register PtrBase;
4062 int64_t ConstOffset;
4063 int64_t ImmOffset = 0;
4064
4065 // Match the immediate offset first, which canonically is moved as low as
4066 // possible.
4067 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4068
4069 if (ConstOffset != 0 &&
4070 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4071 SIInstrFlags::FlatScratch)) {
4072 Addr = PtrBase;
4073 ImmOffset = ConstOffset;
4074 }
4075
4076 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4077 if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4078 int FI = AddrDef->MI->getOperand(1).getIndex();
4079 return {{
4080 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4081 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4082 }};
4083 }
4084
4085 Register SAddr = AddrDef->Reg;
4086
4087 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4088 Register LHS = AddrDef->MI->getOperand(1).getReg();
4089 Register RHS = AddrDef->MI->getOperand(2).getReg();
4090 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4091 auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4092
4093 if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4094 isSGPR(RHSDef->Reg)) {
4095 int FI = LHSDef->MI->getOperand(1).getIndex();
4096 MachineInstr &I = *Root.getParent();
4097 MachineBasicBlock *BB = I.getParent();
4098 const DebugLoc &DL = I.getDebugLoc();
4099 SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4100
4101 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4102 .addFrameIndex(FI)
4103 .addReg(RHSDef->Reg);
4104 }
4105 }
4106
4107 if (!isSGPR(SAddr))
4108 return std::nullopt;
4109
4110 return {{
4111 [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4112 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4113 }};
4114 }
4115
4116 // Check whether the flat scratch SVS swizzle bug affects this access.
4117 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4118 Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4119 if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4120 return false;
4121
4122 // The bug affects the swizzling of SVS accesses if there is any carry out
4123 // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4124 // voffset to (soffset + inst_offset).
4125 auto VKnown = KnownBits->getKnownBits(VAddr);
4126 auto SKnown = KnownBits::computeForAddSub(
4127 true, false, KnownBits->getKnownBits(SAddr),
4128 KnownBits::makeConstant(APInt(32, ImmOffset)));
4129 uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4130 uint64_t SMax = SKnown.getMaxValue().getZExtValue();
4131 return (VMax & 3) + (SMax & 3) >= 4;
4132 }
4133
4134 InstructionSelector::ComplexRendererFns
4135 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4136 Register Addr = Root.getReg();
4137 Register PtrBase;
4138 int64_t ConstOffset;
4139 int64_t ImmOffset = 0;
4140
4141 // Match the immediate offset first, which canonically is moved as low as
4142 // possible.
4143 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4144
4145 if (ConstOffset != 0 &&
4146 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4147 Addr = PtrBase;
4148 ImmOffset = ConstOffset;
4149 }
4150
4151 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4152 if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4153 return std::nullopt;
4154
4155 Register RHS = AddrDef->MI->getOperand(2).getReg();
4156 if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4157 return std::nullopt;
4158
4159 Register LHS = AddrDef->MI->getOperand(1).getReg();
4160 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4161
4162 if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4163 return std::nullopt;
4164
4165 if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4166 int FI = LHSDef->MI->getOperand(1).getIndex();
4167 return {{
4168 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4169 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4170 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4171 }};
4172 }
4173
4174 if (!isSGPR(LHS))
4175 return std::nullopt;
4176
4177 return {{
4178 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4179 [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4180 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4181 }};
4182 }
4183
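// Select the MUBUF OFFEN form for private (scratch) accesses: either
// materialize the high bits of a constant address into a VGPR and use the
// low 12 bits as the immediate offset, or fold a frame index (plus a legal
// constant offset) into the vaddr operand.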
4184 InstructionSelector::ComplexRendererFns
4185 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4186 MachineInstr *MI = Root.getParent();
4187 MachineBasicBlock *MBB = MI->getParent();
4188 MachineFunction *MF = MBB->getParent();
4189 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4190
4191 int64_t Offset = 0;
4192 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4193 Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4194 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4195
4196 // TODO: Should this be inside the render function? The iterator seems to
4197 // move.
4198 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4199 HighBits)
4200 .addImm(Offset & ~4095);
4201
4202 return {{[=](MachineInstrBuilder &MIB) { // rsrc
4203 MIB.addReg(Info->getScratchRSrcReg());
4204 },
4205 [=](MachineInstrBuilder &MIB) { // vaddr
4206 MIB.addReg(HighBits);
4207 },
4208 [=](MachineInstrBuilder &MIB) { // soffset
4209 // Use constant zero for soffset and rely on eliminateFrameIndex
4210 // to choose the appropriate frame register if need be.
4211 MIB.addImm(0);
4212 },
4213 [=](MachineInstrBuilder &MIB) { // offset
4214 MIB.addImm(Offset & 4095);
4215 }}};
4216 }
4217
4218 assert(Offset == 0 || Offset == -1);
4219
4220 // Try to fold a frame index directly into the MUBUF vaddr field, and any
4221 // offsets.
4222 std::optional<int> FI;
4223 Register VAddr = Root.getReg();
4224 if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4225 Register PtrBase;
4226 int64_t ConstOffset;
4227 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4228 if (ConstOffset != 0) {
4229 if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4230 (!STI.privateMemoryResourceIsRangeChecked() ||
4231 KnownBits->signBitIsZero(PtrBase))) {
4232 const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4233 if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4234 FI = PtrBaseDef->getOperand(1).getIndex();
4235 else
4236 VAddr = PtrBase;
4237 Offset = ConstOffset;
4238 }
4239 } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4240 FI = RootDef->getOperand(1).getIndex();
4241 }
4242 }
4243
4244 return {{[=](MachineInstrBuilder &MIB) { // rsrc
4245 MIB.addReg(Info->getScratchRSrcReg());
4246 },
4247 [=](MachineInstrBuilder &MIB) { // vaddr
4248 if (FI)
4249 MIB.addFrameIndex(*FI);
4250 else
4251 MIB.addReg(VAddr);
4252 },
4253 [=](MachineInstrBuilder &MIB) { // soffset
4254 // Use constant zero for soffset and rely on eliminateFrameIndex
4255 // to choose the appropriate frame register if need be.
4256 MIB.addImm(0);
4257 },
4258 [=](MachineInstrBuilder &MIB) { // offset
4259 MIB.addImm(Offset);
4260 }}};
4261 }
4262
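// DS instruction offsets are 16-bit and unsigned. On subtargets without a
// usable DS offset (and without the unsafe-folding option), additionally
// require the sign bit of the base address to be known zero.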
4263 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4264 int64_t Offset) const {
4265 if (!isUInt<16>(Offset))
4266 return false;
4267
4268 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4269 return true;
4270
4271 // On Southern Islands, instructions with a negative base value and an offset
4272 // don't seem to work.
4273 return KnownBits->signBitIsZero(Base);
4274 }
4275
4276 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4277 int64_t Offset1,
4278 unsigned Size) const {
4279 if (Offset0 % Size != 0 || Offset1 % Size != 0)
4280 return false;
4281 if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4282 return false;
4283
4284 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4285 return true;
4286
4287 // On Southern Islands, instructions with a negative base value and an offset
4288 // don't seem to work.
4289 return KnownBits->signBitIsZero(Base);
4290 }
4291
4292 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4293 unsigned ShAmtBits) const {
4294 assert(MI.getOpcode() == TargetOpcode::G_AND);
4295
4296 std::optional<APInt> RHS =
4297 getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4298 if (!RHS)
4299 return false;
4300
4301 if (RHS->countTrailingOnes() >= ShAmtBits)
4302 return true;
4303
4304 const APInt &LHSKnownZeros =
4305 KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4306 return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4307 }
4308
4309 // Return the wave level SGPR base address if this is a wave address.
4310 static Register getWaveAddress(const MachineInstr *Def) {
4311 return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4312 ? Def->getOperand(1).getReg()
4313 : Register();
4314 }
4315
4316 InstructionSelector::ComplexRendererFns
4317 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4318 MachineOperand &Root) const {
4319 Register Reg = Root.getReg();
4320 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4321
4322 const MachineInstr *Def = MRI->getVRegDef(Reg);
4323 if (Register WaveBase = getWaveAddress(Def)) {
4324 return {{
4325 [=](MachineInstrBuilder &MIB) { // rsrc
4326 MIB.addReg(Info->getScratchRSrcReg());
4327 },
4328 [=](MachineInstrBuilder &MIB) { // soffset
4329 MIB.addReg(WaveBase);
4330 },
4331 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4332 }};
4333 }
4334
4335 int64_t Offset = 0;
4336
4337 // FIXME: Copy check is a hack
4338 Register BasePtr;
4339 if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4340 if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4341 return {};
4342 const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4343 Register WaveBase = getWaveAddress(BasePtrDef);
4344 if (!WaveBase)
4345 return {};
4346
4347 return {{
4348 [=](MachineInstrBuilder &MIB) { // rsrc
4349 MIB.addReg(Info->getScratchRSrcReg());
4350 },
4351 [=](MachineInstrBuilder &MIB) { // soffset
4352 MIB.addReg(WaveBase);
4353 },
4354 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4355 }};
4356 }
4357
4358 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4359 !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4360 return {};
4361
4362 return {{
4363 [=](MachineInstrBuilder &MIB) { // rsrc
4364 MIB.addReg(Info->getScratchRSrcReg());
4365 },
4366 [=](MachineInstrBuilder &MIB) { // soffset
4367 MIB.addImm(0);
4368 },
4369 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4370 }};
4371 }
4372
4373 std::pair<Register, unsigned>
4374 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4375 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4376 if (!RootDef)
4377 return std::pair(Root.getReg(), 0);
4378
4379 int64_t ConstAddr = 0;
4380
4381 Register PtrBase;
4382 int64_t Offset;
4383 std::tie(PtrBase, Offset) =
4384 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4385
4386 if (Offset) {
4387 if (isDSOffsetLegal(PtrBase, Offset)) {
4388 // (add n0, c0)
4389 return std::pair(PtrBase, Offset);
4390 }
4391 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4392 // TODO
4393
4394
4395 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4396 // TODO
4397
4398 }
4399
4400 return std::pair(Root.getReg(), 0);
4401 }
4402
4403 InstructionSelector::ComplexRendererFns
4404 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4405 Register Reg;
4406 unsigned Offset;
4407 std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4408 return {{
4409 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4410 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4411 }};
4412 }
4413
4414 InstructionSelector::ComplexRendererFns
4415 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4416 return selectDSReadWrite2(Root, 4);
4417 }
4418
4419 InstructionSelector::ComplexRendererFns
4420 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4421 return selectDSReadWrite2(Root, 8);
4422 }
4423
4424 InstructionSelector::ComplexRendererFns
4425 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4426 unsigned Size) const {
4427 Register Reg;
4428 unsigned Offset;
4429 std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4430 return {{
4431 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4432 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4433 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4434 }};
4435 }
4436
4437 std::pair<Register, unsigned>
4438 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4439 unsigned Size) const {
4440 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4441 if (!RootDef)
4442 return std::pair(Root.getReg(), 0);
4443
4444 int64_t ConstAddr = 0;
4445
4446 Register PtrBase;
4447 int64_t Offset;
4448 std::tie(PtrBase, Offset) =
4449 getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4450
4451 if (Offset) {
4452 int64_t OffsetValue0 = Offset;
4453 int64_t OffsetValue1 = Offset + Size;
4454 if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4455 // (add n0, c0)
4456 return std::pair(PtrBase, OffsetValue0 / Size);
4457 }
4458 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4459 // TODO
4460
4461 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4462 // TODO
4463
4464 }
4465
4466 return std::pair(Root.getReg(), 0);
4467 }
4468
4469 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4470 /// the base value with the constant offset. There may be intervening copies
4471 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4472 /// not match the pattern.
4473 std::pair<Register, int64_t>
4474 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4475 Register Root, const MachineRegisterInfo &MRI) const {
4476 MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4477 if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4478 return {Root, 0};
4479
4480 MachineOperand &RHS = RootI->getOperand(2);
4481 std::optional<ValueAndVReg> MaybeOffset =
4482 getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4483 if (!MaybeOffset)
4484 return {Root, 0};
4485 return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4486 }
4487
4488 static void addZeroImm(MachineInstrBuilder &MIB) {
4489 MIB.addImm(0);
4490 }
4491
4492 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4493 /// BasePtr is not valid, a null base pointer will be used.
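/// The descriptor is assembled as sub0_sub1 = BasePtr (or 0) and
/// sub2_sub3 = { FormatLo, FormatHi }.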
4494 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4495 uint32_t FormatLo, uint32_t FormatHi,
4496 Register BasePtr) {
4497 Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4498 Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4499 Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4500 Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4501
4502 B.buildInstr(AMDGPU::S_MOV_B32)
4503 .addDef(RSrc2)
4504 .addImm(FormatLo);
4505 B.buildInstr(AMDGPU::S_MOV_B32)
4506 .addDef(RSrc3)
4507 .addImm(FormatHi);
4508
4509 // Build the half of the descriptor that holds the constants before building
4510 // the full 128-bit register. If we are building multiple resource descriptors,
4511 // this will allow CSEing of the 2-component register.
4512 B.buildInstr(AMDGPU::REG_SEQUENCE)
4513 .addDef(RSrcHi)
4514 .addReg(RSrc2)
4515 .addImm(AMDGPU::sub0)
4516 .addReg(RSrc3)
4517 .addImm(AMDGPU::sub1);
4518
4519 Register RSrcLo = BasePtr;
4520 if (!BasePtr) {
4521 RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4522 B.buildInstr(AMDGPU::S_MOV_B64)
4523 .addDef(RSrcLo)
4524 .addImm(0);
4525 }
4526
4527 B.buildInstr(AMDGPU::REG_SEQUENCE)
4528 .addDef(RSrc)
4529 .addReg(RSrcLo)
4530 .addImm(AMDGPU::sub0_sub1)
4531 .addReg(RSrcHi)
4532 .addImm(AMDGPU::sub2_sub3);
4533
4534 return RSrc;
4535 }
4536
4537 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4538 const SIInstrInfo &TII, Register BasePtr) {
4539 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4540
4541 // FIXME: Why are half the "default" bits ignored based on the addressing
4542 // mode?
4543 return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4544 }
4545
4546 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4547 const SIInstrInfo &TII, Register BasePtr) {
4548 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4549
4550 // FIXME: Why are half the "default" bits ignored based on the addressing
4551 // mode?
4552 return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4553 }
4554
4555 AMDGPUInstructionSelector::MUBUFAddressData
4556 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4557 MUBUFAddressData Data;
4558 Data.N0 = Src;
4559
4560 Register PtrBase;
4561 int64_t Offset;
4562
4563 std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4564 if (isUInt<32>(Offset)) {
4565 Data.N0 = PtrBase;
4566 Data.Offset = Offset;
4567 }
4568
4569 if (MachineInstr *InputAdd
4570 = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4571 Data.N2 = InputAdd->getOperand(1).getReg();
4572 Data.N3 = InputAdd->getOperand(2).getReg();
4573
4574 // FIXME: Need to fix extra SGPR->VGPR copies inserted
4575 // FIXME: Don't know if this was defined by operand 0
4576 //
4577 // TODO: Remove this when we have copy folding optimizations after
4578 // RegBankSelect.
4579 Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4580 Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4581 }
4582
4583 return Data;
4584 }
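// Roughly, the decomposition above gives (illustrative):
//   %p = G_PTR_ADD %a, %b                  -> N0 = %p, N2 = %a, N3 = %b
//   %q = G_PTR_ADD %p, (G_CONSTANT i64 16) -> N0 = %p, N2 = %a, N3 = %b,
//                                             Offset = 16
//   anything else                          -> N0 = Src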

/// Return true if the addr64 MUBUF mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
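///
/// For example (illustrative): an offset that does not fit in the MUBUF
/// immediate field is materialized into a fresh SGPR with S_MOV_B32, \p
/// SOffset is set to that register, and \p ImmOffset is reset to 0.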
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
    MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // Illegal offset, store it in soffset.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(SOffset)
    .addImm(ImmOffset);
  ImmOffset = 0;
}

bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
    MachineOperand &Root, Register &VAddr, Register &RSrcReg,
    Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // The addr64 bit was removed for Volcanic Islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource.
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}
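// Summary of the split above ("null" means the default null pointer is used
// as the SRD base):
//   N2 and N3 both VGPR -> VAddr = N0, SRD base = null
//   N2 VGPR, N3 SGPR    -> VAddr = N2, SRD base = N3
//   N2 SGPR             -> VAddr = N3, SRD base = N2
//   no N2, N0 VGPR      -> VAddr = N0, SRD base = null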

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
    MachineOperand &Root, Register &RSrcReg, Register &SOffset,
    int64_t &Offset) const {

  // FIXME: Pattern should not reach here.
  if (STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, // cpol
      addZeroImm, // tfe
      addZeroImm  // swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, // cpol
      addZeroImm, // tfe
      addZeroImm, // swz
    }};
}

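// The two atomic variants below differ from the non-atomic selectors above
// only in rendering AMDGPU::CPol::GLC for the cpol operand instead of 0; GLC
// is what requests the value-returning form of a MUBUF atomic.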
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      [=](MachineInstrBuilder &MIB) {
        MIB.addImm(AMDGPU::CPol::GLC); // cpol
      }
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
    }};
}

/// Get an immediate that must fit in 32 bits, and is treated as zero extended.
static std::optional<uint64_t>
getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {
  // getIConstantVRegSExtVal sign-extends values, so check that the value still
  // fits in 32 bits.
  std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return std::nullopt;
  return Lo_32(*OffsetVal);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  std::optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

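// As above, but using the 32-bit literal SMRD buffer offset encoding; per the
// assert below, this form is only used on SEA_ISLANDS.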
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  std::optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
  // Match the (soffset + offset) pair as a 32-bit register base and
  // an immediate offset.
  Register SOffset;
  unsigned Offset;
  std::tie(SOffset, Offset) =
      AMDGPU::getBaseWithConstantOffset(*MRI, Root.getReg(), KnownBits);
  if (!SOffset)
    return std::nullopt;

  std::optional<int64_t> EncodedOffset =
      AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true);
  if (!EncodedOffset)
    return std::nullopt;

  assert(MRI->getType(SOffset) == LLT::scalar(32));
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedOffset); }}};
}

// Variant of stripBitCast that returns the instruction instead of a
// MachineOperand.
static MachineInstr *stripBitCast(MachineInstr *MI, MachineRegisterInfo &MRI) {
  if (MI->getOpcode() == AMDGPU::G_BITCAST)
    return getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI);
  return MI;
}

// Figure out if this is really an extract of the high 16 bits of a dword,
// returning nullptr if it isn't.
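// Example inputs recognized (illustrative; copies/bitcasts may appear between
// any of these steps):
//   %t:s16 = G_TRUNC (G_LSHR %x:s32, (G_CONSTANT i32 16))
//     -> returns the def of %x
//   %s:<2 x s16> = G_SHUFFLE_VECTOR %v, %u, shufflemask(1, 1)
//   %t:s16 = G_TRUNC (G_BITCAST %s)
//     -> returns the def of %v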
static MachineInstr *isExtractHiElt(MachineInstr *Inst,
                                    MachineRegisterInfo &MRI) {
  Inst = stripBitCast(Inst, MRI);

  if (Inst->getOpcode() != AMDGPU::G_TRUNC)
    return nullptr;

  MachineInstr *TruncOp =
      getDefIgnoringCopies(Inst->getOperand(1).getReg(), MRI);
  TruncOp = stripBitCast(TruncOp, MRI);

  // G_LSHR x, (G_CONSTANT i32 16)
  if (TruncOp->getOpcode() == AMDGPU::G_LSHR) {
    auto SrlAmount = getIConstantVRegValWithLookThrough(
        TruncOp->getOperand(2).getReg(), MRI);
    if (SrlAmount && SrlAmount->Value.getZExtValue() == 16) {
      MachineInstr *SrlOp =
          getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
      return stripBitCast(SrlOp, MRI);
    }
  }

  // G_SHUFFLE_VECTOR x, y, shufflemask(1, 1|0)
  // 1, 0 swaps the low/high 16 bits.
  // 1, 1 sets the high 16 bits to be the same as the low 16.
  // In either case, the shuffle selects the high 16 bits of the first source.
  if (TruncOp->getOpcode() == AMDGPU::G_SHUFFLE_VECTOR) {
    assert(MRI.getType(TruncOp->getOperand(0).getReg()) ==
           LLT::fixed_vector(2, 16));

    ArrayRef<int> Mask = TruncOp->getOperand(3).getShuffleMask();
    assert(Mask.size() == 2);

    if (Mask[0] == 1 && Mask[1] <= 1) {
      MachineInstr *LHS =
          getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
      return stripBitCast(LHS, MRI);
    }
  }

  return nullptr;
}

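// Select source modifiers for a mad_mix/fma_mix style operand. Matched is set
// only when the source is a G_FPEXT from f16; in that case the op_sel /
// op_sel_hi bits are folded into the returned modifiers as described in the
// comments below.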
std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
                                                     bool &Matched) const {
  Matched = false;

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
  if (MI->getOpcode() == AMDGPU::G_FPEXT) {
    MachineOperand *MO = &MI->getOperand(1);
    Src = MO->getReg();
    MI = getDefIgnoringCopies(Src, *MRI);

    assert(MRI->getType(Src) == LLT::scalar(16));

    // See through bitcasts.
    // FIXME: Would be nice to use stripBitCast here.
    if (MI->getOpcode() == AMDGPU::G_BITCAST) {
      MO = &MI->getOperand(1);
      Src = MO->getReg();
      MI = getDefIgnoringCopies(Src, *MRI);
    }

    const auto CheckAbsNeg = [&]() {
      // Be careful about folding modifiers if we already have an abs. fneg is
      // applied last, so we don't want to apply an earlier fneg.
      if ((Mods & SISrcMods::ABS) == 0) {
        unsigned ModsTmp;
        std::tie(Src, ModsTmp) = selectVOP3ModsImpl(*MO);
        MI = getDefIgnoringCopies(Src, *MRI);

        if ((ModsTmp & SISrcMods::NEG) != 0)
          Mods ^= SISrcMods::NEG;

        if ((ModsTmp & SISrcMods::ABS) != 0)
          Mods |= SISrcMods::ABS;
      }
    };

    CheckAbsNeg();

    // op_sel/op_sel_hi decide the source type and source.
    // If the source's op_sel_hi is set, it indicates to do a conversion from
    // fp16. If the source's op_sel is set, it picks the high half of the
    // source register.

    Mods |= SISrcMods::OP_SEL_1;

    if (MachineInstr *ExtractHiEltMI = isExtractHiElt(MI, *MRI)) {
      Mods |= SISrcMods::OP_SEL_0;
      MI = ExtractHiEltMI;
      MO = &MI->getOperand(0);
      Src = MO->getReg();

      CheckAbsNeg();
    }

    Matched = true;
  }

  return {Src, Mods};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMadMixMods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  bool Matched;
  std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

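// Custom operand renderers referenced from the TableGen'erated patterns. For
// the G_CONSTANT-based renderers an OpIdx of -1 is expected (see the asserts
// below), meaning the immediate is derived from the matched instruction rather
// than from a particular operand index.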
void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}

/// This only really exists to satisfy the DAG type-checking machinery, so it
/// is a no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}
