//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUGenInstrInfo(-1, -1), RI(st), ST(st) {}

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return nullptr;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

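// Expand the pseudo register load and store instructions used for indirect
// addressing. When the offset operand is the INDIRECT_BASE_ADDR marker, the
// access is lowered to a plain move to or from the computed indirect address
// register; otherwise a target-specific indirect read or write is emitted.
// Any other instruction is left alone and false is returned.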
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                             AMDGPU::OpName::chan);
  if (isRegisterLoad(*MI)) {
    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         Address, OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return nullptr;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return nullptr;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr *> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode *> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::enableClusterLoads() const {
  return true;
}

// FIXME: This behaves strangely. If, for example, you have 32 loads + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split them into two batches of
// 16 stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated, so it might not be worth that much effort to fix.
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                              int64_t Offset0, int64_t Offset1,
                                              unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
    const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
    const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

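// Find the lowest register index in the indirect addressing register class
// that is not occupied by a function live-in, or -1 if the function has no
// stack objects and therefore needs no indirect registers.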
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

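// Compute the upper bound of the register indices used for indirect
// addressing in this function, derived from the frame index offset of its
// stack objects; returns -1 when the function has no stack objects.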
int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported.
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = MF.getTarget()
               .getSubtargetImpl()
               ->getFrameLowering()
               ->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}

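// Return the variant of a MIMG opcode that writes only the requested number
// of channels; for channel counts without a dedicated table entry, the
// original opcode is returned unchanged.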
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
}
}
}

// This must be kept in sync with the SISubtarget class in SIInstrInfo.td
enum SISubtarget {
  SI = 0,
  VI = 1
};

enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
  switch (Gen) {
  default:
    return SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
    return VI;
  }
}

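// Translate a pseudo opcode into the MC opcode for the subtarget this
// AMDGPUInstrInfo was created for. The result is the input opcode when it is
// already a native instruction, or -1 when the pseudo has no encoding for
// this generation.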
int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
  int MCOp = AMDGPU::getMCOpcode(
      Opcode, AMDGPUSubtargetToSISubtarget(RI.ST.getGeneration()));

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}
