//===- X86RegisterBankInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the RegisterBankInfo class for X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "X86RegisterBankInfo.h"
#include "X86InstrInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

#define GET_TARGET_REGBANK_IMPL
#include "X86GenRegisterBank.inc"

using namespace llvm;
// This file will be TableGen'ed at some point.
#define GET_TARGET_REGBANK_INFO_IMPL
#include "X86GenRegisterBankInfo.def"

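/// The constructor only sanity-checks the TableGen-generated bank definitions:
/// the GPR bank must cover GR64 and all of its subclasses, and its maximum
/// register size must be 64 bits.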
X86RegisterBankInfo::X86RegisterBankInfo(const TargetRegisterInfo &TRI) {

  // Validate RegBank initialization.
  const RegisterBank &RBGPR = getRegBank(X86::GPRRegBankID);
  (void)RBGPR;
  assert(&X86::GPRRegBank == &RBGPR && "Incorrect RegBanks initialization.");

  // The GPR register bank is fully defined by all the registers in
  // GR64 + its subclasses.
  assert(RBGPR.covers(*TRI.getRegClass(X86::GR64RegClassID)) &&
         "Subclass not added?");
  assert(getMaximumSize(RBGPR.getID()) == 64 &&
         "GPRs should hold up to 64-bit");
}

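/// Map a concrete register class onto one of the two X86 register banks:
/// the GR* and LOW32_ADDR_ACCESS* classes go to the GPR bank, while the
/// FR*/VR* (XMM/YMM/ZMM) classes go to the VECR bank. Any other class is
/// rejected with llvm_unreachable.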
const RegisterBank &
X86RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
                                            LLT) const {

  if (X86::GR8RegClass.hasSubClassEq(&RC) ||
      X86::GR16RegClass.hasSubClassEq(&RC) ||
      X86::GR32RegClass.hasSubClassEq(&RC) ||
      X86::GR64RegClass.hasSubClassEq(&RC) ||
      X86::LOW32_ADDR_ACCESSRegClass.hasSubClassEq(&RC) ||
      X86::LOW32_ADDR_ACCESS_RBPRegClass.hasSubClassEq(&RC))
    return getRegBank(X86::GPRRegBankID);

  if (X86::FR32XRegClass.hasSubClassEq(&RC) ||
      X86::FR64XRegClass.hasSubClassEq(&RC) ||
      X86::VR128XRegClass.hasSubClassEq(&RC) ||
      X86::VR256XRegClass.hasSubClassEq(&RC) ||
      X86::VR512RegClass.hasSubClassEq(&RC))
    return getRegBank(X86::VECRRegBankID);

  llvm_unreachable("Unsupported register kind yet.");
}

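/// Pick the partial mapping index for a single value of type \p Ty. Scalars
/// and pointers go to a GPR-sized index unless \p isFP is set, in which case
/// 32/64-bit scalars are treated as FP values; vectors are mapped purely by
/// size. A few illustrative cases (assuming a 64-bit target, so a pointer is
/// 64 bits wide):
///   s32, isFP = false  -> PMI_GPR32
///   s32, isFP = true   -> PMI_FP32
///   p0,  isFP = false  -> PMI_GPR64
///   <4 x s32>          -> PMI_VEC128
///   <8 x s32>          -> PMI_VEC256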
X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) {
  if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
    switch (Ty.getSizeInBits()) {
    case 1:
    case 8:
      return PMI_GPR8;
    case 16:
      return PMI_GPR16;
    case 32:
      return PMI_GPR32;
    case 64:
      return PMI_GPR64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else if (Ty.isScalar()) {
    switch (Ty.getSizeInBits()) {
    case 32:
      return PMI_FP32;
    case 64:
      return PMI_FP64;
    case 128:
      return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else {
    switch (Ty.getSizeInBits()) {
    case 128:
      return PMI_VEC128;
    case 256:
      return PMI_VEC256;
    case 512:
      return PMI_VEC512;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  }

  return PMI_None;
}

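/// Compute one PartialMappingIdx per operand of \p MI by running every
/// register operand through getPartialMappingIdx with the same \p isFP hint.
/// Non-register operands (and register operands with no register assigned)
/// get PMI_None. \p OpRegBankIdx must already be sized to the operand count.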
void X86RegisterBankInfo::getInstrPartialMappingIdxs(
    const MachineInstr &MI, const MachineRegisterInfo &MRI, const bool isFP,
    SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx) {

  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg() || !MO.getReg())
      OpRegBankIdx[Idx] = PMI_None;
    else
      OpRegBankIdx[Idx] = getPartialMappingIdx(MRI.getType(MO.getReg()), isFP);
  }
}

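/// Turn the per-operand PartialMappingIdx values into ValueMappings, each
/// describing a single (non-split) register. Operands that are not registers
/// (or have no register) are skipped, and the function returns false as soon
/// as one mapping is invalid so the caller can bail out.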
bool X86RegisterBankInfo::getInstrValueMapping(
    const MachineInstr &MI,
    const SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx,
    SmallVectorImpl<const ValueMapping *> &OpdsMapping) {

  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (!MI.getOperand(Idx).isReg())
      continue;
    if (!MI.getOperand(Idx).getReg())
      continue;

    auto Mapping = getValueMapping(OpRegBankIdx[Idx], 1);
    if (!Mapping->isValid())
      return false;

    OpdsMapping[Idx] = Mapping;
  }
  return true;
}

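/// Build the mapping for a three-operand instruction whose destination and
/// both sources share one type (the common shape of G_ADD, G_FADD, etc.):
/// every operand gets the same partial mapping, selected from the type's size
/// and the \p isFP hint. Anything else is rejected as unsupported.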
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
                                            bool isFP) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  if (NumOperands != 3 || (Ty != MRI.getType(MI.getOperand(1).getReg())) ||
      (Ty != MRI.getType(MI.getOperand(2).getReg())))
    llvm_unreachable("Unsupported operand mapping yet.");

  auto Mapping = getValueMapping(getPartialMappingIdx(Ty, isFP), 3);
  return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
}

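/// Compute the operand mapping for \p MI. Non-generic instructions and G_PHI
/// first try the generic getInstrMappingImpl() logic; integer and FP
/// arithmetic reuse getSameOperandsMapping(); shifts derive all operands from
/// the destination type; everything else gets a per-operand mapping where a
/// few cases (G_SITOFP/G_FPTOSI, G_FCMP, FP-flavoured G_TRUNC/G_ANYEXT)
/// override the default "all scalars in GPRs" choice. For example, a G_ADD of
/// two s32 values maps all three operands to the GPR bank, while the
/// same-shaped G_FADD maps them to the VECR bank.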
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc) || Opc == TargetOpcode::G_PHI) {
    const InstructionMapping &Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  switch (Opc) {
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
    return getSameOperandsMapping(MI, false);
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameOperandsMapping(MI, true);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    unsigned NumOperands = MI.getNumOperands();
    LLT Ty = MRI.getType(MI.getOperand(0).getReg());

    auto Mapping = getValueMapping(getPartialMappingIdx(Ty, false), 3);
    return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
  }
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);

  switch (Opc) {
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FCONSTANT:
    // Instructions that have only floating-point operands (all scalars in the
    // VECR bank).
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);
    break;
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_FPTOSI: {
    // Some of the floating-point instructions have mixed GPR and FP operands:
    // fine-tune the computed mapping.
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    bool FirstArgIsFP = Opc == TargetOpcode::G_SITOFP;
    bool SecondArgIsFP = Opc == TargetOpcode::G_FPTOSI;
    OpRegBankIdx[0] = getPartialMappingIdx(Ty0, /* isFP */ FirstArgIsFP);
    OpRegBankIdx[1] = getPartialMappingIdx(Ty1, /* isFP */ SecondArgIsFP);
    break;
  }
  case TargetOpcode::G_FCMP: {
    LLT Ty1 = MRI.getType(MI.getOperand(2).getReg());
    LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());
    (void)Ty2;
    assert(Ty1.getSizeInBits() == Ty2.getSizeInBits() &&
           "Mismatched operand sizes for G_FCMP");

    unsigned Size = Ty1.getSizeInBits();
    (void)Size;
    assert((Size == 32 || Size == 64) && "Unsupported size for G_FCMP");

    auto FpRegBank = getPartialMappingIdx(Ty1, /* isFP */ true);
    OpRegBankIdx = {PMI_GPR8,
                    /* Predicate */ PMI_None, FpRegBank, FpRegBank};
    break;
  }
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_ANYEXT: {
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    bool isFPTrunc = (Ty0.getSizeInBits() == 32 || Ty0.getSizeInBits() == 64) &&
                     Ty1.getSizeInBits() == 128 && Opc == TargetOpcode::G_TRUNC;
    bool isFPAnyExt =
        Ty0.getSizeInBits() == 128 &&
        (Ty1.getSizeInBits() == 32 || Ty1.getSizeInBits() == 64) &&
        Opc == TargetOpcode::G_ANYEXT;

    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ isFPTrunc || isFPAnyExt,
                               OpRegBankIdx);
  } break;
  default:
    // Track the bank of each register; use the NotFP mapping (all scalars in
    // GPRs).
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ false, OpRegBankIdx);
    break;
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
    return getInvalidInstructionMapping();

  return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
                               getOperandsMapping(OpdsMapping), NumOperands);
}

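/// Nothing X86-specific is done when applying a mapping: the generic
/// applyDefaultMapping() handles all the operand rewriting.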
void X86RegisterBankInfo::applyMappingImpl(
    MachineIRBuilder &Builder, const OperandsMapper &OpdMapper) const {
  return applyDefaultMapping(OpdMapper);
}

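/// Offer alternative mappings for instructions that could live on either
/// bank. For 32- and 64-bit G_LOAD/G_STORE/G_IMPLICIT_DEF an all-FP mapping
/// (scalars placed in the VECR bank) is proposed in addition to the default;
/// everything else falls back to the generic implementation.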
RegisterBankInfo::InstructionMappings
X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {

  const MachineFunction &MF = *MI.getParent()->getParent();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  switch (MI.getOpcode()) {
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_IMPLICIT_DEF: {
    // Try to map 32/64-bit values to PMI_FP32/PMI_FP64.
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 32 && Size != 64)
      break;

    unsigned NumOperands = MI.getNumOperands();

    // Track the bank of each register; use the FP mapping (all scalars in
    // the VECR bank).
    SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);

    // Finally construct the computed mapping.
    SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
    if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
      break;

    const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
        /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
    InstructionMappings AltMappings;
    AltMappings.push_back(&Mapping);
    return AltMappings;
  }
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}