//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;

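// Try to constrain Reg to RegClass in place; if that is not possible, insert a
// COPY into a fresh virtual register of RegClass before InsertPt and return
// the new register instead of Reg.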
unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI,
                                   MachineInstr &InsertPt, unsigned Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI)) {
    unsigned NewReg = MRI.createVirtualRegister(&RegClass);
    BuildMI(*InsertPt.getParent(), InsertPt, InsertPt.getDebugLoc(),
            TII.get(TargetOpcode::COPY), NewReg)
        .addReg(Reg);
    return NewReg;
  }

  return Reg;
}

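// Look up the register class required for operand OpIdx of II and constrain
// RegMO's register to it via constrainRegToClass. If no usable class
// constraint can be determined, the register is returned unchanged.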
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some target-independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining, as the instruction defining the register will
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here might not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainRegToClass(MRI, TII, RBI, InsertPt, Reg, *RegClass);
}

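// Constrain every explicit register operand of the already-selected
// instruction I to the class required by its MCInstrDesc, and tie uses to
// defs where the descriptor marks them as tied.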
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    unsigned Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

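// An instruction is trivially dead if it has no side-effects (it is safe to
// move, or is a PHI) and every register it defines is a virtual register with
// no non-debug uses.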
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // If we can move an instruction, we can remove it.  Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
        !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

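// Mark MF as having failed instruction selection and report the
// missed-optimization remark R; if GlobalISel abort is enabled, raise a fatal
// error instead of emitting the remark.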
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

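// Overload that builds the remark from PassName, Msg and the offending MI.
// The instruction itself is only appended when aborting or when extra remark
// analysis is allowed, since printing it is expensive.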
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

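// Return the value of the G_CONSTANT that directly defines VReg, without
// looking through copies or extensions.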
Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
                                           const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

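// Find a G_CONSTANT feeding VReg, optionally looking through COPY, G_TRUNC,
// G_SEXT and G_ZEXT. The truncations/extensions seen on the way are re-applied
// to the constant so the returned value has the width of the original VReg.
// For example, given generic MIR like
//   %0:_(s32) = G_CONSTANT i32 -1
//   %1:_(s64) = G_ZEXT %0(s32)
// looking through %1 yields the zero-extended value 0xffffffff together with
// %0, the vreg defined by the G_CONSTANT.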
Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    unsigned VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  while ((MI = MRI.getVRegDef(VReg)) &&
         MI->getOpcode() != TargetOpcode::G_CONSTANT && LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VReg))
        return None;
      break;
    default:
      return None;
    }
  }
  if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT ||
      (!MI->getOperand(1).isImm() && !MI->getOperand(1).isCImm()))
    return None;

  const MachineOperand &CstVal = MI->getOperand(1);
  unsigned BitWidth = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
  APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                             : CstVal.getCImm()->getValue();
  assert(Val.getBitWidth() == BitWidth &&
         "Value bitwidth doesn't match definition type");
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  if (Val.getBitWidth() > 64)
    return None;

  return ValueAndVReg{Val.getSExtValue(), VReg};
}

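// Return the ConstantFP operand of the G_FCONSTANT defining VReg, or nullptr
// if VReg is not defined by a G_FCONSTANT.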
const llvm::ConstantFP* llvm::getConstantFPVRegVal(unsigned VReg,
                                       const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

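// Walk up a chain of COPYs whose source and destination types match, starting
// at the definition of Reg, and return the defining instruction if its opcode
// is Opcode, otherwise nullptr.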
llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, unsigned Reg,
                                       const MachineRegisterInfo &MRI) {
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return nullptr;
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    unsigned SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid() || SrcTy != DstTy)
      break;
    DefMI = MRI.getVRegDef(SrcReg);
  }
  return DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

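// Build an APFloat of the given bit width (16, 32 or 64) from a host double,
// converting to IEEE half with round-to-nearest-even for the 16-bit case.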
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

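// Constant-fold the given generic binary opcode if both operands are defined
// by G_CONSTANTs and the opcode is handled; return None otherwise, including
// for division or remainder by zero.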
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}

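// Declare the analyses that GlobalISel passes preserve for the benefit of the
// SelectionDAG fallback path; currently only StackProtector.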
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}