xref: /llvm-project/llvm/lib/CodeGen/GlobalISel/CSEMIRBuilder.cpp (revision 5af0f097bad5f8ff7d502c241b913ba77dfdd9eb)
1 //===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the CSEMIRBuilder class which CSEs as it builds
10 /// instructions.
11 //===----------------------------------------------------------------------===//
12 //
13 
14 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
15 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
16 #include "llvm/CodeGen/GlobalISel/Utils.h"
17 #include "llvm/CodeGen/MachineInstrBuilder.h"
18 #include "llvm/IR/DebugInfoMetadata.h"
19 
20 using namespace llvm;
21 
22 bool CSEMIRBuilder::dominates(MachineBasicBlock::const_iterator A,
23                               MachineBasicBlock::const_iterator B) const {
24   auto MBBEnd = getMBB().end();
25   if (B == MBBEnd)
26     return true;
27   assert(A->getParent() == B->getParent() &&
28          "Iterators should be in same block");
29   const MachineBasicBlock *BBA = A->getParent();
30   MachineBasicBlock::const_iterator I = BBA->begin();
31   for (; &*I != A && &*I != B; ++I)
32     ;
33   return &*I == A;
34 }
35 
36 MachineInstrBuilder
37 CSEMIRBuilder::getDominatingInstrForID(FoldingSetNodeID &ID,
38                                        void *&NodeInsertPos) {
39   GISelCSEInfo *CSEInfo = getCSEInfo();
40   assert(CSEInfo && "Can't get here without setting CSEInfo");
41   MachineBasicBlock *CurMBB = &getMBB();
42   MachineInstr *MI =
43       CSEInfo->getMachineInstrIfExists(ID, CurMBB, NodeInsertPos);
44   if (MI) {
45     CSEInfo->countOpcodeHit(MI->getOpcode());
46     auto CurrPos = getInsertPt();
47     auto MII = MachineBasicBlock::iterator(MI);
48     if (MII == CurrPos) {
49       // Move the insert point ahead of the instruction so any future uses of
50       // this builder will have the def ready.
51       setInsertPt(*CurMBB, std::next(MII));
52     } else if (!dominates(MI, CurrPos)) {
53       CurMBB->splice(CurrPos, CurMBB, MI);
54     }
55     return MachineInstrBuilder(getMF(), MI);
56   }
57   return MachineInstrBuilder();
58 }
59 
60 bool CSEMIRBuilder::canPerformCSEForOpc(unsigned Opc) const {
61   const GISelCSEInfo *CSEInfo = getCSEInfo();
62   if (!CSEInfo || !CSEInfo->shouldCSE(Opc))
63     return false;
64   return true;
65 }
66 
67 void CSEMIRBuilder::profileDstOp(const DstOp &Op,
68                                  GISelInstProfileBuilder &B) const {
69   switch (Op.getDstOpKind()) {
70   case DstOp::DstType::Ty_RC:
71     B.addNodeIDRegType(Op.getRegClass());
72     break;
73   case DstOp::DstType::Ty_Reg: {
74     // Regs can have LLT&(RB|RC). If those exist, profile them as well.
75     B.addNodeIDReg(Op.getReg());
76     break;
77   }
78   default:
79     B.addNodeIDRegType(Op.getLLTTy(*getMRI()));
80     break;
81   }
82 }
83 
84 void CSEMIRBuilder::profileSrcOp(const SrcOp &Op,
85                                  GISelInstProfileBuilder &B) const {
86   switch (Op.getSrcOpKind()) {
87   case SrcOp::SrcType::Ty_Imm:
88     B.addNodeIDImmediate(static_cast<int64_t>(Op.getImm()));
89     break;
90   case SrcOp::SrcType::Ty_Predicate:
91     B.addNodeIDImmediate(static_cast<int64_t>(Op.getPredicate()));
92     break;
93   default:
94     B.addNodeIDRegType(Op.getReg());
95     break;
96   }
97 }
98 
99 void CSEMIRBuilder::profileMBBOpcode(GISelInstProfileBuilder &B,
100                                      unsigned Opc) const {
101   // First add the MBB (Local CSE).
102   B.addNodeIDMBB(&getMBB());
103   // Then add the opcode.
104   B.addNodeIDOpcode(Opc);
105 }
106 
107 void CSEMIRBuilder::profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
108                                       ArrayRef<SrcOp> SrcOps,
109                                       Optional<unsigned> Flags,
110                                       GISelInstProfileBuilder &B) const {
111 
112   profileMBBOpcode(B, Opc);
113   // Then add the DstOps.
114   profileDstOps(DstOps, B);
115   // Then add the SrcOps.
116   profileSrcOps(SrcOps, B);
117   // Add Flags if passed in.
118   if (Flags)
119     B.addNodeIDFlag(*Flags);
120 }
121 
122 MachineInstrBuilder CSEMIRBuilder::memoizeMI(MachineInstrBuilder MIB,
123                                              void *NodeInsertPos) {
124   assert(canPerformCSEForOpc(MIB->getOpcode()) &&
125          "Attempting to CSE illegal op");
126   MachineInstr *MIBInstr = MIB;
127   getCSEInfo()->insertInstr(MIBInstr, NodeInsertPos);
128   return MIB;
129 }
130 
131 bool CSEMIRBuilder::checkCopyToDefsPossible(ArrayRef<DstOp> DstOps) {
132   if (DstOps.size() == 1)
133     return true; // always possible to emit copy to just 1 vreg.
134 
135   return llvm::all_of(DstOps, [](const DstOp &Op) {
136     DstOp::DstType DT = Op.getDstOpKind();
137     return DT == DstOp::DstType::Ty_LLT || DT == DstOp::DstType::Ty_RC;
138   });
139 }
140 
// Return a MIB usable by the caller for the CSE'd instruction MIB. When the
// caller requested a specific destination vreg, emit a COPY from the CSE'd
// def into it; otherwise hand back the existing instruction, merging the
// builder's current debug location into it.
MachineInstrBuilder
CSEMIRBuilder::generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
                                        MachineInstrBuilder &MIB) {
  assert(checkCopyToDefsPossible(DstOps) &&
         "Impossible return a single MIB with copies to multiple defs");
  if (DstOps.size() == 1) {
    const DstOp &Op = DstOps[0];
    // The caller asked for a specific vreg: forward the CSE'd def into it
    // with a COPY rather than rewriting uses.
    if (Op.getDstOpKind() == DstOp::DstType::Ty_Reg)
      return buildCopy(Op.getReg(), MIB.getReg(0));
  }

  // If we didn't generate a copy then we're re-using an existing node directly
  // instead of emitting any code. Merge the debug location we wanted to emit
  // into the instruction we're CSE'ing with. Debug locations aren't part of
  // the profile so we don't need to recompute it.
  if (getDebugLoc()) {
    // Notify the observer (if any) around the in-place mutation so passes
    // tracking instruction changes stay consistent.
    GISelChangeObserver *Observer = getState().Observer;
    if (Observer)
      Observer->changingInstr(*MIB);
    MIB->setDebugLoc(
        DILocation::getMergedLocation(MIB->getDebugLoc(), getDebugLoc()));
    if (Observer)
      Observer->changedInstr(*MIB);
  }

  return MIB;
}
168 
// Build (or CSE) an instruction. First tries to constant fold the requested
// opcode; a folded result is returned directly and never enters the CSE map.
// Otherwise the instruction is profiled and either an existing dominating
// instruction is reused or a new one is built and memoized.
MachineInstrBuilder CSEMIRBuilder::buildInstr(unsigned Opc,
                                              ArrayRef<DstOp> DstOps,
                                              ArrayRef<SrcOp> SrcOps,
                                              Optional<unsigned> Flag) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_PTR_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX: {
    // Try to constant fold these.
    assert(SrcOps.size() == 2 && "Invalid sources");
    assert(DstOps.size() == 1 && "Invalid dsts");
    LLT SrcTy = SrcOps[0].getLLTTy(*getMRI());

    // Pointer arithmetic in a non-integral address space has no defined
    // integer semantics, so don't fold it.
    if (Opc == TargetOpcode::G_PTR_ADD &&
        getDataLayout().isNonIntegralAddressSpace(SrcTy.getAddressSpace()))
      break;

    if (SrcTy.isVector()) {
      // Try to constant fold vector constants.
      Register VecCst = ConstantFoldVectorBinop(
          Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI(), *this);
      if (VecCst)
        return buildCopy(DstOps[0], VecCst);
      break;
    }

    // Scalar case: fold to a G_CONSTANT when both operands are constants.
    if (Optional<APInt> Cst = ConstantFoldBinOp(Opc, SrcOps[0].getReg(),
                                                SrcOps[1].getReg(), *getMRI()))
      return buildConstant(DstOps[0], *Cst);
    break;
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
  case TargetOpcode::G_FMINIMUM:
  case TargetOpcode::G_FMAXIMUM:
  case TargetOpcode::G_FCOPYSIGN: {
    // Try to constant fold these.
    assert(SrcOps.size() == 2 && "Invalid sources");
    assert(DstOps.size() == 1 && "Invalid dsts");
    if (Optional<APFloat> Cst = ConstantFoldFPBinOp(
            Opc, SrcOps[0].getReg(), SrcOps[1].getReg(), *getMRI()))
      return buildFConstant(DstOps[0], *Cst);
    break;
  }
  case TargetOpcode::G_SEXT_INREG: {
    assert(DstOps.size() == 1 && "Invalid dst ops");
    assert(SrcOps.size() == 2 && "Invalid src ops");
    const DstOp &Dst = DstOps[0];
    const SrcOp &Src0 = SrcOps[0];
    const SrcOp &Src1 = SrcOps[1];
    // Src1 is the bit-width immediate of the sign-extension.
    if (auto MaybeCst =
            ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI()))
      return buildConstant(Dst, *MaybeCst);
    break;
  }
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP: {
    // Try to constant fold these.
    assert(SrcOps.size() == 1 && "Invalid sources");
    assert(DstOps.size() == 1 && "Invalid dsts");
    if (Optional<APFloat> Cst = ConstantFoldIntToFloat(
            Opc, DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getReg(), *getMRI()))
      return buildFConstant(DstOps[0], *Cst);
    break;
  }
  case TargetOpcode::G_CTLZ: {
    assert(SrcOps.size() == 1 && "Expected one source");
    assert(DstOps.size() == 1 && "Expected one dest");
    auto MaybeCsts = ConstantFoldCTLZ(SrcOps[0].getReg(), *getMRI());
    if (!MaybeCsts)
      break;
    if (MaybeCsts->size() == 1)
      return buildConstant(DstOps[0], (*MaybeCsts)[0]);
    // This was a vector constant. Build a G_BUILD_VECTOR for them.
    SmallVector<Register> ConstantRegs;
    LLT VecTy = DstOps[0].getLLTTy(*getMRI());
    for (unsigned Cst : *MaybeCsts)
      ConstantRegs.emplace_back(
          buildConstant(VecTy.getScalarType(), Cst).getReg(0));
    return buildBuildVector(DstOps[0], ConstantRegs);
  }
  }
  // No fold happened: fall through to the CSE path.
  bool CanCopy = checkCopyToDefsPossible(DstOps);
  if (!canPerformCSEForOpc(Opc))
    return MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag);
  // If we can CSE this instruction, but involves generating copies to multiple
  // regs, give up. This frequently happens to UNMERGEs.
  if (!CanCopy) {
    auto MIB = MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag);
    // CSEInfo would have tracked this instruction. Remove it from the temporary
    // insts.
    getCSEInfo()->handleRemoveInst(&*MIB);
    return MIB;
  }
  // Profile the instruction and look for an existing dominating def to reuse.
  FoldingSetNodeID ID;
  GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
  void *InsertPos = nullptr;
  profileEverything(Opc, DstOps, SrcOps, Flag, ProfBuilder);
  MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos);
  if (MIB) {
    // Handle generating copies here.
    return generateCopiesIfRequired(DstOps, MIB);
  }
  // This instruction does not exist in the CSEInfo. Build it and CSE it.
  MachineInstrBuilder NewMIB =
      MachineIRBuilder::buildInstr(Opc, DstOps, SrcOps, Flag);
  return memoizeMI(NewMIB, InsertPos);
}
301 
302 MachineInstrBuilder CSEMIRBuilder::buildConstant(const DstOp &Res,
303                                                  const ConstantInt &Val) {
304   constexpr unsigned Opc = TargetOpcode::G_CONSTANT;
305   if (!canPerformCSEForOpc(Opc))
306     return MachineIRBuilder::buildConstant(Res, Val);
307 
308   // For vectors, CSE the element only for now.
309   LLT Ty = Res.getLLTTy(*getMRI());
310   if (Ty.isVector())
311     return buildSplatVector(Res, buildConstant(Ty.getElementType(), Val));
312 
313   FoldingSetNodeID ID;
314   GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
315   void *InsertPos = nullptr;
316   profileMBBOpcode(ProfBuilder, Opc);
317   profileDstOp(Res, ProfBuilder);
318   ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateCImm(&Val));
319   MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos);
320   if (MIB) {
321     // Handle generating copies here.
322     return generateCopiesIfRequired({Res}, MIB);
323   }
324 
325   MachineInstrBuilder NewMIB = MachineIRBuilder::buildConstant(Res, Val);
326   return memoizeMI(NewMIB, InsertPos);
327 }
328 
329 MachineInstrBuilder CSEMIRBuilder::buildFConstant(const DstOp &Res,
330                                                   const ConstantFP &Val) {
331   constexpr unsigned Opc = TargetOpcode::G_FCONSTANT;
332   if (!canPerformCSEForOpc(Opc))
333     return MachineIRBuilder::buildFConstant(Res, Val);
334 
335   // For vectors, CSE the element only for now.
336   LLT Ty = Res.getLLTTy(*getMRI());
337   if (Ty.isVector())
338     return buildSplatVector(Res, buildFConstant(Ty.getElementType(), Val));
339 
340   FoldingSetNodeID ID;
341   GISelInstProfileBuilder ProfBuilder(ID, *getMRI());
342   void *InsertPos = nullptr;
343   profileMBBOpcode(ProfBuilder, Opc);
344   profileDstOp(Res, ProfBuilder);
345   ProfBuilder.addNodeIDMachineOperand(MachineOperand::CreateFPImm(&Val));
346   MachineInstrBuilder MIB = getDominatingInstrForID(ID, InsertPos);
347   if (MIB) {
348     // Handle generating copies here.
349     return generateCopiesIfRequired({Res}, MIB);
350   }
351   MachineInstrBuilder NewMIB = MachineIRBuilder::buildFConstant(Res, Val);
352   return memoizeMI(NewMIB, InsertPos);
353 }
354