1 //===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file declares the IRTranslator pass.
10 /// This pass is responsible for translating LLVM IR into MachineInstrs.
11 /// It uses target hooks to lower the ABI, but aside from that the code
12 /// generated by this pass is generic. This is the default translator used
13 /// for GlobalISel.
14 ///
15 /// \todo Replace the comments with actual doxygen comments.
16 //===----------------------------------------------------------------------===//
17 
18 #ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
19 #define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
20 
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/CodeGen/CodeGenCommonISel.h"
24 #include "llvm/CodeGen/FunctionLoweringInfo.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFunctionPass.h"
27 #include "llvm/CodeGen/SwiftErrorValueTracking.h"
28 #include "llvm/CodeGen/SwitchLoweringUtils.h"
29 #include "llvm/Support/Allocator.h"
30 #include "llvm/Support/CodeGen.h"
31 #include <memory>
32 #include <utility>
33 
34 namespace llvm {
35 
36 class AllocaInst;
37 class AssumptionCache;
38 class BasicBlock;
39 class CallInst;
40 class CallLowering;
41 class Constant;
42 class ConstrainedFPIntrinsic;
43 class DataLayout;
44 class DbgDeclareInst;
45 class DbgValueInst;
46 class Instruction;
47 class MachineBasicBlock;
48 class MachineFunction;
49 class MachineInstr;
50 class MachineRegisterInfo;
51 class OptimizationRemarkEmitter;
52 class PHINode;
53 class TargetLibraryInfo;
54 class TargetPassConfig;
55 class User;
56 class Value;
57 
58 // Technically the pass should run on a hypothetical MachineModule,
59 // since it should translate Globals into some sort of MachineGlobal.
60 // The MachineGlobal should ultimately just be a transfer of ownership of
61 // the interesting bits that are relevant to represent a global value.
62 // That being said, we could investigate what it would cost to just duplicate
63 // the information from the LLVM IR.
64 // The idea is that ultimately we would be able to free up the memory used
65 // by the LLVM IR as soon as the translation is over.
66 class IRTranslator : public MachineFunctionPass {
67 public:
68   static char ID;
69 
70 private:
71   /// Interface used to lower everything related to calls.
72   const CallLowering *CLI = nullptr;
73 
74   /// This class contains the mapping from Values to vreg-related data.
75   class ValueToVRegInfo {
76   public:
77     ValueToVRegInfo() = default;
78 
79     using VRegListT = SmallVector<Register, 1>;
80     using OffsetListT = SmallVector<uint64_t, 1>;
81 
82     using const_vreg_iterator =
83         DenseMap<const Value *, VRegListT *>::const_iterator;
84     using const_offset_iterator =
85         DenseMap<const Value *, OffsetListT *>::const_iterator;
86 
87     inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }
88 
89     VRegListT *getVRegs(const Value &V) {
90       auto It = ValToVRegs.find(&V);
91       if (It != ValToVRegs.end())
92         return It->second;
93 
94       return insertVRegs(V);
95     }
96 
97     OffsetListT *getOffsets(const Value &V) {
98       auto It = TypeToOffsets.find(V.getType());
99       if (It != TypeToOffsets.end())
100         return It->second;
101 
102       return insertOffsets(V);
103     }
104 
105     const_vreg_iterator findVRegs(const Value &V) const {
106       return ValToVRegs.find(&V);
107     }
108 
109     bool contains(const Value &V) const { return ValToVRegs.contains(&V); }
110 
111     void reset() {
112       ValToVRegs.clear();
113       TypeToOffsets.clear();
114       VRegAlloc.DestroyAll();
115       OffsetAlloc.DestroyAll();
116     }
117 
118   private:
119     VRegListT *insertVRegs(const Value &V) {
120       assert(!ValToVRegs.contains(&V) && "Value already exists");
121 
122       // We placement new using our fast allocator since we never try to free
123       // the vectors until translation is finished.
124       auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
125       ValToVRegs[&V] = VRegList;
126       return VRegList;
127     }
128 
129     OffsetListT *insertOffsets(const Value &V) {
130       assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");
131 
132       auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
133       TypeToOffsets[V.getType()] = OffsetList;
134       return OffsetList;
135     }
136     SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
137     SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;
138 
139     // We store pointers to vectors here because, if we stored the vectors
140     // directly, references to them could be invalidated while we hold them.
141     DenseMap<const Value *, VRegListT*> ValToVRegs;
142     DenseMap<const Type *, OffsetListT*> TypeToOffsets;
143   };
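
  // A minimal usage sketch for ValueToVRegInfo (illustrative only; the pass
  // accesses it through the VMap member declared below, and both calls create
  // the lists lazily on first use):
  //
  //   ValueToVRegInfo::VRegListT &Regs = *VMap.getVRegs(V);
  //   ValueToVRegInfo::OffsetListT &Offs = *VMap.getOffsets(V);
  //
  // Regs[i] holds the virtual register for the piece of V described by
  // Offs[i]; reset() drops both tables once translation of a function is done.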
144 
145   /// Mapping of the values of the current LLVM IR function to the related
146   /// virtual registers and offsets.
147   ValueToVRegInfo VMap;
148 
149   // One BasicBlock can be translated to multiple MachineBasicBlocks. For such
150   // BasicBlocks, MachinePreds retains a mapping from the edges arriving at
151   // the BasicBlock to the corresponding created MachineBasicBlocks. Some
152   // BasicBlocks that get translated to a single MachineBasicBlock may also
153   // end up in this Map.
154   using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
155   DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;
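
  // For example (an illustrative sketch of one common case): lowering a switch
  // may emit a chain of compare-and-branch MachineBasicBlocks that all branch
  // to the block of one IR successor S. Each of those blocks is recorded here
  // under the IR edge (SwitchParentBB, S), so that PHI wiring later adds one
  // incoming operand per machine-level predecessor instead of one per IR
  // predecessor.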
156 
157   // List of stubbed PHI instructions, for values and basic blocks to be filled
158   // in once all MachineBasicBlocks have been created.
159   SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
160       PendingPHIs;
161 
162   /// Record of which frame index has been allocated to each alloca for
163   /// this function.
164   DenseMap<const AllocaInst *, int> FrameIndices;
165 
166   SwiftErrorValueTracking SwiftError;
167 
168   /// \name Methods for translating from LLVM IR to MachineInstr.
169   /// \see ::translate for general information on the translate methods.
170   /// @{
171 
172   /// Translate \p Inst into its corresponding MachineInstr instruction(s).
173   /// Insert the newly translated instruction(s) right where the CurBuilder
174   /// is set.
175   ///
176   /// The general algorithm is:
177   /// 1. Look for a virtual register for each operand or
178   ///    create one.
179   /// 2. Update the VMap accordingly.
180   /// 2.alt. For constant arguments, if they are compile-time constants,
181   ///   produce an immediate in the right operand and do not touch
182   ///   ValToReg. Actually we will go with a virtual register for each
183   ///   constant because it may be expensive to actually materialize the
184   ///   constant. Moreover, if the constant spans several instructions,
185   ///   CSE may not catch them.
186   ///   => Update ValToVReg and remember that we saw a constant in Constants.
187   ///   We will materialize all the constants in finalize.
188   /// Note: we would need to do something so that we can recognize such
189   ///       operands as constants.
190   /// 3. Create the generic instruction.
191   ///
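  /// A rough sketch of the dispatch (illustrative, not the exact code; the
  /// real implementation forwards to the translate* helpers declared below):
  /// \code
  ///   CurBuilder->setDebugLoc(Inst.getDebugLoc());
  ///   switch (Inst.getOpcode()) {
  ///   case Instruction::Add:
  ///     return translateAdd(Inst, *CurBuilder);
  ///   // ... one case per LLVM IR opcode ...
  ///   default:
  ///     return false; // reported as a fallback or abort, depending on TPC
  ///   }
  /// \endcode
  ///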
192   /// \return true if the translation succeeded.
193   bool translate(const Instruction &Inst);
194 
195   /// Materialize \p C into virtual-register \p Reg. The generic instructions
196   /// performing this materialization will be inserted into the entry block of
197   /// the function.
198   ///
199   /// \return true if the materialization succeeded.
200   bool translate(const Constant &C, Register Reg);
201 
202   /// Examine any debug-info attached to the instruction (in the form of
203   /// DbgRecords) and translate it.
204   void translateDbgInfo(const Instruction &Inst,
205                           MachineIRBuilder &MIRBuilder);
206 
207   /// Translate a debug-info record of a dbg.value into a DBG_* instruction.
208   /// Pass in all the contents of the record, rather than relying on how it's
209   /// stored.
210   void translateDbgValueRecord(Value *V, bool HasArgList,
211                          const DILocalVariable *Variable,
212                          const DIExpression *Expression, const DebugLoc &DL,
213                          MachineIRBuilder &MIRBuilder);
214 
215   /// Translate a debug-info record of a dbg.declare into an indirect DBG_*
216   /// instruction. Pass in all the contents of the record, rather than relying
217   /// on how it's stored.
218   void translateDbgDeclareRecord(Value *Address, bool HasArgList,
219                          const DILocalVariable *Variable,
220                          const DIExpression *Expression, const DebugLoc &DL,
221                          MachineIRBuilder &MIRBuilder);
222 
223   // Translate U as a copy of V.
224   bool translateCopy(const User &U, const Value &V,
225                      MachineIRBuilder &MIRBuilder);
226 
227   /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
228   /// emitted.
229   bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);
230 
231   /// Translate an LLVM load instruction into generic IR.
232   bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);
233 
234   /// Translate an LLVM store instruction into generic IR.
235   bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);
236 
237   /// Translate an LLVM memory intrinsic (memcpy, memset, ...).
238   bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
239                         unsigned Opcode);
240 
241   /// Translate an LLVM trap intrinsic (trap, debugtrap, ubsantrap).
242   bool translateTrap(const CallInst &U, MachineIRBuilder &MIRBuilder,
243                      unsigned Opcode);
244 
245   // Translate @llvm.vector.interleave2 and
246   // @llvm.vector.deinterleave2 intrinsics for fixed-width vector
247   // types into vector shuffles.
248   bool translateVectorInterleave2Intrinsic(const CallInst &CI,
249                                            MachineIRBuilder &MIRBuilder);
250   bool translateVectorDeinterleave2Intrinsic(const CallInst &CI,
251                                              MachineIRBuilder &MIRBuilder);
252 
253   void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);
254 
255   bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
256                                   MachineIRBuilder &MIRBuilder);
257   bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
258                                     MachineIRBuilder &MIRBuilder);
259 
260   /// Helper function for translateSimpleIntrinsic.
261   /// \return The generic opcode for \p ID if \p ID is a
262   /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
263   /// Intrinsic::not_intrinsic.
264   unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);
265 
266   /// Translates the intrinsics handled by getSimpleIntrinsicOpcode.
267   /// \return true if the translation succeeded.
268   bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
269                                 MachineIRBuilder &MIRBuilder);
270 
271   bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
272                                        MachineIRBuilder &MIRBuilder);
273 
274   bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
275                                MachineIRBuilder &MIRBuilder);
276 
277   /// Returns the single livein physical register Arg was lowered to, if
278   /// possible.
279   std::optional<MCRegister> getArgPhysReg(Argument &Arg);
280 
281   /// If debug-info targets an Argument and its expression is an EntryValue,
282   /// lower it as either an entry in the MF debug table (dbg.declare), or a
283   /// DBG_VALUE targeting the corresponding livein register for that Argument
284   /// (dbg.value).
285   bool translateIfEntryValueArgument(bool isDeclare, Value *Arg,
286                                      const DILocalVariable *Var,
287                                      const DIExpression *Expr,
288                                      const DebugLoc &DL,
289                                      MachineIRBuilder &MIRBuilder);
290 
291   bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);
292 
293   /// Common code for translating normal calls or invokes.
294   bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);
295 
296   /// Translate call instruction.
297   /// \pre \p U is a call instruction.
298   bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);
299 
300   /// When an invoke or a cleanupret unwinds to the next EH pad, there are
301   /// many places it could ultimately go. In the IR, we have a single unwind
302   /// destination, but in the machine CFG, we enumerate all the possible blocks.
303   /// This function skips over imaginary basic blocks that hold catchswitch
304   /// instructions, and finds all the "real" machine
305   /// basic block destinations. As those destinations may not be successors of
306   /// EHPadBB, here we also calculate the edge probability to those
307   /// destinations. The passed-in Prob is the edge probability to EHPadBB.
308   bool findUnwindDestinations(
309       const BasicBlock *EHPadBB, BranchProbability Prob,
310       SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
311           &UnwindDests);
312 
313   bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);
314 
315   bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);
316 
317   bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);
318 
319   /// Translate one of LLVM's cast instructions into MachineInstrs, with the
320   /// given generic Opcode.
321   bool translateCast(unsigned Opcode, const User &U,
322                      MachineIRBuilder &MIRBuilder);
323 
324   /// Translate a phi instruction.
325   bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);
326 
327   /// Translate a comparison (icmp or fcmp) instruction or constant.
328   bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);
329 
330   /// Translate an integer compare instruction (or constant).
331   bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
332     return translateCompare(U, MIRBuilder);
333   }
334 
335   /// Translate a floating-point compare instruction (or constant).
336   bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
337     return translateCompare(U, MIRBuilder);
338   }
339 
340   /// Add remaining operands onto phis we've translated. Executed after all
341   /// MachineBasicBlocks for the function have been created.
342   void finishPendingPhis();
343 
344   /// Translate \p U into a unary operation \p Opcode.
345   /// \pre \p U is a unary operation.
346   bool translateUnaryOp(unsigned Opcode, const User &U,
347                         MachineIRBuilder &MIRBuilder);
348 
349   /// Translate \p U into a binary operation \p Opcode.
350   /// \pre \p U is a binary operation.
351   bool translateBinaryOp(unsigned Opcode, const User &U,
352                          MachineIRBuilder &MIRBuilder);
353 
354   /// If the set of cases should be emitted as a series of branches, return
355   /// true. If we should emit this as a bunch of and/or'd together conditions,
356   /// return false.
357   bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
358   /// Helper method for findMergedConditions.
359   /// This function emits a branch and is used at the leaves of an OR or an
360   /// AND operator tree.
361   void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
362                                     MachineBasicBlock *FBB,
363                                     MachineBasicBlock *CurBB,
364                                     MachineBasicBlock *SwitchBB,
365                                     BranchProbability TProb,
366                                     BranchProbability FProb, bool InvertCond);
367   /// Used during condbr translation to find trees of conditions that can be
368   /// optimized.
369   void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
370                             MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
371                             MachineBasicBlock *SwitchBB,
372                             Instruction::BinaryOps Opc, BranchProbability TProb,
373                             BranchProbability FProb, bool InvertCond);
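
  // For example (a sketch): for a branch on `and i1 %a, i1 %b`,
  // findMergedConditions can lower the condition as two chained conditional
  // branches, testing %a first and falling through to a block that tests %b,
  // splitting the branch probability between the two tests via TProb/FProb.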
374 
375   /// Translate branch (br) instruction.
376   /// \pre \p U is a branch instruction.
377   bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);
378 
379   // Begin switch lowering functions.
380   bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
381                            SwitchCG::JumpTableHeader &JTH,
382                            MachineBasicBlock *HeaderBB);
383   void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);
384 
385   void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
386                       MachineIRBuilder &MIB);
387 
388   /// Generate code for the BitTest header block, which precedes each
389   /// sequence of BitTestCases.
390   void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
391                          MachineBasicBlock *SwitchMBB);
392   /// Generate code to produce one "bit test" for a given BitTestCase \p B.
393   void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
394                        BranchProbability BranchProbToNext, Register Reg,
395                        SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
396 
397   void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
398                      const SwitchCG::SwitchWorkListItem &W, Value *Cond,
399                      MachineBasicBlock *SwitchMBB, MachineIRBuilder &MIB);
400 
401   bool lowerJumpTableWorkItem(
402       SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
403       MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
404       MachineIRBuilder &MIB, MachineFunction::iterator BBI,
405       BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
406       MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);
407 
408   bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
409                                 MachineBasicBlock *Fallthrough,
410                                 bool FallthroughUnreachable,
411                                 BranchProbability UnhandledProbs,
412                                 MachineBasicBlock *CurMBB,
413                                 MachineIRBuilder &MIB,
414                                 MachineBasicBlock *SwitchMBB);
415 
416   bool lowerBitTestWorkItem(
417       SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
418       MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
419       MachineIRBuilder &MIB, MachineFunction::iterator BBI,
420       BranchProbability DefaultProb, BranchProbability UnhandledProbs,
421       SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
422       bool FallthroughUnreachable);
423 
424   bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
425                            MachineBasicBlock *SwitchMBB,
426                            MachineBasicBlock *DefaultMBB,
427                            MachineIRBuilder &MIB);
428 
429   bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
430   // End switch lowering section.
431 
432   bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);
433 
434   bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);
435 
436   bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);
437 
438   bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);
439 
440   bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);
441 
442   bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);
443 
444   /// Translate return (ret) instruction.
445   /// The target needs to implement CallLowering::lowerReturn for
446   /// this to succeed.
447   /// \pre \p U is a return instruction.
448   bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);
449 
450   bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);
451 
452   bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
453     return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
454   }
455   bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
456     return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
457   }
458   bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
459     return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
460   }
461   bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
462     return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
463   }
464   bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
465     return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
466   }
467   bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
468     return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
469   }
470 
471   bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
472     return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
473   }
474   bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
475     return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
476   }
477   bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
478     return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
479   }
480   bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
481     return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
482   }
483   bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
484     return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
485   }
486   bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
487     return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
488   }
489   bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
490     return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
491   }
492   bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
493     return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
494   }
495   bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
496     return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
497   }
498   bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
499     return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
500   }
501   bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
502     return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
503   }
504   bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
505     return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
506   }
507   bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
508     return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
509   }
510   bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);
511 
512   bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
513     return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
514   }
515 
516   bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
517     return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
518   }
519 
520   bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
521     return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
522   }
523   bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
524     return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
525   }
526   bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
527     return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
528   }
529 
530   bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
531     return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
532   }
533   bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
534     return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
535   }
536   bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
537     return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
538   }
539   bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
540     return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
541   }
542   bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
543     return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
544   }
545 
546   bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);
547 
548   bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);
549   bool translateInsertVector(const User &U, MachineIRBuilder &MIRBuilder);
550 
551   bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);
552   bool translateExtractVector(const User &U, MachineIRBuilder &MIRBuilder);
553 
554   bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);
555 
556   bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
557   bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
558   bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
559   bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);
560 
561   // Stubs to keep the compiler happy while we implement the rest of the
562   // translation.
563   bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
564     return false;
565   }
566   bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
567     return false;
568   }
569   bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
570     return false;
571   }
572   bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
573     return false;
574   }
575   bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
576     return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
577   }
578   bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
579     return false;
580   }
581   bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
582     return false;
583   }
584   bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
585     return false;
586   }
587   bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
588     return false;
589   }
590 
591   bool translateConvergenceControlIntrinsic(const CallInst &CI,
592                                             Intrinsic::ID ID,
593                                             MachineIRBuilder &MIRBuilder);
594 
595   /// @}
596 
597   // Builder for machine instructions, a la IRBuilder.
598   // I.e., compared to a regular MIBuilder, this one also inserts the
599   // instruction in the current block, can create blocks, etc.; basically a
600   // kind of IRBuilder, but for Machine IR.
601   // CSEMIRBuilder CurBuilder;
602   std::unique_ptr<MachineIRBuilder> CurBuilder;
603 
604   // Builder set to the entry block (just after ABI lowering instructions). Used
605   // as a convenient location for Constants.
606   // CSEMIRBuilder EntryBuilder;
607   std::unique_ptr<MachineIRBuilder> EntryBuilder;
608 
609   // The MachineFunction currently being translated.
610   MachineFunction *MF = nullptr;
611 
612   /// MachineRegisterInfo used to create virtual registers.
613   MachineRegisterInfo *MRI = nullptr;
614 
615   const DataLayout *DL = nullptr;
616 
617   /// Current target configuration. Controls how the pass handles errors.
618   const TargetPassConfig *TPC = nullptr;
619 
620   CodeGenOptLevel OptLevel;
621 
622   /// Current optimization remark emitter. Used to report failures.
623   std::unique_ptr<OptimizationRemarkEmitter> ORE;
624 
625   AAResults *AA = nullptr;
626   AssumptionCache *AC = nullptr;
627   const TargetLibraryInfo *LibInfo = nullptr;
628   const TargetLowering *TLI = nullptr;
629   FunctionLoweringInfo FuncInfo;
630 
631   // True unless the Target Machine specifies no optimizations or the
632   // function has the optnone attribute.
633   bool EnableOpts = false;
634 
635   /// True when the block contains a tail call. This allows the IRTranslator to
636   /// stop translating such blocks early.
637   bool HasTailCall = false;
638 
639   StackProtectorDescriptor SPDescriptor;
640 
641   /// Switch analysis and optimization.
642   class GISelSwitchLowering : public SwitchCG::SwitchLowering {
643   public:
644     GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
645         : SwitchLowering(funcinfo), IRT(irt) {
646       assert(irt && "irt is null!");
647     }
648 
649     void addSuccessorWithProb(
650         MachineBasicBlock *Src, MachineBasicBlock *Dst,
651         BranchProbability Prob = BranchProbability::getUnknown()) override {
652       IRT->addSuccessorWithProb(Src, Dst, Prob);
653     }
654 
655     virtual ~GISelSwitchLowering() = default;
656 
657   private:
658     IRTranslator *IRT;
659   };
660 
661   std::unique_ptr<GISelSwitchLowering> SL;
662 
663   // * Insert all the code needed to materialize the constants
664   // at the proper place. E.g., Entry block or dominator block
665   // of each constant depending on how fancy we want to be.
666   // * Clear the different maps.
667   void finalizeFunction();
668 
669   // Processing steps done per block, e.g. emitting jump tables, stack
670   // protectors, etc. Returns true if no errors, false if there was a problem
671   // that caused an abort.
672   bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);
673 
674   /// Codegen a new tail for a stack protector check ParentMBB which has had its
675   /// tail spliced into a stack protector check success bb.
676   ///
677   /// For a high level explanation of how this fits into the stack protector
678   /// generation see the comment on the declaration of class
679   /// StackProtectorDescriptor.
680   ///
681   /// \return true if there were no problems.
682   bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
683                               MachineBasicBlock *ParentBB);
684 
685   /// Codegen the failure basic block for a stack protector check.
686   ///
687   /// A failure stack protector machine basic block consists simply of a call to
688   /// __stack_chk_fail().
689   ///
690   /// For a high level explanation of how this fits into the stack protector
691   /// generation see the comment on the declaration of class
692   /// StackProtectorDescriptor.
693   ///
694   /// \return true if there were no problems.
695   bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
696                                MachineBasicBlock *FailureBB);
697 
698   /// Get the VRegs that represent \p Val.
699   /// Non-aggregate types have just one corresponding VReg and the list can be
700   /// used as a single register. Aggregates get flattened. If such VRegs do
701   /// not exist, they are created.
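  ///
  /// For example (illustrative only), a value of type {i32, [2 x i64]} is
  /// flattened into three virtual registers, one per leaf scalar, together
  /// with a parallel list of offsets recording where each piece lives inside
  /// the original value:
  /// \code
  ///   ArrayRef<Register> Regs = getOrCreateVRegs(StructVal); // 3 regs
  /// \endcode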
702   ArrayRef<Register> getOrCreateVRegs(const Value &Val);
703 
704   Register getOrCreateVReg(const Value &Val) {
705     auto Regs = getOrCreateVRegs(Val);
706     if (Regs.empty())
707       return 0;
708     assert(Regs.size() == 1 &&
709            "attempt to get single VReg for aggregate or void");
710     return Regs[0];
711   }
712 
713   Register getOrCreateConvergenceTokenVReg(const Value &Token) {
714     assert(Token.getType()->isTokenTy());
715     auto &Regs = *VMap.getVRegs(Token);
716     if (!Regs.empty()) {
717       assert(Regs.size() == 1 &&
718              "Expected a single register for convergence tokens.");
719       return Regs[0];
720     }
721 
722     auto Reg = MRI->createGenericVirtualRegister(LLT::token());
723     Regs.push_back(Reg);
724     auto &Offsets = *VMap.getOffsets(Token);
725     if (Offsets.empty())
726       Offsets.push_back(0);
727     return Reg;
728   }
729 
730   /// Allocate some vregs and offsets in the VMap. Then populate just the
731   /// offsets while leaving the vregs empty.
732   ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);
733 
734   /// Get the frame index that represents \p AI.
735   /// If no such frame index exists, it is created.
736   int getOrCreateFrameIndex(const AllocaInst &AI);
737 
738   /// Get the alignment of the given memory operation instruction. This will
739   /// either be the explicitly specified value or the ABI-required alignment for
740   /// the type being accessed (according to the Module's DataLayout).
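  ///
  /// For example (assuming a typical DataLayout where i32 is 4-byte aligned),
  /// `load i32, ptr %p, align 1` yields Align(1), while the same load without
  /// an explicit alignment yields the ABI alignment, Align(4).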
741   Align getMemOpAlign(const Instruction &I);
742 
743   /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
744   /// returned will be the head of the translated block (suitable for branch
745   /// destinations).
746   MachineBasicBlock &getMBB(const BasicBlock &BB);
747 
748   /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
749   /// to `Edge.first` at the IR level. This is used when IRTranslation creates
750   /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
751   /// represented simply by the IR-level CFG.
752   void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);
753 
754   /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
755   /// this is just the single MachineBasicBlock corresponding to the predecessor
756   /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
757   /// preceding the original though (e.g. switch instructions).
758   SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
759     auto RemappedEdge = MachinePreds.find(Edge);
760     if (RemappedEdge != MachinePreds.end())
761       return RemappedEdge->second;
762     return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
763   }
764 
765   /// Return branch probability calculated by BranchProbabilityInfo for IR
766   /// blocks.
767   BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
768                                        const MachineBasicBlock *Dst) const;
769 
770   void addSuccessorWithProb(
771       MachineBasicBlock *Src, MachineBasicBlock *Dst,
772       BranchProbability Prob = BranchProbability::getUnknown());
773 
774 public:
775   IRTranslator(CodeGenOptLevel OptLevel = CodeGenOptLevel::None);
776 
777   StringRef getPassName() const override { return "IRTranslator"; }
778 
779   void getAnalysisUsage(AnalysisUsage &AU) const override;
780 
781   // Algo:
782   //   CallLowering = MF.subtarget.getCallLowering()
783   //   F = MF.getParent()
784   //   MIRBuilder.reset(MF)
785   //   getMBB(F.getEntryBB())
786   //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
787   //   for each bb in F
788   //     getMBB(bb)
789   //     for each inst in bb
790   //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
791   //         report_fatal_error("Don't know how to translate input");
792   //   finalize()
793   bool runOnMachineFunction(MachineFunction &MF) override;
794 };
795 
796 } // end namespace llvm
797 
798 #endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
799