1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13 #include "llvm/ADT/PostOrderIterator.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/BranchProbabilityInfo.h"
19 #include "llvm/Analysis/Loads.h"
20 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
24 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
25 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
26 #include "llvm/CodeGen/LowLevelType.h"
27 #include "llvm/CodeGen/MachineBasicBlock.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstrBuilder.h"
31 #include "llvm/CodeGen/MachineMemOperand.h"
32 #include "llvm/CodeGen/MachineModuleInfo.h"
33 #include "llvm/CodeGen/MachineOperand.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/StackProtector.h"
36 #include "llvm/CodeGen/SwitchLoweringUtils.h"
37 #include "llvm/CodeGen/TargetFrameLowering.h"
38 #include "llvm/CodeGen/TargetInstrInfo.h"
39 #include "llvm/CodeGen/TargetLowering.h"
40 #include "llvm/CodeGen/TargetPassConfig.h"
41 #include "llvm/CodeGen/TargetRegisterInfo.h"
42 #include "llvm/CodeGen/TargetSubtargetInfo.h"
43 #include "llvm/IR/BasicBlock.h"
44 #include "llvm/IR/CFG.h"
45 #include "llvm/IR/Constant.h"
46 #include "llvm/IR/Constants.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugInfo.h"
49 #include "llvm/IR/DerivedTypes.h"
50 #include "llvm/IR/Function.h"
51 #include "llvm/IR/GetElementPtrTypeIterator.h"
52 #include "llvm/IR/InlineAsm.h"
53 #include "llvm/IR/InstrTypes.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/IntrinsicInst.h"
56 #include "llvm/IR/Intrinsics.h"
57 #include "llvm/IR/LLVMContext.h"
58 #include "llvm/IR/Metadata.h"
59 #include "llvm/IR/PatternMatch.h"
60 #include "llvm/IR/Type.h"
61 #include "llvm/IR/User.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/InitializePasses.h"
64 #include "llvm/MC/MCContext.h"
65 #include "llvm/Pass.h"
66 #include "llvm/Support/Casting.h"
67 #include "llvm/Support/CodeGen.h"
68 #include "llvm/Support/Debug.h"
69 #include "llvm/Support/ErrorHandling.h"
70 #include "llvm/Support/LowLevelTypeImpl.h"
71 #include "llvm/Support/MathExtras.h"
72 #include "llvm/Support/raw_ostream.h"
73 #include "llvm/Target/TargetIntrinsicInfo.h"
74 #include "llvm/Target/TargetMachine.h"
75 #include <algorithm>
76 #include <cassert>
77 #include <cstddef>
78 #include <cstdint>
79 #include <iterator>
80 #include <string>
81 #include <utility>
82 #include <vector>
83 
84 #define DEBUG_TYPE "irtranslator"
85 
86 using namespace llvm;
87 
88 static cl::opt<bool>
89     EnableCSEInIRTranslator("enable-cse-in-irtranslator",
90                             cl::desc("Enable CSE in the IRTranslator"),
91                             cl::Optional, cl::init(false));
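// A minimal usage sketch (hypothetical invocation; assumes an llc build with
// GlobalISel enabled):
//   llc -global-isel -enable-cse-in-irtranslator=1 input.ll -o out.s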
92 char IRTranslator::ID = 0;
93 
94 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
95                 false, false)
96 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
97 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
98 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
99 INITIALIZE_PASS_DEPENDENCY(StackProtector)
100 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
101                 false, false)
102 
103 static void reportTranslationError(MachineFunction &MF,
104                                    const TargetPassConfig &TPC,
105                                    OptimizationRemarkEmitter &ORE,
106                                    OptimizationRemarkMissed &R) {
107   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
108 
109   // Print the function name explicitly if we don't have a debug location (which
110   // makes the diagnostic less useful) or if we're going to emit a raw error.
111   if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
112     R << (" (in function: " + MF.getName() + ")").str();
113 
114   if (TPC.isGlobalISelAbortEnabled())
115     report_fatal_error(R.getMsg());
116   else
117     ORE.emit(R);
118 }
119 
120 IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
121     : MachineFunctionPass(ID), OptLevel(optlevel) {}
122 
123 #ifndef NDEBUG
124 namespace {
125 /// Verify that every instruction created has the same DILocation as the
126 /// instruction being translated.
127 class DILocationVerifier : public GISelChangeObserver {
128   const Instruction *CurrInst = nullptr;
129 
130 public:
131   DILocationVerifier() = default;
132   ~DILocationVerifier() = default;
133 
134   const Instruction *getCurrentInst() const { return CurrInst; }
135   void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
136 
137   void erasingInstr(MachineInstr &MI) override {}
138   void changingInstr(MachineInstr &MI) override {}
139   void changedInstr(MachineInstr &MI) override {}
140 
141   void createdInstr(MachineInstr &MI) override {
142     assert(getCurrentInst() && "Inserted instruction without a current MI");
143 
144     // Only print the check message if we're actually checking it.
145 #ifndef NDEBUG
146     LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
147                       << " was copied to " << MI);
148 #endif
149     // We allow insts in the entry block to have a debug loc line of 0 because
150     // they could have originated from constants, and we don't want a jumpy
151     // debug experience.
152     assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
153             MI.getDebugLoc().getLine() == 0) &&
154            "Line info was not transferred to all instructions");
155   }
156 };
157 } // namespace
158 #endif // ifndef NDEBUG
159 
160 
161 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
162   AU.addRequired<StackProtector>();
163   AU.addRequired<TargetPassConfig>();
164   AU.addRequired<GISelCSEAnalysisWrapperPass>();
165   if (OptLevel != CodeGenOpt::None)
166     AU.addRequired<BranchProbabilityInfoWrapperPass>();
167   getSelectionDAGFallbackAnalysisUsage(AU);
168   MachineFunctionPass::getAnalysisUsage(AU);
169 }
170 
171 IRTranslator::ValueToVRegInfo::VRegListT &
172 IRTranslator::allocateVRegs(const Value &Val) {
173   auto VRegsIt = VMap.findVRegs(Val);
174   if (VRegsIt != VMap.vregs_end())
175     return *VRegsIt->second;
176   auto *Regs = VMap.getVRegs(Val);
177   auto *Offsets = VMap.getOffsets(Val);
178   SmallVector<LLT, 4> SplitTys;
179   computeValueLLTs(*DL, *Val.getType(), SplitTys,
180                    Offsets->empty() ? Offsets : nullptr);
181   for (unsigned i = 0; i < SplitTys.size(); ++i)
182     Regs->push_back(0);
183   return *Regs;
184 }
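// For illustration (a sketch, not from this file): a value of IR type
// {i64, i32} is split by computeValueLLTs into the LLTs {s64, s32} at bit
// offsets {0, 64}, so the loop above reserves one not-yet-allocated vreg
// slot per split type.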
185 
186 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
187   auto VRegsIt = VMap.findVRegs(Val);
188   if (VRegsIt != VMap.vregs_end())
189     return *VRegsIt->second;
190 
191   if (Val.getType()->isVoidTy())
192     return *VMap.getVRegs(Val);
193 
194   // Create entry for this type.
195   auto *VRegs = VMap.getVRegs(Val);
196   auto *Offsets = VMap.getOffsets(Val);
197 
198   assert(Val.getType()->isSized() &&
199          "Don't know how to create an empty vreg");
200 
201   SmallVector<LLT, 4> SplitTys;
202   computeValueLLTs(*DL, *Val.getType(), SplitTys,
203                    Offsets->empty() ? Offsets : nullptr);
204 
205   if (!isa<Constant>(Val)) {
206     for (auto Ty : SplitTys)
207       VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
208     return *VRegs;
209   }
210 
211   if (Val.getType()->isAggregateType()) {
212     // UndefValue, ConstantAggregateZero
213     auto &C = cast<Constant>(Val);
214     unsigned Idx = 0;
215     while (auto Elt = C.getAggregateElement(Idx++)) {
216       auto EltRegs = getOrCreateVRegs(*Elt);
217       llvm::copy(EltRegs, std::back_inserter(*VRegs));
218     }
219   } else {
220     assert(SplitTys.size() == 1 && "unexpectedly split LLT");
221     VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
222     bool Success = translate(cast<Constant>(Val), VRegs->front());
223     if (!Success) {
224       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
225                                  MF->getFunction().getSubprogram(),
226                                  &MF->getFunction().getEntryBlock());
227       R << "unable to translate constant: " << ore::NV("Type", Val.getType());
228       reportTranslationError(*MF, *TPC, *ORE, R);
229       return *VRegs;
230     }
231   }
232 
233   return *VRegs;
234 }
235 
236 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
237   auto MapEntry = FrameIndices.find(&AI);
238   if (MapEntry != FrameIndices.end())
239     return MapEntry->second;
240 
241   uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
242   uint64_t Size =
243       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
244 
245   // Always allocate at least one byte.
246   Size = std::max<uint64_t>(Size, 1u);
247 
248   int &FI = FrameIndices[&AI];
249   FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
250   return FI;
251 }
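// E.g. (a sketch): for "%a = alloca [4 x i32]", ElementSize is 16 and the
// array-size operand is 1, giving a 16-byte stack object; "alloca i8, i64 0"
// would still get a 1-byte object because of the std::max above.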
252 
253 Align IRTranslator::getMemOpAlign(const Instruction &I) {
254   if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
255     return SI->getAlign();
256   if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
257     return LI->getAlign();
258   if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
259     return AI->getAlign();
260   if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
261     return AI->getAlign();
262 
263   OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
264   R << "unable to translate memop: " << ore::NV("Opcode", &I);
265   reportTranslationError(*MF, *TPC, *ORE, R);
266   return Align(1);
267 }
268 
269 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
270   MachineBasicBlock *&MBB = BBToMBB[&BB];
271   assert(MBB && "BasicBlock was not encountered before");
272   return *MBB;
273 }
274 
275 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
276   assert(NewPred && "new predecessor must be a real MachineBasicBlock");
277   MachinePreds[Edge].push_back(NewPred);
278 }
279 
280 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
281                                      MachineIRBuilder &MIRBuilder) {
282   // Get or create a virtual register for each value.
283   // Unless the value is a Constant => loadimm cst?
284   // or inline constant each time?
285   // Creation of a virtual register needs to have a size.
286   Register Op0 = getOrCreateVReg(*U.getOperand(0));
287   Register Op1 = getOrCreateVReg(*U.getOperand(1));
288   Register Res = getOrCreateVReg(U);
289   uint16_t Flags = 0;
290   if (isa<Instruction>(U)) {
291     const Instruction &I = cast<Instruction>(U);
292     Flags = MachineInstr::copyFlagsFromInstruction(I);
293   }
294 
295   MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
296   return true;
297 }
298 
299 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
300                                     MachineIRBuilder &MIRBuilder) {
301   Register Op0 = getOrCreateVReg(*U.getOperand(0));
302   Register Res = getOrCreateVReg(U);
303   uint16_t Flags = 0;
304   if (isa<Instruction>(U)) {
305     const Instruction &I = cast<Instruction>(U);
306     Flags = MachineInstr::copyFlagsFromInstruction(I);
307   }
308   MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
309   return true;
310 }
311 
312 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
313   return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
314 }
315 
316 bool IRTranslator::translateCompare(const User &U,
317                                     MachineIRBuilder &MIRBuilder) {
318   auto *CI = dyn_cast<CmpInst>(&U);
319   Register Op0 = getOrCreateVReg(*U.getOperand(0));
320   Register Op1 = getOrCreateVReg(*U.getOperand(1));
321   Register Res = getOrCreateVReg(U);
322   CmpInst::Predicate Pred =
323       CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
324                                     cast<ConstantExpr>(U).getPredicate());
325   if (CmpInst::isIntPredicate(Pred))
326     MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
327   else if (Pred == CmpInst::FCMP_FALSE)
328     MIRBuilder.buildCopy(
329         Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
330   else if (Pred == CmpInst::FCMP_TRUE)
331     MIRBuilder.buildCopy(
332         Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
333   else {
334     assert(CI && "Instruction should be CmpInst");
335     MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
336                          MachineInstr::copyFlagsFromInstruction(*CI));
337   }
338 
339   return true;
340 }
341 
342 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
343   const ReturnInst &RI = cast<ReturnInst>(U);
344   const Value *Ret = RI.getReturnValue();
345   if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
346     Ret = nullptr;
347 
348   ArrayRef<Register> VRegs;
349   if (Ret)
350     VRegs = getOrCreateVRegs(*Ret);
351 
352   Register SwiftErrorVReg = 0;
353   if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
354     SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
355         &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
356   }
357 
358   // The target may mess with the insertion point, but this is not
359   // important, as a return is the last instruction of the block
360   // anyway.
361   return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
362 }
363 
364 void IRTranslator::emitBranchForMergedCondition(
365     const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
366     MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
367     BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
368   // If the leaf of the tree is a comparison, merge the condition into
369   // the caseblock.
370   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
371     CmpInst::Predicate Condition;
372     if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
373       Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
374     } else {
375       const FCmpInst *FC = cast<FCmpInst>(Cond);
376       Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
377     }
378 
379     SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
380                            BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
381                            CurBuilder->getDebugLoc(), TProb, FProb);
382     SL->SwitchCases.push_back(CB);
383     return;
384   }
385 
386   // Create a CaseBlock record representing this branch.
387   CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
388   SwitchCG::CaseBlock CB(
389       Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
390       nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
391   SL->SwitchCases.push_back(CB);
392 }
393 
394 static bool isValInBlock(const Value *V, const BasicBlock *BB) {
395   if (const Instruction *I = dyn_cast<Instruction>(V))
396     return I->getParent() == BB;
397   return true;
398 }
399 
400 void IRTranslator::findMergedConditions(
401     const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
402     MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
403     Instruction::BinaryOps Opc, BranchProbability TProb,
404     BranchProbability FProb, bool InvertCond) {
405   using namespace PatternMatch;
406   assert((Opc == Instruction::And || Opc == Instruction::Or) &&
407          "Expected Opc to be AND/OR");
408   // If Cond is a NOT that is not itself part of the tree, skip over it and
409   // remember to invert the op and operands at the next level.
410   Value *NotCond;
411   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
412       isValInBlock(NotCond, CurBB->getBasicBlock())) {
413     findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
414                          !InvertCond);
415     return;
416   }
417 
418   const Instruction *BOp = dyn_cast<Instruction>(Cond);
419   const Value *BOpOp0, *BOpOp1;
420   // Compute the effective opcode for Cond, taking into account whether it needs
421   // to be inverted, e.g.
422   //   and (not (or A, B)), C
423   // gets lowered as
424   //   and (and (not A, not B), C)
425   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
426   if (BOp) {
427     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
428                ? Instruction::And
429                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
430                       ? Instruction::Or
431                       : (Instruction::BinaryOps)0);
432     if (InvertCond) {
433       if (BOpc == Instruction::And)
434         BOpc = Instruction::Or;
435       else if (BOpc == Instruction::Or)
436         BOpc = Instruction::And;
437     }
438   }
439 
440   // If this node is not part of the or/and tree, emit it as a branch.
441   // Note that all nodes in the tree should have the same opcode.
442   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
443   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
444       !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
445       !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
446     emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
447                                  InvertCond);
448     return;
449   }
450 
451   //  Create TmpBB after CurBB.
452   MachineFunction::iterator BBI(CurBB);
453   MachineBasicBlock *TmpBB =
454       MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
455   CurBB->getParent()->insert(++BBI, TmpBB);
456 
457   if (Opc == Instruction::Or) {
458     // Codegen X | Y as:
459     // BB1:
460     //   jmp_if_X TBB
461     //   jmp TmpBB
462     // TmpBB:
463     //   jmp_if_Y TBB
464     //   jmp FBB
465     //
466 
467     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
468     // The requirement is that
469     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
470     //     = TrueProb for original BB.
471     // Assuming the original probabilities are A and B, one choice is to set
472     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
473     // A/(1+B) and 2B/(1+B). This choice assumes that
474     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
475     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
476     // TmpBB, but the math is more complicated.
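    // Worked example (assuming incoming probabilities A = TProb = 0.6 and
    // B = FProb = 0.4): BB1 gets {0.3, 0.7} and TmpBB gets {0.3, 0.4}
    // normalized to {3/7, 4/7}. Check: 0.3 + 0.7 * (3/7) = 0.6 = A, meeting
    // the requirement above.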
477 
478     auto NewTrueProb = TProb / 2;
479     auto NewFalseProb = TProb / 2 + FProb;
480     // Emit the LHS condition.
481     findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
482                          NewFalseProb, InvertCond);
483 
484     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
485     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
486     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
487     // Emit the RHS condition into TmpBB.
488     findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
489                          Probs[1], InvertCond);
490   } else {
491     assert(Opc == Instruction::And && "Unknown merge op!");
492     // Codegen X & Y as:
493     // BB1:
494     //   jmp_if_X TmpBB
495     //   jmp FBB
496     // TmpBB:
497     //   jmp_if_Y TBB
498     //   jmp FBB
499     //
500     //  This requires creation of TmpBB after CurBB.
501 
502     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
503     // The requirement is that
504     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
505     //     = FalseProb for original BB.
506     // Assuming the original probabilities are A and B, one choice is to set
507     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
508     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
509     // TrueProb for BB1 * FalseProb for TmpBB.
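    // Worked example (same assumed A = 0.6, B = 0.4): BB1 gets {0.8, 0.2}
    // and TmpBB gets {0.6, 0.2} normalized to {0.75, 0.25}. Check:
    // 0.2 + 0.8 * 0.25 = 0.4 = B, as required.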
510 
511     auto NewTrueProb = TProb + FProb / 2;
512     auto NewFalseProb = FProb / 2;
513     // Emit the LHS condition.
514     findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
515                          NewFalseProb, InvertCond);
516 
517     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
518     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
519     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
520     // Emit the RHS condition into TmpBB.
521     findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
522                          Probs[1], InvertCond);
523   }
524 }
525 
526 bool IRTranslator::shouldEmitAsBranches(
527     const std::vector<SwitchCG::CaseBlock> &Cases) {
528   // For multiple cases, it's better to emit as branches.
529   if (Cases.size() != 2)
530     return true;
531 
532   // If this is two comparisons of the same values or'd or and'd together, they
533   // will get folded into a single comparison, so don't emit two blocks.
534   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
535        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
536       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
537        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
538     return false;
539   }
540 
541   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
542   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
543   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
544       Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
545       isa<Constant>(Cases[0].CmpRHS) &&
546       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
547     if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
548         Cases[0].TrueBB == Cases[1].ThisBB)
549       return false;
550     if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
551         Cases[0].FalseBB == Cases[1].ThisBB)
552       return false;
553   }
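  // For instance (an illustrative IR sketch):
  //   %x0 = icmp eq i8* %X, null
  //   %y0 = icmp eq i8* %Y, null
  //   %c  = and i1 %x0, %y0
  // matches the (X == null) & (Y == null) pattern above, so returning false
  // here keeps it as one combined condition that can later be folded into a
  // single (X|Y) == 0 test.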
554 
555   return true;
556 }
557 
558 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
559   const BranchInst &BrInst = cast<BranchInst>(U);
560   auto &CurMBB = MIRBuilder.getMBB();
561   auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
562 
563   if (BrInst.isUnconditional()) {
564     // If the unconditional target is the layout successor, fallthrough.
565     if (!CurMBB.isLayoutSuccessor(Succ0MBB))
566       MIRBuilder.buildBr(*Succ0MBB);
567 
568     // Link successors.
569     for (const BasicBlock *Succ : successors(&BrInst))
570       CurMBB.addSuccessor(&getMBB(*Succ));
571     return true;
572   }
573 
574   // If this condition is one of the special cases we handle, do special stuff
575   // now.
576   const Value *CondVal = BrInst.getCondition();
577   MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
578 
579   const auto &TLI = *MF->getSubtarget().getTargetLowering();
580 
581   // If this is a series of conditions that are or'd or and'd together, emit
582   // this as a sequence of branches instead of setcc's with and/or operations.
583   // As long as jumps are not expensive (exceptions for multi-use logic ops,
584   // unpredictable branches, and vector extracts because those jumps are likely
585   // expensive for any target), this should improve performance.
586   // For example, instead of something like:
587   //     cmp A, B
588   //     C = seteq
589   //     cmp D, E
590   //     F = setle
591   //     or C, F
592   //     jnz foo
593   // Emit:
594   //     cmp A, B
595   //     je foo
596   //     cmp D, E
597   //     jle foo
598   using namespace PatternMatch;
599   const Instruction *CondI = dyn_cast<Instruction>(CondVal);
600   if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
601       !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
602     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
603     Value *Vec;
604     const Value *BOp0, *BOp1;
605     if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
606       Opcode = Instruction::And;
607     else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
608       Opcode = Instruction::Or;
609 
610     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
611                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
612       findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
613                            getEdgeProbability(&CurMBB, Succ0MBB),
614                            getEdgeProbability(&CurMBB, Succ1MBB),
615                            /*InvertCond=*/false);
616       assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
617 
618       // Allow some cases to be rejected.
619       if (shouldEmitAsBranches(SL->SwitchCases)) {
620         // Emit the branch for this block.
621         emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
622         SL->SwitchCases.erase(SL->SwitchCases.begin());
623         return true;
624       }
625 
626       // Okay, we decided not to do this; remove any inserted MBBs and clear
627       // SwitchCases.
628       for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
629         MF->erase(SL->SwitchCases[I].ThisBB);
630 
631       SL->SwitchCases.clear();
632     }
633   }
634 
635   // Create a CaseBlock record representing this branch.
636   SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
637                          ConstantInt::getTrue(MF->getFunction().getContext()),
638                          nullptr, Succ0MBB, Succ1MBB, &CurMBB,
639                          CurBuilder->getDebugLoc());
640 
641   // Use emitSwitchCase to actually insert the fast branch sequence for this
642   // cond branch.
643   emitSwitchCase(CB, &CurMBB, *CurBuilder);
644   return true;
645 }
646 
647 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
648                                         MachineBasicBlock *Dst,
649                                         BranchProbability Prob) {
650   if (!FuncInfo.BPI) {
651     Src->addSuccessorWithoutProb(Dst);
652     return;
653   }
654   if (Prob.isUnknown())
655     Prob = getEdgeProbability(Src, Dst);
656   Src->addSuccessor(Dst, Prob);
657 }
658 
659 BranchProbability
660 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
661                                  const MachineBasicBlock *Dst) const {
662   const BasicBlock *SrcBB = Src->getBasicBlock();
663   const BasicBlock *DstBB = Dst->getBasicBlock();
664   if (!FuncInfo.BPI) {
665     // If BPI is not available, set the default probability as 1 / N, where N is
666     // the number of successors.
667     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
668     return BranchProbability(1, SuccSize);
669   }
670   return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
671 }
672 
673 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
674   using namespace SwitchCG;
675   // Extract cases from the switch.
676   const SwitchInst &SI = cast<SwitchInst>(U);
677   BranchProbabilityInfo *BPI = FuncInfo.BPI;
678   CaseClusterVector Clusters;
679   Clusters.reserve(SI.getNumCases());
680   for (auto &I : SI.cases()) {
681     MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
682     assert(Succ && "Could not find successor mbb in mapping");
683     const ConstantInt *CaseVal = I.getCaseValue();
684     BranchProbability Prob =
685         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
686             : BranchProbability(1, SI.getNumCases() + 1);
687     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
688   }
689 
690   MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
691 
692   // Cluster adjacent cases with the same destination. We do this at all
693   // optimization levels because it's cheap to do and will make codegen faster
694   // if there are many clusters.
695   sortAndRangeify(Clusters);
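  // E.g. (a sketch): cases 0->A, 1->A, 2->A, 7->B become the clusters
  // [0,2]->A and [7,7]->B after sorting and rangeification.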
696 
697   MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
698 
699   // If there is only the default destination, jump there directly.
700   if (Clusters.empty()) {
701     SwitchMBB->addSuccessor(DefaultMBB);
702     if (DefaultMBB != SwitchMBB->getNextNode())
703       MIB.buildBr(*DefaultMBB);
704     return true;
705   }
706 
707   SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
708   SL->findBitTestClusters(Clusters, &SI);
709 
710   LLVM_DEBUG({
711     dbgs() << "Case clusters: ";
712     for (const CaseCluster &C : Clusters) {
713       if (C.Kind == CC_JumpTable)
714         dbgs() << "JT:";
715       if (C.Kind == CC_BitTests)
716         dbgs() << "BT:";
717 
718       C.Low->getValue().print(dbgs(), true);
719       if (C.Low != C.High) {
720         dbgs() << '-';
721         C.High->getValue().print(dbgs(), true);
722       }
723       dbgs() << ' ';
724     }
725     dbgs() << '\n';
726   });
727 
728   assert(!Clusters.empty());
729   SwitchWorkList WorkList;
730   CaseClusterIt First = Clusters.begin();
731   CaseClusterIt Last = Clusters.end() - 1;
732   auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
733   WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
734 
735   // FIXME: At the moment we don't do any splitting optimizations here like
736   // SelectionDAG does, so this worklist only has one entry.
737   while (!WorkList.empty()) {
738     SwitchWorkListItem W = WorkList.back();
739     WorkList.pop_back();
740     if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
741       return false;
742   }
743   return true;
744 }
745 
746 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
747                                  MachineBasicBlock *MBB) {
748   // Emit the code for the jump table
749   assert(JT.Reg != -1U && "Should lower JT Header first!");
750   MachineIRBuilder MIB(*MBB->getParent());
751   MIB.setMBB(*MBB);
752   MIB.setDebugLoc(CurBuilder->getDebugLoc());
753 
754   Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
755   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
756 
757   auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
758   MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
759 }
760 
761 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
762                                        SwitchCG::JumpTableHeader &JTH,
763                                        MachineBasicBlock *HeaderBB) {
764   MachineIRBuilder MIB(*HeaderBB->getParent());
765   MIB.setMBB(*HeaderBB);
766   MIB.setDebugLoc(CurBuilder->getDebugLoc());
767 
768   const Value &SValue = *JTH.SValue;
769   // Subtract the lowest switch case value from the value being switched on.
770   const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
771   Register SwitchOpReg = getOrCreateVReg(SValue);
772   auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
773   auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
774 
775   // This value may be smaller or larger than the target's pointer type, and
776   // therefore may require extension or truncation.
777   Type *PtrIRTy = SValue.getType()->getPointerTo();
778   const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
779   Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
780 
781   JT.Reg = Sub.getReg(0);
782 
783   if (JTH.OmitRangeCheck) {
784     if (JT.MBB != HeaderBB->getNextNode())
785       MIB.buildBr(*JT.MBB);
786     return true;
787   }
788 
789   // Emit the range check for the jump table, and branch to the default block
790   // for the switch statement if the value being switched on exceeds the
791   // largest case in the switch.
792   auto Cst = getOrCreateVReg(
793       *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
794   Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
795   auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
796 
797   auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
798 
799   // Avoid emitting unnecessary branches to the next block.
800   if (JT.MBB != HeaderBB->getNextNode())
801     BrCond = MIB.buildBr(*JT.MBB);
802   return true;
803 }
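// Roughly, the header emitted above looks like this in generic MIR (a sketch;
// names and types are illustrative only):
//   %sub:_(s32) = G_SUB %switchval, %first      ; bias to a zero-based index
//   %idx:_(s64) = G_ZEXT %sub                   ; match pointer-sized type
//   %cmp:_(s1)  = G_ICMP intpred(ugt), %idx(s64), %range
//   G_BRCOND %cmp(s1), %bb.default
//   G_BR %bb.jumptable                          ; omitted if it falls through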
804 
805 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
806                                   MachineBasicBlock *SwitchBB,
807                                   MachineIRBuilder &MIB) {
808   Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
809   Register Cond;
810   DebugLoc OldDbgLoc = MIB.getDebugLoc();
811   MIB.setDebugLoc(CB.DbgLoc);
812   MIB.setMBB(*CB.ThisBB);
813 
814   if (CB.PredInfo.NoCmp) {
815     // Branch or fall through to TrueBB.
816     addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
817     addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
818                       CB.ThisBB);
819     CB.ThisBB->normalizeSuccProbs();
820     if (CB.TrueBB != CB.ThisBB->getNextNode())
821       MIB.buildBr(*CB.TrueBB);
822     MIB.setDebugLoc(OldDbgLoc);
823     return;
824   }
825 
826   const LLT i1Ty = LLT::scalar(1);
827   // Build the compare.
828   if (!CB.CmpMHS) {
829     const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
830     // For conditional branch lowering, we might try to do something silly like
831     // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
832     // just re-use the existing condition vreg.
833     if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
834         CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
835       Cond = CondLHS;
836     } else {
837       Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
838       if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
839         Cond =
840             MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
841       else
842         Cond =
843             MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
844     }
845   } else {
846     assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
847            "Can only handle SLE ranges");
848 
849     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
850     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
851 
852     Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
853     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
854       Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
855       Cond =
856           MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
857     } else {
858       const LLT CmpTy = MRI->getType(CmpOpReg);
859       auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
860       auto Diff = MIB.buildConstant(CmpTy, High - Low);
861       Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
862     }
863   }
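  // The unsigned-compare trick above turns a range check such as
  // 3 <= x <= 7 into (x - 3) u<= 4, one compare instead of two.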
864 
865   // Update successor info
866   addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
867 
868   addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
869                     CB.ThisBB);
870 
871   // TrueBB and FalseBB are always different unless the incoming IR is
872   // degenerate. This only happens when running llc on weird IR.
873   if (CB.TrueBB != CB.FalseBB)
874     addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
875   CB.ThisBB->normalizeSuccProbs();
876 
877   addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
878                     CB.ThisBB);
879 
880   MIB.buildBrCond(Cond, *CB.TrueBB);
881   MIB.buildBr(*CB.FalseBB);
882   MIB.setDebugLoc(OldDbgLoc);
883 }
884 
885 bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
886                                           MachineBasicBlock *SwitchMBB,
887                                           MachineBasicBlock *CurMBB,
888                                           MachineBasicBlock *DefaultMBB,
889                                           MachineIRBuilder &MIB,
890                                           MachineFunction::iterator BBI,
891                                           BranchProbability UnhandledProbs,
892                                           SwitchCG::CaseClusterIt I,
893                                           MachineBasicBlock *Fallthrough,
894                                           bool FallthroughUnreachable) {
895   using namespace SwitchCG;
896   MachineFunction *CurMF = SwitchMBB->getParent();
897   // FIXME: Optimize away range check based on pivot comparisons.
898   JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
899   SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
900   BranchProbability DefaultProb = W.DefaultProb;
901 
902   // The jump block hasn't been inserted yet; insert it here.
903   MachineBasicBlock *JumpMBB = JT->MBB;
904   CurMF->insert(BBI, JumpMBB);
905 
906   // Since the jump table block is separate from the switch block, we need
907   // to keep track of it as a machine predecessor to the default block,
908   // otherwise we lose the phi edges.
909   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
910                     CurMBB);
911   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
912                     JumpMBB);
913 
914   auto JumpProb = I->Prob;
915   auto FallthroughProb = UnhandledProbs;
916 
917   // If the default statement is a target of the jump table, we evenly
918   // distribute the default probability to successors of CurMBB. Also
919   // update the probability on the edge from JumpMBB to Fallthrough.
920   for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
921                                         SE = JumpMBB->succ_end();
922        SI != SE; ++SI) {
923     if (*SI == DefaultMBB) {
924       JumpProb += DefaultProb / 2;
925       FallthroughProb -= DefaultProb / 2;
926       JumpMBB->setSuccProbability(SI, DefaultProb / 2);
927       JumpMBB->normalizeSuccProbs();
928     } else {
929       // Also record edges from the jump table block to its successors.
930       addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
931                         JumpMBB);
932     }
933   }
934 
935   // Skip the range check if the fallthrough block is unreachable.
936   if (FallthroughUnreachable)
937     JTH->OmitRangeCheck = true;
938 
939   if (!JTH->OmitRangeCheck)
940     addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
941   addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
942   CurMBB->normalizeSuccProbs();
943 
944   // The jump table header will be inserted in our current block, do the
945   // range check, and fall through to our fallthrough block.
946   JTH->HeaderBB = CurMBB;
947   JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
948 
949   // If we're in the right place, emit the jump table header right now.
950   if (CurMBB == SwitchMBB) {
951     if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
952       return false;
953     JTH->Emitted = true;
954   }
955   return true;
956 }
957 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
958                                             Value *Cond,
959                                             MachineBasicBlock *Fallthrough,
960                                             bool FallthroughUnreachable,
961                                             BranchProbability UnhandledProbs,
962                                             MachineBasicBlock *CurMBB,
963                                             MachineIRBuilder &MIB,
964                                             MachineBasicBlock *SwitchMBB) {
965   using namespace SwitchCG;
966   const Value *RHS, *LHS, *MHS;
967   CmpInst::Predicate Pred;
968   if (I->Low == I->High) {
969     // Check Cond == I->Low.
970     Pred = CmpInst::ICMP_EQ;
971     LHS = Cond;
972     RHS = I->Low;
973     MHS = nullptr;
974   } else {
975     // Check I->Low <= Cond <= I->High.
976     Pred = CmpInst::ICMP_SLE;
977     LHS = I->Low;
978     MHS = Cond;
979     RHS = I->High;
980   }
981 
982   // If Fallthrough is unreachable, fold away the comparison.
983   // The false probability is the sum of all unhandled cases.
984   CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
985                CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
986 
987   emitSwitchCase(CB, SwitchMBB, MIB);
988   return true;
989 }
990 
991 void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
992                                      MachineBasicBlock *SwitchBB) {
993   MachineIRBuilder &MIB = *CurBuilder;
994   MIB.setMBB(*SwitchBB);
995 
996   // Subtract the minimum value.
997   Register SwitchOpReg = getOrCreateVReg(*B.SValue);
998 
999   LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1000   Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1001   auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1002 
1003   // Ensure that the type will fit the mask value.
1004   LLT MaskTy = SwitchOpTy;
1005   for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1006     if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
1007       // Switch table case ranges are encoded into a series of masks.
1008       // Just use the pointer type; it's guaranteed to fit.
1009       MaskTy = LLT::scalar(64);
1010       break;
1011     }
1012   }
1013   Register SubReg = RangeSub.getReg(0);
1014   if (SwitchOpTy != MaskTy)
1015     SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1016 
1017   B.RegVT = getMVTForLLT(MaskTy);
1018   B.Reg = SubReg;
1019 
1020   MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1021 
1022   if (!B.OmitRangeCheck)
1023     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1024   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1025 
1026   SwitchBB->normalizeSuccProbs();
1027 
1028   if (!B.OmitRangeCheck) {
1029     // Conditional branch to the default block.
1030     auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1031     auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1032                                   RangeSub, RangeCst);
1033     MIB.buildBrCond(RangeCmp, *B.Default);
1034   }
1035 
1036   // Avoid emitting unnecessary branches to the next block.
1037   if (MBB != SwitchBB->getNextNode())
1038     MIB.buildBr(*MBB);
1039 }
1040 
1041 void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1042                                    MachineBasicBlock *NextMBB,
1043                                    BranchProbability BranchProbToNext,
1044                                    Register Reg, SwitchCG::BitTestCase &B,
1045                                    MachineBasicBlock *SwitchBB) {
1046   MachineIRBuilder &MIB = *CurBuilder;
1047   MIB.setMBB(*SwitchBB);
1048 
1049   LLT SwitchTy = getLLTForMVT(BB.RegVT);
1050   Register Cmp;
1051   unsigned PopCount = countPopulation(B.Mask);
1052   if (PopCount == 1) {
1053     // Testing for a single bit; just compare the shift count with what it
1054     // would need to be to shift a 1 bit in that position.
1055     auto MaskTrailingZeros =
1056         MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
1057     Cmp =
1058         MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1059             .getReg(0);
1060   } else if (PopCount == BB.Range) {
1061     // There is only one zero bit in the range, test for it directly.
1062     auto MaskTrailingOnes =
1063         MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
1064     Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1065               .getReg(0);
1066   } else {
1067     // Make desired shift.
1068     auto CstOne = MIB.buildConstant(SwitchTy, 1);
1069     auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1070 
1071     // Emit bit tests and jumps.
1072     auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1073     auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1074     auto CstZero = MIB.buildConstant(SwitchTy, 0);
1075     Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1076               .getReg(0);
1077   }
1078 
1079   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1080   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1081   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1082   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1083   // It is not guaranteed that B.ExtraProb and BranchProbToNext sum to one,
1084   // as they are relative probabilities (and thus work more like weights),
1085   // so we normalize them to make their sum equal one.
1086   SwitchBB->normalizeSuccProbs();
1087 
1088   // Record the fact that the IR edge from the header to the bit test target
1089   // will go through our new block. Needed for PHIs to have nodes added.
1090   addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1091                     SwitchBB);
1092 
1093   MIB.buildBrCond(Cmp, *B.TargetBB);
1094 
1095   // Avoid emitting unnecessary branches to the next block.
1096   if (NextMBB != SwitchBB->getNextNode())
1097     MIB.buildBr(*NextMBB);
1098 }
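// Worked sketch: for cases {1, 3, 5} that all branch to one target, the
// header subtracts the minimum (1) and the code above tests
// ((1 << (x - 1)) & 0b10101) != 0 -- one shift/and/compare instead of three
// equality branches (the PopCount == 1 and PopCount == BB.Range paths take
// the cheaper special-case compares instead).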
1099 
1100 bool IRTranslator::lowerBitTestWorkItem(
1101     SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1102     MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1103     MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1104     BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1105     SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1106     bool FallthroughUnreachable) {
1107   using namespace SwitchCG;
1108   MachineFunction *CurMF = SwitchMBB->getParent();
1109   // FIXME: Optimize away range check based on pivot comparisons.
1110   BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1111   // The bit test blocks haven't been inserted yet; insert them here.
1112   for (BitTestCase &BTC : BTB->Cases)
1113     CurMF->insert(BBI, BTC.ThisBB);
1114 
1115   // Fill in fields of the BitTestBlock.
1116   BTB->Parent = CurMBB;
1117   BTB->Default = Fallthrough;
1118 
1119   BTB->DefaultProb = UnhandledProbs;
1120   // If the cases in the bit test don't form a contiguous range, we evenly
1121   // distribute the probability of the edge to Fallthrough between the two
1122   // successors of CurMBB.
1123   if (!BTB->ContiguousRange) {
1124     BTB->Prob += DefaultProb / 2;
1125     BTB->DefaultProb -= DefaultProb / 2;
1126   }
1127 
1128   if (FallthroughUnreachable) {
1129     // Skip the range check if the fallthrough block is unreachable.
1130     BTB->OmitRangeCheck = true;
1131   }
1132 
1133   // If we're in the right place, emit the bit test header right now.
1134   if (CurMBB == SwitchMBB) {
1135     emitBitTestHeader(*BTB, SwitchMBB);
1136     BTB->Emitted = true;
1137   }
1138   return true;
1139 }
1140 
1141 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1142                                        Value *Cond,
1143                                        MachineBasicBlock *SwitchMBB,
1144                                        MachineBasicBlock *DefaultMBB,
1145                                        MachineIRBuilder &MIB) {
1146   using namespace SwitchCG;
1147   MachineFunction *CurMF = FuncInfo.MF;
1148   MachineBasicBlock *NextMBB = nullptr;
1149   MachineFunction::iterator BBI(W.MBB);
1150   if (++BBI != FuncInfo.MF->end())
1151     NextMBB = &*BBI;
1152 
1153   if (EnableOpts) {
1154     // Here, we order cases by probability so the most likely case will be
1155     // checked first. However, two clusters can have the same probability in
1156     // which case their relative ordering is non-deterministic. So we use Low
1157     // as a tie-breaker as clusters are guaranteed to never overlap.
1158     llvm::sort(W.FirstCluster, W.LastCluster + 1,
1159                [](const CaseCluster &a, const CaseCluster &b) {
1160                  return a.Prob != b.Prob
1161                             ? a.Prob > b.Prob
1162                             : a.Low->getValue().slt(b.Low->getValue());
1163                });
1164 
1165     // Rearrange the case blocks so that the last one falls through if possible
1166     // without changing the order of probabilities.
1167     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1168       --I;
1169       if (I->Prob > W.LastCluster->Prob)
1170         break;
1171       if (I->Kind == CC_Range && I->MBB == NextMBB) {
1172         std::swap(*I, *W.LastCluster);
1173         break;
1174       }
1175     }
1176   }
1177 
1178   // Compute total probability.
1179   BranchProbability DefaultProb = W.DefaultProb;
1180   BranchProbability UnhandledProbs = DefaultProb;
1181   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1182     UnhandledProbs += I->Prob;
1183 
1184   MachineBasicBlock *CurMBB = W.MBB;
1185   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1186     bool FallthroughUnreachable = false;
1187     MachineBasicBlock *Fallthrough;
1188     if (I == W.LastCluster) {
1189       // For the last cluster, fall through to the default destination.
1190       Fallthrough = DefaultMBB;
1191       FallthroughUnreachable = isa<UnreachableInst>(
1192           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1193     } else {
1194       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1195       CurMF->insert(BBI, Fallthrough);
1196     }
1197     UnhandledProbs -= I->Prob;
1198 
1199     switch (I->Kind) {
1200     case CC_BitTests: {
1201       if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1202                                 DefaultProb, UnhandledProbs, I, Fallthrough,
1203                                 FallthroughUnreachable)) {
1204         LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1205         return false;
1206       }
1207       break;
1208     }
1209 
1210     case CC_JumpTable: {
1211       if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1212                                   UnhandledProbs, I, Fallthrough,
1213                                   FallthroughUnreachable)) {
1214         LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1215         return false;
1216       }
1217       break;
1218     }
1219     case CC_Range: {
1220       if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1221                                     FallthroughUnreachable, UnhandledProbs,
1222                                     CurMBB, MIB, SwitchMBB)) {
1223         LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1224         return false;
1225       }
1226       break;
1227     }
1228     }
1229     CurMBB = Fallthrough;
1230   }
1231 
1232   return true;
1233 }
1234 
1235 bool IRTranslator::translateIndirectBr(const User &U,
1236                                        MachineIRBuilder &MIRBuilder) {
1237   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1238 
1239   const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1240   MIRBuilder.buildBrIndirect(Tgt);
1241 
1242   // Link successors.
1243   SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1244   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1245   for (const BasicBlock *Succ : successors(&BrInst)) {
1246     // It's legal for indirectbr instructions to have duplicate blocks in the
1247     // destination list. We don't allow this in MIR. Skip anything that's
1248     // already a successor.
1249     if (!AddedSuccessors.insert(Succ).second)
1250       continue;
1251     CurBB.addSuccessor(&getMBB(*Succ));
1252   }
1253 
1254   return true;
1255 }
1256 
1257 static bool isSwiftError(const Value *V) {
1258   if (auto Arg = dyn_cast<Argument>(V))
1259     return Arg->hasSwiftErrorAttr();
1260   if (auto AI = dyn_cast<AllocaInst>(V))
1261     return AI->isSwiftError();
1262   return false;
1263 }
1264 
1265 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1266   const LoadInst &LI = cast<LoadInst>(U);
1267   if (DL->getTypeStoreSize(LI.getType()) == 0)
1268     return true;
1269 
1270   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1271   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1272   Register Base = getOrCreateVReg(*LI.getPointerOperand());
1273 
1274   Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
1275   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1276 
1277   if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
1278     assert(Regs.size() == 1 && "swifterror should be single pointer");
1279     Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
1280                                                     LI.getPointerOperand());
1281     MIRBuilder.buildCopy(Regs[0], VReg);
1282     return true;
1283   }
1284 
1285   auto &TLI = *MF->getSubtarget().getTargetLowering();
1286   MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);
1287 
1288   const MDNode *Ranges =
1289       Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1290   for (unsigned i = 0; i < Regs.size(); ++i) {
1291     Register Addr;
1292     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1293 
1294     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1295     Align BaseAlign = getMemOpAlign(LI);
1296     AAMDNodes AAMetadata;
1297     LI.getAAMetadata(AAMetadata);
1298     auto MMO = MF->getMachineMemOperand(
1299         Ptr, Flags, MRI->getType(Regs[i]).getSizeInBytes(),
1300         commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
1301         LI.getSyncScopeID(), LI.getOrdering());
1302     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1303   }
1304 
1305   return true;
1306 }
1307 
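// Illustrative example, assuming a DataLayout that 8-byte-aligns i64:
//   %v = load { i32, i64 }, { i32, i64 }* %p
// is split into a G_LOAD of s32 at byte offset 0 and a G_LOAD of s64 at
// byte offset 8 (Offsets[] is kept in bits, hence the divisions by 8),
// each with its own MMO aligned to commonAlignment(BaseAlign, offset).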
1308 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1309   const StoreInst &SI = cast<StoreInst>(U);
1310   if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
1311     return true;
1312 
1313   ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1314   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1315   Register Base = getOrCreateVReg(*SI.getPointerOperand());
1316 
1317   Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
1318   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1319 
1320   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1321     assert(Vals.size() == 1 && "swifterror should be single pointer");
1322 
1323     Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1324                                                     SI.getPointerOperand());
1325     MIRBuilder.buildCopy(VReg, Vals[0]);
1326     return true;
1327   }
1328 
1329   auto &TLI = *MF->getSubtarget().getTargetLowering();
1330   MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
1331 
1332   for (unsigned i = 0; i < Vals.size(); ++i) {
1333     Register Addr;
1334     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1335 
1336     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1337     Align BaseAlign = getMemOpAlign(SI);
1338     AAMDNodes AAMetadata;
1339     SI.getAAMetadata(AAMetadata);
1340     auto MMO = MF->getMachineMemOperand(
1341         Ptr, Flags, MRI->getType(Vals[i]).getSizeInBytes(),
1342         commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
1343         SI.getSyncScopeID(), SI.getOrdering());
1344     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1345   }
1346   return true;
1347 }
1348 
1349 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1350   const Value *Src = U.getOperand(0);
1351   Type *Int32Ty = Type::getInt32Ty(U.getContext());
1352 
1353   // getIndexedOffsetInType is designed for GEPs, so the first index is the
1354   // usual array element rather than looking into the actual aggregate.
1355   SmallVector<Value *, 1> Indices;
1356   Indices.push_back(ConstantInt::get(Int32Ty, 0));
1357 
1358   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1359     for (auto Idx : EVI->indices())
1360       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1361   } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1362     for (auto Idx : IVI->indices())
1363       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1364   } else {
1365     for (unsigned i = 1; i < U.getNumOperands(); ++i)
1366       Indices.push_back(U.getOperand(i));
1367   }
1368 
1369   return 8 * static_cast<uint64_t>(
1370                  DL.getIndexedOffsetInType(Src->getType(), Indices));
1371 }
1372 
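// Worked example (illustrative, assuming a typical 64-bit DataLayout): for
//   %f = extractvalue { i32, i64 } %agg, 1
// the synthesized indices are [0, 1], getIndexedOffsetInType returns the
// byte offset 8, and the function returns 8 * 8 = 64 - the offset in bits,
// matching the units of the VMap offset tables used by the callers below.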
1373 bool IRTranslator::translateExtractValue(const User &U,
1374                                          MachineIRBuilder &MIRBuilder) {
1375   const Value *Src = U.getOperand(0);
1376   uint64_t Offset = getOffsetFromIndices(U, *DL);
1377   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1378   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1379   unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1380   auto &DstRegs = allocateVRegs(U);
1381 
1382   for (unsigned i = 0; i < DstRegs.size(); ++i)
1383     DstRegs[i] = SrcRegs[Idx++];
1384 
1385   return true;
1386 }
1387 
1388 bool IRTranslator::translateInsertValue(const User &U,
1389                                         MachineIRBuilder &MIRBuilder) {
1390   const Value *Src = U.getOperand(0);
1391   uint64_t Offset = getOffsetFromIndices(U, *DL);
1392   auto &DstRegs = allocateVRegs(U);
1393   ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1394   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1395   ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1396   auto InsertedIt = InsertedRegs.begin();
1397 
1398   for (unsigned i = 0; i < DstRegs.size(); ++i) {
1399     if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1400       DstRegs[i] = *InsertedIt++;
1401     else
1402       DstRegs[i] = SrcRegs[i];
1403   }
1404 
1405   return true;
1406 }
1407 
1408 bool IRTranslator::translateSelect(const User &U,
1409                                    MachineIRBuilder &MIRBuilder) {
1410   Register Tst = getOrCreateVReg(*U.getOperand(0));
1411   ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1412   ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1413   ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1414 
1415   uint16_t Flags = 0;
1416   if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1417     Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1418 
1419   for (unsigned i = 0; i < ResRegs.size(); ++i) {
1420     MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1421   }
1422 
1423   return true;
1424 }
1425 
1426 bool IRTranslator::translateCopy(const User &U, const Value &V,
1427                                  MachineIRBuilder &MIRBuilder) {
1428   Register Src = getOrCreateVReg(V);
1429   auto &Regs = *VMap.getVRegs(U);
1430   if (Regs.empty()) {
1431     Regs.push_back(Src);
1432     VMap.getOffsets(U)->push_back(0);
1433   } else {
1434     // If we already assigned a vreg for this instruction, we can't change that.
1435     // Emit a copy to satisfy the users we already emitted.
1436     MIRBuilder.buildCopy(Regs[0], Src);
1437   }
1438   return true;
1439 }
1440 
1441 bool IRTranslator::translateBitCast(const User &U,
1442                                     MachineIRBuilder &MIRBuilder) {
1443   // If we're bitcasting to the source type, we can reuse the source vreg.
1444   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1445       getLLTForType(*U.getType(), *DL))
1446     return translateCopy(U, *U.getOperand(0), MIRBuilder);
1447 
1448   return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1449 }
1450 
1451 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1452                                  MachineIRBuilder &MIRBuilder) {
1453   Register Op = getOrCreateVReg(*U.getOperand(0));
1454   Register Res = getOrCreateVReg(U);
1455   MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1456   return true;
1457 }
1458 
1459 bool IRTranslator::translateGetElementPtr(const User &U,
1460                                           MachineIRBuilder &MIRBuilder) {
1461   Value &Op0 = *U.getOperand(0);
1462   Register BaseReg = getOrCreateVReg(Op0);
1463   Type *PtrIRTy = Op0.getType();
1464   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1465   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1466   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1467 
1468   // Normalize vector GEPs - all scalar operands should be converted to
1469   // splat vectors.
1470   unsigned VectorWidth = 0;
1471   if (auto *VT = dyn_cast<VectorType>(U.getType()))
1472     VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1473 
1474   // We might need to splat the base pointer into a vector if the offsets
1475   // are vectors.
1476   if (VectorWidth && !PtrTy.isVector()) {
1477     BaseReg =
1478         MIRBuilder.buildSplatVector(LLT::vector(VectorWidth, PtrTy), BaseReg)
1479             .getReg(0);
1480     PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1481     PtrTy = getLLTForType(*PtrIRTy, *DL);
1482     OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1483     OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1484   }
1485 
1486   int64_t Offset = 0;
1487   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1488        GTI != E; ++GTI) {
1489     const Value *Idx = GTI.getOperand();
1490     if (StructType *StTy = GTI.getStructTypeOrNull()) {
1491       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1492       Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1493       continue;
1494     } else {
1495       uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1496 
1497       // If this is a scalar constant or a splat vector of constants,
1498       // handle it quickly.
1499       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1500         Offset += ElementSize * CI->getSExtValue();
1501         continue;
1502       }
1503 
1504       if (Offset != 0) {
1505         auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1506         BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1507                       .getReg(0);
1508         Offset = 0;
1509       }
1510 
1511       Register IdxReg = getOrCreateVReg(*Idx);
1512       LLT IdxTy = MRI->getType(IdxReg);
1513       if (IdxTy != OffsetTy) {
1514         if (!IdxTy.isVector() && VectorWidth) {
1515           IdxReg = MIRBuilder.buildSplatVector(
1516             OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
1517         }
1518 
1519         IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1520       }
1521 
1522       // N = N + Idx * ElementSize;
1523       // Avoid doing it for ElementSize of 1.
1524       Register GepOffsetReg;
1525       if (ElementSize != 1) {
1526         auto ElementSizeMIB = MIRBuilder.buildConstant(
1527             getLLTForType(*OffsetIRTy, *DL), ElementSize);
1528         GepOffsetReg =
1529             MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1530       } else
1531         GepOffsetReg = IdxReg;
1532 
1533       BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1534     }
1535   }
1536 
1537   if (Offset != 0) {
1538     auto OffsetMIB =
1539         MIRBuilder.buildConstant(OffsetTy, Offset);
1540     MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1541     return true;
1542   }
1543 
1544   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1545   return true;
1546 }
1547 
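// Illustrative lowering, assuming 64-bit pointers: for
//   %q = getelementptr i32, i32* %p, i64 %i
// the loop above emits roughly
//   %sz:_(s64)  = G_CONSTANT i64 4        ; alloc size of i32
//   %off:_(s64) = G_MUL %i, %sz
//   %sum:_(p0)  = G_PTR_ADD %p, %off
// followed by a COPY of %sum into the vreg for %q, whereas an all-constant
// GEP accumulates into Offset and ends in a single trailing G_PTR_ADD.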
1548 bool IRTranslator::translateMemFunc(const CallInst &CI,
1549                                     MachineIRBuilder &MIRBuilder,
1550                                     unsigned Opcode) {
1551 
1552   // If the source is undef, then just emit a nop.
1553   if (isa<UndefValue>(CI.getArgOperand(1)))
1554     return true;
1555 
1556   SmallVector<Register, 3> SrcRegs;
1557 
1558   unsigned MinPtrSize = UINT_MAX;
1559   for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1560     Register SrcReg = getOrCreateVReg(**AI);
1561     LLT SrcTy = MRI->getType(SrcReg);
1562     if (SrcTy.isPointer())
1563       MinPtrSize = std::min(SrcTy.getSizeInBits(), MinPtrSize);
1564     SrcRegs.push_back(SrcReg);
1565   }
1566 
1567   LLT SizeTy = LLT::scalar(MinPtrSize);
1568 
1569   // The size operand should be the minimum of the pointer sizes.
1570   Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1571   if (MRI->getType(SizeOpReg) != SizeTy)
1572     SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1573 
1574   auto ICall = MIRBuilder.buildInstr(Opcode);
1575   for (Register SrcReg : SrcRegs)
1576     ICall.addUse(SrcReg);
1577 
1578   Align DstAlign;
1579   Align SrcAlign;
1580   unsigned IsVol =
1581       cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
1582           ->getZExtValue();
1583 
1584   if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1585     DstAlign = MCI->getDestAlign().valueOrOne();
1586     SrcAlign = MCI->getSourceAlign().valueOrOne();
1587   } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1588     DstAlign = MMI->getDestAlign().valueOrOne();
1589     SrcAlign = MMI->getSourceAlign().valueOrOne();
1590   } else {
1591     auto *MSI = cast<MemSetInst>(&CI);
1592     DstAlign = MSI->getDestAlign().valueOrOne();
1593   }
1594 
1595   // We need to propagate the tail call flag from the IR inst as an argument.
1596   // Otherwise, we have to pessimize and assume later that we cannot tail call
1597   // any memory intrinsics.
1598   ICall.addImm(CI.isTailCall() ? 1 : 0);
1599 
1600   // Create mem operands to store the alignment and volatile info.
1601   auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
1602   ICall.addMemOperand(MF->getMachineMemOperand(
1603       MachinePointerInfo(CI.getArgOperand(0)),
1604       MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
1605   if (Opcode != TargetOpcode::G_MEMSET)
1606     ICall.addMemOperand(MF->getMachineMemOperand(
1607         MachinePointerInfo(CI.getArgOperand(1)),
1608         MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
1609 
1610   return true;
1611 }
1612 
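// The illustrative result (not from the source) for
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i1 false)
// is approximately
//   G_MEMCPY %d(p0), %s(p0), %n(s64), 0   ; trailing imm = isTailCall
// with an MOStore memoperand for %d and an MOLoad memoperand for %s; the
// size operand is zext/truncated to the narrowest pointer width involved.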
1613 void IRTranslator::getStackGuard(Register DstReg,
1614                                  MachineIRBuilder &MIRBuilder) {
1615   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1616   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1617   auto MIB =
1618       MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1619 
1620   auto &TLI = *MF->getSubtarget().getTargetLowering();
1621   Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1622   if (!Global)
1623     return;
1624 
1625   MachinePointerInfo MPInfo(Global);
1626   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1627                MachineMemOperand::MODereferenceable;
1628   MachineMemOperand *MemRef =
1629       MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
1630                                DL->getPointerABIAlignment(0));
1631   MIB.setMemRefs({MemRef});
1632 }
1633 
1634 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1635                                               MachineIRBuilder &MIRBuilder) {
1636   ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1637   MIRBuilder.buildInstr(
1638       Op, {ResRegs[0], ResRegs[1]},
1639       {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1640 
1641   return true;
1642 }
1643 
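// For instance (illustrative):
//   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// maps onto one instruction with two defs,
//   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b
// where ResRegs[0] and ResRegs[1] are the vregs of the two struct fields.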
1644 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1645                                                 MachineIRBuilder &MIRBuilder) {
1646   Register Dst = getOrCreateVReg(CI);
1647   Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1648   Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1649   uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1650   MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1651   return true;
1652 }
1653 
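// Example semantics (illustrative): with a scale of 16,
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 16)
// becomes G_SMULFIX %a, %b, 16, i.e. the Q16 fixed-point product
// (%a * %b) >> 16 computed on the double-width intermediate result.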
1654 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1655   switch (ID) {
1656     default:
1657       break;
1658     case Intrinsic::bswap:
1659       return TargetOpcode::G_BSWAP;
1660     case Intrinsic::bitreverse:
1661       return TargetOpcode::G_BITREVERSE;
1662     case Intrinsic::fshl:
1663       return TargetOpcode::G_FSHL;
1664     case Intrinsic::fshr:
1665       return TargetOpcode::G_FSHR;
1666     case Intrinsic::ceil:
1667       return TargetOpcode::G_FCEIL;
1668     case Intrinsic::cos:
1669       return TargetOpcode::G_FCOS;
1670     case Intrinsic::ctpop:
1671       return TargetOpcode::G_CTPOP;
1672     case Intrinsic::exp:
1673       return TargetOpcode::G_FEXP;
1674     case Intrinsic::exp2:
1675       return TargetOpcode::G_FEXP2;
1676     case Intrinsic::fabs:
1677       return TargetOpcode::G_FABS;
1678     case Intrinsic::copysign:
1679       return TargetOpcode::G_FCOPYSIGN;
1680     case Intrinsic::minnum:
1681       return TargetOpcode::G_FMINNUM;
1682     case Intrinsic::maxnum:
1683       return TargetOpcode::G_FMAXNUM;
1684     case Intrinsic::minimum:
1685       return TargetOpcode::G_FMINIMUM;
1686     case Intrinsic::maximum:
1687       return TargetOpcode::G_FMAXIMUM;
1688     case Intrinsic::canonicalize:
1689       return TargetOpcode::G_FCANONICALIZE;
1690     case Intrinsic::floor:
1691       return TargetOpcode::G_FFLOOR;
1692     case Intrinsic::fma:
1693       return TargetOpcode::G_FMA;
1694     case Intrinsic::log:
1695       return TargetOpcode::G_FLOG;
1696     case Intrinsic::log2:
1697       return TargetOpcode::G_FLOG2;
1698     case Intrinsic::log10:
1699       return TargetOpcode::G_FLOG10;
1700     case Intrinsic::nearbyint:
1701       return TargetOpcode::G_FNEARBYINT;
1702     case Intrinsic::pow:
1703       return TargetOpcode::G_FPOW;
1704     case Intrinsic::powi:
1705       return TargetOpcode::G_FPOWI;
1706     case Intrinsic::rint:
1707       return TargetOpcode::G_FRINT;
1708     case Intrinsic::round:
1709       return TargetOpcode::G_INTRINSIC_ROUND;
1710     case Intrinsic::roundeven:
1711       return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1712     case Intrinsic::sin:
1713       return TargetOpcode::G_FSIN;
1714     case Intrinsic::sqrt:
1715       return TargetOpcode::G_FSQRT;
1716     case Intrinsic::trunc:
1717       return TargetOpcode::G_INTRINSIC_TRUNC;
1718     case Intrinsic::readcyclecounter:
1719       return TargetOpcode::G_READCYCLECOUNTER;
1720     case Intrinsic::ptrmask:
1721       return TargetOpcode::G_PTRMASK;
1722     case Intrinsic::lrint:
1723       return TargetOpcode::G_INTRINSIC_LRINT;
1724     // FADD/FMUL require checking the FMF, so are handled elsewhere.
1725     case Intrinsic::vector_reduce_fmin:
1726       return TargetOpcode::G_VECREDUCE_FMIN;
1727     case Intrinsic::vector_reduce_fmax:
1728       return TargetOpcode::G_VECREDUCE_FMAX;
1729     case Intrinsic::vector_reduce_add:
1730       return TargetOpcode::G_VECREDUCE_ADD;
1731     case Intrinsic::vector_reduce_mul:
1732       return TargetOpcode::G_VECREDUCE_MUL;
1733     case Intrinsic::vector_reduce_and:
1734       return TargetOpcode::G_VECREDUCE_AND;
1735     case Intrinsic::vector_reduce_or:
1736       return TargetOpcode::G_VECREDUCE_OR;
1737     case Intrinsic::vector_reduce_xor:
1738       return TargetOpcode::G_VECREDUCE_XOR;
1739     case Intrinsic::vector_reduce_smax:
1740       return TargetOpcode::G_VECREDUCE_SMAX;
1741     case Intrinsic::vector_reduce_smin:
1742       return TargetOpcode::G_VECREDUCE_SMIN;
1743     case Intrinsic::vector_reduce_umax:
1744       return TargetOpcode::G_VECREDUCE_UMAX;
1745     case Intrinsic::vector_reduce_umin:
1746       return TargetOpcode::G_VECREDUCE_UMIN;
1747   }
1748   return Intrinsic::not_intrinsic;
1749 }
1750 
translateSimpleIntrinsic(const CallInst & CI,Intrinsic::ID ID,MachineIRBuilder & MIRBuilder)1751 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1752                                             Intrinsic::ID ID,
1753                                             MachineIRBuilder &MIRBuilder) {
1754 
1755   unsigned Op = getSimpleIntrinsicOpcode(ID);
1756 
1757   // Is this a simple intrinsic?
1758   if (Op == Intrinsic::not_intrinsic)
1759     return false;
1760 
1761   // Yes. Let's translate it.
1762   SmallVector<llvm::SrcOp, 4> VRegs;
1763   for (auto &Arg : CI.arg_operands())
1764     VRegs.push_back(getOrCreateVReg(*Arg));
1765 
1766   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1767                         MachineInstr::copyFlagsFromInstruction(CI));
1768   return true;
1769 }
1770 
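// A representative case (illustrative):
//   %r = call float @llvm.sqrt.f32(float %x)
// translates directly to %r:_(s32) = G_FSQRT %x, with the call's fast-math
// flags carried over via copyFlagsFromInstruction.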
1771 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
1772 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1773   switch (ID) {
1774   case Intrinsic::experimental_constrained_fadd:
1775     return TargetOpcode::G_STRICT_FADD;
1776   case Intrinsic::experimental_constrained_fsub:
1777     return TargetOpcode::G_STRICT_FSUB;
1778   case Intrinsic::experimental_constrained_fmul:
1779     return TargetOpcode::G_STRICT_FMUL;
1780   case Intrinsic::experimental_constrained_fdiv:
1781     return TargetOpcode::G_STRICT_FDIV;
1782   case Intrinsic::experimental_constrained_frem:
1783     return TargetOpcode::G_STRICT_FREM;
1784   case Intrinsic::experimental_constrained_fma:
1785     return TargetOpcode::G_STRICT_FMA;
1786   case Intrinsic::experimental_constrained_sqrt:
1787     return TargetOpcode::G_STRICT_FSQRT;
1788   default:
1789     return 0;
1790   }
1791 }
1792 
1793 bool IRTranslator::translateConstrainedFPIntrinsic(
1794   const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1795   fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
1796 
1797   unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1798   if (!Opcode)
1799     return false;
1800 
1801   unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1802   if (EB == fp::ExceptionBehavior::ebIgnore)
1803     Flags |= MachineInstr::NoFPExcept;
1804 
1805   SmallVector<llvm::SrcOp, 4> VRegs;
1806   VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1807   if (!FPI.isUnaryOp())
1808     VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1809   if (FPI.isTernaryOp())
1810     VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1811 
1812   MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1813   return true;
1814 }
1815 
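// Illustrative example:
//   %r = call float @llvm.experimental.constrained.fadd.f32(float %a,
//            float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore")
// becomes G_STRICT_FADD %a, %b with the NoFPExcept flag set because the
// exception behavior is ebIgnore; under "fpexcept.strict" the flag is left
// clear so the operation stays ordered with respect to FP exceptions.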
1816 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1817                                            MachineIRBuilder &MIRBuilder) {
1818 
1819   // If this is a simple intrinsic (that is, we just need to add a def of
1820   // a vreg, and uses for each arg operand), then translate it.
1821   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1822     return true;
1823 
1824   switch (ID) {
1825   default:
1826     break;
1827   case Intrinsic::lifetime_start:
1828   case Intrinsic::lifetime_end: {
1829     // No stack colouring at -O0; discard region information.
1830     if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1831       return true;
1832 
1833     unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1834                                                   : TargetOpcode::LIFETIME_END;
1835 
1836     // Get the underlying objects for the location passed on the lifetime
1837     // marker.
1838     SmallVector<const Value *, 4> Allocas;
1839     getUnderlyingObjects(CI.getArgOperand(1), Allocas);
1840 
1841     // Iterate over each underlying object, creating lifetime markers for each
1842     // static alloca. Quit if we find a non-static alloca.
1843     for (const Value *V : Allocas) {
1844       const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1845       if (!AI)
1846         continue;
1847 
1848       if (!AI->isStaticAlloca())
1849         return true;
1850 
1851       MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1852     }
1853     return true;
1854   }
1855   case Intrinsic::dbg_declare: {
1856     const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1857     assert(DI.getVariable() && "Missing variable");
1858 
1859     const Value *Address = DI.getAddress();
1860     if (!Address || isa<UndefValue>(Address)) {
1861       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1862       return true;
1863     }
1864 
1865     assert(DI.getVariable()->isValidLocationForIntrinsic(
1866                MIRBuilder.getDebugLoc()) &&
1867            "Expected inlined-at fields to agree");
1868     auto AI = dyn_cast<AllocaInst>(Address);
1869     if (AI && AI->isStaticAlloca()) {
1870       // Static allocas are tracked at the MF level; there is no need for
1871       // DBG_VALUE instructions (in fact, they get ignored if they *do* exist).
1872       MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1873                              getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1874     } else {
1875       // A dbg.declare describes the address of a source variable, so lower it
1876       // into an indirect DBG_VALUE.
1877       MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1878                                        DI.getVariable(), DI.getExpression());
1879     }
1880     return true;
1881   }
1882   case Intrinsic::dbg_label: {
1883     const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1884     assert(DI.getLabel() && "Missing label");
1885 
1886     assert(DI.getLabel()->isValidLocationForIntrinsic(
1887                MIRBuilder.getDebugLoc()) &&
1888            "Expected inlined-at fields to agree");
1889 
1890     MIRBuilder.buildDbgLabel(DI.getLabel());
1891     return true;
1892   }
1893   case Intrinsic::vaend:
1894     // No target I know of cares about va_end. Certainly no in-tree target
1895     // does. Simplest intrinsic ever!
1896     return true;
1897   case Intrinsic::vastart: {
1898     auto &TLI = *MF->getSubtarget().getTargetLowering();
1899     Value *Ptr = CI.getArgOperand(0);
1900     unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1901 
1902     // FIXME: Get alignment
1903     MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
1904         .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
1905                                                 MachineMemOperand::MOStore,
1906                                                 ListSize, Align(1)));
1907     return true;
1908   }
1909   case Intrinsic::dbg_value: {
1910     // This form of DBG_VALUE is target-independent.
1911     const DbgValueInst &DI = cast<DbgValueInst>(CI);
1912     const Value *V = DI.getValue();
1913     assert(DI.getVariable()->isValidLocationForIntrinsic(
1914                MIRBuilder.getDebugLoc()) &&
1915            "Expected inlined-at fields to agree");
1916     if (!V || DI.hasArgList()) {
1917       // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
1918       // terminate any prior location.
1919       MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1920     } else if (const auto *CI = dyn_cast<Constant>(V)) {
1921       MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1922     } else {
1923       for (Register Reg : getOrCreateVRegs(*V)) {
1924         // FIXME: This does not handle register-indirect values at offset 0. The
1925         // direct/indirect thing shouldn't really be handled by something as
1926         // implicit as reg+noreg vs reg+imm in the first place, but it seems
1927         // pretty baked in right now.
1928         MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
1929       }
1930     }
1931     return true;
1932   }
1933   case Intrinsic::uadd_with_overflow:
1934     return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1935   case Intrinsic::sadd_with_overflow:
1936     return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1937   case Intrinsic::usub_with_overflow:
1938     return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1939   case Intrinsic::ssub_with_overflow:
1940     return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1941   case Intrinsic::umul_with_overflow:
1942     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1943   case Intrinsic::smul_with_overflow:
1944     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1945   case Intrinsic::uadd_sat:
1946     return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
1947   case Intrinsic::sadd_sat:
1948     return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
1949   case Intrinsic::usub_sat:
1950     return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
1951   case Intrinsic::ssub_sat:
1952     return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
1953   case Intrinsic::ushl_sat:
1954     return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
1955   case Intrinsic::sshl_sat:
1956     return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
1957   case Intrinsic::umin:
1958     return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
1959   case Intrinsic::umax:
1960     return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
1961   case Intrinsic::smin:
1962     return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
1963   case Intrinsic::smax:
1964     return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
1965   case Intrinsic::abs:
1966     // TODO: Preserve "int min is poison" arg in GMIR?
1967     return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
1968   case Intrinsic::smul_fix:
1969     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
1970   case Intrinsic::umul_fix:
1971     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
1972   case Intrinsic::smul_fix_sat:
1973     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
1974   case Intrinsic::umul_fix_sat:
1975     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
1976   case Intrinsic::sdiv_fix:
1977     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
1978   case Intrinsic::udiv_fix:
1979     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
1980   case Intrinsic::sdiv_fix_sat:
1981     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
1982   case Intrinsic::udiv_fix_sat:
1983     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
1984   case Intrinsic::fmuladd: {
1985     const TargetMachine &TM = MF->getTarget();
1986     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1987     Register Dst = getOrCreateVReg(CI);
1988     Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
1989     Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
1990     Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
1991     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
1992         TLI.isFMAFasterThanFMulAndFAdd(*MF,
1993                                        TLI.getValueType(*DL, CI.getType()))) {
1994       // TODO: Revisit this to see if we should move this part of the
1995       // lowering to the combiner.
1996       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
1997                           MachineInstr::copyFlagsFromInstruction(CI));
1998     } else {
1999       LLT Ty = getLLTForType(*CI.getType(), *DL);
2000       auto FMul = MIRBuilder.buildFMul(
2001           Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2002       MIRBuilder.buildFAdd(Dst, FMul, Op2,
2003                            MachineInstr::copyFlagsFromInstruction(CI));
2004     }
2005     return true;
2006   }
2007   case Intrinsic::convert_from_fp16:
2008     // FIXME: This intrinsic should probably be removed from the IR.
2009     MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2010                           getOrCreateVReg(*CI.getArgOperand(0)),
2011                           MachineInstr::copyFlagsFromInstruction(CI));
2012     return true;
2013   case Intrinsic::convert_to_fp16:
2014     // FIXME: This intrinsic should probably be removed from the IR.
2015     MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2016                             getOrCreateVReg(*CI.getArgOperand(0)),
2017                             MachineInstr::copyFlagsFromInstruction(CI));
2018     return true;
2019   case Intrinsic::memcpy:
2020     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2021   case Intrinsic::memmove:
2022     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2023   case Intrinsic::memset:
2024     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2025   case Intrinsic::eh_typeid_for: {
2026     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2027     Register Reg = getOrCreateVReg(CI);
2028     unsigned TypeID = MF->getTypeIDFor(GV);
2029     MIRBuilder.buildConstant(Reg, TypeID);
2030     return true;
2031   }
2032   case Intrinsic::objectsize:
2033     llvm_unreachable("llvm.objectsize.* should have been lowered already");
2034 
2035   case Intrinsic::is_constant:
2036     llvm_unreachable("llvm.is.constant.* should have been lowered already");
2037 
2038   case Intrinsic::stackguard:
2039     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2040     return true;
2041   case Intrinsic::stackprotector: {
2042     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2043     Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2044     getStackGuard(GuardVal, MIRBuilder);
2045 
2046     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2047     int FI = getOrCreateFrameIndex(*Slot);
2048     MF->getFrameInfo().setStackProtectorIndex(FI);
2049 
2050     MIRBuilder.buildStore(
2051         GuardVal, getOrCreateVReg(*Slot),
2052         *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2053                                   MachineMemOperand::MOStore |
2054                                       MachineMemOperand::MOVolatile,
2055                                   PtrTy.getSizeInBits() / 8, Align(8)));
2056     return true;
2057   }
2058   case Intrinsic::stacksave: {
2059     // Save the stack pointer to the location provided by the intrinsic.
2060     Register Reg = getOrCreateVReg(CI);
2061     Register StackPtr = MF->getSubtarget()
2062                             .getTargetLowering()
2063                             ->getStackPointerRegisterToSaveRestore();
2064 
2065     // If the target doesn't specify a stack pointer, then fall back.
2066     if (!StackPtr)
2067       return false;
2068 
2069     MIRBuilder.buildCopy(Reg, StackPtr);
2070     return true;
2071   }
2072   case Intrinsic::stackrestore: {
2073     // Restore the stack pointer from the location provided by the intrinsic.
2074     Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
2075     Register StackPtr = MF->getSubtarget()
2076                             .getTargetLowering()
2077                             ->getStackPointerRegisterToSaveRestore();
2078 
2079     // If the target doesn't specify a stack pointer, then fall back.
2080     if (!StackPtr)
2081       return false;
2082 
2083     MIRBuilder.buildCopy(StackPtr, Reg);
2084     return true;
2085   }
2086   case Intrinsic::cttz:
2087   case Intrinsic::ctlz: {
2088     ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2089     bool isTrailing = ID == Intrinsic::cttz;
2090     unsigned Opcode = isTrailing
2091                           ? Cst->isZero() ? TargetOpcode::G_CTTZ
2092                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
2093                           : Cst->isZero() ? TargetOpcode::G_CTLZ
2094                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2095     MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2096                           {getOrCreateVReg(*CI.getArgOperand(0))});
2097     return true;
2098   }
2099   case Intrinsic::invariant_start: {
2100     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2101     Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2102     MIRBuilder.buildUndef(Undef);
2103     return true;
2104   }
2105   case Intrinsic::invariant_end:
2106     return true;
2107   case Intrinsic::expect:
2108   case Intrinsic::annotation:
2109   case Intrinsic::ptr_annotation:
2110   case Intrinsic::launder_invariant_group:
2111   case Intrinsic::strip_invariant_group: {
2112     // Drop the intrinsic, but forward the value.
2113     MIRBuilder.buildCopy(getOrCreateVReg(CI),
2114                          getOrCreateVReg(*CI.getArgOperand(0)));
2115     return true;
2116   }
2117   case Intrinsic::assume:
2118   case Intrinsic::experimental_noalias_scope_decl:
2119   case Intrinsic::var_annotation:
2120   case Intrinsic::sideeffect:
2121     // Discard annotate attributes, assumptions, and artificial side-effects.
2122     return true;
2123   case Intrinsic::read_volatile_register:
2124   case Intrinsic::read_register: {
2125     Value *Arg = CI.getArgOperand(0);
2126     MIRBuilder
2127         .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2128         .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2129     return true;
2130   }
2131   case Intrinsic::write_register: {
2132     Value *Arg = CI.getArgOperand(0);
2133     MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2134       .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2135       .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2136     return true;
2137   }
2138   case Intrinsic::localescape: {
2139     MachineBasicBlock &EntryMBB = MF->front();
2140     StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2141 
2142     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2143     // is the same on all targets.
2144     for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) {
2145       Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2146       if (isa<ConstantPointerNull>(Arg))
2147         continue; // Skip null pointers. They represent a hole in index space.
2148 
2149       int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2150       MCSymbol *FrameAllocSym =
2151           MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
2152                                                                 Idx);
2153 
2154       // This should be inserted at the start of the entry block.
2155       auto LocalEscape =
2156           MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2157               .addSym(FrameAllocSym)
2158               .addFrameIndex(FI);
2159 
2160       EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2161     }
2162 
2163     return true;
2164   }
2165   case Intrinsic::vector_reduce_fadd:
2166   case Intrinsic::vector_reduce_fmul: {
2167     // Need to check for the reassoc flag to decide whether we want a
2168     // sequential reduction opcode or not.
2169     Register Dst = getOrCreateVReg(CI);
2170     Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2171     Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2172     unsigned Opc = 0;
2173     if (!CI.hasAllowReassoc()) {
2174       // The sequential ordering case.
2175       Opc = ID == Intrinsic::vector_reduce_fadd
2176                 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2177                 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2178       MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2179                             MachineInstr::copyFlagsFromInstruction(CI));
2180       return true;
2181     }
2182     // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2183     // since the associativity doesn't matter.
2184     unsigned ScalarOpc;
2185     if (ID == Intrinsic::vector_reduce_fadd) {
2186       Opc = TargetOpcode::G_VECREDUCE_FADD;
2187       ScalarOpc = TargetOpcode::G_FADD;
2188     } else {
2189       Opc = TargetOpcode::G_VECREDUCE_FMUL;
2190       ScalarOpc = TargetOpcode::G_FMUL;
2191     }
2192     LLT DstTy = MRI->getType(Dst);
2193     auto Rdx = MIRBuilder.buildInstr(
2194         Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2195     MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2196                           MachineInstr::copyFlagsFromInstruction(CI));
2197 
2198     return true;
2199   }
2200 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)  \
2201   case Intrinsic::INTRINSIC:
2202 #include "llvm/IR/ConstrainedOps.def"
2203     return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2204                                            MIRBuilder);
2205 
2206   }
2207   return false;
2208 }
2209 
2210 bool IRTranslator::translateInlineAsm(const CallBase &CB,
2211                                       MachineIRBuilder &MIRBuilder) {
2212 
2213   const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2214 
2215   if (!ALI) {
2216     LLVM_DEBUG(
2217         dbgs() << "Inline asm lowering is not supported for this target yet\n");
2218     return false;
2219   }
2220 
2221   return ALI->lowerInlineAsm(
2222       MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2223 }
2224 
2225 bool IRTranslator::translateCallBase(const CallBase &CB,
2226                                      MachineIRBuilder &MIRBuilder) {
2227   ArrayRef<Register> Res = getOrCreateVRegs(CB);
2228 
2229   SmallVector<ArrayRef<Register>, 8> Args;
2230   Register SwiftInVReg = 0;
2231   Register SwiftErrorVReg = 0;
2232   for (auto &Arg : CB.args()) {
2233     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2234       assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2235       LLT Ty = getLLTForType(*Arg->getType(), *DL);
2236       SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2237       MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2238                                             &CB, &MIRBuilder.getMBB(), Arg));
2239       Args.emplace_back(makeArrayRef(SwiftInVReg));
2240       SwiftErrorVReg =
2241           SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2242       continue;
2243     }
2244     Args.push_back(getOrCreateVRegs(*Arg));
2245   }
2246 
2247   // We don't set HasCalls on MFI here yet because call lowering may decide to
2248   // optimize into tail calls. Instead, we defer that to selection where a final
2249   // scan is done to check if any instructions are calls.
2250   bool Success =
2251       CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
2252                      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2253 
2254   // Check if we just inserted a tail call.
2255   if (Success) {
2256     assert(!HasTailCall && "Can't tail call return twice from block?");
2257     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2258     HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2259   }
2260 
2261   return Success;
2262 }
2263 
2264 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2265   const CallInst &CI = cast<CallInst>(U);
2266   auto TII = MF->getTarget().getIntrinsicInfo();
2267   const Function *F = CI.getCalledFunction();
2268 
2269   // FIXME: support Windows dllimport function calls.
2270   if (F && (F->hasDLLImportStorageClass() ||
2271             (MF->getTarget().getTargetTriple().isOSWindows() &&
2272              F->hasExternalWeakLinkage())))
2273     return false;
2274 
2275   // FIXME: support control flow guard targets.
2276   if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2277     return false;
2278 
2279   if (CI.isInlineAsm())
2280     return translateInlineAsm(CI, MIRBuilder);
2281 
2282   Intrinsic::ID ID = Intrinsic::not_intrinsic;
2283   if (F && F->isIntrinsic()) {
2284     ID = F->getIntrinsicID();
2285     if (TII && ID == Intrinsic::not_intrinsic)
2286       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2287   }
2288 
2289   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2290     return translateCallBase(CI, MIRBuilder);
2291 
2292   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2293 
2294   if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2295     return true;
2296 
2297   ArrayRef<Register> ResultRegs;
2298   if (!CI.getType()->isVoidTy())
2299     ResultRegs = getOrCreateVRegs(CI);
2300 
2301   // Ignore the callsite attributes. Backend code is most likely not expecting
2302   // an intrinsic to sometimes have side effects and sometimes not.
2303   MachineInstrBuilder MIB =
2304       MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
2305   if (isa<FPMathOperator>(CI))
2306     MIB->copyIRFlags(CI);
2307 
2308   for (auto &Arg : enumerate(CI.arg_operands())) {
2309     // If this is required to be an immediate, don't materialize it in a
2310     // register.
2311     if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2312       if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2313         // imm arguments are more convenient than cimm (and realistically
2314         // probably sufficient), so use them.
2315         assert(CI->getBitWidth() <= 64 &&
2316                "large intrinsic immediates not handled");
2317         MIB.addImm(CI->getSExtValue());
2318       } else {
2319         MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2320       }
2321     } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) {
2322       auto *MDN = dyn_cast<MDNode>(MD->getMetadata());
2323       if (!MDN) // This was probably an MDString.
2324         return false;
2325       MIB.addMetadata(MDN);
2326     } else {
2327       ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2328       if (VRegs.size() > 1)
2329         return false;
2330       MIB.addUse(VRegs[0]);
2331     }
2332   }
2333 
2334   // Add a MachineMemOperand if it is a target mem intrinsic.
2335   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2336   TargetLowering::IntrinsicInfo Info;
2337   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2338   if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2339     Align Alignment = Info.align.getValueOr(
2340         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2341 
2342     uint64_t Size = Info.memVT.getStoreSize();
2343     MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
2344                                                Info.flags, Size, Alignment));
2345   }
2346 
2347   return true;
2348 }
2349 
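// Illustrative example of the generic intrinsic path above: assuming
//   call void @llvm.prefetch.p0i8(i8* %p, i32 0, i32 3, i32 1)
// reaches that point, %p is added as a vreg use of the built intrinsic
// instruction while the three ImmArg parameters are added with addImm
// rather than being materialized as G_CONSTANT vregs.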
2350 bool IRTranslator::findUnwindDestinations(
2351     const BasicBlock *EHPadBB,
2352     BranchProbability Prob,
2353     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2354         &UnwindDests) {
2355   EHPersonality Personality = classifyEHPersonality(
2356       EHPadBB->getParent()->getFunction().getPersonalityFn());
2357   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2358   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2359   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2360   bool IsSEH = isAsynchronousEHPersonality(Personality);
2361 
2362   if (IsWasmCXX) {
2363     // Ignore this for now.
2364     return false;
2365   }
2366 
2367   while (EHPadBB) {
2368     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2369     BasicBlock *NewEHPadBB = nullptr;
2370     if (isa<LandingPadInst>(Pad)) {
2371       // Stop on landingpads. They are not funclets.
2372       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2373       break;
2374     }
2375     if (isa<CleanupPadInst>(Pad)) {
2376       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2377       // personalities.
2378       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2379       UnwindDests.back().first->setIsEHScopeEntry();
2380       UnwindDests.back().first->setIsEHFuncletEntry();
2381       break;
2382     }
2383     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2384       // Add the catchpad handlers to the possible destinations.
2385       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2386         UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2387         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2388         if (IsMSVCCXX || IsCoreCLR)
2389           UnwindDests.back().first->setIsEHFuncletEntry();
2390         if (!IsSEH)
2391           UnwindDests.back().first->setIsEHScopeEntry();
2392       }
2393       NewEHPadBB = CatchSwitch->getUnwindDest();
2394     } else {
2395       continue;
2396     }
2397 
2398     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2399     if (BPI && NewEHPadBB)
2400       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2401     EHPadBB = NewEHPadBB;
2402   }
2403   return true;
2404 }
2405 
2406 bool IRTranslator::translateInvoke(const User &U,
2407                                    MachineIRBuilder &MIRBuilder) {
2408   const InvokeInst &I = cast<InvokeInst>(U);
2409   MCContext &Context = MF->getContext();
2410 
2411   const BasicBlock *ReturnBB = I.getSuccessor(0);
2412   const BasicBlock *EHPadBB = I.getSuccessor(1);
2413 
2414   const Function *Fn = I.getCalledFunction();
2415 
2416   // FIXME: support invoking patchpoint and statepoint intrinsics.
2417   if (Fn && Fn->isIntrinsic())
2418     return false;
2419 
2420   // FIXME: support whatever these are.
2421   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
2422     return false;
2423 
2424   // FIXME: support control flow guard targets.
2425   if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2426     return false;
2427 
2428   // FIXME: support Windows exception handling.
2429   if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2430     return false;
2431 
2432   bool LowerInlineAsm = false;
2433   if (I.isInlineAsm()) {
2434     const InlineAsm *IA = cast<InlineAsm>(I.getCalledOperand());
2435     if (!IA->canThrow()) {
2436       // Fast path without emitting EH_LABELs.
2437 
2438       if (!translateInlineAsm(I, MIRBuilder))
2439         return false;
2440 
2441       MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(),
2442                         *ReturnMBB = &getMBB(*ReturnBB);
2443 
2444       // Update successor info.
2445       addSuccessorWithProb(InvokeMBB, ReturnMBB, BranchProbability::getOne());
2446 
2447       MIRBuilder.buildBr(*ReturnMBB);
2448       return true;
2449     } else {
2450       LowerInlineAsm = true;
2451     }
2452   }
2453 
2454   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2455   // the region covered by the try.
2456   MCSymbol *BeginSymbol = Context.createTempSymbol();
2457   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2458 
2459   if (LowerInlineAsm) {
2460     if (!translateInlineAsm(I, MIRBuilder))
2461       return false;
2462   } else if (!translateCallBase(I, MIRBuilder))
2463     return false;
2464 
2465   MCSymbol *EndSymbol = Context.createTempSymbol();
2466   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2467 
2468   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2469   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2470   MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2471   BranchProbability EHPadBBProb =
2472       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2473           : BranchProbability::getZero();
2474 
2475   if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2476     return false;
2477 
2478   MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2479                     &ReturnMBB = getMBB(*ReturnBB);
2480   // Update successor info.
2481   addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2482   for (auto &UnwindDest : UnwindDests) {
2483     UnwindDest.first->setIsEHPad();
2484     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2485   }
2486   InvokeMBB->normalizeSuccProbs();
2487 
2488   MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2489   MIRBuilder.buildBr(ReturnMBB);
2490   return true;
2491 }
2492 
2493 bool IRTranslator::translateCallBr(const User &U,
2494                                    MachineIRBuilder &MIRBuilder) {
2495   // FIXME: Implement this.
2496   return false;
2497 }
2498 
2499 bool IRTranslator::translateLandingPad(const User &U,
2500                                        MachineIRBuilder &MIRBuilder) {
2501   const LandingPadInst &LP = cast<LandingPadInst>(U);
2502 
2503   MachineBasicBlock &MBB = MIRBuilder.getMBB();
2504 
2505   MBB.setIsEHPad();
2506 
2507   // If there aren't registers to copy the values into (e.g., during SjLj
2508   // exceptions), then don't bother.
2509   auto &TLI = *MF->getSubtarget().getTargetLowering();
2510   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2511   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2512       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2513     return true;
2514 
2515   // If the landingpad's return type is token type, we don't create values
2516   // for its exception pointer and selector; extracting the exception
2517   // pointer or selector from a token-typed landingpad is not currently
2518   // supported.
2519   if (LP.getType()->isTokenTy())
2520     return true;
2521 
2522   // Add a label to mark the beginning of the landing pad.  Deletion of the
2523   // landing pad can thus be detected via the MachineModuleInfo.
2524   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2525     .addSym(MF->addLandingPad(&MBB));
2526 
2527   // If the unwinder does not preserve all registers, ensure that the
2528   // function marks the clobbered registers as used.
2529   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2530   if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2531     MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2532 
2533   LLT Ty = getLLTForType(*LP.getType(), *DL);
2534   Register Undef = MRI->createGenericVirtualRegister(Ty);
2535   MIRBuilder.buildUndef(Undef);
2536 
2537   SmallVector<LLT, 2> Tys;
2538   for (Type *Ty : cast<StructType>(LP.getType())->elements())
2539     Tys.push_back(getLLTForType(*Ty, *DL));
2540   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2541 
2542   // Mark exception register as live in.
2543   Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2544   if (!ExceptionReg)
2545     return false;
2546 
2547   MBB.addLiveIn(ExceptionReg);
2548   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2549   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2550 
2551   Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2552   if (!SelectorReg)
2553     return false;
2554 
2555   MBB.addLiveIn(SelectorReg);
2556   Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2557   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2558   MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2559 
2560   return true;
2561 }
2562 
2563 bool IRTranslator::translateAlloca(const User &U,
2564                                    MachineIRBuilder &MIRBuilder) {
2565   auto &AI = cast<AllocaInst>(U);
2566 
2567   if (AI.isSwiftError())
2568     return true;
2569 
2570   if (AI.isStaticAlloca()) {
2571     Register Res = getOrCreateVReg(AI);
2572     int FI = getOrCreateFrameIndex(AI);
2573     MIRBuilder.buildFrameIndex(Res, FI);
2574     return true;
2575   }
2576 
2577   // FIXME: support stack probing for Windows.
2578   if (MF->getTarget().getTargetTriple().isOSWindows())
2579     return false;
2580 
2581   // Now we're in the harder dynamic case.
2582   Register NumElts = getOrCreateVReg(*AI.getArraySize());
2583   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2584   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2585   if (MRI->getType(NumElts) != IntPtrTy) {
2586     Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2587     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2588     NumElts = ExtElts;
2589   }
2590 
2591   Type *Ty = AI.getAllocatedType();
2592 
2593   Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2594   Register TySize =
2595       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2596   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2597 
2598   // Round the size of the allocation up to the stack alignment size
2599   // by adding SA-1 to the size and masking off the low bits. This doesn't
2600   // overflow because we're computing an address inside an alloca.
2601   Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
2602   auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
2603   auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
2604                                       MachineInstr::NoUWrap);
2605   auto AlignCst =
2606       MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
2607   auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
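  // Illustrative arithmetic, assuming a 16-byte stack alignment:
  // AllocSize = 40 gives AllocAdd = 40 + 15 = 55 and AlignCst = ~15, so
  // AlignedAlloc = 55 & ~15 = 48, i.e. 40 rounded up to a multiple of 16.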
2608 
2609   Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
2610   if (Alignment <= StackAlign)
2611     Alignment = Align(1);
2612   MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
2613 
2614   MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
2615   assert(MF->getFrameInfo().hasVarSizedObjects());
2616   return true;
2617 }
2618 
2619 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
2620   // FIXME: We may need more info about the type. Because of how LLT works,
2621   // we're completely discarding the i64/double distinction here (amongst
2622   // others). Fortunately the ABIs I know of where that matters don't use va_arg
2623   // anyway, but that's not guaranteed.
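  // A rough sketch of the generic instruction built below, assuming a
  // va_arg of an 8-byte-aligned s64 (register names are hypothetical):
  //   %res:_(s64) = G_VAARG %valist(p0), 8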
2624   MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
2625                         {getOrCreateVReg(*U.getOperand(0)),
2626                          DL->getABITypeAlign(U.getType()).value()});
2627   return true;
2628 }
2629 
2630 bool IRTranslator::translateInsertElement(const User &U,
2631                                           MachineIRBuilder &MIRBuilder) {
2632   // If it is a <1 x Ty> vector, use the scalar directly, since <1 x Ty>
2633   // is not a legal vector type in LLT.
2634   if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
2635     return translateCopy(U, *U.getOperand(1), MIRBuilder);
2636 
2637   Register Res = getOrCreateVReg(U);
2638   Register Val = getOrCreateVReg(*U.getOperand(0));
2639   Register Elt = getOrCreateVReg(*U.getOperand(1));
2640   Register Idx = getOrCreateVReg(*U.getOperand(2));
2641   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
2642   return true;
2643 }
2644 
2645 bool IRTranslator::translateExtractElement(const User &U,
2646                                            MachineIRBuilder &MIRBuilder) {
2647   // If it is a <1 x Ty> vector, use the scalar directly, since <1 x Ty>
2648   // is not a legal vector type in LLT.
2649   if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
2650     return translateCopy(U, *U.getOperand(0), MIRBuilder);
2651 
2652   Register Res = getOrCreateVReg(U);
2653   Register Val = getOrCreateVReg(*U.getOperand(0));
2654   const auto &TLI = *MF->getSubtarget().getTargetLowering();
2655   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
2656   Register Idx;
2657   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
2658     if (CI->getBitWidth() != PreferredVecIdxWidth) {
2659       APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
2660       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
2661       Idx = getOrCreateVReg(*NewIdxCI);
2662     }
2663   }
2664   if (!Idx)
2665     Idx = getOrCreateVReg(*U.getOperand(1));
2666   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
2667     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
2668     Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
2669   }
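  // e.g. a non-constant i32 index on a target whose preferred vector index
  // width is 64 bits is sign-extended to s64 above, so
  // G_EXTRACT_VECTOR_ELT always sees a uniformly sized index.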
2670   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
2671   return true;
2672 }
2673 
2674 bool IRTranslator::translateShuffleVector(const User &U,
2675                                           MachineIRBuilder &MIRBuilder) {
2676   ArrayRef<int> Mask;
2677   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
2678     Mask = SVI->getShuffleMask();
2679   else
2680     Mask = cast<ConstantExpr>(U).getShuffleMask();
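  // e.g. for `shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 3>`
  // Mask is {0, 3}: lane 0 of %a followed by lane 1 of %b.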
2681   ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
2682   MIRBuilder
2683       .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
2684                   {getOrCreateVReg(*U.getOperand(0)),
2685                    getOrCreateVReg(*U.getOperand(1))})
2686       .addShuffleMask(MaskAlloc);
2687   return true;
2688 }
2689 
2690 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
2691   const PHINode &PI = cast<PHINode>(U);
2692 
2693   SmallVector<MachineInstr *, 4> Insts;
2694   for (auto Reg : getOrCreateVRegs(PI)) {
2695     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
2696     Insts.push_back(MIB.getInstr());
2697   }
2698 
2699   PendingPHIs.emplace_back(&PI, std::move(Insts));
2700   return true;
2701 }
2702 
2703 bool IRTranslator::translateAtomicCmpXchg(const User &U,
2704                                           MachineIRBuilder &MIRBuilder) {
2705   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
2706 
2707   auto &TLI = *MF->getSubtarget().getTargetLowering();
2708   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2709 
2710   Type *ResType = I.getType();
2711   Type *ValType = ResType->getStructElementType(0);
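  // cmpxchg yields { ValType, i1 }: the value loaded from memory plus a
  // success flag that is true iff the loaded value equaled the comparand.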
2712 
2713   auto Res = getOrCreateVRegs(I);
2714   Register OldValRes = Res[0];
2715   Register SuccessRes = Res[1];
2716   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2717   Register Cmp = getOrCreateVReg(*I.getCompareOperand());
2718   Register NewVal = getOrCreateVReg(*I.getNewValOperand());
2719 
2720   AAMDNodes AAMetadata;
2721   I.getAAMetadata(AAMetadata);
2722 
2723   MIRBuilder.buildAtomicCmpXchgWithSuccess(
2724       OldValRes, SuccessRes, Addr, Cmp, NewVal,
2725       *MF->getMachineMemOperand(
2726           MachinePointerInfo(I.getPointerOperand()), Flags,
2727           DL->getTypeStoreSize(ValType), getMemOpAlign(I), AAMetadata, nullptr,
2728           I.getSyncScopeID(), I.getSuccessOrdering(), I.getFailureOrdering()));
2729   return true;
2730 }
2731 
2732 bool IRTranslator::translateAtomicRMW(const User &U,
2733                                       MachineIRBuilder &MIRBuilder) {
2734   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
2735   auto &TLI = *MF->getSubtarget().getTargetLowering();
2736   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2737 
2738   Type *ResType = I.getType();
2739 
2740   Register Res = getOrCreateVReg(I);
2741   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2742   Register Val = getOrCreateVReg(*I.getValOperand());
2743 
2744   unsigned Opcode = 0;
2745   switch (I.getOperation()) {
2746   default:
2747     return false;
2748   case AtomicRMWInst::Xchg:
2749     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
2750     break;
2751   case AtomicRMWInst::Add:
2752     Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2753     break;
2754   case AtomicRMWInst::Sub:
2755     Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2756     break;
2757   case AtomicRMWInst::And:
2758     Opcode = TargetOpcode::G_ATOMICRMW_AND;
2759     break;
2760   case AtomicRMWInst::Nand:
2761     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2762     break;
2763   case AtomicRMWInst::Or:
2764     Opcode = TargetOpcode::G_ATOMICRMW_OR;
2765     break;
2766   case AtomicRMWInst::Xor:
2767     Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2768     break;
2769   case AtomicRMWInst::Max:
2770     Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2771     break;
2772   case AtomicRMWInst::Min:
2773     Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2774     break;
2775   case AtomicRMWInst::UMax:
2776     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2777     break;
2778   case AtomicRMWInst::UMin:
2779     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2780     break;
2781   case AtomicRMWInst::FAdd:
2782     Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2783     break;
2784   case AtomicRMWInst::FSub:
2785     Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2786     break;
2787   }
2788 
2789   AAMDNodes AAMetadata;
2790   I.getAAMetadata(AAMetadata);
2791 
2792   MIRBuilder.buildAtomicRMW(
2793       Opcode, Res, Addr, Val,
2794       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2795                                 Flags, DL->getTypeStoreSize(ResType),
2796                                 getMemOpAlign(I), AAMetadata, nullptr,
2797                                 I.getSyncScopeID(), I.getOrdering()));
2798   return true;
2799 }
2800 
2801 bool IRTranslator::translateFence(const User &U,
2802                                   MachineIRBuilder &MIRBuilder) {
2803   const FenceInst &Fence = cast<FenceInst>(U);
2804   MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2805                         Fence.getSyncScopeID());
2806   return true;
2807 }
2808 
2809 bool IRTranslator::translateFreeze(const User &U,
2810                                    MachineIRBuilder &MIRBuilder) {
2811   const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
2812   const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
2813 
2814   assert(DstRegs.size() == SrcRegs.size() &&
2815          "Freeze with different source and destination type?");
2816 
2817   for (unsigned I = 0; I < DstRegs.size(); ++I) {
2818     MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
2819   }
2820 
2821   return true;
2822 }
2823 
2824 void IRTranslator::finishPendingPhis() {
2825 #ifndef NDEBUG
2826   DILocationVerifier Verifier;
2827   GISelObserverWrapper WrapperObserver(&Verifier);
2828   RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2829 #endif // ifndef NDEBUG
2830   for (auto &Phi : PendingPHIs) {
2831     const PHINode *PI = Phi.first;
2832     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2833     MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2834     EntryBuilder->setDebugLoc(PI->getDebugLoc());
2835 #ifndef NDEBUG
2836     Verifier.setCurrentInst(PI);
2837 #endif // ifndef NDEBUG
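    // A value split over N vregs has N component G_PHIs; each incoming
    // (value, predecessor) pair below appends one (vreg, MBB) operand pair
    // to every component.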
2838 
2839     SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2840     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2841       auto IRPred = PI->getIncomingBlock(i);
2842       ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2843       for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2844         if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2845           continue;
2846         SeenPreds.insert(Pred);
2847         for (unsigned j = 0; j < ValRegs.size(); ++j) {
2848           MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2849           MIB.addUse(ValRegs[j]);
2850           MIB.addMBB(Pred);
2851         }
2852       }
2853     }
2854   }
2855 }
2856 
2857 bool IRTranslator::valueIsSplit(const Value &V,
2858                                 SmallVectorImpl<uint64_t> *Offsets) {
2859   SmallVector<LLT, 4> SplitTys;
2860   if (Offsets && !Offsets->empty())
2861     Offsets->clear();
2862   computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
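  // Illustrative example, assuming the usual DataLayout and that offsets
  // are reported in bits: a value of type { i64, i32 } yields
  // SplitTys = [s64, s32] with Offsets = [0, 64], so it counts as split.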
2863   return SplitTys.size() > 1;
2864 }
2865 
2866 bool IRTranslator::translate(const Instruction &Inst) {
2867   CurBuilder->setDebugLoc(Inst.getDebugLoc());
2868 
2869   auto &TLI = *MF->getSubtarget().getTargetLowering();
2870   if (TLI.fallBackToDAGISel(Inst))
2871     return false;
2872 
2873   switch (Inst.getOpcode()) {
2874 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
2875   case Instruction::OPCODE:                                                    \
2876     return translate##OPCODE(Inst, *CurBuilder.get());
2877 #include "llvm/IR/Instruction.def"
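  // Each HANDLE_INST case expands to a call such as:
  //   case Instruction::Add: return translateAdd(Inst, *CurBuilder.get());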
2878   default:
2879     return false;
2880   }
2881 }
2882 
2883 bool IRTranslator::translate(const Constant &C, Register Reg) {
2884   // We only emit constants into the entry block from here. To prevent jumpy
2885   // debug behaviour, set the line number to 0.
2886   if (auto CurrInstDL = CurBuilder->getDL())
2887     EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0,
2888                                               CurrInstDL.getScope(),
2889                                               CurrInstDL.getInlinedAt()));
2890 
2891   if (auto CI = dyn_cast<ConstantInt>(&C))
2892     EntryBuilder->buildConstant(Reg, *CI);
2893   else if (auto CF = dyn_cast<ConstantFP>(&C))
2894     EntryBuilder->buildFConstant(Reg, *CF);
2895   else if (isa<UndefValue>(C))
2896     EntryBuilder->buildUndef(Reg);
2897   else if (isa<ConstantPointerNull>(C))
2898     EntryBuilder->buildConstant(Reg, 0);
2899   else if (auto GV = dyn_cast<GlobalValue>(&C))
2900     EntryBuilder->buildGlobalValue(Reg, GV);
2901   else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2902     if (!isa<FixedVectorType>(CAZ->getType()))
2903       return false;
2904     // Return the scalar if it is a <1 x Ty> vector.
2905     unsigned NumElts = CAZ->getElementCount().getFixedValue();
2906     if (NumElts == 1)
2907       return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get());
2908     SmallVector<Register, 4> Ops;
2909     for (unsigned I = 0; I < NumElts; ++I) {
2910       Constant &Elt = *CAZ->getElementValue(I);
2911       Ops.push_back(getOrCreateVReg(Elt));
2912     }
2913     EntryBuilder->buildBuildVector(Reg, Ops);
2914   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2915     // Return the scalar if it is a <1 x Ty> vector.
2916     if (CV->getNumElements() == 1)
2917       return translateCopy(C, *CV->getElementAsConstant(0),
2918                            *EntryBuilder.get());
2919     SmallVector<Register, 4> Ops;
2920     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2921       Constant &Elt = *CV->getElementAsConstant(i);
2922       Ops.push_back(getOrCreateVReg(Elt));
2923     }
2924     EntryBuilder->buildBuildVector(Reg, Ops);
2925   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
2926     switch (CE->getOpcode()) {
2927 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
2928   case Instruction::OPCODE:                                                    \
2929     return translate##OPCODE(*CE, *EntryBuilder.get());
2930 #include "llvm/IR/Instruction.def"
2931     default:
2932       return false;
2933     }
2934   } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
2935     if (CV->getNumOperands() == 1)
2936       return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get());
2937     SmallVector<Register, 4> Ops;
2938     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2939       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2940     }
2941     EntryBuilder->buildBuildVector(Reg, Ops);
2942   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2943     EntryBuilder->buildBlockAddress(Reg, BA);
2944   } else
2945     return false;
2946 
2947   return true;
2948 }
2949 
2950 void IRTranslator::finalizeBasicBlock() {
2951   for (auto &BTB : SL->BitTestCases) {
2952     // Emit header first, if it wasn't already emitted.
2953     if (!BTB.Emitted)
2954       emitBitTestHeader(BTB, BTB.Parent);
2955 
2956     BranchProbability UnhandledProb = BTB.Prob;
2957     for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
2958       UnhandledProb -= BTB.Cases[j].ExtraProb;
2959       // Set the current basic block to the MBB we wish to insert the code into.
2960       MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
2961       // If all cases cover a contiguous range, it is not necessary to jump to
2962       // the default block after the last bit test fails. This is because the
2963       // range check during bit test header creation has guaranteed that every
2964       // case here doesn't go outside the range. In this case, there is no need
2965       // to perform the last bit test, as it will always be true. Instead, make
2966       // the second-to-last bit-test fall through to the target of the last bit
2967       // test, and delete the last bit test.
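      // e.g. if the cases cover the contiguous range [8, 15], the header's
      // range check has already rejected out-of-range values, so the final
      // bit test can never fail and is deleted below.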
2968 
2969       MachineBasicBlock *NextMBB;
2970       if (BTB.ContiguousRange && j + 2 == ej) {
2971         // Second-to-last bit-test with contiguous range: fall through to the
2972         // target of the final bit test.
2973         NextMBB = BTB.Cases[j + 1].TargetBB;
2974       } else if (j + 1 == ej) {
2975         // For the last bit test, fall through to Default.
2976         NextMBB = BTB.Default;
2977       } else {
2978         // Otherwise, fall through to the next bit test.
2979         NextMBB = BTB.Cases[j + 1].ThisBB;
2980       }
2981 
2982       emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
2983 
2984       if (BTB.ContiguousRange && j + 2 == ej) {
2985         // We need to record the replacement phi edge here that normally
2986         // happens in emitBitTestCase before we delete the case, otherwise the
2987         // phi edge will be lost.
2988         addMachineCFGPred({BTB.Parent->getBasicBlock(),
2989                            BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
2990                           MBB);
2991         // Since we're not going to use the final bit test, remove it.
2992         BTB.Cases.pop_back();
2993         break;
2994       }
2995     }
2996     // This is "default" BB. We have two jumps to it. From "header" BB and from
2997     // last "case" BB, unless the latter was skipped.
2998     CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
2999                                    BTB.Default->getBasicBlock()};
3000     addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3001     if (!BTB.ContiguousRange) {
3002       addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3003     }
3004   }
3005   SL->BitTestCases.clear();
3006 
3007   for (auto &JTCase : SL->JTCases) {
3008     // Emit header first, if it wasn't already emitted.
3009     if (!JTCase.first.Emitted)
3010       emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3011 
3012     emitJumpTable(JTCase.second, JTCase.second.MBB);
3013   }
3014   SL->JTCases.clear();
3015 
3016   for (auto &SwCase : SL->SwitchCases)
3017     emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3018   SL->SwitchCases.clear();
3019 }
3020 
3021 void IRTranslator::finalizeFunction() {
3022   // Release the memory used by the different maps we
3023   // needed during the translation.
3024   PendingPHIs.clear();
3025   VMap.reset();
3026   FrameIndices.clear();
3027   MachinePreds.clear();
3028   // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3029   // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
3030   // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
3031   EntryBuilder.reset();
3032   CurBuilder.reset();
3033   FuncInfo.clear();
3034 }
3035 
3036 /// Returns true if a BasicBlock \p BB within a variadic function contains a
3037 /// variadic musttail call.
3038 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3039   if (!IsVarArg)
3040     return false;
3041 
3042   // Walk the block backwards, because tail calls usually only appear at the end
3043   // of a block.
3044   return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
3045     const auto *CI = dyn_cast<CallInst>(&I);
3046     return CI && CI->isMustTailCall();
3047   });
3048 }
3049 
3050 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3051   MF = &CurMF;
3052   const Function &F = MF->getFunction();
3053   if (F.empty())
3054     return false;
3055   GISelCSEAnalysisWrapper &Wrapper =
3056       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3057   // Set the CSEConfig and run the analysis.
3058   GISelCSEInfo *CSEInfo = nullptr;
3059   TPC = &getAnalysis<TargetPassConfig>();
3060   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3061                        ? EnableCSEInIRTranslator
3062                        : TPC->isGISelCSEEnabled();
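  // An explicit -enable-cse-in-irtranslator on the command line overrides
  // the TargetPassConfig default.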
3063 
3064   if (EnableCSE) {
3065     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3066     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3067     EntryBuilder->setCSEInfo(CSEInfo);
3068     CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3069     CurBuilder->setCSEInfo(CSEInfo);
3070   } else {
3071     EntryBuilder = std::make_unique<MachineIRBuilder>();
3072     CurBuilder = std::make_unique<MachineIRBuilder>();
3073   }
3074   CLI = MF->getSubtarget().getCallLowering();
3075   CurBuilder->setMF(*MF);
3076   EntryBuilder->setMF(*MF);
3077   MRI = &MF->getRegInfo();
3078   DL = &F.getParent()->getDataLayout();
3079   ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3080   const TargetMachine &TM = MF->getTarget();
3081   TM.resetTargetOptions(F);
3082   EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
3083   FuncInfo.MF = MF;
3084   if (EnableOpts)
3085     FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3086   else
3087     FuncInfo.BPI = nullptr;
3088 
3089   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3090 
3091   const auto &TLI = *MF->getSubtarget().getTargetLowering();
3092 
3093   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3094   SL->init(TLI, TM, *DL);
3095 
3096 
3097 
3098   assert(PendingPHIs.empty() && "stale PHIs");
3099 
3100   if (!DL->isLittleEndian()) {
3101     // Currently we don't properly handle big endian code.
3102     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3103                                F.getSubprogram(), &F.getEntryBlock());
3104     R << "unable to translate in big endian mode";
3105     reportTranslationError(*MF, *TPC, *ORE, R);
3106   }
3107 
3108   // Release the per-function state when we return, whether we succeeded or not.
3109   auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3110 
3111   // Set up a separate basic block for the arguments and constants.
3112   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3113   MF->push_back(EntryBB);
3114   EntryBuilder->setMBB(*EntryBB);
3115 
3116   DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3117   SwiftError.setFunction(CurMF);
3118   SwiftError.createEntriesInEntryBlock(DbgLoc);
3119 
3120   bool IsVarArg = F.isVarArg();
3121   bool HasMustTailInVarArgFn = false;
3122 
3123   // Create all blocks, in IR order, to preserve the layout.
3124   for (const BasicBlock &BB: F) {
3125     auto *&MBB = BBToMBB[&BB];
3126 
3127     MBB = MF->CreateMachineBasicBlock(&BB);
3128     MF->push_back(MBB);
3129 
3130     if (BB.hasAddressTaken())
3131       MBB->setHasAddressTaken();
3132 
3133     if (!HasMustTailInVarArgFn)
3134       HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3135   }
3136 
3137   MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3138 
3139   // Make our arguments/constants entry block fall through to the IR entry block.
3140   EntryBB->addSuccessor(&getMBB(F.front()));
3141 
3142   if (CLI->fallBackToDAGISel(*MF)) {
3143     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3144                                F.getSubprogram(), &F.getEntryBlock());
3145     R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3146     reportTranslationError(*MF, *TPC, *ORE, R);
3147     return false;
3148   }
3149 
3150   // Lower the actual args into this basic block.
3151   SmallVector<ArrayRef<Register>, 8> VRegArgs;
3152   for (const Argument &Arg: F.args()) {
3153     if (DL->getTypeStoreSize(Arg.getType()).isZero())
3154       continue; // Don't handle zero sized types.
3155     ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3156     VRegArgs.push_back(VRegs);
3157 
3158     if (Arg.hasSwiftErrorAttr()) {
3159       assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3160       SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3161     }
3162   }
3163 
3164   if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) {
3165     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3166                                F.getSubprogram(), &F.getEntryBlock());
3167     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3168     reportTranslationError(*MF, *TPC, *ORE, R);
3169     return false;
3170   }
3171 
3172   // Need to visit defs before uses when translating instructions.
3173   GISelObserverWrapper WrapperObserver;
3174   if (EnableCSE && CSEInfo)
3175     WrapperObserver.addObserver(CSEInfo);
3176   {
3177     ReversePostOrderTraversal<const Function *> RPOT(&F);
3178 #ifndef NDEBUG
3179     DILocationVerifier Verifier;
3180     WrapperObserver.addObserver(&Verifier);
3181 #endif // ifndef NDEBUG
3182     RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3183     RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3184     for (const BasicBlock *BB : RPOT) {
3185       MachineBasicBlock &MBB = getMBB(*BB);
3186       // Set the insertion point of all the following translations to
3187       // the end of this basic block.
3188       CurBuilder->setMBB(MBB);
3189       HasTailCall = false;
3190       for (const Instruction &Inst : *BB) {
3191         // If we translated a tail call in the last step, then we know
3192         // everything after the call is either a return, or something that is
3193         // handled by the call itself. (E.g. a lifetime marker or assume
3194         // intrinsic.) In this case, we should stop translating the block and
3195         // move on.
3196         if (HasTailCall)
3197           break;
3198 #ifndef NDEBUG
3199         Verifier.setCurrentInst(&Inst);
3200 #endif // ifndef NDEBUG
3201         if (translate(Inst))
3202           continue;
3203 
3204         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3205                                    Inst.getDebugLoc(), BB);
3206         R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3207 
3208         if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3209           std::string InstStrStorage;
3210           raw_string_ostream InstStr(InstStrStorage);
3211           InstStr << Inst;
3212 
3213           R << ": '" << InstStr.str() << "'";
3214         }
3215 
3216         reportTranslationError(*MF, *TPC, *ORE, R);
3217         return false;
3218       }
3219 
3220       finalizeBasicBlock();
3221     }
3222 #ifndef NDEBUG
3223     WrapperObserver.removeObserver(&Verifier);
3224 #endif
3225   }
3226 
3227   finishPendingPhis();
3228 
3229   SwiftError.propagateVRegs();
3230 
3231   // Merge the argument lowering and constants block with its single
3232   // successor, the LLVM-IR entry block.  We want the basic block to
3233   // be maximal.
3234   assert(EntryBB->succ_size() == 1 &&
3235          "Custom BB used for lowering should have only one successor");
3236   // Get the successor of the current entry block.
3237   MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
3238   assert(NewEntryBB.pred_size() == 1 &&
3239          "LLVM-IR entry block has a predecessor!?");
3240   // Move all the instructions from the current entry block to the
3241   // new entry block.
3242   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
3243                     EntryBB->end());
3244 
3245   // Update the live-in information for the new entry block.
3246   for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
3247     NewEntryBB.addLiveIn(LiveIn);
3248   NewEntryBB.sortUniqueLiveIns();
3249 
3250   // Get rid of the now empty basic block.
3251   EntryBB->removeSuccessor(&NewEntryBB);
3252   MF->remove(EntryBB);
3253   MF->DeleteMachineBasicBlock(EntryBB);
3254 
3255   assert(&MF->front() == &NewEntryBB &&
3256          "New entry wasn't next in the list of basic blocks!");
3257 
3258   // Initialize stack protector information.
3259   StackProtector &SP = getAnalysis<StackProtector>();
3260   SP.copyToMachineFrameInfo(MF->getFrameInfo());
3261 
3262   return false;
3263 }
3264