//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have a debug loc line of 0 because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            MI.getDebugLoc().getLine() == 0) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG


void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  if (OptLevel != CodeGenOpt::None)
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value. For a Constant we could
  // instead load an immediate, or inline the constant at each use; either
  // way, creating a virtual register requires a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    assert(CI && "Instruction should be CmpInst");
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
                         MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may clobber the insertion point, but that does not matter:
  // a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over a 'not' node that is not itself part of the tree, and remember
  // to invert the op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it needs
  // to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have the same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
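    //
    // Worked example: if A = 60% and B = 40%, BB1 gets 30%/70%, and TmpBB's
    // {30%, 40%} normalize to 3/7 and 4/7. The requirement then holds:
    //   0.3 + 0.7 * 3/7 = 0.6 = TrueProb for the original BB.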

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.
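    //
    // Worked example: if A = 60% and B = 40%, BB1 gets 80%/20%, and TmpBB's
    // {60%, 20%} normalize to 3/4 and 1/4. The requirement then holds:
    //   0.2 + 0.8 * 1/4 = 0.4 = FalseProb for the original BB.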

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
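    // Without BPI, assume the cases and the default destination are all
    // equally likely; the "+ 1" below accounts for the default destination.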
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just reuse the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
        CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
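  // E.g. with DefaultProb = 20%, the jump edge gains 10%, the fallthrough
  // edge gives up 10%, and the edge from JumpMBB to DefaultMBB is set to 10%.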
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

950   // range check, and fall through to our fallthrough block.
951   JTH->HeaderBB = CurMBB;
952   JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
953 
954   // If we're in the right place, emit the jump table header right now.
955   if (CurMBB == SwitchMBB) {
956     if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
957       return false;
958     JTH->Emitted = true;
959   }
960   return true;
961 }
962 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
963                                             Value *Cond,
964                                             MachineBasicBlock *Fallthrough,
965                                             bool FallthroughUnreachable,
966                                             BranchProbability UnhandledProbs,
967                                             MachineBasicBlock *CurMBB,
968                                             MachineIRBuilder &MIB,
969                                             MachineBasicBlock *SwitchMBB) {
970   using namespace SwitchCG;
971   const Value *RHS, *LHS, *MHS;
972   CmpInst::Predicate Pred;
973   if (I->Low == I->High) {
974     // Check Cond == I->Low.
975     Pred = CmpInst::ICMP_EQ;
976     LHS = Cond;
977     RHS = I->Low;
978     MHS = nullptr;
979   } else {
980     // Check I->Low <= Cond <= I->High.
981     Pred = CmpInst::ICMP_SLE;
982     LHS = I->Low;
983     MHS = Cond;
984     RHS = I->High;
985   }
986 
987   // If Fallthrough is unreachable, fold away the comparison.
988   // The false probability is the sum of all unhandled cases.
989   CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
990                CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
991 
992   emitSwitchCase(CB, SwitchMBB, MIB);
993   return true;
994 }
995 
996 void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
997                                      MachineBasicBlock *SwitchBB) {
998   MachineIRBuilder &MIB = *CurBuilder;
999   MIB.setMBB(*SwitchBB);
1000 
1001   // Subtract the minimum value.
1002   Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1003 
1004   LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1005   Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1006   auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1007 
1008   Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
1009   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1010 
1011   LLT MaskTy = SwitchOpTy;
1012   if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
1013       !isPowerOf2_32(MaskTy.getSizeInBits()))
1014     MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1015   else {
1016     // Ensure that the type will fit the mask value.
1017     for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1018       if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use the pointer type; it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = countPopulation(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
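    // E.g. for Mask = 0b0100 the only set bit is at index 2, so the test
    // reduces to checking Reg == 2.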
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in the bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough between the two
  // successors of CurMBB.
1136   if (!BTB->ContiguousRange) {
1137     BTB->Prob += DefaultProb / 2;
1138     BTB->DefaultProb -= DefaultProb / 2;
1139   }
1140 
1141   if (FallthroughUnreachable)
1142     BTB->FallthroughUnreachable = true;
1143 
1144   // If we're in the right place, emit the bit test header right now.
1145   if (CurMBB == SwitchMBB) {
1146     emitBitTestHeader(*BTB, SwitchMBB);
1147     BTB->Emitted = true;
1148   }
1149   return true;
1150 }
1151 
1152 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1153                                        Value *Cond,
1154                                        MachineBasicBlock *SwitchMBB,
1155                                        MachineBasicBlock *DefaultMBB,
1156                                        MachineIRBuilder &MIB) {
1157   using namespace SwitchCG;
1158   MachineFunction *CurMF = FuncInfo.MF;
1159   MachineBasicBlock *NextMBB = nullptr;
1160   MachineFunction::iterator BBI(W.MBB);
1161   if (++BBI != FuncInfo.MF->end())
1162     NextMBB = &*BBI;
1163 
1164   if (EnableOpts) {
1165     // Here, we order cases by probability so the most likely case will be
1166     // checked first. However, two clusters can have the same probability in
1167     // which case their relative ordering is non-deterministic. So we use Low
1168     // as a tie-breaker as clusters are guaranteed to never overlap.
1169     llvm::sort(W.FirstCluster, W.LastCluster + 1,
1170                [](const CaseCluster &a, const CaseCluster &b) {
1171                  return a.Prob != b.Prob
1172                             ? a.Prob > b.Prob
1173                             : a.Low->getValue().slt(b.Low->getValue());
1174                });
1175 
1176     // Rearrange the case blocks so that the last one falls through if possible
1177     // without changing the order of probabilities.
1178     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1179       --I;
1180       if (I->Prob > W.LastCluster->Prob)
1181         break;
1182       if (I->Kind == CC_Range && I->MBB == NextMBB) {
1183         std::swap(*I, *W.LastCluster);
1184         break;
1185       }
1186     }
1187   }
1188 
1189   // Compute total probability.
1190   BranchProbability DefaultProb = W.DefaultProb;
1191   BranchProbability UnhandledProbs = DefaultProb;
1192   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1193     UnhandledProbs += I->Prob;
1194 
1195   MachineBasicBlock *CurMBB = W.MBB;
1196   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1197     bool FallthroughUnreachable = false;
1198     MachineBasicBlock *Fallthrough;
1199     if (I == W.LastCluster) {
1200       // For the last cluster, fall through to the default destination.
1201       Fallthrough = DefaultMBB;
1202       FallthroughUnreachable = isa<UnreachableInst>(
1203           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1204     } else {
1205       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1206       CurMF->insert(BBI, Fallthrough);
1207     }
1208     UnhandledProbs -= I->Prob;
1209 
1210     switch (I->Kind) {
1211     case CC_BitTests: {
1212       if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1213                                 DefaultProb, UnhandledProbs, I, Fallthrough,
1214                                 FallthroughUnreachable)) {
1215         LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1216         return false;
1217       }
1218       break;
1219     }
1220 
1221     case CC_JumpTable: {
1222       if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1223                                   UnhandledProbs, I, Fallthrough,
1224                                   FallthroughUnreachable)) {
1225         LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1226         return false;
1227       }
1228       break;
1229     }
1230     case CC_Range: {
1231       if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1232                                     FallthroughUnreachable, UnhandledProbs,
1233                                     CurMBB, MIB, SwitchMBB)) {
1234         LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1235         return false;
1236       }
1237       break;
1238     }
1239     }
1240     CurMBB = Fallthrough;
1241   }
1242 
1243   return true;
1244 }
1245 
1246 bool IRTranslator::translateIndirectBr(const User &U,
1247                                        MachineIRBuilder &MIRBuilder) {
1248   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1249 
1250   const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1251   MIRBuilder.buildBrIndirect(Tgt);
1252 
1253   // Link successors.
1254   SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1255   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1256   for (const BasicBlock *Succ : successors(&BrInst)) {
1257     // It's legal for indirectbr instructions to have duplicate blocks in the
1258     // destination list. We don't allow this in MIR. Skip anything that's
1259     // already a successor.
1260     if (!AddedSuccessors.insert(Succ).second)
1261       continue;
1262     CurBB.addSuccessor(&getMBB(*Succ));
1263   }
1264 
1265   return true;
1266 }
1267 
1268 static bool isSwiftError(const Value *V) {
1269   if (auto Arg = dyn_cast<Argument>(V))
1270     return Arg->hasSwiftErrorAttr();
1271   if (auto AI = dyn_cast<AllocaInst>(V))
1272     return AI->isSwiftError();
1273   return false;
1274 }
1275 
1276 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1277   const LoadInst &LI = cast<LoadInst>(U);
1278   if (DL->getTypeStoreSize(LI.getType()) == 0)
1279     return true;
1280 
1281   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1282   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1283   Register Base = getOrCreateVReg(*LI.getPointerOperand());
1284 
1285   Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
1286   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1287 
1288   if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
1289     assert(Regs.size() == 1 && "swifterror should be single pointer");
1290     Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
1291                                                     LI.getPointerOperand());
1292     MIRBuilder.buildCopy(Regs[0], VReg);
1293     return true;
1294   }
1295 
1296   auto &TLI = *MF->getSubtarget().getTargetLowering();
1297   MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);
1298 
1299   const MDNode *Ranges =
1300       Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1301   for (unsigned i = 0; i < Regs.size(); ++i) {
1302     Register Addr;
1303     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1304 
1305     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1306     Align BaseAlign = getMemOpAlign(LI);
1307     auto MMO = MF->getMachineMemOperand(
1308         Ptr, Flags, MRI->getType(Regs[i]),
1309         commonAlignment(BaseAlign, Offsets[i] / 8), LI.getAAMetadata(), Ranges,
1310         LI.getSyncScopeID(), LI.getOrdering());
1311     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1312   }
1313 
1314   return true;
1315 }
1316 
1317 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1318   const StoreInst &SI = cast<StoreInst>(U);
1319   if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
1320     return true;
1321 
1322   ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1323   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1324   Register Base = getOrCreateVReg(*SI.getPointerOperand());
1325 
1326   Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
1327   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1328 
1329   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1330     assert(Vals.size() == 1 && "swifterror should be single pointer");
1331 
1332     Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1333                                                     SI.getPointerOperand());
1334     MIRBuilder.buildCopy(VReg, Vals[0]);
1335     return true;
1336   }
1337 
1338   auto &TLI = *MF->getSubtarget().getTargetLowering();
1339   MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
1340 
1341   for (unsigned i = 0; i < Vals.size(); ++i) {
1342     Register Addr;
1343     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1344 
1345     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1346     Align BaseAlign = getMemOpAlign(SI);
1347     auto MMO = MF->getMachineMemOperand(
1348         Ptr, Flags, MRI->getType(Vals[i]),
1349         commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
1350         SI.getSyncScopeID(), SI.getOrdering());
1351     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1352   }
1353   return true;
1354 }
1355 
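// Worked example (illustrative, assuming a typical DataLayout): for
//   %x = extractvalue { i8, i64 } %agg, 1
// the index list built below is [0, 1]; with i64 aligned to 8 bytes the
// indexed offset is 8 bytes, so the helper returns 64, i.e. the offset in
// bits.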
1356 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1357   const Value *Src = U.getOperand(0);
1358   Type *Int32Ty = Type::getInt32Ty(U.getContext());
1359 
  // getIndexedOffsetInType is designed for GEPs: its first index steps over
  // the pointed-to type rather than into the aggregate itself, so prepend an
  // explicit zero index before the real indices.
1362   SmallVector<Value *, 1> Indices;
1363   Indices.push_back(ConstantInt::get(Int32Ty, 0));
1364 
1365   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1366     for (auto Idx : EVI->indices())
1367       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1368   } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1369     for (auto Idx : IVI->indices())
1370       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1371   } else {
1372     for (unsigned i = 1; i < U.getNumOperands(); ++i)
1373       Indices.push_back(U.getOperand(i));
1374   }
1375 
1376   return 8 * static_cast<uint64_t>(
1377                  DL.getIndexedOffsetInType(Src->getType(), Indices));
1378 }
1379 
1380 bool IRTranslator::translateExtractValue(const User &U,
1381                                          MachineIRBuilder &MIRBuilder) {
1382   const Value *Src = U.getOperand(0);
1383   uint64_t Offset = getOffsetFromIndices(U, *DL);
1384   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1385   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1386   unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1387   auto &DstRegs = allocateVRegs(U);
1388 
1389   for (unsigned i = 0; i < DstRegs.size(); ++i)
1390     DstRegs[i] = SrcRegs[Idx++];
1391 
1392   return true;
1393 }
1394 
1395 bool IRTranslator::translateInsertValue(const User &U,
1396                                         MachineIRBuilder &MIRBuilder) {
1397   const Value *Src = U.getOperand(0);
1398   uint64_t Offset = getOffsetFromIndices(U, *DL);
1399   auto &DstRegs = allocateVRegs(U);
1400   ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1401   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1402   ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1403   auto InsertedIt = InsertedRegs.begin();
1404 
1405   for (unsigned i = 0; i < DstRegs.size(); ++i) {
1406     if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1407       DstRegs[i] = *InsertedIt++;
1408     else
1409       DstRegs[i] = SrcRegs[i];
1410   }
1411 
1412   return true;
1413 }
1414 
1415 bool IRTranslator::translateSelect(const User &U,
1416                                    MachineIRBuilder &MIRBuilder) {
1417   Register Tst = getOrCreateVReg(*U.getOperand(0));
1418   ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1419   ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1420   ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1421 
1422   uint16_t Flags = 0;
1423   if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1424     Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1425 
1426   for (unsigned i = 0; i < ResRegs.size(); ++i) {
1427     MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1428   }
1429 
1430   return true;
1431 }
1432 
1433 bool IRTranslator::translateCopy(const User &U, const Value &V,
1434                                  MachineIRBuilder &MIRBuilder) {
1435   Register Src = getOrCreateVReg(V);
1436   auto &Regs = *VMap.getVRegs(U);
1437   if (Regs.empty()) {
1438     Regs.push_back(Src);
1439     VMap.getOffsets(U)->push_back(0);
1440   } else {
1441     // If we already assigned a vreg for this instruction, we can't change that.
1442     // Emit a copy to satisfy the users we already emitted.
1443     MIRBuilder.buildCopy(Regs[0], Src);
1444   }
1445   return true;
1446 }
1447 
1448 bool IRTranslator::translateBitCast(const User &U,
1449                                     MachineIRBuilder &MIRBuilder) {
1450   // If we're bitcasting to the source type, we can reuse the source vreg.
1451   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1452       getLLTForType(*U.getType(), *DL))
1453     return translateCopy(U, *U.getOperand(0), MIRBuilder);
1454 
1455   return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1456 }
1457 
1458 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1459                                  MachineIRBuilder &MIRBuilder) {
1460   Register Op = getOrCreateVReg(*U.getOperand(0));
1461   Register Res = getOrCreateVReg(U);
1462   MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1463   return true;
1464 }
1465 
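// Illustrative sketch (not from the source): for
//   %p = getelementptr i32, i32* %base, i64 %i
// with a 4-byte i32 the loop below emits roughly
//   %four:_(s64) = G_CONSTANT i64 4
//   %off:_(s64)  = G_MUL %i, %four
//   %p:_(p0)     = G_PTR_ADD %base, %off
// whereas an all-constant GEP folds into a single trailing G_PTR_ADD of the
// accumulated offset.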
1466 bool IRTranslator::translateGetElementPtr(const User &U,
1467                                           MachineIRBuilder &MIRBuilder) {
1468   Value &Op0 = *U.getOperand(0);
1469   Register BaseReg = getOrCreateVReg(Op0);
1470   Type *PtrIRTy = Op0.getType();
1471   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1472   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1473   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1474 
1475   // Normalize Vector GEP - all scalar operands should be converted to the
1476   // splat vector.
1477   unsigned VectorWidth = 0;
1478 
1479   // True if we should use a splat vector; using VectorWidth alone is not
1480   // sufficient.
1481   bool WantSplatVector = false;
1482   if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1483     VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1484     // We don't produce 1 x N vectors; those are treated as scalars.
1485     WantSplatVector = VectorWidth > 1;
1486   }
1487 
1488   // We might need to splat the base pointer into a vector if the offsets
1489   // are vectors.
1490   if (WantSplatVector && !PtrTy.isVector()) {
1491     BaseReg =
1492         MIRBuilder
1493             .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
1494             .getReg(0);
1495     PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1496     PtrTy = getLLTForType(*PtrIRTy, *DL);
1497     OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1498     OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1499   }
1500 
1501   int64_t Offset = 0;
1502   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1503        GTI != E; ++GTI) {
1504     const Value *Idx = GTI.getOperand();
1505     if (StructType *StTy = GTI.getStructTypeOrNull()) {
1506       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1507       Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1508       continue;
1509     } else {
1510       uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1511 
1512       // If this is a scalar constant or a splat vector of constants,
1513       // handle it quickly.
1514       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1515         Offset += ElementSize * CI->getSExtValue();
1516         continue;
1517       }
1518 
1519       if (Offset != 0) {
1520         auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1521         BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1522                       .getReg(0);
1523         Offset = 0;
1524       }
1525 
1526       Register IdxReg = getOrCreateVReg(*Idx);
1527       LLT IdxTy = MRI->getType(IdxReg);
1528       if (IdxTy != OffsetTy) {
1529         if (!IdxTy.isVector() && WantSplatVector) {
1530           IdxReg = MIRBuilder.buildSplatVector(
1531             OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
1532         }
1533 
1534         IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1535       }
1536 
1537       // N = N + Idx * ElementSize;
      // Avoid emitting the multiply when ElementSize is 1.
1539       Register GepOffsetReg;
1540       if (ElementSize != 1) {
1541         auto ElementSizeMIB = MIRBuilder.buildConstant(
1542             getLLTForType(*OffsetIRTy, *DL), ElementSize);
1543         GepOffsetReg =
1544             MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1545       } else
1546         GepOffsetReg = IdxReg;
1547 
1548       BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1549     }
1550   }
1551 
  if (Offset != 0) {
    auto OffsetMIB = MIRBuilder.buildConstant(OffsetTy, Offset);
1555     MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1556     return true;
1557   }
1558 
1559   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1560   return true;
1561 }
1562 
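// Illustrative sketch (not from the source): a call such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
// becomes roughly
//   G_MEMCPY %dst(p0), %src(p0), %n(s64), 0
// where the trailing immediate is the tail-call flag and the two attached
// memory operands record the destination/source alignment and volatility.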
1563 bool IRTranslator::translateMemFunc(const CallInst &CI,
1564                                     MachineIRBuilder &MIRBuilder,
1565                                     unsigned Opcode) {
1566 
1567   // If the source is undef, then just emit a nop.
1568   if (isa<UndefValue>(CI.getArgOperand(1)))
1569     return true;
1570 
1571   SmallVector<Register, 3> SrcRegs;
1572 
1573   unsigned MinPtrSize = UINT_MAX;
1574   for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1575     Register SrcReg = getOrCreateVReg(**AI);
1576     LLT SrcTy = MRI->getType(SrcReg);
1577     if (SrcTy.isPointer())
1578       MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1579     SrcRegs.push_back(SrcReg);
1580   }
1581 
1582   LLT SizeTy = LLT::scalar(MinPtrSize);
1583 
1584   // The size operand should be the minimum of the pointer sizes.
1585   Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1586   if (MRI->getType(SizeOpReg) != SizeTy)
1587     SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1588 
1589   auto ICall = MIRBuilder.buildInstr(Opcode);
1590   for (Register SrcReg : SrcRegs)
1591     ICall.addUse(SrcReg);
1592 
1593   Align DstAlign;
1594   Align SrcAlign;
1595   unsigned IsVol =
1596       cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1597 
1598   if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1599     DstAlign = MCI->getDestAlign().valueOrOne();
1600     SrcAlign = MCI->getSourceAlign().valueOrOne();
1601   } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1602     DstAlign = MCI->getDestAlign().valueOrOne();
1603     SrcAlign = MCI->getSourceAlign().valueOrOne();
1604   } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1605     DstAlign = MMI->getDestAlign().valueOrOne();
1606     SrcAlign = MMI->getSourceAlign().valueOrOne();
1607   } else {
1608     auto *MSI = cast<MemSetInst>(&CI);
1609     DstAlign = MSI->getDestAlign().valueOrOne();
1610   }
1611 
1612   if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1613     // We need to propagate the tail call flag from the IR inst as an argument.
1614     // Otherwise, we have to pessimize and assume later that we cannot tail call
1615     // any memory intrinsics.
1616     ICall.addImm(CI.isTailCall() ? 1 : 0);
1617   }
1618 
1619   // Create mem operands to store the alignment and volatile info.
  auto VolFlag =
      IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
1621   ICall.addMemOperand(MF->getMachineMemOperand(
1622       MachinePointerInfo(CI.getArgOperand(0)),
1623       MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
1624   if (Opcode != TargetOpcode::G_MEMSET)
1625     ICall.addMemOperand(MF->getMachineMemOperand(
1626         MachinePointerInfo(CI.getArgOperand(1)),
1627         MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));
1628 
1629   return true;
1630 }
1631 
1632 void IRTranslator::getStackGuard(Register DstReg,
1633                                  MachineIRBuilder &MIRBuilder) {
1634   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1635   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1636   auto MIB =
1637       MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1638 
1639   auto &TLI = *MF->getSubtarget().getTargetLowering();
1640   Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1641   if (!Global)
1642     return;
1643 
1644   unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1645   LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1646 
1647   MachinePointerInfo MPInfo(Global);
1648   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1649                MachineMemOperand::MODereferenceable;
1650   MachineMemOperand *MemRef = MF->getMachineMemOperand(
1651       MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1652   MIB.setMemRefs({MemRef});
1653 }
1654 
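// Illustrative sketch (not from the source): an overflow intrinsic such as
//   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// maps onto a single two-result instruction:
//   %sum:_(s32), %ovf:_(s1) = G_UADDO %a, %b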
1655 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1656                                               MachineIRBuilder &MIRBuilder) {
1657   ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1658   MIRBuilder.buildInstr(
1659       Op, {ResRegs[0], ResRegs[1]},
1660       {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1661 
1662   return true;
1663 }
1664 
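// Illustrative sketch (not from the source):
//   %r = call i16 @llvm.smul.fix.i16(i16 %a, i16 %b, i32 7)
// becomes G_SMULFIX %a, %b, 7; the scale is always a constant and is encoded
// as an immediate operand.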
1665 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1666                                                 MachineIRBuilder &MIRBuilder) {
1667   Register Dst = getOrCreateVReg(CI);
1668   Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1669   Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1670   uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
1672   return true;
1673 }
1674 
1675 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1676   switch (ID) {
1677     default:
1678       break;
1679     case Intrinsic::bswap:
1680       return TargetOpcode::G_BSWAP;
1681     case Intrinsic::bitreverse:
1682       return TargetOpcode::G_BITREVERSE;
1683     case Intrinsic::fshl:
1684       return TargetOpcode::G_FSHL;
1685     case Intrinsic::fshr:
1686       return TargetOpcode::G_FSHR;
1687     case Intrinsic::ceil:
1688       return TargetOpcode::G_FCEIL;
1689     case Intrinsic::cos:
1690       return TargetOpcode::G_FCOS;
1691     case Intrinsic::ctpop:
1692       return TargetOpcode::G_CTPOP;
1693     case Intrinsic::exp:
1694       return TargetOpcode::G_FEXP;
1695     case Intrinsic::exp2:
1696       return TargetOpcode::G_FEXP2;
1697     case Intrinsic::fabs:
1698       return TargetOpcode::G_FABS;
1699     case Intrinsic::copysign:
1700       return TargetOpcode::G_FCOPYSIGN;
1701     case Intrinsic::minnum:
1702       return TargetOpcode::G_FMINNUM;
1703     case Intrinsic::maxnum:
1704       return TargetOpcode::G_FMAXNUM;
1705     case Intrinsic::minimum:
1706       return TargetOpcode::G_FMINIMUM;
1707     case Intrinsic::maximum:
1708       return TargetOpcode::G_FMAXIMUM;
1709     case Intrinsic::canonicalize:
1710       return TargetOpcode::G_FCANONICALIZE;
1711     case Intrinsic::floor:
1712       return TargetOpcode::G_FFLOOR;
1713     case Intrinsic::fma:
1714       return TargetOpcode::G_FMA;
1715     case Intrinsic::log:
1716       return TargetOpcode::G_FLOG;
1717     case Intrinsic::log2:
1718       return TargetOpcode::G_FLOG2;
1719     case Intrinsic::log10:
1720       return TargetOpcode::G_FLOG10;
1721     case Intrinsic::nearbyint:
1722       return TargetOpcode::G_FNEARBYINT;
1723     case Intrinsic::pow:
1724       return TargetOpcode::G_FPOW;
1725     case Intrinsic::powi:
1726       return TargetOpcode::G_FPOWI;
1727     case Intrinsic::rint:
1728       return TargetOpcode::G_FRINT;
1729     case Intrinsic::round:
1730       return TargetOpcode::G_INTRINSIC_ROUND;
1731     case Intrinsic::roundeven:
1732       return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1733     case Intrinsic::sin:
1734       return TargetOpcode::G_FSIN;
1735     case Intrinsic::sqrt:
1736       return TargetOpcode::G_FSQRT;
1737     case Intrinsic::trunc:
1738       return TargetOpcode::G_INTRINSIC_TRUNC;
1739     case Intrinsic::readcyclecounter:
1740       return TargetOpcode::G_READCYCLECOUNTER;
1741     case Intrinsic::ptrmask:
1742       return TargetOpcode::G_PTRMASK;
1743     case Intrinsic::lrint:
1744       return TargetOpcode::G_INTRINSIC_LRINT;
1745     // FADD/FMUL require checking the FMF, so are handled elsewhere.
1746     case Intrinsic::vector_reduce_fmin:
1747       return TargetOpcode::G_VECREDUCE_FMIN;
1748     case Intrinsic::vector_reduce_fmax:
1749       return TargetOpcode::G_VECREDUCE_FMAX;
1750     case Intrinsic::vector_reduce_add:
1751       return TargetOpcode::G_VECREDUCE_ADD;
1752     case Intrinsic::vector_reduce_mul:
1753       return TargetOpcode::G_VECREDUCE_MUL;
1754     case Intrinsic::vector_reduce_and:
1755       return TargetOpcode::G_VECREDUCE_AND;
1756     case Intrinsic::vector_reduce_or:
1757       return TargetOpcode::G_VECREDUCE_OR;
1758     case Intrinsic::vector_reduce_xor:
1759       return TargetOpcode::G_VECREDUCE_XOR;
1760     case Intrinsic::vector_reduce_smax:
1761       return TargetOpcode::G_VECREDUCE_SMAX;
1762     case Intrinsic::vector_reduce_smin:
1763       return TargetOpcode::G_VECREDUCE_SMIN;
1764     case Intrinsic::vector_reduce_umax:
1765       return TargetOpcode::G_VECREDUCE_UMAX;
1766     case Intrinsic::vector_reduce_umin:
1767       return TargetOpcode::G_VECREDUCE_UMIN;
1768     case Intrinsic::lround:
1769       return TargetOpcode::G_LROUND;
1770     case Intrinsic::llround:
1771       return TargetOpcode::G_LLROUND;
1772   }
1773   return Intrinsic::not_intrinsic;
1774 }
1775 
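// Illustrative sketch (not from the source): a "simple" intrinsic such as
//   %r = call float @llvm.sqrt.f32(float %x)
// lowers to
//   %r:_(s32) = G_FSQRT %x
// with any fast-math flags copied over from the call.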
1776 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1777                                             Intrinsic::ID ID,
1778                                             MachineIRBuilder &MIRBuilder) {
1779 
1780   unsigned Op = getSimpleIntrinsicOpcode(ID);
1781 
1782   // Is this a simple intrinsic?
1783   if (Op == Intrinsic::not_intrinsic)
1784     return false;
1785 
1786   // Yes. Let's translate it.
1787   SmallVector<llvm::SrcOp, 4> VRegs;
1788   for (auto &Arg : CI.args())
1789     VRegs.push_back(getOrCreateVReg(*Arg));
1790 
1791   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1792                         MachineInstr::copyFlagsFromInstruction(CI));
1793   return true;
1794 }
1795 
// TODO: Include ConstrainedOps.def when all strict instructions are defined.
1797 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1798   switch (ID) {
1799   case Intrinsic::experimental_constrained_fadd:
1800     return TargetOpcode::G_STRICT_FADD;
1801   case Intrinsic::experimental_constrained_fsub:
1802     return TargetOpcode::G_STRICT_FSUB;
1803   case Intrinsic::experimental_constrained_fmul:
1804     return TargetOpcode::G_STRICT_FMUL;
1805   case Intrinsic::experimental_constrained_fdiv:
1806     return TargetOpcode::G_STRICT_FDIV;
1807   case Intrinsic::experimental_constrained_frem:
1808     return TargetOpcode::G_STRICT_FREM;
1809   case Intrinsic::experimental_constrained_fma:
1810     return TargetOpcode::G_STRICT_FMA;
1811   case Intrinsic::experimental_constrained_sqrt:
1812     return TargetOpcode::G_STRICT_FSQRT;
1813   default:
1814     return 0;
1815   }
1816 }
1817 
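// Illustrative sketch (not from the source): a constrained operation such as
//   call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
//       metadata !"round.dynamic", metadata !"fpexcept.ignore")
// becomes G_STRICT_FADD %a, %b, and because the exception behavior is
// "ignore", the NoFPExcept flag is set on the instruction.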
1818 bool IRTranslator::translateConstrainedFPIntrinsic(
1819   const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1820   fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
1821 
1822   unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1823   if (!Opcode)
1824     return false;
1825 
1826   unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1827   if (EB == fp::ExceptionBehavior::ebIgnore)
1828     Flags |= MachineInstr::NoFPExcept;
1829 
1830   SmallVector<llvm::SrcOp, 4> VRegs;
1831   VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1832   if (!FPI.isUnaryOp())
1833     VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1834   if (FPI.isTernaryOp())
1835     VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1836 
1837   MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1838   return true;
1839 }
1840 
1841 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1842                                            MachineIRBuilder &MIRBuilder) {
1843   if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
1844     if (ORE->enabled()) {
1845       const Function &F = *MI->getParent()->getParent();
1846       auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1847       if (MemoryOpRemark::canHandle(MI, TLI)) {
1848         MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
1849         R.visit(MI);
1850       }
1851     }
1852   }
1853 
  // If this is a simple intrinsic (that is, we just need to add a def of a
  // vreg and a use for each argument operand), translate it directly.
1856   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1857     return true;
1858 
1859   switch (ID) {
1860   default:
1861     break;
1862   case Intrinsic::lifetime_start:
1863   case Intrinsic::lifetime_end: {
    // No stack colouring at -O0; discard the region information.
1865     if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1866       return true;
1867 
1868     unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1869                                                   : TargetOpcode::LIFETIME_END;
1870 
1871     // Get the underlying objects for the location passed on the lifetime
1872     // marker.
1873     SmallVector<const Value *, 4> Allocas;
1874     getUnderlyingObjects(CI.getArgOperand(1), Allocas);
1875 
1876     // Iterate over each underlying object, creating lifetime markers for each
1877     // static alloca. Quit if we find a non-static alloca.
1878     for (const Value *V : Allocas) {
1879       const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1880       if (!AI)
1881         continue;
1882 
1883       if (!AI->isStaticAlloca())
1884         return true;
1885 
1886       MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1887     }
1888     return true;
1889   }
1890   case Intrinsic::dbg_declare: {
1891     const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1892     assert(DI.getVariable() && "Missing variable");
1893 
1894     const Value *Address = DI.getAddress();
1895     if (!Address || isa<UndefValue>(Address)) {
1896       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1897       return true;
1898     }
1899 
1900     assert(DI.getVariable()->isValidLocationForIntrinsic(
1901                MIRBuilder.getDebugLoc()) &&
1902            "Expected inlined-at fields to agree");
    auto *AI = dyn_cast<AllocaInst>(Address);
1904     if (AI && AI->isStaticAlloca()) {
1905       // Static allocas are tracked at the MF level, no need for DBG_VALUE
1906       // instructions (in fact, they get ignored if they *do* exist).
1907       MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1908                              getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1909     } else {
1910       // A dbg.declare describes the address of a source variable, so lower it
1911       // into an indirect DBG_VALUE.
1912       MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1913                                        DI.getVariable(), DI.getExpression());
1914     }
1915     return true;
1916   }
1917   case Intrinsic::dbg_label: {
1918     const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1919     assert(DI.getLabel() && "Missing label");
1920 
1921     assert(DI.getLabel()->isValidLocationForIntrinsic(
1922                MIRBuilder.getDebugLoc()) &&
1923            "Expected inlined-at fields to agree");
1924 
1925     MIRBuilder.buildDbgLabel(DI.getLabel());
1926     return true;
1927   }
1928   case Intrinsic::vaend:
1929     // No target I know of cares about va_end. Certainly no in-tree target
1930     // does. Simplest intrinsic ever!
1931     return true;
1932   case Intrinsic::vastart: {
1933     auto &TLI = *MF->getSubtarget().getTargetLowering();
1934     Value *Ptr = CI.getArgOperand(0);
1935     unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1936 
1937     // FIXME: Get alignment
1938     MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
1939         .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
1940                                                 MachineMemOperand::MOStore,
1941                                                 ListSize, Align(1)));
1942     return true;
1943   }
1944   case Intrinsic::dbg_value: {
1945     // This form of DBG_VALUE is target-independent.
1946     const DbgValueInst &DI = cast<DbgValueInst>(CI);
1947     const Value *V = DI.getValue();
1948     assert(DI.getVariable()->isValidLocationForIntrinsic(
1949                MIRBuilder.getDebugLoc()) &&
1950            "Expected inlined-at fields to agree");
1951     if (!V || DI.hasArgList()) {
1952       // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
1953       // terminate any prior location.
1954       MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1955     } else if (const auto *CI = dyn_cast<Constant>(V)) {
1956       MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1957     } else {
1958       for (Register Reg : getOrCreateVRegs(*V)) {
1959         // FIXME: This does not handle register-indirect values at offset 0. The
1960         // direct/indirect thing shouldn't really be handled by something as
1961         // implicit as reg+noreg vs reg+imm in the first place, but it seems
1962         // pretty baked in right now.
1963         MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
1964       }
1965     }
1966     return true;
1967   }
1968   case Intrinsic::uadd_with_overflow:
1969     return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1970   case Intrinsic::sadd_with_overflow:
1971     return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1972   case Intrinsic::usub_with_overflow:
1973     return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1974   case Intrinsic::ssub_with_overflow:
1975     return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1976   case Intrinsic::umul_with_overflow:
1977     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1978   case Intrinsic::smul_with_overflow:
1979     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1980   case Intrinsic::uadd_sat:
1981     return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
1982   case Intrinsic::sadd_sat:
1983     return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
1984   case Intrinsic::usub_sat:
1985     return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
1986   case Intrinsic::ssub_sat:
1987     return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
1988   case Intrinsic::ushl_sat:
1989     return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
1990   case Intrinsic::sshl_sat:
1991     return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
1992   case Intrinsic::umin:
1993     return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
1994   case Intrinsic::umax:
1995     return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
1996   case Intrinsic::smin:
1997     return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
1998   case Intrinsic::smax:
1999     return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2000   case Intrinsic::abs:
2001     // TODO: Preserve "int min is poison" arg in GMIR?
2002     return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2003   case Intrinsic::smul_fix:
2004     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2005   case Intrinsic::umul_fix:
2006     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2007   case Intrinsic::smul_fix_sat:
2008     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2009   case Intrinsic::umul_fix_sat:
2010     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2011   case Intrinsic::sdiv_fix:
2012     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2013   case Intrinsic::udiv_fix:
2014     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2015   case Intrinsic::sdiv_fix_sat:
2016     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2017   case Intrinsic::udiv_fix_sat:
2018     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2019   case Intrinsic::fmuladd: {
2020     const TargetMachine &TM = MF->getTarget();
2021     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2022     Register Dst = getOrCreateVReg(CI);
2023     Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2024     Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2025     Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2026     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2027         TLI.isFMAFasterThanFMulAndFAdd(*MF,
2028                                        TLI.getValueType(*DL, CI.getType()))) {
2029       // TODO: Revisit this to see if we should move this part of the
2030       // lowering to the combiner.
2031       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2032                           MachineInstr::copyFlagsFromInstruction(CI));
2033     } else {
2034       LLT Ty = getLLTForType(*CI.getType(), *DL);
2035       auto FMul = MIRBuilder.buildFMul(
2036           Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2037       MIRBuilder.buildFAdd(Dst, FMul, Op2,
2038                            MachineInstr::copyFlagsFromInstruction(CI));
2039     }
2040     return true;
2041   }
2042   case Intrinsic::convert_from_fp16:
2043     // FIXME: This intrinsic should probably be removed from the IR.
2044     MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2045                           getOrCreateVReg(*CI.getArgOperand(0)),
2046                           MachineInstr::copyFlagsFromInstruction(CI));
2047     return true;
2048   case Intrinsic::convert_to_fp16:
2049     // FIXME: This intrinsic should probably be removed from the IR.
2050     MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2051                             getOrCreateVReg(*CI.getArgOperand(0)),
2052                             MachineInstr::copyFlagsFromInstruction(CI));
2053     return true;
2054   case Intrinsic::memcpy_inline:
2055     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2056   case Intrinsic::memcpy:
2057     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2058   case Intrinsic::memmove:
2059     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2060   case Intrinsic::memset:
2061     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2062   case Intrinsic::eh_typeid_for: {
2063     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2064     Register Reg = getOrCreateVReg(CI);
2065     unsigned TypeID = MF->getTypeIDFor(GV);
2066     MIRBuilder.buildConstant(Reg, TypeID);
2067     return true;
2068   }
2069   case Intrinsic::objectsize:
2070     llvm_unreachable("llvm.objectsize.* should have been lowered already");
2071 
2072   case Intrinsic::is_constant:
2073     llvm_unreachable("llvm.is.constant.* should have been lowered already");
2074 
2075   case Intrinsic::stackguard:
2076     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2077     return true;
2078   case Intrinsic::stackprotector: {
2079     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2080     Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2081     getStackGuard(GuardVal, MIRBuilder);
2082 
2083     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2084     int FI = getOrCreateFrameIndex(*Slot);
2085     MF->getFrameInfo().setStackProtectorIndex(FI);
2086 
2087     MIRBuilder.buildStore(
2088         GuardVal, getOrCreateVReg(*Slot),
2089         *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2090                                   MachineMemOperand::MOStore |
2091                                       MachineMemOperand::MOVolatile,
2092                                   PtrTy, Align(8)));
2093     return true;
2094   }
2095   case Intrinsic::stacksave: {
2096     // Save the stack pointer to the location provided by the intrinsic.
2097     Register Reg = getOrCreateVReg(CI);
2098     Register StackPtr = MF->getSubtarget()
2099                             .getTargetLowering()
2100                             ->getStackPointerRegisterToSaveRestore();
2101 
2102     // If the target doesn't specify a stack pointer, then fall back.
2103     if (!StackPtr)
2104       return false;
2105 
2106     MIRBuilder.buildCopy(Reg, StackPtr);
2107     return true;
2108   }
2109   case Intrinsic::stackrestore: {
2110     // Restore the stack pointer from the location provided by the intrinsic.
2111     Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
2112     Register StackPtr = MF->getSubtarget()
2113                             .getTargetLowering()
2114                             ->getStackPointerRegisterToSaveRestore();
2115 
2116     // If the target doesn't specify a stack pointer, then fall back.
2117     if (!StackPtr)
2118       return false;
2119 
2120     MIRBuilder.buildCopy(StackPtr, Reg);
2121     return true;
2122   }
2123   case Intrinsic::cttz:
2124   case Intrinsic::ctlz: {
2125     ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2126     bool isTrailing = ID == Intrinsic::cttz;
2127     unsigned Opcode = isTrailing
2128                           ? Cst->isZero() ? TargetOpcode::G_CTTZ
2129                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
2130                           : Cst->isZero() ? TargetOpcode::G_CTLZ
2131                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2132     MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2133                           {getOrCreateVReg(*CI.getArgOperand(0))});
2134     return true;
2135   }
2136   case Intrinsic::invariant_start: {
2137     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2138     Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2139     MIRBuilder.buildUndef(Undef);
2140     return true;
2141   }
2142   case Intrinsic::invariant_end:
2143     return true;
2144   case Intrinsic::expect:
2145   case Intrinsic::annotation:
2146   case Intrinsic::ptr_annotation:
2147   case Intrinsic::launder_invariant_group:
2148   case Intrinsic::strip_invariant_group: {
2149     // Drop the intrinsic, but forward the value.
2150     MIRBuilder.buildCopy(getOrCreateVReg(CI),
2151                          getOrCreateVReg(*CI.getArgOperand(0)));
2152     return true;
2153   }
2154   case Intrinsic::assume:
2155   case Intrinsic::experimental_noalias_scope_decl:
2156   case Intrinsic::var_annotation:
2157   case Intrinsic::sideeffect:
2158     // Discard annotate attributes, assumptions, and artificial side-effects.
2159     return true;
2160   case Intrinsic::read_volatile_register:
2161   case Intrinsic::read_register: {
2162     Value *Arg = CI.getArgOperand(0);
2163     MIRBuilder
2164         .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2165         .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2166     return true;
2167   }
2168   case Intrinsic::write_register: {
2169     Value *Arg = CI.getArgOperand(0);
2170     MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2171       .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2172       .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2173     return true;
2174   }
2175   case Intrinsic::localescape: {
2176     MachineBasicBlock &EntryMBB = MF->front();
2177     StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2178 
2179     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2180     // is the same on all targets.
2181     for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2182       Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2183       if (isa<ConstantPointerNull>(Arg))
2184         continue; // Skip null pointers. They represent a hole in index space.
2185 
2186       int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2187       MCSymbol *FrameAllocSym =
2188           MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
2189                                                                 Idx);
2190 
2191       // This should be inserted at the start of the entry block.
2192       auto LocalEscape =
2193           MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2194               .addSym(FrameAllocSym)
2195               .addFrameIndex(FI);
2196 
2197       EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2198     }
2199 
2200     return true;
2201   }
2202   case Intrinsic::vector_reduce_fadd:
2203   case Intrinsic::vector_reduce_fmul: {
2204     // Need to check for the reassoc flag to decide whether we want a
2205     // sequential reduction opcode or not.
2206     Register Dst = getOrCreateVReg(CI);
2207     Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2208     Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2209     unsigned Opc = 0;
2210     if (!CI.hasAllowReassoc()) {
2211       // The sequential ordering case.
2212       Opc = ID == Intrinsic::vector_reduce_fadd
2213                 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2214                 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2215       MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2216                             MachineInstr::copyFlagsFromInstruction(CI));
2217       return true;
2218     }
    // Reassociation is allowed, so the ordering doesn't matter: split the
    // operation into a plain reduce plus a separate scalar G_FADD/G_FMUL.
2221     unsigned ScalarOpc;
2222     if (ID == Intrinsic::vector_reduce_fadd) {
2223       Opc = TargetOpcode::G_VECREDUCE_FADD;
2224       ScalarOpc = TargetOpcode::G_FADD;
2225     } else {
2226       Opc = TargetOpcode::G_VECREDUCE_FMUL;
2227       ScalarOpc = TargetOpcode::G_FMUL;
2228     }
2229     LLT DstTy = MRI->getType(Dst);
2230     auto Rdx = MIRBuilder.buildInstr(
2231         Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2232     MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2233                           MachineInstr::copyFlagsFromInstruction(CI));
2234 
2235     return true;
2236   }
2237   case Intrinsic::trap:
2238   case Intrinsic::debugtrap:
2239   case Intrinsic::ubsantrap: {
2240     StringRef TrapFuncName =
2241         CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
2242     if (TrapFuncName.empty())
2243       break; // Use the default handling.
2244     CallLowering::CallLoweringInfo Info;
2245     if (ID == Intrinsic::ubsantrap) {
2246       Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
2247                                CI.getArgOperand(0)->getType(), 0});
2248     }
2249     Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
2250     Info.CB = &CI;
2251     Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
2252     return CLI->lowerCall(MIRBuilder, Info);
2253   }
2254 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)  \
2255   case Intrinsic::INTRINSIC:
2256 #include "llvm/IR/ConstrainedOps.def"
2257     return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2258                                            MIRBuilder);
2259 
2260   }
2261   return false;
2262 }
2263 
2264 bool IRTranslator::translateInlineAsm(const CallBase &CB,
2265                                       MachineIRBuilder &MIRBuilder) {
2266 
2267   const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2268 
2269   if (!ALI) {
2270     LLVM_DEBUG(
2271         dbgs() << "Inline asm lowering is not supported for this target yet\n");
2272     return false;
2273   }
2274 
2275   return ALI->lowerInlineAsm(
2276       MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2277 }
2278 
2279 bool IRTranslator::translateCallBase(const CallBase &CB,
2280                                      MachineIRBuilder &MIRBuilder) {
2281   ArrayRef<Register> Res = getOrCreateVRegs(CB);
2282 
2283   SmallVector<ArrayRef<Register>, 8> Args;
2284   Register SwiftInVReg = 0;
2285   Register SwiftErrorVReg = 0;
2286   for (auto &Arg : CB.args()) {
2287     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2288       assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2289       LLT Ty = getLLTForType(*Arg->getType(), *DL);
2290       SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2291       MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2292                                             &CB, &MIRBuilder.getMBB(), Arg));
2293       Args.emplace_back(makeArrayRef(SwiftInVReg));
2294       SwiftErrorVReg =
2295           SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2296       continue;
2297     }
2298     Args.push_back(getOrCreateVRegs(*Arg));
2299   }
2300 
2301   if (auto *CI = dyn_cast<CallInst>(&CB)) {
2302     if (ORE->enabled()) {
2303       const Function &F = *CI->getParent()->getParent();
2304       auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2305       if (MemoryOpRemark::canHandle(CI, TLI)) {
2306         MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
2307         R.visit(CI);
2308       }
2309     }
2310   }
2311 
2312   // We don't set HasCalls on MFI here yet because call lowering may decide to
2313   // optimize into tail calls. Instead, we defer that to selection where a final
2314   // scan is done to check if any instructions are calls.
2315   bool Success =
2316       CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
2317                      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2318 
2319   // Check if we just inserted a tail call.
2320   if (Success) {
2321     assert(!HasTailCall && "Can't tail call return twice from block?");
2322     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2323     HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2324   }
2325 
2326   return Success;
2327 }
2328 
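// Illustrative sketch (the intrinsic name is hypothetical): an intrinsic with
// no bespoke handling, e.g.
//   %r = call i32 @llvm.example.intrinsic(i32 %a, i32 7)
// where the second parameter is marked ImmArg, is emitted roughly as
//   %r:_(s32) = G_INTRINSIC intrinsic(@llvm.example.intrinsic), %a, 7
// i.e. ImmArg operands become immediates instead of materialized registers.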
2329 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2330   const CallInst &CI = cast<CallInst>(U);
2331   auto TII = MF->getTarget().getIntrinsicInfo();
2332   const Function *F = CI.getCalledFunction();
2333 
2334   // FIXME: support Windows dllimport function calls.
2335   if (F && (F->hasDLLImportStorageClass() ||
2336             (MF->getTarget().getTargetTriple().isOSWindows() &&
2337              F->hasExternalWeakLinkage())))
2338     return false;
2339 
2340   // FIXME: support control flow guard targets.
2341   if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2342     return false;
2343 
2344   if (CI.isInlineAsm())
2345     return translateInlineAsm(CI, MIRBuilder);
2346 
2347   diagnoseDontCall(CI);
2348 
2349   Intrinsic::ID ID = Intrinsic::not_intrinsic;
2350   if (F && F->isIntrinsic()) {
2351     ID = F->getIntrinsicID();
2352     if (TII && ID == Intrinsic::not_intrinsic)
2353       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2354   }
2355 
2356   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2357     return translateCallBase(CI, MIRBuilder);
2358 
2359   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2360 
2361   if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2362     return true;
2363 
2364   ArrayRef<Register> ResultRegs;
2365   if (!CI.getType()->isVoidTy())
2366     ResultRegs = getOrCreateVRegs(CI);
2367 
2368   // Ignore the callsite attributes. Backend code is most likely not expecting
2369   // an intrinsic to sometimes have side effects and sometimes not.
2370   MachineInstrBuilder MIB =
2371       MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
2372   if (isa<FPMathOperator>(CI))
2373     MIB->copyIRFlags(CI);
2374 
2375   for (auto &Arg : enumerate(CI.args())) {
2376     // If this is required to be an immediate, don't materialize it in a
2377     // register.
2378     if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2379       if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2380         // imm arguments are more convenient than cimm (and realistically
2381         // probably sufficient), so use them.
2382         assert(CI->getBitWidth() <= 64 &&
2383                "large intrinsic immediates not handled");
2384         MIB.addImm(CI->getSExtValue());
2385       } else {
2386         MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2387       }
2388     } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2389       auto *MD = MDVal->getMetadata();
2390       auto *MDN = dyn_cast<MDNode>(MD);
2391       if (!MDN) {
2392         if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2393           MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2394         else // This was probably an MDString.
2395           return false;
2396       }
2397       MIB.addMetadata(MDN);
2398     } else {
2399       ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2400       if (VRegs.size() > 1)
2401         return false;
2402       MIB.addUse(VRegs[0]);
2403     }
2404   }
2405 
2406   // Add a MachineMemOperand if it is a target mem intrinsic.
2407   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2408   TargetLowering::IntrinsicInfo Info;
2409   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2410   if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2411     Align Alignment = Info.align.getValueOr(
2412         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2413     LLT MemTy = Info.memVT.isSimple()
2414                     ? getLLTForMVT(Info.memVT.getSimpleVT())
2415                     : LLT::scalar(Info.memVT.getStoreSizeInBits());
2416     MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
2417                                                Info.flags, MemTy, Alignment));
2418   }
2419 
2420   return true;
2421 }
2422 
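// Illustrative sketch (not from the source): for a chain like
//   catchswitch ... [label %catch.a, label %catch.b] unwind label %cleanup
// this walk records %catch.a and %catch.b as possible unwind destinations,
// then follows the unwind edge and stops at the cleanuppad, scaling the
// branch probability along the way.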
2423 bool IRTranslator::findUnwindDestinations(
2424     const BasicBlock *EHPadBB,
2425     BranchProbability Prob,
2426     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2427         &UnwindDests) {
2428   EHPersonality Personality = classifyEHPersonality(
2429       EHPadBB->getParent()->getFunction().getPersonalityFn());
2430   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2431   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2432   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2433   bool IsSEH = isAsynchronousEHPersonality(Personality);
2434 
2435   if (IsWasmCXX) {
2436     // Ignore this for now.
2437     return false;
2438   }
2439 
2440   while (EHPadBB) {
2441     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2442     BasicBlock *NewEHPadBB = nullptr;
2443     if (isa<LandingPadInst>(Pad)) {
2444       // Stop on landingpads. They are not funclets.
2445       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2446       break;
2447     }
2448     if (isa<CleanupPadInst>(Pad)) {
2449       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2450       // personalities.
2451       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2452       UnwindDests.back().first->setIsEHScopeEntry();
2453       UnwindDests.back().first->setIsEHFuncletEntry();
2454       break;
2455     }
2456     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2457       // Add the catchpad handlers to the possible destinations.
2458       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2459         UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2460         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2461         if (IsMSVCCXX || IsCoreCLR)
2462           UnwindDests.back().first->setIsEHFuncletEntry();
2463         if (!IsSEH)
2464           UnwindDests.back().first->setIsEHScopeEntry();
2465       }
2466       NewEHPadBB = CatchSwitch->getUnwindDest();
2467     } else {
2468       continue;
2469     }
2470 
2471     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2472     if (BPI && NewEHPadBB)
2473       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2474     EHPadBB = NewEHPadBB;
2475   }
2476   return true;
2477 }
2478 
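// Illustrative sketch (not from the source): an invoke is typically lowered
// as
//   EH_LABEL <begin>
//   ... lowered call ...
//   EH_LABEL <end>
//   G_BR %normal.dest
// with each unwind destination additionally recorded as a successor of the
// invoking block.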
2479 bool IRTranslator::translateInvoke(const User &U,
2480                                    MachineIRBuilder &MIRBuilder) {
2481   const InvokeInst &I = cast<InvokeInst>(U);
2482   MCContext &Context = MF->getContext();
2483 
2484   const BasicBlock *ReturnBB = I.getSuccessor(0);
2485   const BasicBlock *EHPadBB = I.getSuccessor(1);
2486 
2487   const Function *Fn = I.getCalledFunction();
2488 
2489   // FIXME: support invoking patchpoint and statepoint intrinsics.
2490   if (Fn && Fn->isIntrinsic())
2491     return false;
2492 
2493   // FIXME: support whatever these are.
2494   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
2495     return false;
2496 
2497   // FIXME: support control flow guard targets.
2498   if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2499     return false;
2500 
2501   // FIXME: support Windows exception handling.
2502   if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2503     return false;
2504 
2505   bool LowerInlineAsm = I.isInlineAsm();
2506   bool NeedEHLabel = true;
  // If it can't throw, use a fast path without emitting EH labels.
2508   if (LowerInlineAsm)
2509     NeedEHLabel = (cast<InlineAsm>(I.getCalledOperand()))->canThrow();
2510 
2511   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2512   // the region covered by the try.
2513   MCSymbol *BeginSymbol = nullptr;
2514   if (NeedEHLabel) {
2515     BeginSymbol = Context.createTempSymbol();
2516     MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2517   }
2518 
2519   if (LowerInlineAsm) {
2520     if (!translateInlineAsm(I, MIRBuilder))
2521       return false;
2522   } else if (!translateCallBase(I, MIRBuilder))
2523     return false;
2524 
2525   MCSymbol *EndSymbol = nullptr;
2526   if (NeedEHLabel) {
2527     EndSymbol = Context.createTempSymbol();
2528     MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2529   }
2530 
2531   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2532   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2533   MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2534   BranchProbability EHPadBBProb =
2535       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2536           : BranchProbability::getZero();
2537 
2538   if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2539     return false;
2540 
2541   MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2542                     &ReturnMBB = getMBB(*ReturnBB);
2543   // Update successor info.
2544   addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2545   for (auto &UnwindDest : UnwindDests) {
2546     UnwindDest.first->setIsEHPad();
2547     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2548   }
2549   InvokeMBB->normalizeSuccProbs();
2550 
2551   if (NeedEHLabel) {
2552     assert(BeginSymbol && "Expected a begin symbol!");
2553     assert(EndSymbol && "Expected an end symbol!");
2554     MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2555   }
2556 
2557   MIRBuilder.buildBr(ReturnMBB);
2558   return true;
2559 }
2560 
2561 bool IRTranslator::translateCallBr(const User &U,
2562                                    MachineIRBuilder &MIRBuilder) {
2563   // FIXME: Implement this.
2564   return false;
2565 }
2566 
2567 bool IRTranslator::translateLandingPad(const User &U,
2568                                        MachineIRBuilder &MIRBuilder) {
2569   const LandingPadInst &LP = cast<LandingPadInst>(U);
2570 
2571   MachineBasicBlock &MBB = MIRBuilder.getMBB();
2572 
2573   MBB.setIsEHPad();
2574 
2575   // If there aren't registers to copy the values into (e.g., during SjLj
2576   // exceptions), then don't bother.
2577   auto &TLI = *MF->getSubtarget().getTargetLowering();
2578   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2579   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2580       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2581     return true;
2582 
  // If the landingpad's return type is token type, we don't create virtual
  // registers for its exception pointer and selector values. Extracting the
  // exception pointer or selector value from a token-typed landingpad is not
  // currently supported.
2587   if (LP.getType()->isTokenTy())
2588     return true;
2589 
2590   // Add a label to mark the beginning of the landing pad.  Deletion of the
2591   // landing pad can thus be detected via the MachineModuleInfo.
2592   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2593     .addSym(MF->addLandingPad(&MBB));
2594 
2595   // If the unwinder does not preserve all registers, ensure that the
2596   // function marks the clobbered registers as used.
2597   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2598   if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2599     MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2600 
2601   LLT Ty = getLLTForType(*LP.getType(), *DL);
2602   Register Undef = MRI->createGenericVirtualRegister(Ty);
2603   MIRBuilder.buildUndef(Undef);
2604 
2605   SmallVector<LLT, 2> Tys;
2606   for (Type *Ty : cast<StructType>(LP.getType())->elements())
2607     Tys.push_back(getLLTForType(*Ty, *DL));
2608   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2609 
2610   // Mark exception register as live in.
2611   Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2612   if (!ExceptionReg)
2613     return false;
2614 
2615   MBB.addLiveIn(ExceptionReg);
2616   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2617   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2618 
2619   Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2620   if (!SelectorReg)
2621     return false;
2622 
2623   MBB.addLiveIn(SelectorReg);
2624   Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2625   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2626   MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2627 
2628   return true;
2629 }
2630 
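// Worked example for the dynamic case below (illustrative): with a 16-byte
// stack alignment, an AllocSize of 20 bytes is rounded as
//   (20 + 15) & ~15 = 32
// via a G_ADD of SA-1 followed by a G_AND with ~(SA-1), and the result feeds
// G_DYN_STACKALLOC.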
2631 bool IRTranslator::translateAlloca(const User &U,
2632                                    MachineIRBuilder &MIRBuilder) {
2633   auto &AI = cast<AllocaInst>(U);
2634 
2635   if (AI.isSwiftError())
2636     return true;
2637 
2638   if (AI.isStaticAlloca()) {
2639     Register Res = getOrCreateVReg(AI);
2640     int FI = getOrCreateFrameIndex(AI);
2641     MIRBuilder.buildFrameIndex(Res, FI);
2642     return true;
2643   }
2644 
2645   // FIXME: support stack probing for Windows.
2646   if (MF->getTarget().getTargetTriple().isOSWindows())
2647     return false;
2648 
2649   // Now we're in the harder dynamic case.
2650   Register NumElts = getOrCreateVReg(*AI.getArraySize());
2651   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2652   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2653   if (MRI->getType(NumElts) != IntPtrTy) {
2654     Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2655     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2656     NumElts = ExtElts;
2657   }
2658 
2659   Type *Ty = AI.getAllocatedType();
2660 
2661   Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2662   Register TySize =
2663       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2664   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2665 
  // Round the size of the allocation up to the stack alignment size by
  // adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
2669   Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
2670   auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
2671   auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
2672                                       MachineInstr::NoUWrap);
2673   auto AlignCst =
2674       MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
2675   auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
2676 
2677   Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
2678   if (Alignment <= StackAlign)
2679     Alignment = Align(1);
2680   MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
2681 
2682   MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
2683   assert(MF->getFrameInfo().hasVarSizedObjects());
2684   return true;
2685 }
2686 
2687 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
2688   // FIXME: We may need more info about the type. Because of how LLT works,
2689   // we're completely discarding the i64/double distinction here (amongst
2690   // others). Fortunately the ABIs I know of where that matters don't use va_arg
2691   // anyway, but that's not guaranteed.
2692   MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
2693                         {getOrCreateVReg(*U.getOperand(0)),
2694                          DL->getABITypeAlign(U.getType()).value()});
2695   return true;
2696 }
2697 
2698 bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
2699   if (!MF->getTarget().Options.TrapUnreachable)
2700     return true;
2701 
2702   auto &UI = cast<UnreachableInst>(U);
2703   // We may be able to ignore unreachable behind a noreturn call.
2704   if (MF->getTarget().Options.NoTrapAfterNoreturn) {
2705     const BasicBlock &BB = *UI.getParent();
2706     if (&UI != &BB.front()) {
2707       BasicBlock::const_iterator PredI =
2708         std::prev(BasicBlock::const_iterator(UI));
2709       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2710         if (Call->doesNotReturn())
2711           return true;
2712       }
2713     }
2714   }
2715 
2716   MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>(), true);
2717   return true;
2718 }
2719 
2720 bool IRTranslator::translateInsertElement(const User &U,
2721                                           MachineIRBuilder &MIRBuilder) {
2722   // If it is a <1 x Ty> vector, use the scalar as it is
2723   // not a legal vector type in LLT.
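  // (E.g. an insert into <1 x i32> simply forwards the inserted scalar.)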
2724   if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
2725     return translateCopy(U, *U.getOperand(1), MIRBuilder);
2726 
2727   Register Res = getOrCreateVReg(U);
2728   Register Val = getOrCreateVReg(*U.getOperand(0));
2729   Register Elt = getOrCreateVReg(*U.getOperand(1));
2730   Register Idx = getOrCreateVReg(*U.getOperand(2));
2731   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
2732   return true;
2733 }
2734 
2735 bool IRTranslator::translateExtractElement(const User &U,
2736                                            MachineIRBuilder &MIRBuilder) {
2737   // If it is a <1 x Ty> vector, use the scalar as it is
2738   // not a legal vector type in LLT.
2739   if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
2740     return translateCopy(U, *U.getOperand(0), MIRBuilder);
2741 
2742   Register Res = getOrCreateVReg(U);
2743   Register Val = getOrCreateVReg(*U.getOperand(0));
2744   const auto &TLI = *MF->getSubtarget().getTargetLowering();
2745   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
2746   Register Idx;
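  // Normalize the index to the target's preferred vector-index width, so
  // that a constant index can be materialized (and CSE'd) at that width.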
2747   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
2748     if (CI->getBitWidth() != PreferredVecIdxWidth) {
2749       APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
2750       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
2751       Idx = getOrCreateVReg(*NewIdxCI);
2752     }
2753   }
2754   if (!Idx)
2755     Idx = getOrCreateVReg(*U.getOperand(1));
2756   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
2757     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
2758     Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
2759   }
2760   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
2761   return true;
2762 }
2763 
2764 bool IRTranslator::translateShuffleVector(const User &U,
2765                                           MachineIRBuilder &MIRBuilder) {
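  // The shuffle mask is not an SSA operand; it is copied into storage owned
  // by the MachineFunction and attached to G_SHUFFLE_VECTOR as a mask operand.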
2766   ArrayRef<int> Mask;
2767   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
2768     Mask = SVI->getShuffleMask();
2769   else
2770     Mask = cast<ConstantExpr>(U).getShuffleMask();
2771   ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
2772   MIRBuilder
2773       .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
2774                   {getOrCreateVReg(*U.getOperand(0)),
2775                    getOrCreateVReg(*U.getOperand(1))})
2776       .addShuffleMask(MaskAlloc);
2777   return true;
2778 }
2779 
2780 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
2781   const PHINode &PI = cast<PHINode>(U);
2782 
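  // Create empty G_PHIs now; their (value, predecessor-MBB) operands are
  // filled in by finishPendingPhis once all blocks have been translated.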
2783   SmallVector<MachineInstr *, 4> Insts;
2784   for (auto Reg : getOrCreateVRegs(PI)) {
2785     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
2786     Insts.push_back(MIB.getInstr());
2787   }
2788 
2789   PendingPHIs.emplace_back(&PI, std::move(Insts));
2790   return true;
2791 }
2792 
2793 bool IRTranslator::translateAtomicCmpXchg(const User &U,
2794                                           MachineIRBuilder &MIRBuilder) {
2795   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
2796 
2797   auto &TLI = *MF->getSubtarget().getTargetLowering();
2798   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2799 
2800   auto Res = getOrCreateVRegs(I);
2801   Register OldValRes = Res[0];
2802   Register SuccessRes = Res[1];
2803   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2804   Register Cmp = getOrCreateVReg(*I.getCompareOperand());
2805   Register NewVal = getOrCreateVReg(*I.getNewValOperand());
2806 
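  // G_ATOMIC_CMPXCHG_WITH_SUCCESS mirrors the IR instruction: it defines
  // both the loaded old value and a boolean success flag.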
2807   MIRBuilder.buildAtomicCmpXchgWithSuccess(
2808       OldValRes, SuccessRes, Addr, Cmp, NewVal,
2809       *MF->getMachineMemOperand(
2810           MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
2811           getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
2812           I.getSuccessOrdering(), I.getFailureOrdering()));
2813   return true;
2814 }
2815 
2816 bool IRTranslator::translateAtomicRMW(const User &U,
2817                                       MachineIRBuilder &MIRBuilder) {
2818   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
2819   auto &TLI = *MF->getSubtarget().getTargetLowering();
2820   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2821 
2822   Register Res = getOrCreateVReg(I);
2823   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2824   Register Val = getOrCreateVReg(*I.getValOperand());
2825 
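  // Map the IR atomicrmw operation onto the corresponding generic opcode;
  // operations without a generic opcode fail translation.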
2826   unsigned Opcode = 0;
2827   switch (I.getOperation()) {
2828   default:
2829     return false;
2830   case AtomicRMWInst::Xchg:
2831     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
2832     break;
2833   case AtomicRMWInst::Add:
2834     Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2835     break;
2836   case AtomicRMWInst::Sub:
2837     Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2838     break;
2839   case AtomicRMWInst::And:
2840     Opcode = TargetOpcode::G_ATOMICRMW_AND;
2841     break;
2842   case AtomicRMWInst::Nand:
2843     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2844     break;
2845   case AtomicRMWInst::Or:
2846     Opcode = TargetOpcode::G_ATOMICRMW_OR;
2847     break;
2848   case AtomicRMWInst::Xor:
2849     Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2850     break;
2851   case AtomicRMWInst::Max:
2852     Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2853     break;
2854   case AtomicRMWInst::Min:
2855     Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2856     break;
2857   case AtomicRMWInst::UMax:
2858     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2859     break;
2860   case AtomicRMWInst::UMin:
2861     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2862     break;
2863   case AtomicRMWInst::FAdd:
2864     Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2865     break;
2866   case AtomicRMWInst::FSub:
2867     Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2868     break;
2869   }
2870 
2871   MIRBuilder.buildAtomicRMW(
2872       Opcode, Res, Addr, Val,
2873       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2874                                 Flags, MRI->getType(Val), getMemOpAlign(I),
2875                                 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
2876                                 I.getOrdering()));
2877   return true;
2878 }
2879 
2880 bool IRTranslator::translateFence(const User &U,
2881                                   MachineIRBuilder &MIRBuilder) {
2882   const FenceInst &Fence = cast<FenceInst>(U);
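  // G_FENCE encodes the atomic ordering and the sync-scope ID as immediates.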
2883   MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2884                         Fence.getSyncScopeID());
2885   return true;
2886 }
2887 
2888 bool IRTranslator::translateFreeze(const User &U,
2889                                    MachineIRBuilder &MIRBuilder) {
2890   const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
2891   const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
2892 
2893   assert(DstRegs.size() == SrcRegs.size() &&
2894          "Freeze with different source and destination type?");
2895 
2896   for (unsigned I = 0; I < DstRegs.size(); ++I) {
2897     MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
2898   }
2899 
2900   return true;
2901 }
2902 
2903 void IRTranslator::finishPendingPhis() {
2904 #ifndef NDEBUG
2905   DILocationVerifier Verifier;
2906   GISelObserverWrapper WrapperObserver(&Verifier);
2907   RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2908 #endif // ifndef NDEBUG
2909   for (auto &Phi : PendingPHIs) {
2910     const PHINode *PI = Phi.first;
2911     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2912     MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2913     EntryBuilder->setDebugLoc(PI->getDebugLoc());
2914 #ifndef NDEBUG
2915     Verifier.setCurrentInst(PI);
2916 #endif // ifndef NDEBUG
2917 
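    // A single IR predecessor may correspond to several machine basic blocks
    // (e.g. after switch lowering), so take care not to add duplicate
    // operands.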
2918     SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2919     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2920       auto IRPred = PI->getIncomingBlock(i);
2921       ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2922       for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2923         if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2924           continue;
2925         SeenPreds.insert(Pred);
2926         for (unsigned j = 0; j < ValRegs.size(); ++j) {
2927           MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2928           MIB.addUse(ValRegs[j]);
2929           MIB.addMBB(Pred);
2930         }
2931       }
2932     }
2933   }
2934 }
2935 
2936 bool IRTranslator::valueIsSplit(const Value &V,
2937                                 SmallVectorImpl<uint64_t> *Offsets) {
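  // A value is "split" when its type lowers to more than one LLT (e.g. an
  // aggregate), in which case it occupies several virtual registers.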
2938   SmallVector<LLT, 4> SplitTys;
2939   if (Offsets && !Offsets->empty())
2940     Offsets->clear();
2941   computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2942   return SplitTys.size() > 1;
2943 }
2944 
2945 bool IRTranslator::translate(const Instruction &Inst) {
2946   CurBuilder->setDebugLoc(Inst.getDebugLoc());
2947 
2948   auto &TLI = *MF->getSubtarget().getTargetLowering();
2949   if (TLI.fallBackToDAGISel(Inst))
2950     return false;
2951 
2952   switch (Inst.getOpcode()) {
2953 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
2954   case Instruction::OPCODE:                                                    \
2955     return translate##OPCODE(Inst, *CurBuilder.get());
2956 #include "llvm/IR/Instruction.def"
2957   default:
2958     return false;
2959   }
2960 }
2961 
2962 bool IRTranslator::translate(const Constant &C, Register Reg) {
2963   // We only emit constants into the entry block from here. To prevent jumpy
2964   // debug behaviour, set the line to 0.
2965   if (auto CurrInstDL = CurBuilder->getDL())
2966     EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0,
2967                                               CurrInstDL.getScope(),
2968                                               CurrInstDL.getInlinedAt()));
2969 
2970   if (auto CI = dyn_cast<ConstantInt>(&C))
2971     EntryBuilder->buildConstant(Reg, *CI);
2972   else if (auto CF = dyn_cast<ConstantFP>(&C))
2973     EntryBuilder->buildFConstant(Reg, *CF);
2974   else if (isa<UndefValue>(C))
2975     EntryBuilder->buildUndef(Reg);
2976   else if (isa<ConstantPointerNull>(C))
2977     EntryBuilder->buildConstant(Reg, 0);
2978   else if (auto GV = dyn_cast<GlobalValue>(&C))
2979     EntryBuilder->buildGlobalValue(Reg, GV);
2980   else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2981     if (!isa<FixedVectorType>(CAZ->getType()))
2982       return false;
2983     // Return the scalar if it is a <1 x Ty> vector.
2984     unsigned NumElts = CAZ->getElementCount().getFixedValue();
2985     if (NumElts == 1)
2986       return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get());
2987     SmallVector<Register, 4> Ops;
2988     for (unsigned I = 0; I < NumElts; ++I) {
2989       Constant &Elt = *CAZ->getElementValue(I);
2990       Ops.push_back(getOrCreateVReg(Elt));
2991     }
2992     EntryBuilder->buildBuildVector(Reg, Ops);
2993   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2994     // Return the scalar if it is a <1 x Ty> vector.
2995     if (CV->getNumElements() == 1)
2996       return translateCopy(C, *CV->getElementAsConstant(0),
2997                            *EntryBuilder.get());
2998     SmallVector<Register, 4> Ops;
2999     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3000       Constant &Elt = *CV->getElementAsConstant(i);
3001       Ops.push_back(getOrCreateVReg(Elt));
3002     }
3003     EntryBuilder->buildBuildVector(Reg, Ops);
3004   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
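    // Constant expressions are translated by dispatching to the same
    // per-opcode translate* handlers that are used for instructions.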
3005     switch (CE->getOpcode()) {
3006 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
3007   case Instruction::OPCODE:                                                    \
3008     return translate##OPCODE(*CE, *EntryBuilder.get());
3009 #include "llvm/IR/Instruction.def"
3010     default:
3011       return false;
3012     }
3013   } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3014     if (CV->getNumOperands() == 1)
3015       return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get());
3016     SmallVector<Register, 4> Ops;
3017     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3018       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3019     }
3020     EntryBuilder->buildBuildVector(Reg, Ops);
3021   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3022     EntryBuilder->buildBlockAddress(Reg, BA);
3023   } else
3024     return false;
3025 
3026   return true;
3027 }
3028 
3029 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3030                                       MachineBasicBlock &MBB) {
3031   for (auto &BTB : SL->BitTestCases) {
3032     // Emit header first, if it wasn't already emitted.
3033     if (!BTB.Emitted)
3034       emitBitTestHeader(BTB, BTB.Parent);
3035 
3036     BranchProbability UnhandledProb = BTB.Prob;
3037     for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3038       UnhandledProb -= BTB.Cases[j].ExtraProb;
3039       // Set the current basic block to the MBB we wish to insert the code into.
3040       MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3041       // If all cases cover a contiguous range, it is not necessary to jump to
3042       // the default block after the last bit test fails. This is because the
3043       // range check during bit test header creation has guaranteed that every
3044       // case here doesn't go outside the range. In this case, there is no need
3045       // to perform the last bit test, as it will always be true. Instead, make
3046       // the second-to-last bit-test fall through to the target of the last bit
3047       // test, and delete the last bit test.
3048 
3049       MachineBasicBlock *NextMBB;
3050       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3051         // Second-to-last bit-test with contiguous range: fall through to the
3052         // target of the final bit test.
3053         NextMBB = BTB.Cases[j + 1].TargetBB;
3054       } else if (j + 1 == ej) {
3055         // For the last bit test, fall through to Default.
3056         NextMBB = BTB.Default;
3057       } else {
3058         // Otherwise, fall through to the next bit test.
3059         NextMBB = BTB.Cases[j + 1].ThisBB;
3060       }
3061 
3062       emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3063 
3064       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3065         // Record here the replacement phi edge that emitBitTestCase would
3066         // normally create, before we delete the case; otherwise the phi
3067         // edge will be lost.
3068         addMachineCFGPred({BTB.Parent->getBasicBlock(),
3069                            BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3070                           MBB);
3071         // Since we're not going to use the final bit test, remove it.
3072         BTB.Cases.pop_back();
3073         break;
3074       }
3075     }
3076     // This is the "default" BB. There are two jumps to it: one from the
3077     // "header" BB and one from the last "case" BB, unless the latter was skipped.
3078     CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3079                                    BTB.Default->getBasicBlock()};
3080     addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3081     if (!BTB.ContiguousRange) {
3082       addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3083     }
3084   }
3085   SL->BitTestCases.clear();
3086 
3087   for (auto &JTCase : SL->JTCases) {
3088     // Emit header first, if it wasn't already emitted.
3089     if (!JTCase.first.Emitted)
3090       emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3091 
3092     emitJumpTable(JTCase.second, JTCase.second.MBB);
3093   }
3094   SL->JTCases.clear();
3095 
3096   for (auto &SwCase : SL->SwitchCases)
3097     emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3098   SL->SwitchCases.clear();
3099 
3100   // Check if we need to generate stack-protector guard checks.
3101   StackProtector &SP = getAnalysis<StackProtector>();
3102   if (SP.shouldEmitSDCheck(BB)) {
3103     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3104     bool FunctionBasedInstrumentation =
3105         TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
3106     SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3107   }
3108   // Handle stack protector.
3109   if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3110     LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3111     return false;
3112   } else if (SPDescriptor.shouldEmitStackProtector()) {
3113     MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3114     MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3115 
3116     // Find the split point at which to split the parent MBB. At the same time,
3117     // copy all physical registers used in the tail of the parent MBB into
3118     // virtual registers before the split point and back into physical registers
3119     // after the split point. This saves us from having to deal with live-ins and
3120     // many other register allocation issues caused by splitting the parent MBB.
3121     // The register allocator will clean up said virtual copies later on.
3122     MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3123         ParentMBB, *MF->getSubtarget().getInstrInfo());
3124 
3125     // Splice the terminator of ParentMBB into SuccessMBB.
3126     SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3127                        ParentMBB->end());
3128 
3129     // Add the guard comparison and the branch-on-mismatch to the parent BB.
3130     if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3131       return false;
3132 
3133     // Generate code for the failure MBB if we have not done so yet.
3134     MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3135     if (FailureMBB->empty()) {
3136       if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3137         return false;
3138     }
3139 
3140     // Clear the Per-BB State.
3141     SPDescriptor.resetPerBBState();
3142   }
3143   return true;
3144 }
3145 
3146 bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3147                                           MachineBasicBlock *ParentBB) {
3148   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3149   // First create the loads to the guard/stack slot for the comparison.
3150   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3151   Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
3152   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3153   LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
3154 
3155   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3156   int FI = MFI.getStackProtectorIndex();
3157 
3158   Register Guard;
3159   Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3160   const Module &M = *ParentBB->getParent()->getFunction().getParent();
3161   Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
3162 
3163   // Generate code to load the content of the guard slot.
3164   Register GuardVal =
3165       CurBuilder
3166           ->buildLoad(PtrMemTy, StackSlotPtr,
3167                       MachinePointerInfo::getFixedStack(*MF, FI), Align,
3168                       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3169           .getReg(0);
3170 
3171   if (TLI.useStackGuardXorFP()) {
3172     LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented\n");
3173     return false;
3174   }
3175 
3176   // Retrieve the guard check function; nullptr if the instrumentation is inlined.
3177   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
3178     // This path is currently untestable on GlobalISel, since the only platform
3179     // that needs this seems to be Windows, and we fall back on that currently.
3180     // The code still lives here in case that changes.
3181     // Silence warning about unused variable until the code below that uses
3182     // 'GuardCheckFn' is enabled.
3183     (void)GuardCheckFn;
3184     return false;
3185 #if 0
3186     // The target provides a guard check function to validate the guard value.
3187     // Generate a call to that function with the content of the guard slot as
3188     // argument.
3189     FunctionType *FnTy = GuardCheckFn->getFunctionType();
3190     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3191     ISD::ArgFlagsTy Flags;
3192     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3193       Flags.setInReg();
3194     CallLowering::ArgInfo GuardArgInfo(
3195         {GuardVal, FnTy->getParamType(0), {Flags}});
3196 
3197     CallLowering::CallLoweringInfo Info;
3198     Info.OrigArgs.push_back(GuardArgInfo);
3199     Info.CallConv = GuardCheckFn->getCallingConv();
3200     Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3201     Info.OrigRet = {Register(), FnTy->getReturnType()};
3202     if (!CLI->lowerCall(MIRBuilder, Info)) {
3203       LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3204       return false;
3205     }
3206     return true;
3207 #endif
3208   }
3209 
3210   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3211   // Otherwise, emit a volatile load to retrieve the stack guard value.
3212   if (TLI.useLoadStackGuardNode()) {
3213     Guard =
3214         MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3215     getStackGuard(Guard, *CurBuilder);
3216   } else {
3217     // TODO: test using the Android subtarget when we support @llvm.thread.pointer.
3218     const Value *IRGuard = TLI.getSDagStackGuard(M);
3219     Register GuardPtr = getOrCreateVReg(*IRGuard);
3220 
3221     Guard = CurBuilder
3222                 ->buildLoad(PtrMemTy, GuardPtr,
3223                             MachinePointerInfo::getFixedStack(*MF, FI), Align,
3224                             MachineMemOperand::MOLoad |
3225                                 MachineMemOperand::MOVolatile)
3226                 .getReg(0);
3227   }
3228 
3229   // Perform the comparison.
3230   auto Cmp =
3231       CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3232   // If the guard and stack-slot values are not equal, branch to the failure MBB.
3233   CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3234   // Otherwise branch to success MBB.
3235   CurBuilder->buildBr(*SPD.getSuccessMBB());
3236   return true;
3237 }
3238 
3239 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3240                                            MachineBasicBlock *FailureBB) {
3241   CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3242   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3243 
3244   const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3245   const char *Name = TLI.getLibcallName(Libcall);
3246 
3247   CallLowering::CallLoweringInfo Info;
3248   Info.CallConv = TLI.getLibcallCallingConv(Libcall);
3249   Info.Callee = MachineOperand::CreateES(Name);
3250   Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3251                   0};
3252   if (!CLI->lowerCall(*CurBuilder, Info)) {
3253     LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3254     return false;
3255   }
3256 
3257   // On PS4, the "return address" must still be within the calling function,
3258   // even if it's at the very end, so emit an explicit TRAP here.
3259   // Passing 'true' for doesNotReturn above won't generate the trap for us.
3260   // WebAssembly needs an unreachable instruction after a non-returning call,
3261   // because the function return type can be different from __stack_chk_fail's
3262   // return type (void).
3263   const TargetMachine &TM = MF->getTarget();
3264   if (TM.getTargetTriple().isPS4CPU() || TM.getTargetTriple().isWasm()) {
3265     LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
3266     return false;
3267   }
3268   return true;
3269 }
3270 
3271 void IRTranslator::finalizeFunction() {
3272   // Release the memory used by the different maps we
3273   // needed during the translation.
3274   PendingPHIs.clear();
3275   VMap.reset();
3276   FrameIndices.clear();
3277   MachinePreds.clear();
3278   // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3279   // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
3280   // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
3281   EntryBuilder.reset();
3282   CurBuilder.reset();
3283   FuncInfo.clear();
3284   SPDescriptor.resetPerFunctionState();
3285 }
3286 
3287 /// Returns true if a BasicBlock \p BB within a variadic function contains a
3288 /// variadic musttail call.
3289 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3290   if (!IsVarArg)
3291     return false;
3292 
3293   // Walk the block backwards, because tail calls usually only appear at the end
3294   // of a block.
3295   return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3296     const auto *CI = dyn_cast<CallInst>(&I);
3297     return CI && CI->isMustTailCall();
3298   });
3299 }
3300 
3301 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3302   MF = &CurMF;
3303   const Function &F = MF->getFunction();
3304   GISelCSEAnalysisWrapper &Wrapper =
3305       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3306   // Set the CSEConfig and run the analysis.
3307   GISelCSEInfo *CSEInfo = nullptr;
3308   TPC = &getAnalysis<TargetPassConfig>();
3309   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3310                        ? EnableCSEInIRTranslator
3311                        : TPC->isGISelCSEEnabled();
3312 
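  // With CSE enabled, both builders share one GISelCSEInfo so that identical
  // instructions (e.g. constants) are deduplicated as they are built.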
3313   if (EnableCSE) {
3314     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3315     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3316     EntryBuilder->setCSEInfo(CSEInfo);
3317     CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3318     CurBuilder->setCSEInfo(CSEInfo);
3319   } else {
3320     EntryBuilder = std::make_unique<MachineIRBuilder>();
3321     CurBuilder = std::make_unique<MachineIRBuilder>();
3322   }
3323   CLI = MF->getSubtarget().getCallLowering();
3324   CurBuilder->setMF(*MF);
3325   EntryBuilder->setMF(*MF);
3326   MRI = &MF->getRegInfo();
3327   DL = &F.getParent()->getDataLayout();
3328   ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3329   const TargetMachine &TM = MF->getTarget();
3330   TM.resetTargetOptions(F);
3331   EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
3332   FuncInfo.MF = MF;
3333   if (EnableOpts)
3334     FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3335   else
3336     FuncInfo.BPI = nullptr;
3337 
3338   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3339 
3340   const auto &TLI = *MF->getSubtarget().getTargetLowering();
3341 
3342   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3343   SL->init(TLI, TM, *DL);
3344 
3345 
3348 
3349   // Targets which want to use big endian can enable it using
3350   // enableBigEndian().
3351   if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
3352     // Currently we don't properly handle big endian code.
3353     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3354                                F.getSubprogram(), &F.getEntryBlock());
3355     R << "unable to translate in big endian mode";
3356     reportTranslationError(*MF, *TPC, *ORE, R);
3357   }
3358 
3359   // Release the per-function state when we return, whether we succeeded or not.
3360   auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3361 
3362   // Set up a separate basic-block for the arguments and constants.
3363   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3364   MF->push_back(EntryBB);
3365   EntryBuilder->setMBB(*EntryBB);
3366 
3367   DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3368   SwiftError.setFunction(CurMF);
3369   SwiftError.createEntriesInEntryBlock(DbgLoc);
3370 
3371   bool IsVarArg = F.isVarArg();
3372   bool HasMustTailInVarArgFn = false;
3373 
3374   // Create all blocks, in IR order, to preserve the layout.
3375   for (const BasicBlock &BB : F) {
3376     auto *&MBB = BBToMBB[&BB];
3377 
3378     MBB = MF->CreateMachineBasicBlock(&BB);
3379     MF->push_back(MBB);
3380 
3381     if (BB.hasAddressTaken())
3382       MBB->setHasAddressTaken();
3383 
3384     if (!HasMustTailInVarArgFn)
3385       HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3386   }
3387 
3388   MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3389 
3390   // Make our arguments/constants entry block fall through to the IR entry block.
3391   EntryBB->addSuccessor(&getMBB(F.front()));
3392 
3393   if (CLI->fallBackToDAGISel(*MF)) {
3394     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3395                                F.getSubprogram(), &F.getEntryBlock());
3396     R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3397     reportTranslationError(*MF, *TPC, *ORE, R);
3398     return false;
3399   }
3400 
3401   // Lower the actual args into this basic block.
3402   SmallVector<ArrayRef<Register>, 8> VRegArgs;
3403   for (const Argument &Arg : F.args()) {
3404     if (DL->getTypeStoreSize(Arg.getType()).isZero())
3405       continue; // Don't handle zero sized types.
3406     ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3407     VRegArgs.push_back(VRegs);
3408 
3409     if (Arg.hasSwiftErrorAttr()) {
3410       assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3411       SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3412     }
3413   }
3414 
3415   if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) {
3416     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3417                                F.getSubprogram(), &F.getEntryBlock());
3418     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3419     reportTranslationError(*MF, *TPC, *ORE, R);
3420     return false;
3421   }
3422 
3423   // Need to visit defs before uses when translating instructions.
3424   GISelObserverWrapper WrapperObserver;
3425   if (EnableCSE && CSEInfo)
3426     WrapperObserver.addObserver(CSEInfo);
3427   {
3428     ReversePostOrderTraversal<const Function *> RPOT(&F);
3429 #ifndef NDEBUG
3430     DILocationVerifier Verifier;
3431     WrapperObserver.addObserver(&Verifier);
3432 #endif // ifndef NDEBUG
3433     RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3434     RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3435     for (const BasicBlock *BB : RPOT) {
3436       MachineBasicBlock &MBB = getMBB(*BB);
3437       // Set the insertion point of all the following translations to
3438       // the end of this basic block.
3439       CurBuilder->setMBB(MBB);
3440       HasTailCall = false;
3441       for (const Instruction &Inst : *BB) {
3442         // If we translated a tail call in the last step, then we know
3443         // everything after the call is either a return, or something that is
3444         // handled by the call itself. (E.g. a lifetime marker or assume
3445         // intrinsic.) In this case, we should stop translating the block and
3446         // move on.
3447         if (HasTailCall)
3448           break;
3449 #ifndef NDEBUG
3450         Verifier.setCurrentInst(&Inst);
3451 #endif // ifndef NDEBUG
3452         if (translate(Inst))
3453           continue;
3454 
3455         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3456                                    Inst.getDebugLoc(), BB);
3457         R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3458 
3459         if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3460           std::string InstStrStorage;
3461           raw_string_ostream InstStr(InstStrStorage);
3462           InstStr << Inst;
3463 
3464           R << ": '" << InstStr.str() << "'";
3465         }
3466 
3467         reportTranslationError(*MF, *TPC, *ORE, R);
3468         return false;
3469       }
3470 
3471       if (!finalizeBasicBlock(*BB, MBB))
3472         return false;
3473     }
3474 #ifndef NDEBUG
3475     WrapperObserver.removeObserver(&Verifier);
3476 #endif
3477   }
3478 
3479   finishPendingPhis();
3480 
3481   SwiftError.propagateVRegs();
3482 
3483   // Merge the argument lowering and constants block with its single
3484   // successor, the LLVM-IR entry block.  We want the basic block to
3485   // be maximal.
3486   assert(EntryBB->succ_size() == 1 &&
3487          "Custom BB used for lowering should have only one successor");
3488   // Get the successor of the current entry block.
3489   MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
3490   assert(NewEntryBB.pred_size() == 1 &&
3491          "LLVM-IR entry block has a predecessor!?");
3492   // Move all the instructions from the current entry block to the
3493   // new entry block.
3494   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
3495                     EntryBB->end());
3496 
3497   // Update the live-in information for the new entry block.
3498   for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
3499     NewEntryBB.addLiveIn(LiveIn);
3500   NewEntryBB.sortUniqueLiveIns();
3501 
3502   // Get rid of the now empty basic block.
3503   EntryBB->removeSuccessor(&NewEntryBB);
3504   MF->remove(EntryBB);
3505   MF->DeleteMachineBasicBlock(EntryBB);
3506 
3507   assert(&MF->front() == &NewEntryBB &&
3508          "New entry wasn't next in the list of basic blocks!");
3509 
3510   // Initialize stack protector information.
3511   StackProtector &SP = getAnalysis<StackProtector>();
3512   SP.copyToMachineFrameInfo(MF->getFrameInfo());
3513 
3514   return false;
3515 }
3516