//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <numeric>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 disables this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8], [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

// Return the calling convention if the Value passed requires ABI mangling,
// i.e. it is a parameter to a function or a return value from a function
// which is not an intrinsic.
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
  if (auto *R = dyn_cast<ReturnInst>(V))
    return R->getParent()->getParent()->getCallingConv();

  if (auto *CI = dyn_cast<CallInst>(V)) {
    const bool IsInlineAsm = CI->isInlineAsm();
    const bool IsIndirectFunctionCall =
        !IsInlineAsm && !CI->getCalledFunction();

    // It is possible that the call instruction is an inline asm statement or
    // an indirect function call, in which case the return value of
    // getCalledFunction() would be nullptr.
    const bool IsIntrinsicCall =
        !IsInlineAsm && !IsIndirectFunctionCall &&
        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;

    if (!IsInlineAsm && !IsIntrinsicCall)
      return CI->getCallingConv();
  }

  return None;
}
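
// Illustrative example (not from the original source): for IR such as
//
//   %a = call i32 @foo(i32 %x)                 ; direct, non-intrinsic call
//   %b = call i32 %fptr(i32 %x)                ; indirect call
//   %c = call i32 @llvm.ctpop.i32(i32 %x)      ; intrinsic call
//   %d = call i32 asm "...", "=r,r"(i32 %x)    ; inline asm
//
// getABIRegCopyCC returns the call's calling convention for %a and %b, and
// None for %c and %d, since intrinsics and inline asm need no ABI mangling
// of their register copies.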

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<CallingConv::ID> CC = None,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power-of-2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to indicate whether
      // the truncated bits will always be zero or sign extended.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND nodes are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  llvm_unreachable("Unknown mismatch!");
}
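
// Illustrative sketch (assumed little-endian part layout, not from the
// original source): assembling an i96 value from three i32 parts proceeds as
//
//   RoundParts = 2:  Val = BUILD_PAIR(i64, Parts[0], Parts[1])
//   OddParts  = 1:   Hi  = shl(any_ext(Parts[2]) to i96, 64)
//                    Val = or(zext(Val) to i96, Hi)
//
// i.e. the power-of-2 head is assembled recursively and the odd tail is
// merged in with an extend/shift/or sequence.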

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!V)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If CallConv is set, the
/// parts are assembled according to that calling convention's vector type
/// breakdown (an ABI register copy).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
                         (IntermediateVT.isVector()
                              ? IntermediateVT.getVectorNumElements() * NumParts
                              : NumIntermediates));
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element types of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. If the value
    // and the part have the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back to the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}
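
// Illustrative sketch (assumed types, not from the original source): a
// <2 x float> value arriving in one <4 x float> part has matching element
// types but extra part elements, so the widening path above recovers it with
//
//   EXTRACT_SUBVECTOR(<2 x float>, Val, 0)
//
// whereas a v2i32 value arriving in an i64 part has equal sizes and (when
// v2i32 is legal) is recovered with a plain BITCAST.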

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits, DL));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}
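
// Illustrative sketch (assumed types, little-endian, not from the original
// source): splitting an i96 value into three i32 parts first peels the
// non-power-of-2 tail and then bisects the power-of-2 head:
//
//   Parts[2] = trunc(srl(Val, 64))       ; odd part
//   Val      = trunc(Val) to i64         ; power-of-2 head
//   Parts[1] = EXTRACT_ELEMENT(Val, 1)   ; high i32
//   Parts[0] = EXTRACT_ELEMENT(Val, 0)   ; low i32
//
// On big-endian targets the parts are reversed at the end.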

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
            DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));

      for (unsigned i = ValueVT.getVectorNumElements(),
           e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getBuildVector(PartVT, DL, Ops);

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Convert the vector to the appropriate type if necessary.
  unsigned DestVectorNoElts =
      NumIntermediates *
      (IntermediateVT.isVector() ? IntermediateVT.getVectorNumElements() : 1);
  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
  if (Val.getValueType() != BuiltVectorTy)
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getConstant(i * (NumElements / NumIntermediates), DL,
                                      TLI.getVectorIdxTy(DAG.getDataLayout())));
    else
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}
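
// Illustrative sketch (assumed breakdown, not from the original source): a
// <4 x i32> value that the target legalizes as two <2 x i32> registers is
// split above with
//
//   Ops[0] = EXTRACT_SUBVECTOR(<2 x i32>, Val, 0)
//   Ops[1] = EXTRACT_SUBVECTOR(<2 x i32>, Val, 2)
//
// before each intermediate operand is copied into its legal parts.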

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}
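
// Illustrative example (assumed 32-bit target, not from the original source):
// for an i64 value starting at virtual register N, ComputeValueVTs yields a
// single EVT (i64) that needs two i32 registers, so this constructor produces
// Regs = {N, N+1}, RegVTs = {i32} and RegCount = {2}.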

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero. Express that explicitly to make it
        // easier for later optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 and a Flag is used, then the use of the last CopyToReg
    // is flagged to it. That is, the CopyToReg nodes and the user are
    // considered a single scheduling unit. If we create a TokenFactor and
    // return it as chain, then the TokenFactor is both a predecessor
    // (operand) of the user as well as a successor (the TF operands are
    // flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}
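
// Illustrative sketch (not from the original source): with two independent
// pending loads, getRoot() emits
//
//   Root = TokenFactor(Ld0.chain, Ld1.chain)
//
// so later chained nodes depend on both loads without ordering the loads
// against each other.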

SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // Propagate the fast-math-flags of this IR instruction to the DAG node that
    // maps to this instruction.
    // TODO: We could handle all flags (nsw, etc) here.
    // TODO: If an IR instruction maps to >1 node, only the final node will have
    //       flags set.
    if (SDNode *Node = getNodeForIRValue(&I)) {
      SDNodeFlags IncomingFlags;
      IncomingFlags.copyFMF(*FPMO);
      if (!Node->getFlags().isDefined())
        Node->setFlags(IncomingFlags);
      else
        Node->intersectFlagsWith(IncomingFlags);
    }
  }

  if (!I.isTerminator() && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}
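
// For reference, the HANDLE_INST expansion above produces cases such as
// (illustrative):
//
//   case Instruction::Add:  visitAdd((const BinaryOperator&)I); break;
//   case Instruction::Load: visitLoad((const LoadInst&)I); break;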

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;
    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << " in EmitFuncArgumentDbgValue\n");
    } else
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
  }
  DDIV.clear();
}

/// getCopyFromRegs - If there was a virtual register allocated for the value
/// V, emit CopyFromReg of the specified type Ty. Return an empty SDValue()
/// otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;

    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty,
                     None); // This is not an ABI copy.
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                 V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue exists for the given Value.
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
    (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location.  This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1295         // Add each leaf value from the element to the Ops list to form a
1296         // flattened list of all the values.
1297         for (unsigned j = 0, f = Val->getNumValues(); j != f; ++j)
1298           Ops.push_back(SDValue(Val, j));
1299       }
1300 
1301       if (isa<ArrayType>(CDS->getType()))
1302         return DAG.getMergeValues(Ops, getCurSDLoc());
1303       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1304     }
1305 
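    // For example, a { i32, float } zeroinitializer constant expands below to
    // the flattened value list (i32 0, f32 0.0) merged into one node.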
1306     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1307       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1308              "Unknown struct or array constant!");
1309 
1310       SmallVector<EVT, 4> ValueVTs;
1311       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1312       unsigned NumElts = ValueVTs.size();
1313       if (NumElts == 0)
1314         return SDValue(); // empty struct
1315       SmallVector<SDValue, 4> Constants(NumElts);
1316       for (unsigned i = 0; i != NumElts; ++i) {
1317         EVT EltVT = ValueVTs[i];
1318         if (isa<UndefValue>(C))
1319           Constants[i] = DAG.getUNDEF(EltVT);
1320         else if (EltVT.isFloatingPoint())
1321           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1322         else
1323           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1324       }
1325 
1326       return DAG.getMergeValues(Constants, getCurSDLoc());
1327     }
1328 
1329     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1330       return DAG.getBlockAddress(BA, VT);
1331 
1332     VectorType *VecTy = cast<VectorType>(V->getType());
1333     unsigned NumElements = VecTy->getNumElements();
1334 
1335     // Now that we know the number and type of the elements, get that number of
1336     // elements into the Ops array based on what kind of constant it is.
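    // For example, a <4 x i32> zeroinitializer becomes a BUILD_VECTOR of four
    // i32 0 operands.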
1337     SmallVector<SDValue, 16> Ops;
1338     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1339       for (unsigned i = 0; i != NumElements; ++i)
1340         Ops.push_back(getValue(CV->getOperand(i)));
1341     } else {
1342       assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
1343       EVT EltVT =
1344           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1345 
1346       SDValue Op;
1347       if (EltVT.isFloatingPoint())
1348         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1349       else
1350         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1351       Ops.assign(NumElements, Op);
1352     }
1353 
1354     // Create a BUILD_VECTOR node.
1355     return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1356   }
1357 
1358   // If this is a static alloca, generate it as the frameindex instead of
1359   // computing its address.
1360   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1361     DenseMap<const AllocaInst*, int>::iterator SI =
1362       FuncInfo.StaticAllocaMap.find(AI);
1363     if (SI != FuncInfo.StaticAllocaMap.end())
1364       return DAG.getFrameIndex(SI->second,
1365                                TLI.getFrameIndexTy(DAG.getDataLayout()));
1366   }
1367 
1368   // If this is an instruction which fast-isel has deferred, select it now.
1369   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1370     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1371 
1372     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1373                      Inst->getType(), getABIRegCopyCC(V));
1374     SDValue Chain = DAG.getEntryNode();
1375     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1376   }
1377 
1378   llvm_unreachable("Can't get register for value!");
1379 }
1380 
1381 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1382   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1383   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1384   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1385   bool IsSEH = isAsynchronousEHPersonality(Pers);
1386   bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
1387   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1388   if (!IsSEH)
1389     CatchPadMBB->setIsEHScopeEntry();
1390   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1391   if (IsMSVCCXX || IsCoreCLR)
1392     CatchPadMBB->setIsEHFuncletEntry();
1393   // Wasm does not need catchpads anymore
1394   if (!IsWasmCXX)
1395     DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
1396                             getControlRoot()));
1397 }
1398 
1399 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1400   // Update machine-CFG edge.
1401   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1402   FuncInfo.MBB->addSuccessor(TargetMBB);
1403 
1404   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1405   bool IsSEH = isAsynchronousEHPersonality(Pers);
1406   if (IsSEH) {
1407     // If this is not a fall-through branch or optimizations are switched off,
1408     // emit the branch.
1409     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1410         TM.getOptLevel() == CodeGenOpt::None)
1411       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1412                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1413     return;
1414   }
1415 
1416   // Figure out the funclet membership for the catchret's successor.
1417   // This will be used by the FuncletLayout pass to determine how to order the
1418   // BBs.
1419   // A 'catchret' returns to the outer scope's color.
1420   Value *ParentPad = I.getCatchSwitchParentPad();
1421   const BasicBlock *SuccessorColor;
1422   if (isa<ConstantTokenNone>(ParentPad))
1423     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1424   else
1425     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1426   assert(SuccessorColor && "No parent funclet for catchret!");
1427   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1428   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1429 
1430   // Create the terminator node.
1431   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1432                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1433                             DAG.getBasicBlock(SuccessorColorMBB));
1434   DAG.setRoot(Ret);
1435 }
1436 
1437 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1438   // Don't emit any special code for the cleanuppad instruction. It just marks
1439   // the start of an EH scope/funclet.
1440   FuncInfo.MBB->setIsEHScopeEntry();
1441   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1442   if (Pers != EHPersonality::Wasm_CXX) {
1443     FuncInfo.MBB->setIsEHFuncletEntry();
1444     FuncInfo.MBB->setIsCleanupFuncletEntry();
1445   }
1446 }
1447 
1448 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1449 /// many places it could ultimately go. In the IR, we have a single unwind
1450 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1451 /// This function skips over imaginary basic blocks that hold catchswitch
1452 /// instructions, and finds all the "real" machine basic block destinations.
1453 /// As those destinations may not be successors of EHPadBB, we also compute
1454 /// the edge probability to each of those destinations.
1455 /// The passed-in Prob is the edge probability to EHPadBB.
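/// For example, an invoke whose unwind edge reaches a catchswitch with two
/// catchpad handlers yields two machine basic block destinations, one per
/// handler.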
1456 static void findUnwindDestinations(
1457     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1458     BranchProbability Prob,
1459     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1460         &UnwindDests) {
1461   EHPersonality Personality =
1462     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1463   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1464   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1465   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1466   bool IsSEH = isAsynchronousEHPersonality(Personality);
1467 
1468   while (EHPadBB) {
1469     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1470     BasicBlock *NewEHPadBB = nullptr;
1471     if (isa<LandingPadInst>(Pad)) {
1472       // Stop on landingpads. They are not funclets.
1473       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1474       break;
1475     } else if (isa<CleanupPadInst>(Pad)) {
1476       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1477       // personalities.
1478       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1479       UnwindDests.back().first->setIsEHScopeEntry();
1480       if (!IsWasmCXX)
1481         UnwindDests.back().first->setIsEHFuncletEntry();
1482       break;
1483     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1484       // Add the catchpad handlers to the possible destinations.
1485       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1486         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1487         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1488         if (IsMSVCCXX || IsCoreCLR)
1489           UnwindDests.back().first->setIsEHFuncletEntry();
1490         if (!IsSEH)
1491           UnwindDests.back().first->setIsEHScopeEntry();
1492       }
1493       NewEHPadBB = CatchSwitch->getUnwindDest();
1494     } else {
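      // In well-formed IR the first non-PHI of an EH pad is a landingpad,
      // cleanuppad, or catchswitch, so this path should be unreachable.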
1495       continue;
1496     }
1497 
1498     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1499     if (BPI && NewEHPadBB)
1500       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1501     EHPadBB = NewEHPadBB;
1502   }
1503 }
1504 
1505 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1506   // Update successor info.
1507   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1508   auto UnwindDest = I.getUnwindDest();
1509   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1510   BranchProbability UnwindDestProb =
1511       (BPI && UnwindDest)
1512           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1513           : BranchProbability::getZero();
1514   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1515   for (auto &UnwindDest : UnwindDests) {
1516     UnwindDest.first->setIsEHPad();
1517     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1518   }
1519   FuncInfo.MBB->normalizeSuccProbs();
1520 
1521   // Create the terminator node.
1522   SDValue Ret =
1523       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1524   DAG.setRoot(Ret);
1525 }
1526 
1527 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1528   report_fatal_error("visitCatchSwitch not yet implemented!");
1529 }
1530 
1531 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1532   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1533   auto &DL = DAG.getDataLayout();
1534   SDValue Chain = getControlRoot();
1535   SmallVector<ISD::OutputArg, 8> Outs;
1536   SmallVector<SDValue, 8> OutVals;
1537 
1538   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1539   // we lower
1540   //
1541   //   %val = call <ty> @llvm.experimental.deoptimize()
1542   //   ret <ty> %val
1543   //
1544   // differently.
1545   if (I.getParent()->getTerminatingDeoptimizeCall()) {
1546     LowerDeoptimizingReturn();
1547     return;
1548   }
1549 
1550   if (!FuncInfo.CanLowerReturn) {
1551     unsigned DemoteReg = FuncInfo.DemoteRegister;
1552     const Function *F = I.getParent()->getParent();
1553 
1554     // Emit a store of the return value through the virtual register.
1555     // Leave Outs empty so that LowerReturn won't try to load return
1556     // registers the usual way.
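    // For example, returning a struct that is too large for the target's
    // return registers becomes a series of stores through the demoted pointer
    // held in DemoteReg, one per element of ValueVTs below.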
1557     SmallVector<EVT, 1> PtrValueVTs;
1558     ComputeValueVTs(TLI, DL,
1559                     F->getReturnType()->getPointerTo(
1560                         DAG.getDataLayout().getAllocaAddrSpace()),
1561                     PtrValueVTs);
1562 
1563     SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1564                                         DemoteReg, PtrValueVTs[0]);
1565     SDValue RetOp = getValue(I.getOperand(0));
1566 
1567     SmallVector<EVT, 4> ValueVTs;
1568     SmallVector<uint64_t, 4> Offsets;
1569     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1570     unsigned NumValues = ValueVTs.size();
1571 
1572     SmallVector<SDValue, 4> Chains(NumValues);
1573     for (unsigned i = 0; i != NumValues; ++i) {
1574       // An aggregate return value cannot wrap around the address space, so
1575       // offsets to its parts don't wrap either.
1576       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1577       Chains[i] = DAG.getStore(
1578           Chain, getCurSDLoc(), SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1579           // FIXME: better loc info would be nice.
1580           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
1581     }
1582 
1583     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1584                         MVT::Other, Chains);
1585   } else if (I.getNumOperands() != 0) {
1586     SmallVector<EVT, 4> ValueVTs;
1587     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1588     unsigned NumValues = ValueVTs.size();
1589     if (NumValues) {
1590       SDValue RetOp = getValue(I.getOperand(0));
1591 
1592       const Function *F = I.getParent()->getParent();
1593 
1594       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1595       if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1596                                           Attribute::SExt))
1597         ExtendKind = ISD::SIGN_EXTEND;
1598       else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1599                                                Attribute::ZExt))
1600         ExtendKind = ISD::ZERO_EXTEND;
1601 
1602       LLVMContext &Context = F->getContext();
1603       bool RetInReg = F->getAttributes().hasAttribute(
1604           AttributeList::ReturnIndex, Attribute::InReg);
1605 
1606       for (unsigned j = 0; j != NumValues; ++j) {
1607         EVT VT = ValueVTs[j];
1608 
1609         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1610           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1611 
1612         CallingConv::ID CC = F->getCallingConv();
1613 
1614         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1615         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1616         SmallVector<SDValue, 4> Parts(NumParts);
1617         getCopyToParts(DAG, getCurSDLoc(),
1618                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1619                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1620 
1621         // 'inreg' on function refers to return value
1622         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1623         if (RetInReg)
1624           Flags.setInReg();
1625 
1626         // Propagate extension type if any
1627         if (ExtendKind == ISD::SIGN_EXTEND)
1628           Flags.setSExt();
1629         else if (ExtendKind == ISD::ZERO_EXTEND)
1630           Flags.setZExt();
1631 
1632         for (unsigned i = 0; i < NumParts; ++i) {
1633           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1634                                         VT, /*isfixed=*/true, 0, 0));
1635           OutVals.push_back(Parts[i]);
1636         }
1637       }
1638     }
1639   }
1640 
1641   // Push the swifterror virtual register as the last element of Outs. This
1642   // makes sure the swifterror virtual register will be returned in the
1643   // swifterror physical register.
1644   const Function *F = I.getParent()->getParent();
1645   if (TLI.supportSwiftError() &&
1646       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1647     assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1648     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1649     Flags.setSwiftError();
1650     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1651                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1652                                   true /*isfixed*/, 1 /*origidx*/,
1653                                   0 /*partOffs*/));
1654     // Create SDNode for the swifterror virtual register.
1655     OutVals.push_back(
1656         DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt(
1657                             &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first,
1658                         EVT(TLI.getPointerTy(DL))));
1659   }
1660 
1661   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1662   CallingConv::ID CallConv =
1663     DAG.getMachineFunction().getFunction().getCallingConv();
1664   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1665       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1666 
1667   // Verify that the target's LowerReturn behaved as expected.
1668   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1669          "LowerReturn didn't return a valid chain!");
1670 
1671   // Update the DAG with the new chain value resulting from return lowering.
1672   DAG.setRoot(Chain);
1673 }
1674 
1675 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1676 /// created for it, emit nodes to copy the value into the virtual
1677 /// registers.
1678 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1679   // Skip empty types
1680   if (V->getType()->isEmptyTy())
1681     return;
1682 
1683   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1684   if (VMI != FuncInfo.ValueMap.end()) {
1685     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1686     CopyValueToVirtualRegister(V, VMI->second);
1687   }
1688 }
1689 
1690 /// ExportFromCurrentBlock - If this value isn't known to be exported from
1691 /// the current basic block, add it to ValueMap now so that we'll get a
1692 /// CopyTo/FromReg.
1693 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1694   // No need to export constants.
1695   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1696 
1697   // Already exported?
1698   if (FuncInfo.isExportedInst(V)) return;
1699 
1700   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1701   CopyValueToVirtualRegister(V, Reg);
1702 }
1703 
1704 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1705                                                      const BasicBlock *FromBB) {
1706   // The operands of the setcc have to be in this block.  We don't know
1707   // how to export them from some other block.
1708   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1709     // Can export from current BB.
1710     if (VI->getParent() == FromBB)
1711       return true;
1712 
1713     // Otherwise, it is exportable only if it has already been exported.
1714     return FuncInfo.isExportedInst(V);
1715   }
1716 
1717   // If this is an argument, we can export it if the BB is the entry block or
1718   // if it is already exported.
1719   if (isa<Argument>(V)) {
1720     if (FromBB == &FromBB->getParent()->getEntryBlock())
1721       return true;
1722 
1723     // Otherwise, can only export this if it is already exported.
1724     return FuncInfo.isExportedInst(V);
1725   }
1726 
1727   // Otherwise, constants can always be exported.
1728   return true;
1729 }
1730 
1731 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1732 BranchProbability
1733 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1734                                         const MachineBasicBlock *Dst) const {
1735   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1736   const BasicBlock *SrcBB = Src->getBasicBlock();
1737   const BasicBlock *DstBB = Dst->getBasicBlock();
1738   if (!BPI) {
1739     // If BPI is not available, set the default probability as 1 / N, where N is
1740     // the number of successors.
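    // For example, a source block with four successors yields a probability
    // of 1/4 on each outgoing edge.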
1741     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
1742     return BranchProbability(1, SuccSize);
1743   }
1744   return BPI->getEdgeProbability(SrcBB, DstBB);
1745 }
1746 
1747 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1748                                                MachineBasicBlock *Dst,
1749                                                BranchProbability Prob) {
1750   if (!FuncInfo.BPI)
1751     Src->addSuccessorWithoutProb(Dst);
1752   else {
1753     if (Prob.isUnknown())
1754       Prob = getEdgeProbability(Src, Dst);
1755     Src->addSuccessor(Dst, Prob);
1756   }
1757 }
1758 
1759 static bool InBlock(const Value *V, const BasicBlock *BB) {
1760   if (const Instruction *I = dyn_cast<Instruction>(V))
1761     return I->getParent() == BB;
1762   return true;
1763 }
1764 
1765 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1766 /// This function emits a branch and is used at the leaves of an OR or an
1767 /// AND operator tree.
1768 void
1769 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1770                                                   MachineBasicBlock *TBB,
1771                                                   MachineBasicBlock *FBB,
1772                                                   MachineBasicBlock *CurBB,
1773                                                   MachineBasicBlock *SwitchBB,
1774                                                   BranchProbability TProb,
1775                                                   BranchProbability FProb,
1776                                                   bool InvertCond) {
1777   const BasicBlock *BB = CurBB->getBasicBlock();
1778 
1779   // If the leaf of the tree is a comparison, merge the condition into
1780   // the caseblock.
1781   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1782     // The operands of the cmp have to be in this block.  We don't know
1783     // how to export them from some other block.  If this is the first block
1784     // of the sequence, no exporting is needed.
1785     if (CurBB == SwitchBB ||
1786         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1787          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1788       ISD::CondCode Condition;
1789       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1790         ICmpInst::Predicate Pred =
1791             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1792         Condition = getICmpCondCode(Pred);
1793       } else {
1794         const FCmpInst *FC = cast<FCmpInst>(Cond);
1795         FCmpInst::Predicate Pred =
1796             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1797         Condition = getFCmpCondCode(Pred);
1798         if (TM.Options.NoNaNsFPMath)
1799           Condition = getFCmpCodeWithoutNaN(Condition);
1800       }
1801 
1802       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1803                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1804       SwitchCases.push_back(CB);
1805       return;
1806     }
1807   }
1808 
1809   // Create a CaseBlock record representing this branch.
1810   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1811   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1812                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1813   SwitchCases.push_back(CB);
1814 }
1815 /// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// emit a sequence of branches for its subexpressions instead of materializing
/// the boolean value with setcc and a single conditional branch.
1816 /// FindMergedConditions - If Cond is an expression like
1817 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1818                                                MachineBasicBlock *TBB,
1819                                                MachineBasicBlock *FBB,
1820                                                MachineBasicBlock *CurBB,
1821                                                MachineBasicBlock *SwitchBB,
1822                                                Instruction::BinaryOps Opc,
1823                                                BranchProbability TProb,
1824                                                BranchProbability FProb,
1825                                                bool InvertCond) {
1826   // Skip over a 'not' that is not itself part of the tree, and remember to
1827   // invert the op and operands at the next level.
1828   if (BinaryOperator::isNot(Cond) && Cond->hasOneUse()) {
1829     const Value *CondOp = BinaryOperator::getNotArgument(Cond);
1830     if (InBlock(CondOp, CurBB->getBasicBlock())) {
1831       FindMergedConditions(CondOp, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1832                            !InvertCond);
1833       return;
1834     }
1835   }
1836 
1837   const Instruction *BOp = dyn_cast<Instruction>(Cond);
1838   // Compute the effective opcode for Cond, taking into account whether it needs
1839   // to be inverted, e.g.
1840   //   and (not (or A, B)), C
1841   // gets lowered as
1842   //   and (and (not A, not B), C)
1843   unsigned BOpc = 0;
1844   if (BOp) {
1845     BOpc = BOp->getOpcode();
1846     if (InvertCond) {
1847       if (BOpc == Instruction::And)
1848         BOpc = Instruction::Or;
1849       else if (BOpc == Instruction::Or)
1850         BOpc = Instruction::And;
1851     }
1852   }
1853 
1854   // If this node is not part of the or/and tree, emit it as a branch.
1855   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1856       BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
1857       BOp->getParent() != CurBB->getBasicBlock() ||
1858       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1859       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1860     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1861                                  TProb, FProb, InvertCond);
1862     return;
1863   }
1864 
1865   //  Create TmpBB after CurBB.
1866   MachineFunction::iterator BBI(CurBB);
1867   MachineFunction &MF = DAG.getMachineFunction();
1868   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1869   CurBB->getParent()->insert(++BBI, TmpBB);
1870 
1871   if (Opc == Instruction::Or) {
1872     // Codegen X | Y as:
1873     // BB1:
1874     //   jmp_if_X TBB
1875     //   jmp TmpBB
1876     // TmpBB:
1877     //   jmp_if_Y TBB
1878     //   jmp FBB
1879     //
1880 
1881     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1882     // The requirement is that
1883     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1884     //     = TrueProb for original BB.
1885     // Assuming the original probabilities are A and B, one choice is to set
1886     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
1887     // A/(1+B) and 2B/(1+B). This choice assumes that
1888     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1889     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1890     // TmpBB, but the math is more complicated.
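    // Worked example: with A == 1/4 and B == 3/4, BB1 gets probabilities
    // {1/8, 7/8} and TmpBB gets {A/(1+B), 2B/(1+B)} == {1/7, 6/7}.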
1891 
1892     auto NewTrueProb = TProb / 2;
1893     auto NewFalseProb = TProb / 2 + FProb;
1894     // Emit the LHS condition.
1895     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1896                          NewTrueProb, NewFalseProb, InvertCond);
1897 
1898     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
1899     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
1900     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1901     // Emit the RHS condition into TmpBB.
1902     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1903                          Probs[0], Probs[1], InvertCond);
1904   } else {
1905     assert(Opc == Instruction::And && "Unknown merge op!");
1906     // Codegen X & Y as:
1907     // BB1:
1908     //   jmp_if_X TmpBB
1909     //   jmp FBB
1910     // TmpBB:
1911     //   jmp_if_Y TBB
1912     //   jmp FBB
1913     //
1914     //  This requires creation of TmpBB after CurBB.
1915 
1916     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1917     // The requirement is that
1918     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1919     //     = FalseProb for original BB.
1920     // Assuming the original probabilities are A and B, one choice is to set
1921     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
1922     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
1923     // TrueProb for BB1 * FalseProb for TmpBB.
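    // Worked example: with A == 3/4 and B == 1/4, BB1 gets probabilities
    // {7/8, 1/8} and TmpBB gets {2A/(1+A), B/(1+A)} == {6/7, 1/7}.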
1924 
1925     auto NewTrueProb = TProb + FProb / 2;
1926     auto NewFalseProb = FProb / 2;
1927     // Emit the LHS condition.
1928     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1929                          NewTrueProb, NewFalseProb, InvertCond);
1930 
1931     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
1932     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
1933     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1934     // Emit the RHS condition into TmpBB.
1935     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1936                          Probs[0], Probs[1], InvertCond);
1937   }
1938 }
1939 
1940 /// If the set of cases should be emitted as a series of branches, return true.
1941 /// If we should emit this as a bunch of and/or'd together conditions, return
1942 /// false.
1943 bool
1944 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1945   if (Cases.size() != 2) return true;
1946 
1947   // If this is two comparisons of the same values or'd or and'd together, they
1948   // will get folded into a single comparison, so don't emit two blocks.
1949   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1950        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1951       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1952        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1953     return false;
1954   }
1955 
1956   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1957   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1958   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1959       Cases[0].CC == Cases[1].CC &&
1960       isa<Constant>(Cases[0].CmpRHS) &&
1961       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1962     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1963       return false;
1964     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1965       return false;
1966   }
1967 
1968   return true;
1969 }
1970 
1971 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1972   MachineBasicBlock *BrMBB = FuncInfo.MBB;
1973 
1974   // Update machine-CFG edges.
1975   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1976 
1977   if (I.isUnconditional()) {
1978     // Update machine-CFG edges.
1979     BrMBB->addSuccessor(Succ0MBB);
1980 
1981     // If this is not a fall-through branch or optimizations are switched off,
1982     // emit the branch.
1983     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
1984       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1985                               MVT::Other, getControlRoot(),
1986                               DAG.getBasicBlock(Succ0MBB)));
1987 
1988     return;
1989   }
1990 
1991   // If this condition is one of the special cases we handle, do special stuff
1992   // now.
1993   const Value *CondVal = I.getCondition();
1994   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1995 
1996   // If this is a series of conditions that are or'd or and'd together, emit
1997   // this as a sequence of branches instead of setcc's with and/or operations.
1998   // As long as jumps are not expensive, this should improve performance.
1999   // For example, instead of something like:
2000   //     cmp A, B
2001   //     C = seteq
2002   //     cmp D, E
2003   //     F = setle
2004   //     or C, F
2005   //     jnz foo
2006   // Emit:
2007   //     cmp A, B
2008   //     je foo
2009   //     cmp D, E
2010   //     jle foo
2011   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2012     Instruction::BinaryOps Opcode = BOp->getOpcode();
2013     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2014         !I.getMetadata(LLVMContext::MD_unpredictable) &&
2015         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2016       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2017                            Opcode,
2018                            getEdgeProbability(BrMBB, Succ0MBB),
2019                            getEdgeProbability(BrMBB, Succ1MBB),
2020                            /*InvertCond=*/false);
2021       // If the compares in later blocks need to use values not currently
2022       // exported from this block, export them now.  This block should always
2023       // be the first entry.
2024       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2025 
2026       // Allow some cases to be rejected.
2027       if (ShouldEmitAsBranches(SwitchCases)) {
2028         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
2029           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
2030           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
2031         }
2032 
2033         // Emit the branch for this block.
2034         visitSwitchCase(SwitchCases[0], BrMBB);
2035         SwitchCases.erase(SwitchCases.begin());
2036         return;
2037       }
2038 
2039       // Okay, we decided not to do this, remove any inserted MBB's and clear
2040       // SwitchCases.
2041       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
2042         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
2043 
2044       SwitchCases.clear();
2045     }
2046   }
2047 
2048   // Create a CaseBlock record representing this branch.
2049   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2050                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2051 
2052   // Use visitSwitchCase to actually insert the fast branch sequence for this
2053   // cond branch.
2054   visitSwitchCase(CB, BrMBB);
2055 }
2056 
2057 /// visitSwitchCase - Emits the necessary code to represent a single node in
2058 /// the binary search tree resulting from lowering a switch instruction.
2059 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2060                                           MachineBasicBlock *SwitchBB) {
2061   SDValue Cond;
2062   SDValue CondLHS = getValue(CB.CmpLHS);
2063   SDLoc dl = CB.DL;
2064 
2065   // Build the setcc now.
2066   if (!CB.CmpMHS) {
2067     // Fold "(X == true)" to X and "(X == false)" to !X to
2068     // handle common cases produced by branch lowering.
2069     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2070         CB.CC == ISD::SETEQ)
2071       Cond = CondLHS;
2072     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2073              CB.CC == ISD::SETEQ) {
2074       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2075       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2076     } else
2077       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
2078   } else {
2079     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
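    // A case range [Low, High] is normally tested as (CmpMHS - Low) <=u
    // (High - Low); e.g. the range [10, 20] becomes (x - 10) ule 10, a single
    // unsigned compare.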
2080 
2081     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2082     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2083 
2084     SDValue CmpOp = getValue(CB.CmpMHS);
2085     EVT VT = CmpOp.getValueType();
2086 
2087     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2088       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2089                           ISD::SETLE);
2090     } else {
2091       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2092                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2093       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2094                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2095     }
2096   }
2097 
2098   // Update successor info
2099   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2100   // TrueBB and FalseBB are always different unless the incoming IR is
2101   // degenerate. This only happens when running llc on weird IR.
2102   if (CB.TrueBB != CB.FalseBB)
2103     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2104   SwitchBB->normalizeSuccProbs();
2105 
2106   // If the lhs block is the next block, invert the condition so that we can
2107   // fall through to the lhs instead of the rhs block.
2108   if (CB.TrueBB == NextBlock(SwitchBB)) {
2109     std::swap(CB.TrueBB, CB.FalseBB);
2110     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2111     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2112   }
2113 
2114   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2115                                MVT::Other, getControlRoot(), Cond,
2116                                DAG.getBasicBlock(CB.TrueBB));
2117 
2118   // Insert the false branch. Do this even if it's a fall through branch,
2119   // this makes it easier to do DAG optimizations which require inverting
2120   // the branch condition.
2121   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2122                        DAG.getBasicBlock(CB.FalseBB));
2123 
2124   DAG.setRoot(BrCond);
2125 }
2126 
2127 /// visitJumpTable - Emit the JumpTable node in the current MBB.
2128 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
2129   // Emit the code for the jump table
2130   assert(JT.Reg != -1U && "Should lower JT Header first!");
2131   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2132   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2133                                      JT.Reg, PTy);
2134   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2135   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2136                                     MVT::Other, Index.getValue(1),
2137                                     Table, Index);
2138   DAG.setRoot(BrJumpTable);
2139 }
2140 
2141 /// visitJumpTableHeader - This function emits the necessary code to produce
2142 /// an index into the JumpTable from the switch case value.
2143 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
2144                                                JumpTableHeader &JTH,
2145                                                MachineBasicBlock *SwitchBB) {
2146   SDLoc dl = getCurSDLoc();
2147 
2148   // Subtract the lowest switch case value from the value being switched on,
2149   // and conditionally branch to the default MBB if the result is greater than
2150   // the difference between the smallest and largest cases.
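  // For example, for cases {3, 4, 7} the index is (x - 3), and the range
  // check branches to the default block when (x - 3) >u 4.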
2151   SDValue SwitchOp = getValue(JTH.SValue);
2152   EVT VT = SwitchOp.getValueType();
2153   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2154                             DAG.getConstant(JTH.First, dl, VT));
2155 
2156   // The SDNode we just created, which holds the value being switched on minus
2157   // the smallest case value, needs to be copied to a virtual register so it
2158   // can be used as an index into the jump table in a subsequent basic block.
2159   // This value may be smaller or larger than the target's pointer type, and
2160   // may therefore require extension or truncation.
2161   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2162   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2163 
2164   unsigned JumpTableReg =
2165       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2166   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2167                                     JumpTableReg, SwitchOp);
2168   JT.Reg = JumpTableReg;
2169 
2170   // Emit the range check for the jump table, and branch to the default block
2171   // for the switch statement if the value being switched on exceeds the largest
2172   // case in the switch.
2173   SDValue CMP = DAG.getSetCC(
2174       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2175                                  Sub.getValueType()),
2176       Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2177 
2178   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2179                                MVT::Other, CopyTo, CMP,
2180                                DAG.getBasicBlock(JT.Default));
2181 
2182   // Avoid emitting unnecessary branches to the next block.
2183   if (JT.MBB != NextBlock(SwitchBB))
2184     BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2185                          DAG.getBasicBlock(JT.MBB));
2186 
2187   DAG.setRoot(BrCond);
2188 }
2189 
2190 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific
2191 /// global variable if one exists.
2192 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2193                                  SDValue &Chain) {
2194   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2195   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2196   MachineFunction &MF = DAG.getMachineFunction();
2197   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2198   MachineSDNode *Node =
2199       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2200   if (Global) {
2201     MachinePointerInfo MPInfo(Global);
2202     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2203                  MachineMemOperand::MODereferenceable;
2204     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2205         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
2206     DAG.setNodeMemRefs(Node, {MemRef});
2207   }
2208   return SDValue(Node, 0);
2209 }
2210 
2211 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2212 /// tail spliced into a stack protector check success MBB.
2213 ///
2214 /// For a high level explanation of how this fits into the stack protector
2215 /// generation see the comment on the declaration of class
2216 /// StackProtectorDescriptor.
2217 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2218                                                   MachineBasicBlock *ParentBB) {
2219 
2220   // First create the loads to the guard/stack slot for the comparison.
2221   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2222   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2223 
2224   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2225   int FI = MFI.getStackProtectorIndex();
2226 
2227   SDValue Guard;
2228   SDLoc dl = getCurSDLoc();
2229   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2230   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2231   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2232 
2233   // Generate code to load the content of the guard slot.
2234   SDValue GuardVal = DAG.getLoad(
2235       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
2236       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2237       MachineMemOperand::MOVolatile);
2238 
2239   if (TLI.useStackGuardXorFP())
2240     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2241 
2242   // Retrieve the guard check function; nullptr if instrumentation is inlined.
2243   if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
2244     // The target provides a guard check function to validate the guard value.
2245     // Generate a call to that function with the content of the guard slot as
2246     // argument.
2247     auto *Fn = cast<Function>(GuardCheck);
2248     FunctionType *FnTy = Fn->getFunctionType();
2249     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2250 
2251     TargetLowering::ArgListTy Args;
2252     TargetLowering::ArgListEntry Entry;
2253     Entry.Node = GuardVal;
2254     Entry.Ty = FnTy->getParamType(0);
2255     if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2256       Entry.IsInReg = true;
2257     Args.push_back(Entry);
2258 
2259     TargetLowering::CallLoweringInfo CLI(DAG);
2260     CLI.setDebugLoc(getCurSDLoc())
2261       .setChain(DAG.getEntryNode())
2262       .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2263                  getValue(GuardCheck), std::move(Args));
2264 
2265     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2266     DAG.setRoot(Result.second);
2267     return;
2268   }
2269 
2270   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2271   // Otherwise, emit a volatile load to retrieve the stack guard value.
2272   SDValue Chain = DAG.getEntryNode();
2273   if (TLI.useLoadStackGuardNode()) {
2274     Guard = getLoadStackGuard(DAG, dl, Chain);
2275   } else {
2276     const Value *IRGuard = TLI.getSDagStackGuard(M);
2277     SDValue GuardPtr = getValue(IRGuard);
2278 
2279     Guard =
2280         DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2281                     Align, MachineMemOperand::MOVolatile);
2282   }
2283 
2284   // Perform the comparison via a subtract/getsetcc.
2285   EVT VT = Guard.getValueType();
2286   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);
2287 
2288   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2289                                                         *DAG.getContext(),
2290                                                         Sub.getValueType()),
2291                              Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2292 
2293   // If the sub is not 0, then we know the guard and the stack slot are not
2294   // equal, so branch to the failure MBB.
2295   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2296                                MVT::Other, GuardVal.getOperand(0),
2297                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2298   // Otherwise branch to success MBB.
2299   SDValue Br = DAG.getNode(ISD::BR, dl,
2300                            MVT::Other, BrCond,
2301                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2302 
2303   DAG.setRoot(Br);
2304 }
2305 
2306 /// Codegen the failure basic block for a stack protector check.
2307 ///
2308 /// A failure stack protector machine basic block consists simply of a call to
2309 /// __stack_chk_fail().
2310 ///
2311 /// For a high level explanation of how this fits into the stack protector
2312 /// generation see the comment on the declaration of class
2313 /// StackProtectorDescriptor.
2314 void
2315 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2316   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2317   SDValue Chain =
2318       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2319                       None, false, getCurSDLoc(), false, false).second;
2320   DAG.setRoot(Chain);
2321 }
2322 
2323 /// visitBitTestHeader - This function emits the necessary code to produce a
2324 /// value suitable for "bit tests".
2325 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2326                                              MachineBasicBlock *SwitchBB) {
2327   SDLoc dl = getCurSDLoc();
2328 
2329   // Subtract the minimum value
2330   SDValue SwitchOp = getValue(B.SValue);
2331   EVT VT = SwitchOp.getValueType();
2332   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2333                             DAG.getConstant(B.First, dl, VT));
2334 
2335   // Check range
2336   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2337   SDValue RangeCmp = DAG.getSetCC(
2338       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2339                                  Sub.getValueType()),
2340       Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2341 
2342   // Determine the type of the test operands.
2343   bool UsePtrType = false;
2344   if (!TLI.isTypeLegal(VT))
2345     UsePtrType = true;
2346   else {
2347     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2348       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2349         // Switch table case ranges are encoded into a series of masks.
2350         // Just use the pointer type; it's guaranteed to fit.
2351         UsePtrType = true;
2352         break;
2353       }
2354   }
2355   if (UsePtrType) {
2356     VT = TLI.getPointerTy(DAG.getDataLayout());
2357     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2358   }
2359 
2360   B.RegVT = VT.getSimpleVT();
2361   B.Reg = FuncInfo.CreateReg(B.RegVT);
2362   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2363 
2364   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2365 
2366   addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2367   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2368   SwitchBB->normalizeSuccProbs();
2369 
2370   SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2371                                 MVT::Other, CopyTo, RangeCmp,
2372                                 DAG.getBasicBlock(B.Default));
2373 
2374   // Avoid emitting unnecessary branches to the next block.
2375   if (MBB != NextBlock(SwitchBB))
2376     BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2377                           DAG.getBasicBlock(MBB));
2378 
2379   DAG.setRoot(BrRange);
2380 }
2381 
2382 /// visitBitTestCase - This function produces one "bit test".
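/// For example, cases {1, 3, 5} that share a destination, with First == 1
/// already subtracted by the bit test header, become the single test
/// ((1 << x) & 0b10101) != 0.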
2383 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2384                                            MachineBasicBlock* NextMBB,
2385                                            BranchProbability BranchProbToNext,
2386                                            unsigned Reg,
2387                                            BitTestCase &B,
2388                                            MachineBasicBlock *SwitchBB) {
2389   SDLoc dl = getCurSDLoc();
2390   MVT VT = BB.RegVT;
2391   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2392   SDValue Cmp;
2393   unsigned PopCount = countPopulation(B.Mask);
2394   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2395   if (PopCount == 1) {
2396     // Testing for a single bit; just compare the shift count with what it
2397     // would need to be to shift a 1 bit in that position.
2398     Cmp = DAG.getSetCC(
2399         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2400         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2401         ISD::SETEQ);
2402   } else if (PopCount == BB.Range) {
2403     // There is only one zero bit in the range, test for it directly.
2404     Cmp = DAG.getSetCC(
2405         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2406         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2407         ISD::SETNE);
2408   } else {
2409     // Make desired shift
2410     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2411                                     DAG.getConstant(1, dl, VT), ShiftOp);
2412 
2413     // Emit bit tests and jumps
2414     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2415                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2416     Cmp = DAG.getSetCC(
2417         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2418         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2419   }
2420 
2421   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2422   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2423   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2424   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2425   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2426   // one, as they are relative probabilities (and thus work more like weights),
2427   // so we need to normalize them so that they sum to one.
2428   SwitchBB->normalizeSuccProbs();
2429 
2430   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2431                               MVT::Other, getControlRoot(),
2432                               Cmp, DAG.getBasicBlock(B.TargetBB));
2433 
2434   // Avoid emitting unnecessary branches to the next block.
2435   if (NextMBB != NextBlock(SwitchBB))
2436     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2437                         DAG.getBasicBlock(NextMBB));
2438 
2439   DAG.setRoot(BrAnd);
2440 }
2441 
2442 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2443   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2444 
2445   // Retrieve the successors, looking through artificial IR-level blocks such
2446   // as catchswitch.
2447   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2448   const BasicBlock *EHPadBB = I.getSuccessor(1);
2449 
2450   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2451   // have to do anything here to lower funclet bundles.
2452   assert(!I.hasOperandBundlesOtherThan(
2453              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2454          "Cannot lower invokes with arbitrary operand bundles yet!");
2455 
2456   const Value *Callee(I.getCalledValue());
2457   const Function *Fn = dyn_cast<Function>(Callee);
2458   if (isa<InlineAsm>(Callee))
2459     visitInlineAsm(&I);
2460   else if (Fn && Fn->isIntrinsic()) {
2461     switch (Fn->getIntrinsicID()) {
2462     default:
2463       llvm_unreachable("Cannot invoke this intrinsic");
2464     case Intrinsic::donothing:
2465       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2466       break;
2467     case Intrinsic::experimental_patchpoint_void:
2468     case Intrinsic::experimental_patchpoint_i64:
2469       visitPatchpoint(&I, EHPadBB);
2470       break;
2471     case Intrinsic::experimental_gc_statepoint:
2472       LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2473       break;
2474     }
2475   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2476     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2477     // Eventually we will support lowering the @llvm.experimental.deoptimize
2478     // intrinsic, and right now there are no plans to support other intrinsics
2479     // with deopt state.
2480     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2481   } else {
2482     LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2483   }
2484 
2485   // If the value of the invoke is used outside of its defining block, make it
2486   // available as a virtual register.
2487   // We already took care of the exported value for the statepoint instruction
2488   // during the call to LowerStatepoint.
2489   if (!isStatepoint(I)) {
2490     CopyToExportRegsIfNeeded(&I);
2491   }
2492 
2493   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2494   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2495   BranchProbability EHPadBBProb =
2496       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2497           : BranchProbability::getZero();
2498   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2499 
2500   // Update successor info.
2501   addSuccessorWithProb(InvokeMBB, Return);
2502   for (auto &UnwindDest : UnwindDests) {
2503     UnwindDest.first->setIsEHPad();
2504     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2505   }
2506   InvokeMBB->normalizeSuccProbs();
2507 
2508   // Drop into normal successor.
2509   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2510                           MVT::Other, getControlRoot(),
2511                           DAG.getBasicBlock(Return)));
2512 }
2513 
2514 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2515   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2516 }
2517 
2518 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2519   assert(FuncInfo.MBB->isEHPad() &&
2520          "Call to landingpad not in landing pad!");
2521 
2522   MachineBasicBlock *MBB = FuncInfo.MBB;
2523   addLandingPadInfo(LP, *MBB);
2524 
2525   // If there aren't registers to copy the values into (e.g., during SjLj
2526   // exceptions), then don't bother to create these DAG nodes.
2527   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2528   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2529   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2530       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2531     return;
2532 
2533   // If the landingpad's return type is token type, we don't create DAG nodes
2534   // for its exception pointer and selector value. The extraction of exception
2535   // pointer or selector value from token type landingpads is not currently
2536   // supported.
2537   if (LP.getType()->isTokenTy())
2538     return;
2539 
2540   SmallVector<EVT, 2> ValueVTs;
2541   SDLoc dl = getCurSDLoc();
2542   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2543   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2544 
2545   // Get the two live-in registers as SDValues. The physregs have already been
2546   // copied into virtual registers.
2547   SDValue Ops[2];
2548   if (FuncInfo.ExceptionPointerVirtReg) {
2549     Ops[0] = DAG.getZExtOrTrunc(
2550         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2551                            FuncInfo.ExceptionPointerVirtReg,
2552                            TLI.getPointerTy(DAG.getDataLayout())),
2553         dl, ValueVTs[0]);
2554   } else {
2555     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2556   }
2557   Ops[1] = DAG.getZExtOrTrunc(
2558       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2559                          FuncInfo.ExceptionSelectorVirtReg,
2560                          TLI.getPointerTy(DAG.getDataLayout())),
2561       dl, ValueVTs[1]);
2562 
2563   // Merge into one.
2564   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2565                             DAG.getVTList(ValueVTs), Ops);
2566   setValue(&LP, Res);
2567 }
2568 
2569 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2570 #ifndef NDEBUG
2571   for (const CaseCluster &CC : Clusters)
2572     assert(CC.Low == CC.High && "Input clusters must be single-case");
2573 #endif
2574 
2575   llvm::sort(Clusters.begin(), Clusters.end(),
2576              [](const CaseCluster &a, const CaseCluster &b) {
2577     return a.Low->getValue().slt(b.Low->getValue());
2578   });
2579 
2580   // Merge adjacent clusters with the same destination.
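       // For example (illustrative), the single-case input clusters
       //   [0] -> BB0, [1] -> BB0, [2] -> BB1, [3] -> BB0
       // become the range clusters
       //   [0,1] -> BB0, [2] -> BB1, [3] -> BB0
       // ([3] is not merged into [0,1] because [2] -> BB1 sits between them).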
2581   const unsigned N = Clusters.size();
2582   unsigned DstIndex = 0;
2583   for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2584     CaseCluster &CC = Clusters[SrcIndex];
2585     const ConstantInt *CaseVal = CC.Low;
2586     MachineBasicBlock *Succ = CC.MBB;
2587 
2588     if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2589         (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2590       // If this case has the same successor and is a neighbour, merge it into
2591       // the previous cluster.
2592       Clusters[DstIndex - 1].High = CaseVal;
2593       Clusters[DstIndex - 1].Prob += CC.Prob;
2594     } else {
2595       std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2596                    sizeof(Clusters[SrcIndex]));
2597     }
2598   }
2599   Clusters.resize(DstIndex);
2600 }
2601 
2602 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2603                                            MachineBasicBlock *Last) {
2604   // Update JTCases.
2605   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2606     if (JTCases[i].first.HeaderBB == First)
2607       JTCases[i].first.HeaderBB = Last;
2608 
2609   // Update BitTestCases.
2610   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2611     if (BitTestCases[i].Parent == First)
2612       BitTestCases[i].Parent = Last;
2613 }
2614 
2615 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2616   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2617 
2618   // Update machine-CFG edges with unique successors.
2619   SmallSet<BasicBlock*, 32> Done;
2620   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2621     BasicBlock *BB = I.getSuccessor(i);
2622     bool Inserted = Done.insert(BB).second;
2623     if (!Inserted)
2624       continue;
2625 
2626     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2627     addSuccessorWithProb(IndirectBrMBB, Succ);
2628   }
2629   IndirectBrMBB->normalizeSuccProbs();
2630 
2631   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2632                           MVT::Other, getControlRoot(),
2633                           getValue(I.getAddress())));
2634 }
2635 
2636 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2637   if (!DAG.getTarget().Options.TrapUnreachable)
2638     return;
2639 
2640   // We may be able to ignore unreachable behind a noreturn call.
2641   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2642     const BasicBlock &BB = *I.getParent();
2643     if (&I != &BB.front()) {
2644       BasicBlock::const_iterator PredI =
2645         std::prev(BasicBlock::const_iterator(&I));
2646       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2647         if (Call->doesNotReturn())
2648           return;
2649       }
2650     }
2651   }
2652 
2653   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2654 }
2655 
2656 void SelectionDAGBuilder::visitFSub(const User &I) {
2657   // -0.0 - X --> fneg
2658   Type *Ty = I.getType();
2659   if (isa<Constant>(I.getOperand(0)) &&
2660       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2661     SDValue Op2 = getValue(I.getOperand(1));
2662     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2663                              Op2.getValueType(), Op2));
2664     return;
2665   }
2666 
2667   visitBinary(I, ISD::FSUB);
2668 }
2669 
2670 /// Checks if the given instruction performs a vector reduction, in which case
2671 /// we have the freedom to alter the elements in the result as long as the
2672 /// reduction of them stays unchanged.
2673 static bool isVectorReductionOp(const User *I) {
2674   const Instruction *Inst = dyn_cast<Instruction>(I);
2675   if (!Inst || !Inst->getType()->isVectorTy())
2676     return false;
2677 
2678   auto OpCode = Inst->getOpcode();
2679   switch (OpCode) {
2680   case Instruction::Add:
2681   case Instruction::Mul:
2682   case Instruction::And:
2683   case Instruction::Or:
2684   case Instruction::Xor:
2685     break;
2686   case Instruction::FAdd:
2687   case Instruction::FMul:
2688     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2689       if (FPOp->getFastMathFlags().isFast())
2690         break;
2691     LLVM_FALLTHROUGH;
2692   default:
2693     return false;
2694   }
2695 
2696   unsigned ElemNum = Inst->getType()->getVectorNumElements();
2697   // Ensure the reduction size is a power of 2.
2698   if (!isPowerOf2_32(ElemNum))
2699     return false;
2700 
2701   unsigned ElemNumToReduce = ElemNum;
2702 
2703   // Do DFS search on the def-use chain from the given instruction. We only
2704   // allow four kinds of operations during the search until we reach the
2705   // instruction that extracts the first element from the vector:
2706   //
2707   //   1. The reduction operation of the same opcode as the given instruction.
2708   //
2709   //   2. PHI node.
2710   //
2711   //   3. ShuffleVector instruction together with a reduction operation that
2712   //      does a partial reduction.
2713   //
2714   //   4. ExtractElement that extracts the first element from the vector, and we
2715   //      stop searching the def-use chain here.
2716   //
2717   // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2718   // from 1-3 to the stack to continue the DFS. The given instruction is not
2719   // a reduction operation if we meet any instruction other than those
2720   // listed above.
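       //
       // E.g. a fast-math fadd reduction of a <4 x float> typically looks like
       // this in IR (hypothetical values):
       //
       //   %shuf1 = shufflevector <4 x float> %v, <4 x float> undef,
       //                          <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
       //   %sum1  = fadd fast <4 x float> %v, %shuf1
       //   %shuf2 = shufflevector <4 x float> %sum1, <4 x float> undef,
       //                          <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
       //   %sum2  = fadd fast <4 x float> %sum1, %shuf2
       //   %res   = extractelement <4 x float> %sum2, i32 0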
2721 
2722   SmallVector<const User *, 16> UsersToVisit{Inst};
2723   SmallPtrSet<const User *, 16> Visited;
2724   bool ReduxExtracted = false;
2725 
2726   while (!UsersToVisit.empty()) {
2727     auto User = UsersToVisit.back();
2728     UsersToVisit.pop_back();
2729     if (!Visited.insert(User).second)
2730       continue;
2731 
2732     for (const auto &U : User->users()) {
2733       auto Inst = dyn_cast<Instruction>(U);
2734       if (!Inst)
2735         return false;
2736 
2737       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2738         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2739           if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
2740             return false;
2741         UsersToVisit.push_back(U);
2742       } else if (const ShuffleVectorInst *ShufInst =
2743                      dyn_cast<ShuffleVectorInst>(U)) {
2744         // Detect the following pattern: a ShuffleVector instruction together
2745         // with a reduction that does a partial reduction on the first and
2746         // second ElemNumToReduce / 2 elements, storing the result in the
2747         // first ElemNumToReduce / 2 elements of another vector.
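             // E.g. with ElemNumToReduce == 4 the expected mask is
             // <2, 3, undef, undef>, and the shuffle's single user is the next
             // partial reduction.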
2748 
2749         unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2750         if (ResultElements < ElemNum)
2751           return false;
2752 
2753         if (ElemNumToReduce == 1)
2754           return false;
2755         if (!isa<UndefValue>(U->getOperand(1)))
2756           return false;
2757         for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2758           if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2759             return false;
2760         for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2761           if (ShufInst->getMaskValue(i) != -1)
2762             return false;
2763 
2764         // There is only one user of this ShuffleVector instruction, which
2765         // must be a reduction operation.
2766         if (!U->hasOneUse())
2767           return false;
2768 
2769         auto U2 = dyn_cast<Instruction>(*U->user_begin());
2770         if (!U2 || U2->getOpcode() != OpCode)
2771           return false;
2772 
2773         // Check operands of the reduction operation.
2774         if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2775             (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2776           UsersToVisit.push_back(U2);
2777           ElemNumToReduce /= 2;
2778         } else
2779           return false;
2780       } else if (isa<ExtractElementInst>(U)) {
2781         // At this moment we should have reduced all elements in the vector.
2782         if (ElemNumToReduce != 1)
2783           return false;
2784 
2785         const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
2786         if (!Val || !Val->isZero())
2787           return false;
2788 
2789         ReduxExtracted = true;
2790       } else
2791         return false;
2792     }
2793   }
2794   return ReduxExtracted;
2795 }
2796 
2797 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
2798   SDNodeFlags Flags;
2799   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
2800     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
2801     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
2802   }
2803   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
2804     Flags.setExact(ExactOp->isExact());
2805   }
2806   if (isVectorReductionOp(&I)) {
2807     Flags.setVectorReduction(true);
2808     LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
2809   }
2810 
2811   SDValue Op1 = getValue(I.getOperand(0));
2812   SDValue Op2 = getValue(I.getOperand(1));
2813   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
2814                                      Op1, Op2, Flags);
2815   setValue(&I, BinNodeValue);
2816 }
2817 
2818 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2819   SDValue Op1 = getValue(I.getOperand(0));
2820   SDValue Op2 = getValue(I.getOperand(1));
2821 
2822   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
2823       Op2.getValueType(), DAG.getDataLayout());
2824 
2825   // Coerce the shift amount to the right type if we can.
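       // E.g. on a target whose shift-amount type is i32, an i8 shift amount
       // is zero-extended to i32 here and an i128 amount is truncated to i32
       // (illustrative).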
2826   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2827     unsigned ShiftSize = ShiftTy.getSizeInBits();
2828     unsigned Op2Size = Op2.getValueSizeInBits();
2829     SDLoc DL = getCurSDLoc();
2830 
2831     // If the operand is smaller than the shift count type, promote it.
2832     if (ShiftSize > Op2Size)
2833       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2834 
2835     // If the operand is larger than the shift count type but the shift
2836     // count type has enough bits to represent any shift value, truncate
2837     // it now. This is a common case and it exposes the truncate to
2838     // optimization early.
2839     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
2840       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2841     // Otherwise we'll need to temporarily settle for some other convenient
2842     // type.  Type legalization will make adjustments once the shiftee is split.
2843     else
2844       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2845   }
2846 
2847   bool nuw = false;
2848   bool nsw = false;
2849   bool exact = false;
2850 
2851   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2852 
2853     if (const OverflowingBinaryOperator *OFBinOp =
2854             dyn_cast<const OverflowingBinaryOperator>(&I)) {
2855       nuw = OFBinOp->hasNoUnsignedWrap();
2856       nsw = OFBinOp->hasNoSignedWrap();
2857     }
2858     if (const PossiblyExactOperator *ExactOp =
2859             dyn_cast<const PossiblyExactOperator>(&I))
2860       exact = ExactOp->isExact();
2861   }
2862   SDNodeFlags Flags;
2863   Flags.setExact(exact);
2864   Flags.setNoSignedWrap(nsw);
2865   Flags.setNoUnsignedWrap(nuw);
2866   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2867                             Flags);
2868   setValue(&I, Res);
2869 }
2870 
2871 void SelectionDAGBuilder::visitSDiv(const User &I) {
2872   SDValue Op1 = getValue(I.getOperand(0));
2873   SDValue Op2 = getValue(I.getOperand(1));
2874 
2875   SDNodeFlags Flags;
2876   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2877                  cast<PossiblyExactOperator>(&I)->isExact());
2878   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2879                            Op2, Flags));
2880 }
2881 
2882 void SelectionDAGBuilder::visitICmp(const User &I) {
2883   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2884   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2885     predicate = IC->getPredicate();
2886   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2887     predicate = ICmpInst::Predicate(IC->getPredicate());
2888   SDValue Op1 = getValue(I.getOperand(0));
2889   SDValue Op2 = getValue(I.getOperand(1));
2890   ISD::CondCode Opcode = getICmpCondCode(predicate);
2891 
2892   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2893                                                         I.getType());
2894   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2895 }
2896 
2897 void SelectionDAGBuilder::visitFCmp(const User &I) {
2898   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2899   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2900     predicate = FC->getPredicate();
2901   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2902     predicate = FCmpInst::Predicate(FC->getPredicate());
2903   SDValue Op1 = getValue(I.getOperand(0));
2904   SDValue Op2 = getValue(I.getOperand(1));
2905 
2906   ISD::CondCode Condition = getFCmpCondCode(predicate);
2907   auto *FPMO = dyn_cast<FPMathOperator>(&I);
2908   if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
2909     Condition = getFCmpCodeWithoutNaN(Condition);
2910 
2911   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2912                                                         I.getType());
2913   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2914 }
2915 
2916 // Check if the condition of the select is used only by select instructions,
2917 // so that a min/max rewrite will not leave the compare alive for other users.
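     // E.g. (illustrative):
     //   %c = icmp slt i32 %a, %b
     //   %x = select i1 %c, i32 %a, i32 %b   ; smin
     //   %y = select i1 %c, i32 %p, i32 %q
     // Both users of %c are selects, so rewriting %x as a min/max node can
     // leave %c dead.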
2918 static bool hasOnlySelectUsers(const Value *Cond) {
2919   return llvm::all_of(Cond->users(), [](const Value *V) {
2920     return isa<SelectInst>(V);
2921   });
2922 }
2923 
2924 void SelectionDAGBuilder::visitSelect(const User &I) {
2925   SmallVector<EVT, 4> ValueVTs;
2926   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2927                   ValueVTs);
2928   unsigned NumValues = ValueVTs.size();
2929   if (NumValues == 0) return;
2930 
2931   SmallVector<SDValue, 4> Values(NumValues);
2932   SDValue Cond     = getValue(I.getOperand(0));
2933   SDValue LHSVal   = getValue(I.getOperand(1));
2934   SDValue RHSVal   = getValue(I.getOperand(2));
2935   auto BaseOps = {Cond};
2936   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2937     ISD::VSELECT : ISD::SELECT;
2938 
2939   // Min/max matching is only viable if all output VTs are the same.
2940   if (is_splat(ValueVTs)) {
2941     EVT VT = ValueVTs[0];
2942     LLVMContext &Ctx = *DAG.getContext();
2943     auto &TLI = DAG.getTargetLoweringInfo();
2944 
2945     // We care about the legality of the operation after it has been type
2946     // legalized.
2947     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2948            VT != TLI.getTypeToTransformTo(Ctx, VT))
2949       VT = TLI.getTypeToTransformTo(Ctx, VT);
2950 
2951     // If the vselect is legal, assume we want to leave this as a vector setcc +
2952     // vselect. Otherwise, if this is going to be scalarized, we want to see if
2953     // min/max is legal on the scalar type.
2954     bool UseScalarMinMax = VT.isVector() &&
2955       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2956 
2957     Value *LHS, *RHS;
2958     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2959     ISD::NodeType Opc = ISD::DELETED_NODE;
2960     switch (SPR.Flavor) {
2961     case SPF_UMAX:    Opc = ISD::UMAX; break;
2962     case SPF_UMIN:    Opc = ISD::UMIN; break;
2963     case SPF_SMAX:    Opc = ISD::SMAX; break;
2964     case SPF_SMIN:    Opc = ISD::SMIN; break;
2965     case SPF_FMINNUM:
2966       switch (SPR.NaNBehavior) {
2967       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2968       case SPNB_RETURNS_NAN:   Opc = ISD::FMINNAN; break;
2969       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2970       case SPNB_RETURNS_ANY: {
2971         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
2972           Opc = ISD::FMINNUM;
2973         else if (TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT))
2974           Opc = ISD::FMINNAN;
2975         else if (UseScalarMinMax)
2976           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
2977             ISD::FMINNUM : ISD::FMINNAN;
2978         break;
2979       }
2980       }
2981       break;
2982     case SPF_FMAXNUM:
2983       switch (SPR.NaNBehavior) {
2984       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2985       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXNAN; break;
2986       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
2987       case SPNB_RETURNS_ANY:
2988 
2989         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
2990           Opc = ISD::FMAXNUM;
2991         else if (TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT))
2992           Opc = ISD::FMAXNAN;
2993         else if (UseScalarMinMax)
2994           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
2995             ISD::FMAXNUM : ISD::FMAXNAN;
2996         break;
2997       }
2998       break;
2999     default: break;
3000     }
3001 
3002     if (Opc != ISD::DELETED_NODE &&
3003         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3004          (UseScalarMinMax &&
3005           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3006         // If the underlying comparison instruction is used by any other
3007         // instruction, the consumed instructions won't be destroyed, so it is
3008         // not profitable to convert to a min/max.
3009         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3010       OpCode = Opc;
3011       LHSVal = getValue(LHS);
3012       RHSVal = getValue(RHS);
3013       BaseOps = {};
3014     }
3015   }
3016 
3017   for (unsigned i = 0; i != NumValues; ++i) {
3018     SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3019     Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3020     Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3021     Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
3022                             LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
3023                             Ops);
3024   }
3025 
3026   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3027                            DAG.getVTList(ValueVTs), Values));
3028 }
3029 
3030 void SelectionDAGBuilder::visitTrunc(const User &I) {
3031   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3032   SDValue N = getValue(I.getOperand(0));
3033   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3034                                                         I.getType());
3035   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3036 }
3037 
3038 void SelectionDAGBuilder::visitZExt(const User &I) {
3039   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3040   // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
3041   SDValue N = getValue(I.getOperand(0));
3042   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3043                                                         I.getType());
3044   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3045 }
3046 
3047 void SelectionDAGBuilder::visitSExt(const User &I) {
3048   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3049   // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
3050   SDValue N = getValue(I.getOperand(0));
3051   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3052                                                         I.getType());
3053   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3054 }
3055 
3056 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3057   // FPTrunc is never a no-op cast, no need to check
3058   SDValue N = getValue(I.getOperand(0));
3059   SDLoc dl = getCurSDLoc();
3060   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3061   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3062   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3063                            DAG.getTargetConstant(
3064                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3065 }
3066 
3067 void SelectionDAGBuilder::visitFPExt(const User &I) {
3068   // FPExt is never a no-op cast, no need to check
3069   SDValue N = getValue(I.getOperand(0));
3070   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3071                                                         I.getType());
3072   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3073 }
3074 
3075 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3076   // FPToUI is never a no-op cast, no need to check
3077   SDValue N = getValue(I.getOperand(0));
3078   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3079                                                         I.getType());
3080   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3081 }
3082 
3083 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3084   // FPToSI is never a no-op cast, no need to check
3085   SDValue N = getValue(I.getOperand(0));
3086   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3087                                                         I.getType());
3088   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3089 }
3090 
3091 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3092   // UIToFP is never a no-op cast, no need to check
3093   SDValue N = getValue(I.getOperand(0));
3094   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3095                                                         I.getType());
3096   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3097 }
3098 
3099 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3100   // SIToFP is never a no-op cast, no need to check
3101   SDValue N = getValue(I.getOperand(0));
3102   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3103                                                         I.getType());
3104   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3105 }
3106 
3107 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3108   // What to do depends on the size of the integer and the size of the pointer.
3109   // We can either truncate, zero extend, or no-op, accordingly.
3110   SDValue N = getValue(I.getOperand(0));
3111   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3112                                                         I.getType());
3113   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3114 }
3115 
3116 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3117   // What to do depends on the size of the integer and the size of the pointer.
3118   // We can either truncate, zero extend, or no-op, accordingly.
3119   SDValue N = getValue(I.getOperand(0));
3120   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3121                                                         I.getType());
3122   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3123 }
3124 
3125 void SelectionDAGBuilder::visitBitCast(const User &I) {
3126   SDValue N = getValue(I.getOperand(0));
3127   SDLoc dl = getCurSDLoc();
3128   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3129                                                         I.getType());
3130 
3131   // BitCast assures us that source and destination are the same size so this is
3132   // either a BITCAST or a no-op.
3133   if (DestVT != N.getValueType())
3134     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3135                              DestVT, N)); // convert types.
3136   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3137   // might fold any kind of constant expression to an integer constant and that
3138   // is not what we are looking for. Only recognize a bitcast of a genuine
3139   // constant integer as an opaque constant.
3140   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3141     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3142                                  /*isOpaque*/true));
3143   else
3144     setValue(&I, N);            // noop cast.
3145 }
3146 
3147 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3148   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3149   const Value *SV = I.getOperand(0);
3150   SDValue N = getValue(SV);
3151   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3152 
3153   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3154   unsigned DestAS = I.getType()->getPointerAddressSpace();
3155 
3156   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3157     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3158 
3159   setValue(&I, N);
3160 }
3161 
3162 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3163   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3164   SDValue InVec = getValue(I.getOperand(0));
3165   SDValue InVal = getValue(I.getOperand(1));
3166   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3167                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3168   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3169                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3170                            InVec, InVal, InIdx));
3171 }
3172 
3173 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3174   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3175   SDValue InVec = getValue(I.getOperand(0));
3176   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3177                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3178   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3179                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3180                            InVec, InIdx));
3181 }
3182 
3183 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3184   SDValue Src1 = getValue(I.getOperand(0));
3185   SDValue Src2 = getValue(I.getOperand(1));
3186   SDLoc DL = getCurSDLoc();
3187 
3188   SmallVector<int, 8> Mask;
3189   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3190   unsigned MaskNumElts = Mask.size();
3191 
3192   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3193   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3194   EVT SrcVT = Src1.getValueType();
3195   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3196 
3197   if (SrcNumElts == MaskNumElts) {
3198     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3199     return;
3200   }
3201 
3202   // Normalize the shuffle vector since mask and vector length don't match.
3203   if (SrcNumElts < MaskNumElts) {
3204     // The mask is longer than the source vectors. We can concatenate vectors to
3205     // make the mask and vector lengths match.
3206 
3207     if (MaskNumElts % SrcNumElts == 0) {
3208       // Mask length is a multiple of the source vector length.
3209       // Check if the shuffle is some kind of concatenation of the input
3210       // vectors.
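           // E.g. (illustrative)
           //   shufflevector <2 x i32> %a, <2 x i32> %b,
           //                 <4 x i32> <i32 0, i32 1, i32 2, i32 3>
           // is simply the concatenation of %a and %b.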
3211       unsigned NumConcat = MaskNumElts / SrcNumElts;
3212       bool IsConcat = true;
3213       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3214       for (unsigned i = 0; i != MaskNumElts; ++i) {
3215         int Idx = Mask[i];
3216         if (Idx < 0)
3217           continue;
3218         // Ensure the indices in each SrcVT sized piece are sequential and that
3219         // the same source is used for the whole piece.
3220         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3221             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3222              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3223           IsConcat = false;
3224           break;
3225         }
3226         // Remember which source this index came from.
3227         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3228       }
3229 
3230       // The shuffle is concatenating multiple vectors together. Just emit
3231       // a CONCAT_VECTORS operation.
3232       if (IsConcat) {
3233         SmallVector<SDValue, 8> ConcatOps;
3234         for (auto Src : ConcatSrcs) {
3235           if (Src < 0)
3236             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3237           else if (Src == 0)
3238             ConcatOps.push_back(Src1);
3239           else
3240             ConcatOps.push_back(Src2);
3241         }
3242         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3243         return;
3244       }
3245     }
3246 
3247     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3248     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3249     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3250                                     PaddedMaskNumElts);
3251 
3252     // Pad both vectors with undefs to make them the same length as the mask.
3253     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3254 
3255     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3256     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3257     MOps1[0] = Src1;
3258     MOps2[0] = Src2;
3259 
3260     Src1 = Src1.isUndef()
3261                ? DAG.getUNDEF(PaddedVT)
3262                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3263     Src2 = Src2.isUndef()
3264                ? DAG.getUNDEF(PaddedVT)
3265                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3266 
3267     // Readjust mask for new input vector length.
3268     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3269     for (unsigned i = 0; i != MaskNumElts; ++i) {
3270       int Idx = Mask[i];
3271       if (Idx >= (int)SrcNumElts)
3272         Idx -= SrcNumElts - PaddedMaskNumElts;
3273       MappedOps[i] = Idx;
3274     }
3275 
3276     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3277 
3278     // If the concatenated vector was padded, extract a subvector with the
3279     // correct number of elements.
3280     if (MaskNumElts != PaddedMaskNumElts)
3281       Result = DAG.getNode(
3282           ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3283           DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3284 
3285     setValue(&I, Result);
3286     return;
3287   }
3288 
3289   if (SrcNumElts > MaskNumElts) {
3290     // Analyze the access pattern of the vector to see if we can extract
3291     // two subvectors and do the shuffle.
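         // E.g. a <4 x i32> mask that only selects elements 4..7 of an
         // <8 x i32> source can be lowered as an EXTRACT_SUBVECTOR at index 4
         // plus a 4-element shuffle (illustrative).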
3292     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3293     bool CanExtract = true;
3294     for (int Idx : Mask) {
3295       unsigned Input = 0;
3296       if (Idx < 0)
3297         continue;
3298 
3299       if (Idx >= (int)SrcNumElts) {
3300         Input = 1;
3301         Idx -= SrcNumElts;
3302       }
3303 
3304       // If all the indices come from the same MaskNumElts sized portion of
3305       // the sources we can use extract. Also make sure the extract wouldn't
3306       // extract past the end of the source.
3307       int NewStartIdx = alignDown(Idx, MaskNumElts);
3308       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3309           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3310         CanExtract = false;
3311       // Make sure we always update StartIdx as we use it to track if all
3312       // elements are undef.
3313       StartIdx[Input] = NewStartIdx;
3314     }
3315 
3316     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3317       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3318       return;
3319     }
3320     if (CanExtract) {
3321       // Extract appropriate subvector and generate a vector shuffle
3322       for (unsigned Input = 0; Input < 2; ++Input) {
3323         SDValue &Src = Input == 0 ? Src1 : Src2;
3324         if (StartIdx[Input] < 0)
3325           Src = DAG.getUNDEF(VT);
3326         else {
3327           Src = DAG.getNode(
3328               ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3329               DAG.getConstant(StartIdx[Input], DL,
3330                               TLI.getVectorIdxTy(DAG.getDataLayout())));
3331         }
3332       }
3333 
3334       // Calculate new mask.
3335       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3336       for (int &Idx : MappedOps) {
3337         if (Idx >= (int)SrcNumElts)
3338           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3339         else if (Idx >= 0)
3340           Idx -= StartIdx[0];
3341       }
3342 
3343       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3344       return;
3345     }
3346   }
3347 
3348   // We can't use either concat vectors or extract subvectors, so fall back
3349   // to replacing the shuffle with extract and build vector.
3351   EVT EltVT = VT.getVectorElementType();
3352   EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3353   SmallVector<SDValue,8> Ops;
3354   for (int Idx : Mask) {
3355     SDValue Res;
3356 
3357     if (Idx < 0) {
3358       Res = DAG.getUNDEF(EltVT);
3359     } else {
3360       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3361       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3362 
3363       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3364                         EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3365     }
3366 
3367     Ops.push_back(Res);
3368   }
3369 
3370   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3371 }
3372 
3373 void SelectionDAGBuilder::visitInsertValue(const User &I) {
3374   ArrayRef<unsigned> Indices;
3375   if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3376     Indices = IV->getIndices();
3377   else
3378     Indices = cast<ConstantExpr>(&I)->getIndices();
3379 
3380   const Value *Op0 = I.getOperand(0);
3381   const Value *Op1 = I.getOperand(1);
3382   Type *AggTy = I.getType();
3383   Type *ValTy = Op1->getType();
3384   bool IntoUndef = isa<UndefValue>(Op0);
3385   bool FromUndef = isa<UndefValue>(Op1);
3386 
3387   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3388 
3389   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3390   SmallVector<EVT, 4> AggValueVTs;
3391   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3392   SmallVector<EVT, 4> ValValueVTs;
3393   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3394 
3395   unsigned NumAggValues = AggValueVTs.size();
3396   unsigned NumValValues = ValValueVTs.size();
3397   SmallVector<SDValue, 4> Values(NumAggValues);
3398 
3399   // Ignore an insertvalue that produces an empty object
3400   if (!NumAggValues) {
3401     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3402     return;
3403   }
3404 
3405   SDValue Agg = getValue(Op0);
3406   unsigned i = 0;
3407   // Copy the beginning value(s) from the original aggregate.
3408   for (; i != LinearIndex; ++i)
3409     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3410                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3411   // Copy values from the inserted value(s).
3412   if (NumValValues) {
3413     SDValue Val = getValue(Op1);
3414     for (; i != LinearIndex + NumValValues; ++i)
3415       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3416                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3417   }
3418   // Copy remaining value(s) from the original aggregate.
3419   for (; i != NumAggValues; ++i)
3420     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3421                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3422 
3423   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3424                            DAG.getVTList(AggValueVTs), Values));
3425 }
3426 
3427 void SelectionDAGBuilder::visitExtractValue(const User &I) {
3428   ArrayRef<unsigned> Indices;
3429   if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3430     Indices = EV->getIndices();
3431   else
3432     Indices = cast<ConstantExpr>(&I)->getIndices();
3433 
3434   const Value *Op0 = I.getOperand(0);
3435   Type *AggTy = Op0->getType();
3436   Type *ValTy = I.getType();
3437   bool OutOfUndef = isa<UndefValue>(Op0);
3438 
3439   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3440 
3441   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3442   SmallVector<EVT, 4> ValValueVTs;
3443   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3444 
3445   unsigned NumValValues = ValValueVTs.size();
3446 
3447   // Ignore an extractvalue that produces an empty object
3448   if (!NumValValues) {
3449     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3450     return;
3451   }
3452 
3453   SmallVector<SDValue, 4> Values(NumValValues);
3454 
3455   SDValue Agg = getValue(Op0);
3456   // Copy out the selected value(s).
3457   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3458     Values[i - LinearIndex] =
3459       OutOfUndef ?
3460         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3461         SDValue(Agg.getNode(), Agg.getResNo() + i);
3462 
3463   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3464                            DAG.getVTList(ValValueVTs), Values));
3465 }
3466 
3467 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3468   Value *Op0 = I.getOperand(0);
3469   // Note that the pointer operand may be a vector of pointers. Take the scalar
3470   // element which holds a pointer.
3471   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3472   SDValue N = getValue(Op0);
3473   SDLoc dl = getCurSDLoc();
3474 
3475   // Normalize a vector GEP - all scalar operands should be converted to
3476   // splat vectors.
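       // E.g. (illustrative) for
       //   %gep = getelementptr i32, i32* %p, <8 x i32> %ind
       // the scalar base %p is splatted to <8 x i32*> so that the address
       // arithmetic below stays uniformly vector-typed.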
3477   unsigned VectorWidth = I.getType()->isVectorTy() ?
3478     cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3479 
3480   if (VectorWidth && !N.getValueType().isVector()) {
3481     LLVMContext &Context = *DAG.getContext();
3482     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3483     N = DAG.getSplatBuildVector(VT, dl, N);
3484   }
3485 
3486   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3487        GTI != E; ++GTI) {
3488     const Value *Idx = GTI.getOperand();
3489     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3490       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3491       if (Field) {
3492         // N = N + Offset
3493         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3494 
3495         // In an inbounds GEP with an offset that is nonnegative even when
3496         // interpreted as signed, assume there is no unsigned overflow.
3497         SDNodeFlags Flags;
3498         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3499           Flags.setNoUnsignedWrap(true);
3500 
3501         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3502                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3503       }
3504     } else {
3505       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3506       MVT IdxTy = MVT::getIntegerVT(IdxSize);
3507       APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3508 
3509       // If this is a scalar constant or a splat vector of constants,
3510       // handle it quickly.
3511       const auto *CI = dyn_cast<ConstantInt>(Idx);
3512       if (!CI && isa<ConstantDataVector>(Idx) &&
3513           cast<ConstantDataVector>(Idx)->getSplatValue())
3514         CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3515 
3516       if (CI) {
3517         if (CI->isZero())
3518           continue;
3519         APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
3520         LLVMContext &Context = *DAG.getContext();
3521         SDValue OffsVal = VectorWidth ?
3522           DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) :
3523           DAG.getConstant(Offs, dl, IdxTy);
3524 
3525         // In an inbounds GEP with an offset that is nonnegative even when
3526         // interpreted as signed, assume there is no unsigned overflow.
3527         SDNodeFlags Flags;
3528         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3529           Flags.setNoUnsignedWrap(true);
3530 
3531         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3532         continue;
3533       }
3534 
3535       // N = N + Idx * ElementSize;
3536       SDValue IdxN = getValue(Idx);
3537 
3538       if (!IdxN.getValueType().isVector() && VectorWidth) {
3539         EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), VectorWidth);
3540         IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3541       }
3542 
3543       // If the index is smaller or larger than intptr_t, truncate or extend
3544       // it.
3545       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3546 
3547       // If this is a multiply by a power of two, turn it into a shl
3548       // immediately.  This is a very common case.
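           // E.g. indexing an array of 16-byte elements becomes
           //   N = N + (IdxN << 4)
           // rather than a multiply (illustrative).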
3549       if (ElementSize != 1) {
3550         if (ElementSize.isPowerOf2()) {
3551           unsigned Amt = ElementSize.logBase2();
3552           IdxN = DAG.getNode(ISD::SHL, dl,
3553                              N.getValueType(), IdxN,
3554                              DAG.getConstant(Amt, dl, IdxN.getValueType()));
3555         } else {
3556           SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3557           IdxN = DAG.getNode(ISD::MUL, dl,
3558                              N.getValueType(), IdxN, Scale);
3559         }
3560       }
3561 
3562       N = DAG.getNode(ISD::ADD, dl,
3563                       N.getValueType(), N, IdxN);
3564     }
3565   }
3566 
3567   setValue(&I, N);
3568 }
3569 
3570 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3571   // If this is a fixed sized alloca in the entry block of the function,
3572   // allocate it statically on the stack.
3573   if (FuncInfo.StaticAllocaMap.count(&I))
3574     return;   // getValue will auto-populate this.
3575 
3576   SDLoc dl = getCurSDLoc();
3577   Type *Ty = I.getAllocatedType();
3578   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3579   auto &DL = DAG.getDataLayout();
3580   uint64_t TySize = DL.getTypeAllocSize(Ty);
3581   unsigned Align =
3582       std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3583 
3584   SDValue AllocSize = getValue(I.getArraySize());
3585 
3586   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3587   if (AllocSize.getValueType() != IntPtr)
3588     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3589 
3590   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3591                           AllocSize,
3592                           DAG.getConstant(TySize, dl, IntPtr));
3593 
3594   // Handle alignment.  If the requested alignment is less than or equal to
3595   // the stack alignment, ignore it.  If it is greater than the stack
3596   // alignment, we note this in the DYNAMIC_STACKALLOC node.
3597   unsigned StackAlign =
3598       DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3599   if (Align <= StackAlign)
3600     Align = 0;
3601 
3602   // Round the size of the allocation up to the stack alignment size
3603   // by adding SA-1 to the size. This doesn't overflow because we're computing
3604   // an address inside an alloca.
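       // E.g. with a 16-byte stack alignment this computes
       //   AllocSize = (AllocSize + 15) & ~15
       // (illustrative).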
3605   SDNodeFlags Flags;
3606   Flags.setNoUnsignedWrap(true);
3607   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3608                           DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
3609 
3610   // Mask out the low bits for alignment purposes.
3611   AllocSize =
3612       DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3613                   DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
3614 
3615   SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
3616   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3617   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3618   setValue(&I, DSA);
3619   DAG.setRoot(DSA.getValue(1));
3620 
3621   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3622 }
3623 
3624 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3625   if (I.isAtomic())
3626     return visitAtomicLoad(I);
3627 
3628   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3629   const Value *SV = I.getOperand(0);
3630   if (TLI.supportSwiftError()) {
3631     // Swifterror values can come from either a function parameter with
3632     // swifterror attribute or an alloca with swifterror attribute.
3633     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3634       if (Arg->hasSwiftErrorAttr())
3635         return visitLoadFromSwiftError(I);
3636     }
3637 
3638     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3639       if (Alloca->isSwiftError())
3640         return visitLoadFromSwiftError(I);
3641     }
3642   }
3643 
3644   SDValue Ptr = getValue(SV);
3645 
3646   Type *Ty = I.getType();
3647 
3648   bool isVolatile = I.isVolatile();
3649   bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3650   bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3651   bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3652   unsigned Alignment = I.getAlignment();
3653 
3654   AAMDNodes AAInfo;
3655   I.getAAMetadata(AAInfo);
3656   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3657 
3658   SmallVector<EVT, 4> ValueVTs;
3659   SmallVector<uint64_t, 4> Offsets;
3660   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3661   unsigned NumValues = ValueVTs.size();
3662   if (NumValues == 0)
3663     return;
3664 
3665   SDValue Root;
3666   bool ConstantMemory = false;
3667   if (isVolatile || NumValues > MaxParallelChains)
3668     // Serialize volatile loads with other side effects.
3669     Root = getRoot();
3670   else if (AA && AA->pointsToConstantMemory(MemoryLocation(
3671                SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
3672     // Do not serialize (non-volatile) loads of constant memory with anything.
3673     Root = DAG.getEntryNode();
3674     ConstantMemory = true;
3675   } else {
3676     // Do not serialize non-volatile loads against each other.
3677     Root = DAG.getRoot();
3678   }
3679 
3680   SDLoc dl = getCurSDLoc();
3681 
3682   if (isVolatile)
3683     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3684 
3685   // An aggregate load cannot wrap around the address space, so offsets to its
3686   // parts don't wrap either.
3687   SDNodeFlags Flags;
3688   Flags.setNoUnsignedWrap(true);
3689 
3690   SmallVector<SDValue, 4> Values(NumValues);
3691   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3692   EVT PtrVT = Ptr.getValueType();
3693   unsigned ChainI = 0;
3694   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3695     // Serializing loads here may result in excessive register pressure, and
3696     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3697     // could recover a bit by hoisting nodes upward in the chain by recognizing
3698     // they are side-effect free or do not alias. The optimizer should really
3699     // avoid this case by converting large object/array copies to llvm.memcpy
3700     // (MaxParallelChains should always remain as a failsafe).
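         // E.g. a load of a first-class aggregate with many scalar fields
         // would otherwise create one parallel chain per field (illustrative).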
3701     if (ChainI == MaxParallelChains) {
3702       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3703       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3704                                   makeArrayRef(Chains.data(), ChainI));
3705       Root = Chain;
3706       ChainI = 0;
3707     }
3708     SDValue A = DAG.getNode(ISD::ADD, dl,
3709                             PtrVT, Ptr,
3710                             DAG.getConstant(Offsets[i], dl, PtrVT),
3711                             Flags);
3712     auto MMOFlags = MachineMemOperand::MONone;
3713     if (isVolatile)
3714       MMOFlags |= MachineMemOperand::MOVolatile;
3715     if (isNonTemporal)
3716       MMOFlags |= MachineMemOperand::MONonTemporal;
3717     if (isInvariant)
3718       MMOFlags |= MachineMemOperand::MOInvariant;
3719     if (isDereferenceable)
3720       MMOFlags |= MachineMemOperand::MODereferenceable;
3721     MMOFlags |= TLI.getMMOFlags(I);
3722 
3723     SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3724                             MachinePointerInfo(SV, Offsets[i]), Alignment,
3725                             MMOFlags, AAInfo, Ranges);
3726 
3727     Values[i] = L;
3728     Chains[ChainI] = L.getValue(1);
3729   }
3730 
3731   if (!ConstantMemory) {
3732     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3733                                 makeArrayRef(Chains.data(), ChainI));
3734     if (isVolatile)
3735       DAG.setRoot(Chain);
3736     else
3737       PendingLoads.push_back(Chain);
3738   }
3739 
3740   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3741                            DAG.getVTList(ValueVTs), Values));
3742 }
3743 
3744 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
3745   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3746          "call visitStoreToSwiftError when backend supports swifterror");
3747 
3748   SmallVector<EVT, 4> ValueVTs;
3749   SmallVector<uint64_t, 4> Offsets;
3750   const Value *SrcV = I.getOperand(0);
3751   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3752                   SrcV->getType(), ValueVTs, &Offsets);
3753   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3754          "expect a single EVT for swifterror");
3755 
3756   SDValue Src = getValue(SrcV);
3757   // Create a virtual register, then update the virtual register.
3758   unsigned VReg; bool CreatedVReg;
3759   std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
3760   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
3761   // Chain can be getRoot or getControlRoot.
3762   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
3763                                       SDValue(Src.getNode(), Src.getResNo()));
3764   DAG.setRoot(CopyNode);
3765   if (CreatedVReg)
3766     FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
3767 }
3768 
3769 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
3770   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3771          "call visitLoadFromSwiftError when backend supports swifterror");
3772 
3773   assert(!I.isVolatile() &&
3774          I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
3775          I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
3776          "load_from_swift_error does not support volatile, non-temporal, or invariant loads");
3777 
3778   const Value *SV = I.getOperand(0);
3779   Type *Ty = I.getType();
3780   AAMDNodes AAInfo;
3781   I.getAAMetadata(AAInfo);
3782   assert((!AA || !AA->pointsToConstantMemory(MemoryLocation(
3783              SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) &&
3784          "load_from_swift_error should not be constant memory");
3785 
3786   SmallVector<EVT, 4> ValueVTs;
3787   SmallVector<uint64_t, 4> Offsets;
3788   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
3789                   ValueVTs, &Offsets);
3790   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3791          "expect a single EVT for swifterror");
3792 
3793   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
3794   SDValue L = DAG.getCopyFromReg(
3795       getRoot(), getCurSDLoc(),
3796       FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
3797       ValueVTs[0]);
3798 
3799   setValue(&I, L);
3800 }
3801 
3802 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3803   if (I.isAtomic())
3804     return visitAtomicStore(I);
3805 
3806   const Value *SrcV = I.getOperand(0);
3807   const Value *PtrV = I.getOperand(1);
3808 
3809   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3810   if (TLI.supportSwiftError()) {
3811     // Swifterror values can come from either a function parameter with
3812     // swifterror attribute or an alloca with swifterror attribute.
3813     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
3814       if (Arg->hasSwiftErrorAttr())
3815         return visitStoreToSwiftError(I);
3816     }
3817 
3818     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3819       if (Alloca->isSwiftError())
3820         return visitStoreToSwiftError(I);
3821     }
3822   }
3823 
3824   SmallVector<EVT, 4> ValueVTs;
3825   SmallVector<uint64_t, 4> Offsets;
3826   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3827                   SrcV->getType(), ValueVTs, &Offsets);
3828   unsigned NumValues = ValueVTs.size();
3829   if (NumValues == 0)
3830     return;
3831 
3832   // Get the lowered operands. Note that we do this after
3833   // checking if NumValues is zero, because with zero values
3834   // the operands won't have values in the map.
3835   SDValue Src = getValue(SrcV);
3836   SDValue Ptr = getValue(PtrV);
3837 
3838   SDValue Root = getRoot();
3839   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3840   SDLoc dl = getCurSDLoc();
3841   EVT PtrVT = Ptr.getValueType();
3842   unsigned Alignment = I.getAlignment();
3843   AAMDNodes AAInfo;
3844   I.getAAMetadata(AAInfo);
3845 
3846   auto MMOFlags = MachineMemOperand::MONone;
3847   if (I.isVolatile())
3848     MMOFlags |= MachineMemOperand::MOVolatile;
3849   if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
3850     MMOFlags |= MachineMemOperand::MONonTemporal;
3851   MMOFlags |= TLI.getMMOFlags(I);
3852 
3853   // An aggregate store cannot wrap around the address space, so offsets to its
3854   // parts don't wrap either.
3855   SDNodeFlags Flags;
3856   Flags.setNoUnsignedWrap(true);
3857 
3858   unsigned ChainI = 0;
3859   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3860     // See visitLoad comments.
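    // Once MaxParallelChains stores are outstanding, fold them into a
    // TokenFactor and continue chaining from it, so that no single
    // TokenFactor grows unboundedly wide.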
3861     if (ChainI == MaxParallelChains) {
3862       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3863                                   makeArrayRef(Chains.data(), ChainI));
3864       Root = Chain;
3865       ChainI = 0;
3866     }
3867     SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3868                               DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
3869     SDValue St = DAG.getStore(
3870         Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
3871         MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
3872     Chains[ChainI] = St;
3873   }
3874 
3875   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3876                                   makeArrayRef(Chains.data(), ChainI));
3877   DAG.setRoot(StoreNode);
3878 }
3879 
3880 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
3881                                            bool IsCompressing) {
3882   SDLoc sdl = getCurSDLoc();
3883 
3884   auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3885                            unsigned& Alignment) {
3886     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3887     Src0 = I.getArgOperand(0);
3888     Ptr = I.getArgOperand(1);
3889     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3890     Mask = I.getArgOperand(3);
3891   };
3892   auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3893                            unsigned& Alignment) {
3894     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
3895     Src0 = I.getArgOperand(0);
3896     Ptr = I.getArgOperand(1);
3897     Mask = I.getArgOperand(2);
3898     Alignment = 0;
3899   };
3900 
3901   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3902   unsigned Alignment;
3903   if (IsCompressing)
3904     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3905   else
3906     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3907 
3908   SDValue Ptr = getValue(PtrOperand);
3909   SDValue Src0 = getValue(Src0Operand);
3910   SDValue Mask = getValue(MaskOperand);
3911 
3912   EVT VT = Src0.getValueType();
3913   if (!Alignment)
3914     Alignment = DAG.getEVTAlignment(VT);
3915 
3916   AAMDNodes AAInfo;
3917   I.getAAMetadata(AAInfo);
3918 
3919   MachineMemOperand *MMO =
3920     DAG.getMachineFunction().
3921     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3922                           MachineMemOperand::MOStore,  VT.getStoreSize(),
3923                           Alignment, AAInfo);
3924   SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3925                                          MMO, false /* Truncating */,
3926                                          IsCompressing);
3927   DAG.setRoot(StoreNode);
3928   setValue(&I, StoreNode);
3929 }
3930 
3931 // Get a uniform base for the Gather/Scatter intrinsic.
3932 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
3933 // We try to represent it as a base pointer + vector of indices.
3934 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
3935 // The first operand of the GEP may be a single pointer or a vector of pointers.
3936 // Example:
3937 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
3938 //  or
3939 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
3940 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
3941 //
3942 // When the first GEP operand is a single pointer, it is the uniform base we
3943 // are looking for. If the first operand of the GEP is a splat vector, we
3944 // extract the splat value and use it as the uniform base.
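// For the splat case, the pointer operand might look like this illustrative IR:
//   %ins   = insertelement <8 x i32*> undef, i32* %ptr, i32 0
//   %splat = shufflevector <8 x i32*> %ins, <8 x i32*> undef, <8 x i32> zeroinitializer
// from which getSplatValue recovers the scalar %ptr.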
3945 // In all other cases the function returns 'false'.
3946 static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
3947                            SDValue &Scale, SelectionDAGBuilder* SDB) {
3948   SelectionDAG& DAG = SDB->DAG;
3949   LLVMContext &Context = *DAG.getContext();
3950 
3951   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
3952   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
3953   if (!GEP)
3954     return false;
3955 
3956   const Value *GEPPtr = GEP->getPointerOperand();
3957   if (!GEPPtr->getType()->isVectorTy())
3958     Ptr = GEPPtr;
3959   else if (!(Ptr = getSplatValue(GEPPtr)))
3960     return false;
3961 
3962   unsigned FinalIndex = GEP->getNumOperands() - 1;
3963   Value *IndexVal = GEP->getOperand(FinalIndex);
3964 
3965   // Ensure all the other indices are 0.
3966   for (unsigned i = 1; i < FinalIndex; ++i) {
3967     auto *C = dyn_cast<ConstantInt>(GEP->getOperand(i));
3968     if (!C || !C->isZero())
3969       return false;
3970   }
3971 
3972   // The operands of the GEP may be defined in another basic block.
3973   // In this case we'll not find nodes for the operands.
3974   if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
3975     return false;
3976 
3977   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3978   const DataLayout &DL = DAG.getDataLayout();
3979   Scale = DAG.getTargetConstant(DL.getTypeAllocSize(GEP->getResultElementType()),
3980                                 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
3981   Base = SDB->getValue(Ptr);
3982   Index = SDB->getValue(IndexVal);
3983 
3984   if (!Index.getValueType().isVector()) {
3985     unsigned GEPWidth = GEP->getType()->getVectorNumElements();
3986     EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
3987     Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
3988   }
3989   return true;
3990 }
3991 
3992 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
3993   SDLoc sdl = getCurSDLoc();
3994 
3995   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
3996   const Value *Ptr = I.getArgOperand(1);
3997   SDValue Src0 = getValue(I.getArgOperand(0));
3998   SDValue Mask = getValue(I.getArgOperand(3));
3999   EVT VT = Src0.getValueType();
4000   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
4001   if (!Alignment)
4002     Alignment = DAG.getEVTAlignment(VT);
4003   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4004 
4005   AAMDNodes AAInfo;
4006   I.getAAMetadata(AAInfo);
4007 
4008   SDValue Base;
4009   SDValue Index;
4010   SDValue Scale;
4011   const Value *BasePtr = Ptr;
4012   bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4013 
4014   const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
4015   MachineMemOperand *MMO = DAG.getMachineFunction().
4016     getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
4017                          MachineMemOperand::MOStore,  VT.getStoreSize(),
4018                          Alignment, AAInfo);
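  // Without a uniform base, scatter through the raw vector of pointers:
  // base 0, the pointer vector itself as the index, and scale 1.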
4019   if (!UniformBase) {
4020     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4021     Index = getValue(Ptr);
4022     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4023   }
4024   SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index, Scale };
4025   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4026                                          Ops, MMO);
4027   DAG.setRoot(Scatter);
4028   setValue(&I, Scatter);
4029 }
4030 
4031 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4032   SDLoc sdl = getCurSDLoc();
4033 
4034   auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4035                            unsigned& Alignment) {
4036     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4037     Ptr = I.getArgOperand(0);
4038     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
4039     Mask = I.getArgOperand(2);
4040     Src0 = I.getArgOperand(3);
4041   };
4042   auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4043                            unsigned& Alignment) {
4044     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4045     Ptr = I.getArgOperand(0);
4046     Alignment = 0;
4047     Mask = I.getArgOperand(1);
4048     Src0 = I.getArgOperand(2);
4049   };
4050 
4051   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4052   unsigned Alignment;
4053   if (IsExpanding)
4054     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4055   else
4056     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4057 
4058   SDValue Ptr = getValue(PtrOperand);
4059   SDValue Src0 = getValue(Src0Operand);
4060   SDValue Mask = getValue(MaskOperand);
4061 
4062   EVT VT = Src0.getValueType();
4063   if (!Alignment)
4064     Alignment = DAG.getEVTAlignment(VT);
4065 
4066   AAMDNodes AAInfo;
4067   I.getAAMetadata(AAInfo);
4068   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4069 
4070   // Do not serialize masked loads of constant memory with anything.
4071   bool AddToChain = !AA || !AA->pointsToConstantMemory(MemoryLocation(
4072       PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
4073   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4074 
4075   MachineMemOperand *MMO =
4076     DAG.getMachineFunction().
4077     getMachineMemOperand(MachinePointerInfo(PtrOperand),
4078                           MachineMemOperand::MOLoad,  VT.getStoreSize(),
4079                           Alignment, AAInfo, Ranges);
4080 
4081   SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
4082                                    ISD::NON_EXTLOAD, IsExpanding);
4083   if (AddToChain)
4084     PendingLoads.push_back(Load.getValue(1));
4085   setValue(&I, Load);
4086 }
4087 
4088 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4089   SDLoc sdl = getCurSDLoc();
4090 
4091   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4092   const Value *Ptr = I.getArgOperand(0);
4093   SDValue Src0 = getValue(I.getArgOperand(3));
4094   SDValue Mask = getValue(I.getArgOperand(2));
4095 
4096   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4097   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4098   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
4099   if (!Alignment)
4100     Alignment = DAG.getEVTAlignment(VT);
4101 
4102   AAMDNodes AAInfo;
4103   I.getAAMetadata(AAInfo);
4104   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4105 
4106   SDValue Root = DAG.getRoot();
4107   SDValue Base;
4108   SDValue Index;
4109   SDValue Scale;
4110   const Value *BasePtr = Ptr;
4111   bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4112   bool ConstantMemory = false;
4113   if (UniformBase &&
4114       AA && AA->pointsToConstantMemory(MemoryLocation(
4115           BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
4116           AAInfo))) {
4117     // Do not serialize (non-volatile) loads of constant memory with anything.
4118     Root = DAG.getEntryNode();
4119     ConstantMemory = true;
4120   }
4121 
4122   MachineMemOperand *MMO =
4123     DAG.getMachineFunction().
4124     getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
4125                          MachineMemOperand::MOLoad,  VT.getStoreSize(),
4126                          Alignment, AAInfo, Ranges);
4127 
4128   if (!UniformBase) {
4129     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4130     Index = getValue(Ptr);
4131     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4132   }
4133   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4134   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4135                                        Ops, MMO);
4136 
4137   SDValue OutChain = Gather.getValue(1);
4138   if (!ConstantMemory)
4139     PendingLoads.push_back(OutChain);
4140   setValue(&I, Gather);
4141 }
4142 
4143 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4144   SDLoc dl = getCurSDLoc();
4145   AtomicOrdering SuccessOrder = I.getSuccessOrdering();
4146   AtomicOrdering FailureOrder = I.getFailureOrdering();
4147   SyncScope::ID SSID = I.getSyncScopeID();
4148 
4149   SDValue InChain = getRoot();
4150 
4151   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4152   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
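  // ATOMIC_CMP_SWAP_WITH_SUCCESS produces the loaded value, an i1 success
  // flag and the output chain, mirroring cmpxchg's {value, i1} result pair.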
4153   SDValue L = DAG.getAtomicCmpSwap(
4154       ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
4155       getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
4156       getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
4157       /*Alignment=*/ 0, SuccessOrder, FailureOrder, SSID);
4158 
4159   SDValue OutChain = L.getValue(2);
4160 
4161   setValue(&I, L);
4162   DAG.setRoot(OutChain);
4163 }
4164 
4165 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4166   SDLoc dl = getCurSDLoc();
4167   ISD::NodeType NT;
4168   switch (I.getOperation()) {
4169   default: llvm_unreachable("Unknown atomicrmw operation");
4170   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4171   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4172   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4173   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4174   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4175   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4176   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4177   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4178   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4179   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4180   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4181   }
4182   AtomicOrdering Order = I.getOrdering();
4183   SyncScope::ID SSID = I.getSyncScopeID();
4184 
4185   SDValue InChain = getRoot();
4186 
4187   SDValue L =
4188     DAG.getAtomic(NT, dl,
4189                   getValue(I.getValOperand()).getSimpleValueType(),
4190                   InChain,
4191                   getValue(I.getPointerOperand()),
4192                   getValue(I.getValOperand()),
4193                   I.getPointerOperand(),
4194                   /* Alignment=*/ 0, Order, SSID);
4195 
4196   SDValue OutChain = L.getValue(1);
4197 
4198   setValue(&I, L);
4199   DAG.setRoot(OutChain);
4200 }
4201 
4202 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4203   SDLoc dl = getCurSDLoc();
4204   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
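  // ATOMIC_FENCE takes the chain plus the ordering and the synchronization
  // scope encoded as constant operands.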
4205   SDValue Ops[3];
4206   Ops[0] = getRoot();
4207   Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
4208                            TLI.getFenceOperandTy(DAG.getDataLayout()));
4209   Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
4210                            TLI.getFenceOperandTy(DAG.getDataLayout()));
4211   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4212 }
4213 
4214 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4215   SDLoc dl = getCurSDLoc();
4216   AtomicOrdering Order = I.getOrdering();
4217   SyncScope::ID SSID = I.getSyncScopeID();
4218 
4219   SDValue InChain = getRoot();
4220 
4221   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4222   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4223 
4224   if (!TLI.supportsUnalignedAtomics() &&
4225       I.getAlignment() < VT.getStoreSize())
4226     report_fatal_error("Cannot generate unaligned atomic load");
4227 
4228   MachineMemOperand *MMO =
4229       DAG.getMachineFunction().
4230       getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4231                            MachineMemOperand::MOVolatile |
4232                            MachineMemOperand::MOLoad,
4233                            VT.getStoreSize(),
4234                            I.getAlignment() ? I.getAlignment() :
4235                                               DAG.getEVTAlignment(VT),
4236                            AAMDNodes(), nullptr, SSID, Order);
4237 
4238   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4239   SDValue L =
4240       DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
4241                     getValue(I.getPointerOperand()), MMO);
4242 
4243   SDValue OutChain = L.getValue(1);
4244 
4245   setValue(&I, L);
4246   DAG.setRoot(OutChain);
4247 }
4248 
4249 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4250   SDLoc dl = getCurSDLoc();
4251 
4252   AtomicOrdering Order = I.getOrdering();
4253   SyncScope::ID SSID = I.getSyncScopeID();
4254 
4255   SDValue InChain = getRoot();
4256 
4257   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4258   EVT VT =
4259       TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4260 
4261   if (I.getAlignment() < VT.getStoreSize())
4262     report_fatal_error("Cannot generate unaligned atomic store");
4263 
4264   SDValue OutChain =
4265     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
4266                   InChain,
4267                   getValue(I.getPointerOperand()),
4268                   getValue(I.getValueOperand()),
4269                   I.getPointerOperand(), I.getAlignment(),
4270                   Order, SSID);
4271 
4272   DAG.setRoot(OutChain);
4273 }
4274 
4275 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4276 /// node.
4277 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4278                                                unsigned Intrinsic) {
4279   // Ignore the callsite's attributes. A specific call site may be marked with
4280   // readnone, but the lowering code will expect the chain based on the
4281   // definition.
4282   const Function *F = I.getCalledFunction();
4283   bool HasChain = !F->doesNotAccessMemory();
4284   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4285 
4286   // Build the operand list.
4287   SmallVector<SDValue, 8> Ops;
4288   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4289     if (OnlyLoad) {
4290       // We don't need to serialize loads against other loads.
4291       Ops.push_back(DAG.getRoot());
4292     } else {
4293       Ops.push_back(getRoot());
4294     }
4295   }
4296 
4297   // Info is set by getTgtMemIntrinsic.
4298   TargetLowering::IntrinsicInfo Info;
4299   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4300   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4301                                                DAG.getMachineFunction(),
4302                                                Intrinsic);
4303 
4304   // Add the intrinsic ID as an integer operand, unless this is a target memory intrinsic that selected a target-specific opcode.
4305   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4306       Info.opc == ISD::INTRINSIC_W_CHAIN)
4307     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4308                                         TLI.getPointerTy(DAG.getDataLayout())));
4309 
4310   // Add all operands of the call to the operand list.
4311   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4312     SDValue Op = getValue(I.getArgOperand(i));
4313     Ops.push_back(Op);
4314   }
4315 
4316   SmallVector<EVT, 4> ValueVTs;
4317   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4318 
4319   if (HasChain)
4320     ValueVTs.push_back(MVT::Other);
4321 
4322   SDVTList VTs = DAG.getVTList(ValueVTs);
4323 
4324   // Create the node.
4325   SDValue Result;
4326   if (IsTgtIntrinsic) {
4327     // This is a target intrinsic that touches memory.
4328     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs,
4329       Ops, Info.memVT,
4330       MachinePointerInfo(Info.ptrVal, Info.offset), Info.align,
4331       Info.flags, Info.size);
4332   } else if (!HasChain) {
4333     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4334   } else if (!I.getType()->isVoidTy()) {
4335     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4336   } else {
4337     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4338   }
4339 
4340   if (HasChain) {
4341     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4342     if (OnlyLoad)
4343       PendingLoads.push_back(Chain);
4344     else
4345       DAG.setRoot(Chain);
4346   }
4347 
4348   if (!I.getType()->isVoidTy()) {
4349     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4350       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4351       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4352     } else
4353       Result = lowerRangeToAssertZExt(DAG, I, Result);
4354 
4355     setValue(&I, Result);
4356   }
4357 }
4358 
4359 /// GetSignificand - Get the significand and build it into a floating-point
4360 /// number with exponent of 1:
4361 ///
4362 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4363 ///
4364 /// where Op is the i32 bit pattern of the floating-point value; the result is the significand scaled into [1.0, 2.0).
4365 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4366   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4367                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4368   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4369                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4370   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4371 }
4372 
4373 /// GetExponent - Get the exponent:
4374 ///
4375 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4376 ///
4377 /// where Op is the i32 bit pattern of the floating-point value.
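///
/// For example, 10.0f has bit pattern 0x41200000: its exponent field is
/// 0x82 == 130, so the result is (float)(130 - 127) == 3.0f.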
4378 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4379                            const TargetLowering &TLI, const SDLoc &dl) {
4380   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4381                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4382   SDValue t1 = DAG.getNode(
4383       ISD::SRL, dl, MVT::i32, t0,
4384       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4385   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4386                            DAG.getConstant(127, dl, MVT::i32));
4387   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4388 }
4389 
4390 /// getF32Constant - Get 32-bit floating point constant.
4391 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4392                               const SDLoc &dl) {
4393   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4394                            MVT::f32);
4395 }
4396 
4397 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4398                                        SelectionDAG &DAG) {
4399   // TODO: What fast-math-flags should be set on the floating-point nodes?
4400 
4401   //   IntegerPartOfX = (int32_t)t0;
4402   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4403 
4404   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4405   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4406   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4407 
4408   //   IntegerPartOfX <<= 23;
4409   IntegerPartOfX = DAG.getNode(
4410       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4411       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4412                                   DAG.getDataLayout())));
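  // Since the IEEE single-precision exponent field starts at bit 23, adding
  // this shifted value to the bits of TwoToFractionalPartOfX at the bottom of
  // this function scales the result by 2^IntegerPartOfX.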
4413 
4414   SDValue TwoToFractionalPartOfX;
4415   if (LimitFloatPrecision <= 6) {
4416     // For floating-point precision of 6:
4417     //
4418     //   TwoToFractionalPartOfX =
4419     //     0.997535578f +
4420     //       (0.735607626f + 0.252464424f * x) * x;
4421     //
4422     // error 0.0144103317, which is 6 bits
4423     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4424                              getF32Constant(DAG, 0x3e814304, dl));
4425     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4426                              getF32Constant(DAG, 0x3f3c50c8, dl));
4427     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4428     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4429                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4430   } else if (LimitFloatPrecision <= 12) {
4431     // For floating-point precision of 12:
4432     //
4433     //   TwoToFractionalPartOfX =
4434     //     0.999892986f +
4435     //       (0.696457318f +
4436     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4437     //
4438     // error 0.000107046256, which is 13 to 14 bits
4439     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4440                              getF32Constant(DAG, 0x3da235e3, dl));
4441     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4442                              getF32Constant(DAG, 0x3e65b8f3, dl));
4443     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4444     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4445                              getF32Constant(DAG, 0x3f324b07, dl));
4446     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4447     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4448                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4449   } else { // LimitFloatPrecision <= 18
4450     // For floating-point precision of 18:
4451     //
4452     //   TwoToFractionalPartOfX =
4453     //     0.999999982f +
4454     //       (0.693148872f +
4455     //         (0.240227044f +
4456     //           (0.554906021e-1f +
4457     //             (0.961591928e-2f +
4458     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4459     // error 2.47208000*10^(-7), which is better than 18 bits
4460     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4461                              getF32Constant(DAG, 0x3924b03e, dl));
4462     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4463                              getF32Constant(DAG, 0x3ab24b87, dl));
4464     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4465     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4466                              getF32Constant(DAG, 0x3c1d8c17, dl));
4467     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4468     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4469                              getF32Constant(DAG, 0x3d634a1d, dl));
4470     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4471     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4472                              getF32Constant(DAG, 0x3e75fe14, dl));
4473     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4474     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4475                               getF32Constant(DAG, 0x3f317234, dl));
4476     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4477     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4478                                          getF32Constant(DAG, 0x3f800000, dl));
4479   }
4480 
4481   // Add the exponent into the result in integer domain.
4482   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4483   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4484                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4485 }
4486 
4487 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4488 /// limited-precision mode.
4489 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4490                          const TargetLowering &TLI) {
4491   if (Op.getValueType() == MVT::f32 &&
4492       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4493 
4494     // Put the exponent in the right bit position for later addition to the
4495     // final result:
4496     //
4497     //   #define LOG2OFe 1.4426950f
4498     //   t0 = Op * LOG2OFe
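    //
    // so that exp(Op) == 2^t0 and the limited-precision exp2 expansion below
    // can finish the job.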
4499 
4500     // TODO: What fast-math-flags should be set here?
4501     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4502                              getF32Constant(DAG, 0x3fb8aa3b, dl));
4503     return getLimitedPrecisionExp2(t0, dl, DAG);
4504   }
4505 
4506   // No special expansion.
4507   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4508 }
4509 
4510 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4511 /// limited-precision mode.
4512 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4513                          const TargetLowering &TLI) {
4514   // TODO: What fast-math-flags should be set on the floating-point nodes?
4515 
4516   if (Op.getValueType() == MVT::f32 &&
4517       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4518     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4519 
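    // Write Op = 2^Exp * X with X in [1,2): then log(Op) = Exp*log(2) +
    // log(X), and log(X) is approximated by a minimax polynomial below.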
4520     // Scale the exponent by log(2) [0.69314718f].
4521     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4522     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4523                                         getF32Constant(DAG, 0x3f317218, dl));
4524 
4525     // Get the significand and build it into a floating-point number with
4526     // exponent of 1.
4527     SDValue X = GetSignificand(DAG, Op1, dl);
4528 
4529     SDValue LogOfMantissa;
4530     if (LimitFloatPrecision <= 6) {
4531       // For floating-point precision of 6:
4532       //
4533       //   LogOfMantissa =
4534       //     -1.1609546f +
4535       //       (1.4034025f - 0.23903021f * x) * x;
4536       //
4537       // error 0.0034276066, which is better than 8 bits
4538       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4539                                getF32Constant(DAG, 0xbe74c456, dl));
4540       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4541                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4542       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4543       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4544                                   getF32Constant(DAG, 0x3f949a29, dl));
4545     } else if (LimitFloatPrecision <= 12) {
4546       // For floating-point precision of 12:
4547       //
4548       //   LogOfMantissa =
4549       //     -1.7417939f +
4550       //       (2.8212026f +
4551       //         (-1.4699568f +
4552       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4553       //
4554       // error 0.000061011436, which is 14 bits
4555       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4556                                getF32Constant(DAG, 0xbd67b6d6, dl));
4557       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4558                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4559       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4560       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4561                                getF32Constant(DAG, 0x3fbc278b, dl));
4562       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4563       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4564                                getF32Constant(DAG, 0x40348e95, dl));
4565       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4566       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4567                                   getF32Constant(DAG, 0x3fdef31a, dl));
4568     } else { // LimitFloatPrecision <= 18
4569       // For floating-point precision of 18:
4570       //
4571       //   LogOfMantissa =
4572       //     -2.1072184f +
4573       //       (4.2372794f +
4574       //         (-3.7029485f +
4575       //           (2.2781945f +
4576       //             (-0.87823314f +
4577       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4578       //
4579       // error 0.0000023660568, which is better than 18 bits
4580       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4581                                getF32Constant(DAG, 0xbc91e5ac, dl));
4582       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4583                                getF32Constant(DAG, 0x3e4350aa, dl));
4584       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4585       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4586                                getF32Constant(DAG, 0x3f60d3e3, dl));
4587       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4588       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4589                                getF32Constant(DAG, 0x4011cdf0, dl));
4590       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4591       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4592                                getF32Constant(DAG, 0x406cfd1c, dl));
4593       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4594       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4595                                getF32Constant(DAG, 0x408797cb, dl));
4596       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4597       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4598                                   getF32Constant(DAG, 0x4006dcab, dl));
4599     }
4600 
4601     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4602   }
4603 
4604   // No special expansion.
4605   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4606 }
4607 
4608 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4609 /// limited-precision mode.
4610 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4611                           const TargetLowering &TLI) {
4612   // TODO: What fast-math-flags should be set on the floating-point nodes?
4613 
4614   if (Op.getValueType() == MVT::f32 &&
4615       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4616     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4617 
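    // Here Op = 2^Exp * X with X in [1,2), so log2(Op) = Exp + log2(X) and
    // the exponent needs no scaling.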
4618     // Get the exponent.
4619     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4620 
4621     // Get the significand and build it into a floating-point number with
4622     // exponent of 1.
4623     SDValue X = GetSignificand(DAG, Op1, dl);
4624 
4625     // Different possible minimax approximations of significand in
4626     // floating-point for various degrees of accuracy over [1,2].
4627     SDValue Log2ofMantissa;
4628     if (LimitFloatPrecision <= 6) {
4629       // For floating-point precision of 6:
4630       //
4631       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4632       //
4633       // error 0.0049451742, which is more than 7 bits
4634       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4635                                getF32Constant(DAG, 0xbeb08fe0, dl));
4636       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4637                                getF32Constant(DAG, 0x40019463, dl));
4638       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4639       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4640                                    getF32Constant(DAG, 0x3fd6633d, dl));
4641     } else if (LimitFloatPrecision <= 12) {
4642       // For floating-point precision of 12:
4643       //
4644       //   Log2ofMantissa =
4645       //     -2.51285454f +
4646       //       (4.07009056f +
4647       //         (-2.12067489f +
4648       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4649       //
4650       // error 0.0000876136000, which is better than 13 bits
4651       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4652                                getF32Constant(DAG, 0xbda7262e, dl));
4653       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4654                                getF32Constant(DAG, 0x3f25280b, dl));
4655       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4656       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4657                                getF32Constant(DAG, 0x4007b923, dl));
4658       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4659       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4660                                getF32Constant(DAG, 0x40823e2f, dl));
4661       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4662       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4663                                    getF32Constant(DAG, 0x4020d29c, dl));
4664     } else { // LimitFloatPrecision <= 18
4665       // For floating-point precision of 18:
4666       //
4667       //   Log2ofMantissa =
4668       //     -3.0400495f +
4669       //       (6.1129976f +
4670       //         (-5.3420409f +
4671       //           (3.2865683f +
4672       //             (-1.2669343f +
4673       //               (0.27515199f -
4674       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4675       //
4676       // error 0.0000018516, which is better than 18 bits
4677       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4678                                getF32Constant(DAG, 0xbcd2769e, dl));
4679       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4680                                getF32Constant(DAG, 0x3e8ce0b9, dl));
4681       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4682       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4683                                getF32Constant(DAG, 0x3fa22ae7, dl));
4684       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4685       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4686                                getF32Constant(DAG, 0x40525723, dl));
4687       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4688       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4689                                getF32Constant(DAG, 0x40aaf200, dl));
4690       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4691       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4692                                getF32Constant(DAG, 0x40c39dad, dl));
4693       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4694       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4695                                    getF32Constant(DAG, 0x4042902c, dl));
4696     }
4697 
4698     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4699   }
4700 
4701   // No special expansion.
4702   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4703 }
4704 
4705 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4706 /// limited-precision mode.
4707 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4708                            const TargetLowering &TLI) {
4709   // TODO: What fast-math-flags should be set on the floating-point nodes?
4710 
4711   if (Op.getValueType() == MVT::f32 &&
4712       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4713     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4714 
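    // As above, Op = 2^Exp * X with X in [1,2), so log10(Op) =
    // Exp*log10(2) + log10(X).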
4715     // Scale the exponent by log10(2) [0.30102999f].
4716     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4717     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4718                                         getF32Constant(DAG, 0x3e9a209a, dl));
4719 
4720     // Get the significand and build it into a floating-point number with
4721     // exponent of 1.
4722     SDValue X = GetSignificand(DAG, Op1, dl);
4723 
4724     SDValue Log10ofMantissa;
4725     if (LimitFloatPrecision <= 6) {
4726       // For floating-point precision of 6:
4727       //
4728       //   Log10ofMantissa =
4729       //     -0.50419619f +
4730       //       (0.60948995f - 0.10380950f * x) * x;
4731       //
4732       // error 0.0014886165, which is 6 bits
4733       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4734                                getF32Constant(DAG, 0xbdd49a13, dl));
4735       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4736                                getF32Constant(DAG, 0x3f1c0789, dl));
4737       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4738       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4739                                     getF32Constant(DAG, 0x3f011300, dl));
4740     } else if (LimitFloatPrecision <= 12) {
4741       // For floating-point precision of 12:
4742       //
4743       //   Log10ofMantissa =
4744       //     -0.64831180f +
4745       //       (0.91751397f +
4746       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4747       //
4748       // error 0.00019228036, which is better than 12 bits
4749       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4750                                getF32Constant(DAG, 0x3d431f31, dl));
4751       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4752                                getF32Constant(DAG, 0x3ea21fb2, dl));
4753       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4754       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4755                                getF32Constant(DAG, 0x3f6ae232, dl));
4756       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4757       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4758                                     getF32Constant(DAG, 0x3f25f7c3, dl));
4759     } else { // LimitFloatPrecision <= 18
4760       // For floating-point precision of 18:
4761       //
4762       //   Log10ofMantissa =
4763       //     -0.84299375f +
4764       //       (1.5327582f +
4765       //         (-1.0688956f +
4766       //           (0.49102474f +
4767       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4768       //
4769       // error 0.0000037995730, which is better than 18 bits
4770       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4771                                getF32Constant(DAG, 0x3c5d51ce, dl));
4772       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4773                                getF32Constant(DAG, 0x3e00685a, dl));
4774       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4775       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4776                                getF32Constant(DAG, 0x3efb6798, dl));
4777       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4778       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4779                                getF32Constant(DAG, 0x3f88d192, dl));
4780       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4781       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4782                                getF32Constant(DAG, 0x3fc4316c, dl));
4783       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4784       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4785                                     getF32Constant(DAG, 0x3f57ce70, dl));
4786     }
4787 
4788     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4789   }
4790 
4791   // No special expansion.
4792   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4793 }
4794 
4795 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4796 /// limited-precision mode.
4797 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4798                           const TargetLowering &TLI) {
4799   if (Op.getValueType() == MVT::f32 &&
4800       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4801     return getLimitedPrecisionExp2(Op, dl, DAG);
4802 
4803   // No special expansion.
4804   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4805 }
4806 
4807 /// expandPow - Lower a pow intrinsic. Handles the special sequence for
4808 /// limited-precision mode when the base is exactly 10.0f.
4809 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4810                          SelectionDAG &DAG, const TargetLowering &TLI) {
4811   bool IsExp10 = false;
4812   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4813       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4814     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4815       APFloat Ten(10.0f);
4816       IsExp10 = LHSC->isExactlyValue(Ten);
4817     }
4818   }
4819 
4820   // TODO: What fast-math-flags should be set on the FMUL node?
4821   if (IsExp10) {
4822     // Put the exponent in the right bit position for later addition to the
4823     // final result:
4824     //
4825     //   #define LOG2OF10 3.3219281f
4826     //   t0 = Op * LOG2OF10;
4827     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4828                              getF32Constant(DAG, 0x40549a78, dl));
4829     return getLimitedPrecisionExp2(t0, dl, DAG);
4830   }
4831 
4832   // No special expansion.
4833   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4834 }
4835 
4836 /// ExpandPowI - Expand an llvm.powi intrinsic.
4837 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4838                           SelectionDAG &DAG) {
4839   // If RHS is a constant, we can expand this out to a multiplication tree,
4840   // otherwise we end up lowering to a call to __powidf2 (for example).  When
4841   // optimizing for size, we only want to do this if the expansion would produce
4842   // a small number of multiplies, otherwise we do the full expansion.
4843   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4844     // Get the exponent as a positive value.
4845     unsigned Val = RHSC->getSExtValue();
4846     if ((int)Val < 0) Val = -Val;
4847 
4848     // powi(x, 0) -> 1.0
4849     if (Val == 0)
4850       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4851 
4852     const Function &F = DAG.getMachineFunction().getFunction();
4853     if (!F.optForSize() ||
4854         // If optimizing for size, don't insert too many multiplies.
4855         // This inserts up to 5 multiplies.
4856         countPopulation(Val) + Log2_32(Val) < 7) {
4857       // We use the simple binary decomposition method to generate the multiply
4858       // sequence.  There are more optimal ways to do this (for example,
4859       // powi(x,15) generates one more multiply than it should), but this has
4860       // the benefit of being both really simple and much better than a libcall.
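      //
      // For example, Val == 11 (0b1011) runs as:
      //   bit 0 set:   Res = x               (CurSquare becomes x^2)
      //   bit 1 set:   Res = x * x^2 = x^3   (CurSquare becomes x^4)
      //   bit 2 clear:                       (CurSquare becomes x^8)
      //   bit 3 set:   Res = x^3 * x^8 = x^11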
4861       SDValue Res;  // Logically starts equal to 1.0
4862       SDValue CurSquare = LHS;
4863       // TODO: Intrinsics should have fast-math-flags that propagate to these
4864       // nodes.
4865       while (Val) {
4866         if (Val & 1) {
4867           if (Res.getNode())
4868             Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
4869           else
4870             Res = CurSquare;  // 1.0*CurSquare.
4871         }
4872 
4873         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4874                                 CurSquare, CurSquare);
4875         Val >>= 1;
4876       }
4877 
4878       // If the original was negative, invert the result, producing 1/(x*x*x).
4879       if (RHSC->getSExtValue() < 0)
4880         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4881                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
4882       return Res;
4883     }
4884   }
4885 
4886   // Otherwise, expand to a libcall.
4887   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4888 }
4889 
4890 // getUnderlyingArgReg - Find underlying register used for a truncated or
4891 // bitcasted argument.
4892 static unsigned getUnderlyingArgReg(const SDValue &N) {
4893   switch (N.getOpcode()) {
4894   case ISD::CopyFromReg:
4895     return cast<RegisterSDNode>(N.getOperand(1))->getReg();
4896   case ISD::BITCAST:
4897   case ISD::AssertZext:
4898   case ISD::AssertSext:
4899   case ISD::TRUNCATE:
4900     return getUnderlyingArgReg(N.getOperand(0));
4901   default:
4902     return 0;
4903   }
4904 }
4905 
4906 /// If the DbgValueInst is a dbg_value of a function argument, create the
4907 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
4908 /// instruction selection, they will be inserted into the entry BB.
4909 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4910     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
4911     DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
4912   const Argument *Arg = dyn_cast<Argument>(V);
4913   if (!Arg)
4914     return false;
4915 
4916   MachineFunction &MF = DAG.getMachineFunction();
4917   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4918 
4919   bool IsIndirect = false;
4920   Optional<MachineOperand> Op;
4921   // For some arguments, the frame index is recorded during argument lowering.
4922   int FI = FuncInfo.getArgumentFrameIndex(Arg);
4923   if (FI != std::numeric_limits<int>::max())
4924     Op = MachineOperand::CreateFI(FI);
4925 
4926   if (!Op && N.getNode()) {
4927     unsigned Reg = getUnderlyingArgReg(N);
4928     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4929       MachineRegisterInfo &RegInfo = MF.getRegInfo();
4930       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4931       if (PR)
4932         Reg = PR;
4933     }
4934     if (Reg) {
4935       Op = MachineOperand::CreateReg(Reg, false);
4936       IsIndirect = IsDbgDeclare;
4937     }
4938   }
4939 
4940   if (!Op && N.getNode())
4941     // Check if frame index is available.
4942     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4943       if (FrameIndexSDNode *FINode =
4944           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4945         Op = MachineOperand::CreateFI(FINode->getIndex());
4946 
4947   if (!Op) {
4948     // Check if ValueMap has reg number.
4949     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4950     if (VMI != FuncInfo.ValueMap.end()) {
4951       const auto &TLI = DAG.getTargetLoweringInfo();
4952       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
4953                        V->getType(), getABIRegCopyCC(V));
4954       if (RFV.occupiesMultipleRegs()) {
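        // The value lives in several registers, so emit one DBG_VALUE per
        // register, each with a fragment expression naming the bit range it
        // covers (e.g. an i128 split across two 64-bit registers yields
        // fragments [0, 64) and [64, 128)).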
4955         unsigned Offset = 0;
4956         for (auto RegAndSize : RFV.getRegsAndSizes()) {
4957           Op = MachineOperand::CreateReg(RegAndSize.first, false);
4958           auto FragmentExpr = DIExpression::createFragmentExpression(
4959               Expr, Offset, RegAndSize.second);
4960           if (!FragmentExpr)
4961             continue;
4962           FuncInfo.ArgDbgValues.push_back(
4963               BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
4964                       Op->getReg(), Variable, *FragmentExpr));
4965           Offset += RegAndSize.second;
4966         }
4967         return true;
4968       }
4969       Op = MachineOperand::CreateReg(VMI->second, false);
4970       IsIndirect = IsDbgDeclare;
4971     }
4972   }
4973 
4974   if (!Op)
4975     return false;
4976 
4977   assert(Variable->isValidLocationForIntrinsic(DL) &&
4978          "Expected inlined-at fields to agree");
4979   IsIndirect = (Op->isReg()) ? IsIndirect : true;
4980   FuncInfo.ArgDbgValues.push_back(
4981       BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
4982               *Op, Variable, Expr));
4983 
4984   return true;
4985 }
4986 
4987 /// Return the appropriate SDDbgValue based on N.
4988 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
4989                                              DILocalVariable *Variable,
4990                                              DIExpression *Expr,
4991                                              const DebugLoc &dl,
4992                                              unsigned DbgSDNodeOrder) {
4993   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
4994     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
4995     // stack slot locations.
4996     //
4997     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
4998     // debug values here after optimization:
4999     //
5000     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5001     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5002     //
5003     // Both describe the direct values of their associated variables.
5004     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5005                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5006   }
5007   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5008                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5009 }
5010 
5011 // Visual Studio defines setjmp as _setjmp.
5012 #if defined(_MSC_VER) && defined(setjmp) && \
5013                          !defined(setjmp_undefined_for_msvc)
5014 #  pragma push_macro("setjmp")
5015 #  undef setjmp
5016 #  define setjmp_undefined_for_msvc
5017 #endif
5018 
5019 /// Lower the call to the specified intrinsic function. If we want to emit this
5020 /// as a call to a named external function, return the name. Otherwise, lower it
5021 /// and return null.
5022 const char *
5023 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
5024   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5025   SDLoc sdl = getCurSDLoc();
5026   DebugLoc dl = getCurDebugLoc();
5027   SDValue Res;
5028 
5029   switch (Intrinsic) {
5030   default:
5031     // By default, turn this into a target intrinsic node.
5032     visitTargetIntrinsic(I, Intrinsic);
5033     return nullptr;
5034   case Intrinsic::vastart:  visitVAStart(I); return nullptr;
5035   case Intrinsic::vaend:    visitVAEnd(I); return nullptr;
5036   case Intrinsic::vacopy:   visitVACopy(I); return nullptr;
5037   case Intrinsic::returnaddress:
5038     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5039                              TLI.getPointerTy(DAG.getDataLayout()),
5040                              getValue(I.getArgOperand(0))));
5041     return nullptr;
5042   case Intrinsic::addressofreturnaddress:
5043     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5044                              TLI.getPointerTy(DAG.getDataLayout())));
5045     return nullptr;
5046   case Intrinsic::frameaddress:
5047     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5048                              TLI.getPointerTy(DAG.getDataLayout()),
5049                              getValue(I.getArgOperand(0))));
5050     return nullptr;
5051   case Intrinsic::read_register: {
5052     Value *Reg = I.getArgOperand(0);
5053     SDValue Chain = getRoot();
5054     SDValue RegName =
5055         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5056     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5057     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5058       DAG.getVTList(VT, MVT::Other), Chain, RegName);
5059     setValue(&I, Res);
5060     DAG.setRoot(Res.getValue(1));
5061     return nullptr;
5062   }
5063   case Intrinsic::write_register: {
5064     Value *Reg = I.getArgOperand(0);
5065     Value *RegValue = I.getArgOperand(1);
5066     SDValue Chain = getRoot();
5067     SDValue RegName =
5068         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5069     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5070                             RegName, getValue(RegValue)));
5071     return nullptr;
5072   }
5073   case Intrinsic::setjmp:
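    // Indexing into the string literal skips the leading '_' when the target
    // does not use the underscore-prefixed name.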
5074     return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
5075   case Intrinsic::longjmp:
5076     return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
5077   case Intrinsic::memcpy: {
5078     const auto &MCI = cast<MemCpyInst>(I);
5079     SDValue Op1 = getValue(I.getArgOperand(0));
5080     SDValue Op2 = getValue(I.getArgOperand(1));
5081     SDValue Op3 = getValue(I.getArgOperand(2));
5082     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5083     unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1);
5084     unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1);
5085     unsigned Align = MinAlign(DstAlign, SrcAlign);
5086     bool isVol = MCI.isVolatile();
5087     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5088     // FIXME: Support passing different dest/src alignments to the memcpy DAG
5089     // node.
5090     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5091                                false, isTC,
5092                                MachinePointerInfo(I.getArgOperand(0)),
5093                                MachinePointerInfo(I.getArgOperand(1)));
5094     updateDAGForMaybeTailCall(MC);
5095     return nullptr;
5096   }
5097   case Intrinsic::memset: {
5098     const auto &MSI = cast<MemSetInst>(I);
5099     SDValue Op1 = getValue(I.getArgOperand(0));
5100     SDValue Op2 = getValue(I.getArgOperand(1));
5101     SDValue Op3 = getValue(I.getArgOperand(2));
5102     // @llvm.memset defines 0 and 1 to both mean no alignment.
5103     unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
5104     bool isVol = MSI.isVolatile();
5105     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5106     SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5107                                isTC, MachinePointerInfo(I.getArgOperand(0)));
5108     updateDAGForMaybeTailCall(MS);
5109     return nullptr;
5110   }
5111   case Intrinsic::memmove: {
5112     const auto &MMI = cast<MemMoveInst>(I);
5113     SDValue Op1 = getValue(I.getArgOperand(0));
5114     SDValue Op2 = getValue(I.getArgOperand(1));
5115     SDValue Op3 = getValue(I.getArgOperand(2));
5116     // @llvm.memmove defines 0 and 1 to both mean no alignment.
5117     unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1);
5118     unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1);
5119     unsigned Align = MinAlign(DstAlign, SrcAlign);
5120     bool isVol = MMI.isVolatile();
5121     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5122     // FIXME: Support passing different dest/src alignments to the memmove DAG
5123     // node.
5124     SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5125                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
5126                                 MachinePointerInfo(I.getArgOperand(1)));
5127     updateDAGForMaybeTailCall(MM);
5128     return nullptr;
5129   }
5130   case Intrinsic::memcpy_element_unordered_atomic: {
5131     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5132     SDValue Dst = getValue(MI.getRawDest());
5133     SDValue Src = getValue(MI.getRawSource());
5134     SDValue Length = getValue(MI.getLength());
5135 
5136     unsigned DstAlign = MI.getDestAlignment();
5137     unsigned SrcAlign = MI.getSourceAlignment();
5138     Type *LengthTy = MI.getLength()->getType();
5139     unsigned ElemSz = MI.getElementSizeInBytes();
5140     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5141     SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5142                                      SrcAlign, Length, LengthTy, ElemSz, isTC,
5143                                      MachinePointerInfo(MI.getRawDest()),
5144                                      MachinePointerInfo(MI.getRawSource()));
5145     updateDAGForMaybeTailCall(MC);
5146     return nullptr;
5147   }
5148   case Intrinsic::memmove_element_unordered_atomic: {
5149     auto &MI = cast<AtomicMemMoveInst>(I);
5150     SDValue Dst = getValue(MI.getRawDest());
5151     SDValue Src = getValue(MI.getRawSource());
5152     SDValue Length = getValue(MI.getLength());
5153 
5154     unsigned DstAlign = MI.getDestAlignment();
5155     unsigned SrcAlign = MI.getSourceAlignment();
5156     Type *LengthTy = MI.getLength()->getType();
5157     unsigned ElemSz = MI.getElementSizeInBytes();
5158     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5159     SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5160                                       SrcAlign, Length, LengthTy, ElemSz, isTC,
5161                                       MachinePointerInfo(MI.getRawDest()),
5162                                       MachinePointerInfo(MI.getRawSource()));
5163     updateDAGForMaybeTailCall(MC);
5164     return nullptr;
5165   }
5166   case Intrinsic::memset_element_unordered_atomic: {
5167     auto &MI = cast<AtomicMemSetInst>(I);
5168     SDValue Dst = getValue(MI.getRawDest());
5169     SDValue Val = getValue(MI.getValue());
5170     SDValue Length = getValue(MI.getLength());
5171 
5172     unsigned DstAlign = MI.getDestAlignment();
5173     Type *LengthTy = MI.getLength()->getType();
5174     unsigned ElemSz = MI.getElementSizeInBytes();
5175     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5176     SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5177                                      LengthTy, ElemSz, isTC,
5178                                      MachinePointerInfo(MI.getRawDest()));
5179     updateDAGForMaybeTailCall(MC);
5180     return nullptr;
5181   }
5182   case Intrinsic::dbg_addr:
5183   case Intrinsic::dbg_declare: {
5184     const auto &DI = cast<DbgVariableIntrinsic>(I);
5185     DILocalVariable *Variable = DI.getVariable();
5186     DIExpression *Expression = DI.getExpression();
5187     dropDanglingDebugInfo(Variable, Expression);
5188     assert(Variable && "Missing variable");
5189 
5190     // Check whether the address is missing, undef, or otherwise unusable.
5191     const Value *Address = DI.getVariableLocation();
5192     if (!Address || isa<UndefValue>(Address) ||
5193         (Address->use_empty() && !isa<Argument>(Address))) {
5194       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5195       return nullptr;
5196     }
5197 
5198     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5199 
5200     // Check if this variable can be described by a frame index, typically
5201     // either as a static alloca or a byval parameter.
5202     int FI = std::numeric_limits<int>::max();
5203     if (const auto *AI =
5204             dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5205       if (AI->isStaticAlloca()) {
5206         auto I = FuncInfo.StaticAllocaMap.find(AI);
5207         if (I != FuncInfo.StaticAllocaMap.end())
5208           FI = I->second;
5209       }
5210     } else if (const auto *Arg = dyn_cast<Argument>(
5211                    Address->stripInBoundsConstantOffsets())) {
5212       FI = FuncInfo.getArgumentFrameIndex(Arg);
5213     }
5214 
5215     // llvm.dbg.addr is control dependent and always generates indirect
5216     // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5217     // the MachineFunction variable table.
5218     if (FI != std::numeric_limits<int>::max()) {
5219       if (Intrinsic == Intrinsic::dbg_addr) {
5220         SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5221             Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5222         DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5223       }
5224       return nullptr;
5225     }
5226 
5227     SDValue &N = NodeMap[Address];
5228     if (!N.getNode() && isa<Argument>(Address))
5229       // Check unused arguments map.
5230       N = UnusedArgNodeMap[Address];
5231     SDDbgValue *SDV;
5232     if (N.getNode()) {
5233       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5234         Address = BCI->getOperand(0);
5235       // Parameters are handled specially.
5236       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5237       if (isParameter && FINode) {
5238         // Byval parameter. We have a frame index at this point.
5239         SDV =
5240             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5241                                       /*IsIndirect*/ true, dl, SDNodeOrder);
5242       } else if (isa<Argument>(Address)) {
5243         // Address is an argument, so try to emit its dbg value using
5244         // virtual register info from the FuncInfo.ValueMap.
5245         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5246         return nullptr;
5247       } else {
5248         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5249                               true, dl, SDNodeOrder);
5250       }
5251       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5252     } else {
5253       // If Address is an argument then try to emit its dbg value using
5254       // virtual register info from the FuncInfo.ValueMap.
5255       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5256                                     N)) {
5257         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5258       }
5259     }
5260     return nullptr;
5261   }
5262   case Intrinsic::dbg_label: {
5263     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5264     DILabel *Label = DI.getLabel();
5265     assert(Label && "Missing label");
5266 
5267     SDDbgLabel *SDV;
5268     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5269     DAG.AddDbgLabel(SDV);
5270     return nullptr;
5271   }
5272   case Intrinsic::dbg_value: {
5273     const DbgValueInst &DI = cast<DbgValueInst>(I);
5274     assert(DI.getVariable() && "Missing variable");
5275 
5276     DILocalVariable *Variable = DI.getVariable();
5277     DIExpression *Expression = DI.getExpression();
5278     dropDanglingDebugInfo(Variable, Expression);
5279     const Value *V = DI.getValue();
5280     if (!V)
5281       return nullptr;
5282 
5283     SDDbgValue *SDV;
5284     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
5285       SDV = DAG.getConstantDbgValue(Variable, Expression, V, dl, SDNodeOrder);
5286       DAG.AddDbgValue(SDV, nullptr, false);
5287       return nullptr;
5288     }
5289 
5290     // Do not use getValue() in here; we don't want to generate code at
5291     // this point if it hasn't been done yet.
5292     SDValue N = NodeMap[V];
5293     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
5294       N = UnusedArgNodeMap[V];
5295     if (N.getNode()) {
5296       if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, false, N))
5297         return nullptr;
5298       SDV = getDbgValue(N, Variable, Expression, dl, SDNodeOrder);
5299       DAG.AddDbgValue(SDV, N.getNode(), false);
5300       return nullptr;
5301     }
5302 
5303     // PHI nodes have already been selected, so we should know which VReg the
5304     // value is assigned to.
5305     if (isa<PHINode>(V)) {
5306       auto VMI = FuncInfo.ValueMap.find(V);
5307       if (VMI != FuncInfo.ValueMap.end()) {
5308         unsigned Reg = VMI->second;
5309         // The PHI node may be split up into several MI PHI nodes (in
5310         // FunctionLoweringInfo::set).
5311         RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
5312                          V->getType(), None);
5313         if (RFV.occupiesMultipleRegs()) {
5314           unsigned Offset = 0;
5315           unsigned BitsToDescribe = 0;
5316           if (auto VarSize = Variable->getSizeInBits())
5317             BitsToDescribe = *VarSize;
5318           if (auto Fragment = Expression->getFragmentInfo())
5319             BitsToDescribe = Fragment->SizeInBits;
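               // Emit one debug value per register, each with a fragment
               // expression for the bits that register covers; e.g. an i64
               // variable split across two 32-bit registers is described by
               // fragments for bits [0, 32) and [32, 64).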
5320           for (auto RegAndSize : RFV.getRegsAndSizes()) {
5321             unsigned RegisterSize = RegAndSize.second;
5322             // Bail out if all bits are described already.
5323             if (Offset >= BitsToDescribe)
5324               break;
5325             unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
5326                 ? BitsToDescribe - Offset
5327                 : RegisterSize;
5328             auto FragmentExpr = DIExpression::createFragmentExpression(
5329                 Expression, Offset, FragmentSize);
5330             if (!FragmentExpr)
5331                 continue;
5332             SDV = DAG.getVRegDbgValue(Variable, *FragmentExpr, RegAndSize.first,
5333                                       false, dl, SDNodeOrder);
5334             DAG.AddDbgValue(SDV, nullptr, false);
5335             Offset += RegisterSize;
5336           }
5337         } else {
5338           SDV = DAG.getVRegDbgValue(Variable, Expression, Reg, false, dl,
5339                                     SDNodeOrder);
5340           DAG.AddDbgValue(SDV, nullptr, false);
5341         }
5342         return nullptr;
5343       }
5344     }
5345 
5346     // TODO: When we get here we will either drop the dbg.value completely, or
5347     // we try to move it forward by letting it dangle for a while. So we should
5348     // probably add an extra DbgValue to the DAG here, with a reference to
5349     // "noreg", to indicate that we have lost the debug location for the
5350     // variable.
5351 
5352     if (!V->use_empty()) {
5353       // Do not call getValue(V) yet, as we don't want to generate code.
5354       // Remember it for later.
5355       DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5356       return nullptr;
5357     }
5358 
5359     LLVM_DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
5360     LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
5361     return nullptr;
5362   }
5363 
5364   case Intrinsic::eh_typeid_for: {
5365     // Find the type id for the given typeinfo.
5366     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5367     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5368     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5369     setValue(&I, Res);
5370     return nullptr;
5371   }
5372 
5373   case Intrinsic::eh_return_i32:
5374   case Intrinsic::eh_return_i64:
5375     DAG.getMachineFunction().setCallsEHReturn(true);
5376     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5377                             MVT::Other,
5378                             getControlRoot(),
5379                             getValue(I.getArgOperand(0)),
5380                             getValue(I.getArgOperand(1))));
5381     return nullptr;
5382   case Intrinsic::eh_unwind_init:
5383     DAG.getMachineFunction().setCallsUnwindInit(true);
5384     return nullptr;
5385   case Intrinsic::eh_dwarf_cfa:
5386     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5387                              TLI.getPointerTy(DAG.getDataLayout()),
5388                              getValue(I.getArgOperand(0))));
5389     return nullptr;
5390   case Intrinsic::eh_sjlj_callsite: {
5391     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5392     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5393     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5394     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5395 
5396     MMI.setCurrentCallSite(CI->getZExtValue());
5397     return nullptr;
5398   }
5399   case Intrinsic::eh_sjlj_functioncontext: {
5400     // Get and store the index of the function context.
5401     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5402     AllocaInst *FnCtx =
5403       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5404     int FI = FuncInfo.StaticAllocaMap[FnCtx];
5405     MFI.setFunctionContextIndex(FI);
5406     return nullptr;
5407   }
5408   case Intrinsic::eh_sjlj_setjmp: {
5409     SDValue Ops[2];
5410     Ops[0] = getRoot();
5411     Ops[1] = getValue(I.getArgOperand(0));
5412     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5413                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
5414     setValue(&I, Op.getValue(0));
5415     DAG.setRoot(Op.getValue(1));
5416     return nullptr;
5417   }
5418   case Intrinsic::eh_sjlj_longjmp:
5419     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5420                             getRoot(), getValue(I.getArgOperand(0))));
5421     return nullptr;
5422   case Intrinsic::eh_sjlj_setup_dispatch:
5423     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5424                             getRoot()));
5425     return nullptr;
5426   case Intrinsic::masked_gather:
5427     visitMaskedGather(I);
5428     return nullptr;
5429   case Intrinsic::masked_load:
5430     visitMaskedLoad(I);
5431     return nullptr;
5432   case Intrinsic::masked_scatter:
5433     visitMaskedScatter(I);
5434     return nullptr;
5435   case Intrinsic::masked_store:
5436     visitMaskedStore(I);
5437     return nullptr;
5438   case Intrinsic::masked_expandload:
5439     visitMaskedLoad(I, true /* IsExpanding */);
5440     return nullptr;
5441   case Intrinsic::masked_compressstore:
5442     visitMaskedStore(I, true /* IsCompressing */);
5443     return nullptr;
5444   case Intrinsic::x86_mmx_pslli_w:
5445   case Intrinsic::x86_mmx_pslli_d:
5446   case Intrinsic::x86_mmx_pslli_q:
5447   case Intrinsic::x86_mmx_psrli_w:
5448   case Intrinsic::x86_mmx_psrli_d:
5449   case Intrinsic::x86_mmx_psrli_q:
5450   case Intrinsic::x86_mmx_psrai_w:
5451   case Intrinsic::x86_mmx_psrai_d: {
5452     SDValue ShAmt = getValue(I.getArgOperand(1));
5453     if (isa<ConstantSDNode>(ShAmt)) {
5454       visitTargetIntrinsic(I, Intrinsic);
5455       return nullptr;
5456     }
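         // Map the immediate-shift intrinsic to its register-shift
         // counterpart; the scalar amount is rebuilt below as the 64-bit
         // vector operand that form expects.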
5457     unsigned NewIntrinsic = 0;
5458     EVT ShAmtVT = MVT::v2i32;
5459     switch (Intrinsic) {
5460     case Intrinsic::x86_mmx_pslli_w:
5461       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5462       break;
5463     case Intrinsic::x86_mmx_pslli_d:
5464       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5465       break;
5466     case Intrinsic::x86_mmx_pslli_q:
5467       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5468       break;
5469     case Intrinsic::x86_mmx_psrli_w:
5470       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5471       break;
5472     case Intrinsic::x86_mmx_psrli_d:
5473       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5474       break;
5475     case Intrinsic::x86_mmx_psrli_q:
5476       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5477       break;
5478     case Intrinsic::x86_mmx_psrai_w:
5479       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5480       break;
5481     case Intrinsic::x86_mmx_psrai_d:
5482       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5483       break;
5484     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5485     }
5486 
5487     // The vector shift intrinsics with scalars use 32-bit shift amounts, but
5488     // the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits
5489     // to zero.
5490     // We must do this early because v2i32 is not a legal type.
5491     SDValue ShOps[2];
5492     ShOps[0] = ShAmt;
5493     ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5494     ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5495     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5496     ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5497     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5498                        DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5499                        getValue(I.getArgOperand(0)), ShAmt);
5500     setValue(&I, Res);
5501     return nullptr;
5502   }
5503   case Intrinsic::powi:
5504     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5505                             getValue(I.getArgOperand(1)), DAG));
5506     return nullptr;
5507   case Intrinsic::log:
5508     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5509     return nullptr;
5510   case Intrinsic::log2:
5511     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5512     return nullptr;
5513   case Intrinsic::log10:
5514     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5515     return nullptr;
5516   case Intrinsic::exp:
5517     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5518     return nullptr;
5519   case Intrinsic::exp2:
5520     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5521     return nullptr;
5522   case Intrinsic::pow:
5523     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5524                            getValue(I.getArgOperand(1)), DAG, TLI));
5525     return nullptr;
5526   case Intrinsic::sqrt:
5527   case Intrinsic::fabs:
5528   case Intrinsic::sin:
5529   case Intrinsic::cos:
5530   case Intrinsic::floor:
5531   case Intrinsic::ceil:
5532   case Intrinsic::trunc:
5533   case Intrinsic::rint:
5534   case Intrinsic::nearbyint:
5535   case Intrinsic::round:
5536   case Intrinsic::canonicalize: {
5537     unsigned Opcode;
5538     switch (Intrinsic) {
5539     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5540     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
5541     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
5542     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
5543     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
5544     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
5545     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
5546     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
5547     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
5548     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5549     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
5550     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5551     }
5552 
5553     setValue(&I, DAG.getNode(Opcode, sdl,
5554                              getValue(I.getArgOperand(0)).getValueType(),
5555                              getValue(I.getArgOperand(0))));
5556     return nullptr;
5557   }
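       // For minnum/maxnum, a call with the nnan flag may use the target's
       // FMINNAN/FMAXNAN nodes, since NaN propagation is irrelevant under
       // nnan; otherwise use the IEEE-style FMINNUM/FMAXNUM.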
5558   case Intrinsic::minnum: {
5559     auto VT = getValue(I.getArgOperand(0)).getValueType();
5560     unsigned Opc =
5561         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT)
5562             ? ISD::FMINNAN
5563             : ISD::FMINNUM;
5564     setValue(&I, DAG.getNode(Opc, sdl, VT,
5565                              getValue(I.getArgOperand(0)),
5566                              getValue(I.getArgOperand(1))));
5567     return nullptr;
5568   }
5569   case Intrinsic::maxnum: {
5570     auto VT = getValue(I.getArgOperand(0)).getValueType();
5571     unsigned Opc =
5572         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT)
5573             ? ISD::FMAXNAN
5574             : ISD::FMAXNUM;
5575     setValue(&I, DAG.getNode(Opc, sdl, VT,
5576                              getValue(I.getArgOperand(0)),
5577                              getValue(I.getArgOperand(1))));
5578     return nullptr;
5579   }
5580   case Intrinsic::copysign:
5581     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5582                              getValue(I.getArgOperand(0)).getValueType(),
5583                              getValue(I.getArgOperand(0)),
5584                              getValue(I.getArgOperand(1))));
5585     return nullptr;
5586   case Intrinsic::fma:
5587     setValue(&I, DAG.getNode(ISD::FMA, sdl,
5588                              getValue(I.getArgOperand(0)).getValueType(),
5589                              getValue(I.getArgOperand(0)),
5590                              getValue(I.getArgOperand(1)),
5591                              getValue(I.getArgOperand(2))));
5592     return nullptr;
5593   case Intrinsic::experimental_constrained_fadd:
5594   case Intrinsic::experimental_constrained_fsub:
5595   case Intrinsic::experimental_constrained_fmul:
5596   case Intrinsic::experimental_constrained_fdiv:
5597   case Intrinsic::experimental_constrained_frem:
5598   case Intrinsic::experimental_constrained_fma:
5599   case Intrinsic::experimental_constrained_sqrt:
5600   case Intrinsic::experimental_constrained_pow:
5601   case Intrinsic::experimental_constrained_powi:
5602   case Intrinsic::experimental_constrained_sin:
5603   case Intrinsic::experimental_constrained_cos:
5604   case Intrinsic::experimental_constrained_exp:
5605   case Intrinsic::experimental_constrained_exp2:
5606   case Intrinsic::experimental_constrained_log:
5607   case Intrinsic::experimental_constrained_log10:
5608   case Intrinsic::experimental_constrained_log2:
5609   case Intrinsic::experimental_constrained_rint:
5610   case Intrinsic::experimental_constrained_nearbyint:
5611     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
5612     return nullptr;
5613   case Intrinsic::fmuladd: {
5614     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5615     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5616         TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5617       setValue(&I, DAG.getNode(ISD::FMA, sdl,
5618                                getValue(I.getArgOperand(0)).getValueType(),
5619                                getValue(I.getArgOperand(0)),
5620                                getValue(I.getArgOperand(1)),
5621                                getValue(I.getArgOperand(2))));
5622     } else {
5623       // TODO: Intrinsic calls should have fast-math-flags.
5624       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5625                                 getValue(I.getArgOperand(0)).getValueType(),
5626                                 getValue(I.getArgOperand(0)),
5627                                 getValue(I.getArgOperand(1)));
5628       SDValue Add = DAG.getNode(ISD::FADD, sdl,
5629                                 getValue(I.getArgOperand(0)).getValueType(),
5630                                 Mul,
5631                                 getValue(I.getArgOperand(2)));
5632       setValue(&I, Add);
5633     }
5634     return nullptr;
5635   }
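       // @llvm.convert.to.fp16 returns the raw i16 bits of the rounded half
       // value, hence the FP_ROUND to f16 followed by a bitcast to i16 (and
       // the reverse sequence for @llvm.convert.from.fp16 below).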
5636   case Intrinsic::convert_to_fp16:
5637     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5638                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5639                                          getValue(I.getArgOperand(0)),
5640                                          DAG.getTargetConstant(0, sdl,
5641                                                                MVT::i32))));
5642     return nullptr;
5643   case Intrinsic::convert_from_fp16:
5644     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5645                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5646                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5647                                          getValue(I.getArgOperand(0)))));
5648     return nullptr;
5649   case Intrinsic::pcmarker: {
5650     SDValue Tmp = getValue(I.getArgOperand(0));
5651     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5652     return nullptr;
5653   }
5654   case Intrinsic::readcyclecounter: {
5655     SDValue Op = getRoot();
5656     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5657                       DAG.getVTList(MVT::i64, MVT::Other), Op);
5658     setValue(&I, Res);
5659     DAG.setRoot(Res.getValue(1));
5660     return nullptr;
5661   }
5662   case Intrinsic::bitreverse:
5663     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5664                              getValue(I.getArgOperand(0)).getValueType(),
5665                              getValue(I.getArgOperand(0))));
5666     return nullptr;
5667   case Intrinsic::bswap:
5668     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5669                              getValue(I.getArgOperand(0)).getValueType(),
5670                              getValue(I.getArgOperand(0))));
5671     return nullptr;
5672   case Intrinsic::cttz: {
5673     SDValue Arg = getValue(I.getArgOperand(0));
5674     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5675     EVT Ty = Arg.getValueType();
5676     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5677                              sdl, Ty, Arg));
5678     return nullptr;
5679   }
5680   case Intrinsic::ctlz: {
5681     SDValue Arg = getValue(I.getArgOperand(0));
5682     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5683     EVT Ty = Arg.getValueType();
5684     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5685                              sdl, Ty, Arg));
5686     return nullptr;
5687   }
5688   case Intrinsic::ctpop: {
5689     SDValue Arg = getValue(I.getArgOperand(0));
5690     EVT Ty = Arg.getValueType();
5691     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5692     return nullptr;
5693   }
5694   case Intrinsic::fshl:
5695   case Intrinsic::fshr: {
5696     bool IsFSHL = Intrinsic == Intrinsic::fshl;
5697     SDValue X = getValue(I.getArgOperand(0));
5698     SDValue Y = getValue(I.getArgOperand(1));
5699     SDValue Z = getValue(I.getArgOperand(2));
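         // A funnel shift concatenates X (high) and Y (low), shifts the
         // double-wide value by Z modulo the bitwidth, and returns one half;
         // e.g. for i8, fshl(0xAB, 0xCD, 1) == 0x57 and
         // fshr(0xAB, 0xCD, 1) == 0xE6.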
5700     EVT VT = X.getValueType();
5701     SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
5702     SDValue Zero = DAG.getConstant(0, sdl, VT);
5703     SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
5704 
5705     // When X == Y, this is a rotate. If the data type has a power-of-2 size, we
5706     // avoid the select that is necessary in the general case to filter out
5707     // the 0-shift possibility that leads to UB.
5708     if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
5709       // TODO: This should also be done if the operation is custom, but we have
5710       // to make sure targets are handling the modulo shift amount as expected.
5711       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
5712       if (TLI.isOperationLegal(RotateOpcode, VT)) {
5713         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
5714         return nullptr;
5715       }
5716 
5717       // Some targets only rotate one way. Try the opposite direction.
5718       RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
5719       if (TLI.isOperationLegal(RotateOpcode, VT)) {
5720         // Negate the shift amount because it is safe to ignore the high bits.
5721         SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5722         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
5723         return nullptr;
5724       }
5725 
5726       // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
5727       // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
5728       SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5729       SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
5730       SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
5731       SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
5732       setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
5733       return nullptr;
5734     }
5735 
5736     // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
5737     // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
5738     SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
5739     SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
5740     SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
5741     SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
5742 
5743     // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
5744     // and that is undefined. We must compare and select to avoid UB.
5745     EVT CCVT = MVT::i1;
5746     if (VT.isVector())
5747       CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
5748 
5749     // For fshl, 0-shift returns the 1st arg (X).
5750     // For fshr, 0-shift returns the 2nd arg (Y).
5751     SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
5752     setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
5753     return nullptr;
5754   }
5755   case Intrinsic::stacksave: {
5756     SDValue Op = getRoot();
5757     Res = DAG.getNode(
5758         ISD::STACKSAVE, sdl,
5759         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
5760     setValue(&I, Res);
5761     DAG.setRoot(Res.getValue(1));
5762     return nullptr;
5763   }
5764   case Intrinsic::stackrestore:
5765     Res = getValue(I.getArgOperand(0));
5766     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5767     return nullptr;
5768   case Intrinsic::get_dynamic_area_offset: {
5769     SDValue Op = getRoot();
5770     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5771     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
5772     // The result type of @llvm.get.dynamic.area.offset should match the
5773     // target's pointer type.
5774     if (PtrTy != ResTy)
5775       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
5776                          " intrinsic!");
5777     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
5778                       Op);
5779     DAG.setRoot(Op);
5780     setValue(&I, Res);
5781     return nullptr;
5782   }
5783   case Intrinsic::stackguard: {
5784     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5785     MachineFunction &MF = DAG.getMachineFunction();
5786     const Module &M = *MF.getFunction().getParent();
5787     SDValue Chain = getRoot();
5788     if (TLI.useLoadStackGuardNode()) {
5789       Res = getLoadStackGuard(DAG, sdl, Chain);
5790     } else {
5791       const Value *Global = TLI.getSDagStackGuard(M);
5792       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
5793       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
5794                         MachinePointerInfo(Global, 0), Align,
5795                         MachineMemOperand::MOVolatile);
5796     }
5797     if (TLI.useStackGuardXorFP())
5798       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
5799     DAG.setRoot(Chain);
5800     setValue(&I, Res);
5801     return nullptr;
5802   }
5803   case Intrinsic::stackprotector: {
5804     // Emit code into the DAG to store the stack guard onto the stack.
5805     MachineFunction &MF = DAG.getMachineFunction();
5806     MachineFrameInfo &MFI = MF.getFrameInfo();
5807     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5808     SDValue Src, Chain = getRoot();
5809 
5810     if (TLI.useLoadStackGuardNode())
5811       Src = getLoadStackGuard(DAG, sdl, Chain);
5812     else
5813       Src = getValue(I.getArgOperand(0));   // The guard's value.
5814 
5815     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5816 
5817     int FI = FuncInfo.StaticAllocaMap[Slot];
5818     MFI.setStackProtectorIndex(FI);
5819 
5820     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5821 
5822     // Store the stack protector onto the stack.
5823     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
5824                                                  DAG.getMachineFunction(), FI),
5825                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
5826     setValue(&I, Res);
5827     DAG.setRoot(Res);
5828     return nullptr;
5829   }
5830   case Intrinsic::objectsize: {
5831     // If we don't know by now, we're never going to know.
5832     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5833 
5834     assert(CI && "Non-constant type in __builtin_object_size?");
5835 
5836     SDValue Arg = getValue(I.getCalledValue());
5837     EVT Ty = Arg.getValueType();
5838 
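         // Arg 1 is the 'min' flag: when it is false, "unknown" is reported
         // as -1 (no known upper bound); when true, as 0 (no known lower
         // bound).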
5839     if (CI->isZero())
5840       Res = DAG.getConstant(-1ULL, sdl, Ty);
5841     else
5842       Res = DAG.getConstant(0, sdl, Ty);
5843 
5844     setValue(&I, Res);
5845     return nullptr;
5846   }
5847   case Intrinsic::annotation:
5848   case Intrinsic::ptr_annotation:
5849   case Intrinsic::launder_invariant_group:
5850   case Intrinsic::strip_invariant_group:
5851     // Drop the intrinsic, but forward the value
5852     setValue(&I, getValue(I.getOperand(0)));
5853     return nullptr;
5854   case Intrinsic::assume:
5855   case Intrinsic::var_annotation:
5856   case Intrinsic::sideeffect:
5857     // Discard annotate attributes, assumptions, and artificial side-effects.
5858     return nullptr;
5859 
5860   case Intrinsic::codeview_annotation: {
5861     // Emit a label associated with this metadata.
5862     MachineFunction &MF = DAG.getMachineFunction();
5863     MCSymbol *Label =
5864         MF.getMMI().getContext().createTempSymbol("annotation", true);
5865     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
5866     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
5867     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
5868     DAG.setRoot(Res);
5869     return nullptr;
5870   }
5871 
5872   case Intrinsic::init_trampoline: {
5873     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5874 
5875     SDValue Ops[6];
5876     Ops[0] = getRoot();
5877     Ops[1] = getValue(I.getArgOperand(0));
5878     Ops[2] = getValue(I.getArgOperand(1));
5879     Ops[3] = getValue(I.getArgOperand(2));
5880     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5881     Ops[5] = DAG.getSrcValue(F);
5882 
5883     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5884 
5885     DAG.setRoot(Res);
5886     return nullptr;
5887   }
5888   case Intrinsic::adjust_trampoline:
5889     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5890                              TLI.getPointerTy(DAG.getDataLayout()),
5891                              getValue(I.getArgOperand(0))));
5892     return nullptr;
5893   case Intrinsic::gcroot: {
5894     assert(DAG.getMachineFunction().getFunction().hasGC() &&
5895            "only valid in functions with gc specified, enforced by Verifier");
5896     assert(GFI && "implied by previous");
5897     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5898     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5899 
5900     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5901     GFI->addStackRoot(FI->getIndex(), TypeMap);
5902     return nullptr;
5903   }
5904   case Intrinsic::gcread:
5905   case Intrinsic::gcwrite:
5906     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5907   case Intrinsic::flt_rounds:
5908     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
5909     return nullptr;
5910 
5911   case Intrinsic::expect:
5912     // Just replace __builtin_expect(exp, c) with EXP.
5913     setValue(&I, getValue(I.getArgOperand(0)));
5914     return nullptr;
5915 
5916   case Intrinsic::debugtrap:
5917   case Intrinsic::trap: {
5918     StringRef TrapFuncName =
5919         I.getAttributes()
5920             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
5921             .getValueAsString();
5922     if (TrapFuncName.empty()) {
5923       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5924         ISD::TRAP : ISD::DEBUGTRAP;
5925       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
5926       return nullptr;
5927     }
5928     TargetLowering::ArgListTy Args;
5929 
5930     TargetLowering::CallLoweringInfo CLI(DAG);
5931     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
5932         CallingConv::C, I.getType(),
5933         DAG.getExternalSymbol(TrapFuncName.data(),
5934                               TLI.getPointerTy(DAG.getDataLayout())),
5935         std::move(Args));
5936 
5937     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5938     DAG.setRoot(Result.second);
5939     return nullptr;
5940   }
5941 
5942   case Intrinsic::uadd_with_overflow:
5943   case Intrinsic::sadd_with_overflow:
5944   case Intrinsic::usub_with_overflow:
5945   case Intrinsic::ssub_with_overflow:
5946   case Intrinsic::umul_with_overflow:
5947   case Intrinsic::smul_with_overflow: {
5948     ISD::NodeType Op;
5949     switch (Intrinsic) {
5950     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5951     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5952     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5953     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5954     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5955     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5956     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5957     }
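         // Each of these intrinsics returns a {result, i1 overflow} pair,
         // which the ISD node mirrors with a two-result value type list.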
5958     SDValue Op1 = getValue(I.getArgOperand(0));
5959     SDValue Op2 = getValue(I.getArgOperand(1));
5960 
5961     SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
5962     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
5963     return nullptr;
5964   }
5965   case Intrinsic::prefetch: {
5966     SDValue Ops[5];
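         // @llvm.prefetch(addr, rw, locality, cachetype): rw == 0 requests a
         // read prefetch and rw == 1 a write prefetch, modeled here as a load
         // or store memory operand respectively.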
5967     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
5968     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
5969     Ops[0] = DAG.getRoot();
5970     Ops[1] = getValue(I.getArgOperand(0));
5971     Ops[2] = getValue(I.getArgOperand(1));
5972     Ops[3] = getValue(I.getArgOperand(2));
5973     Ops[4] = getValue(I.getArgOperand(3));
5974     SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5975                                              DAG.getVTList(MVT::Other), Ops,
5976                                              EVT::getIntegerVT(*Context, 8),
5977                                              MachinePointerInfo(I.getArgOperand(0)),
5978                                              /* align */ 0,
5979                                              Flags);
5980 
5981     // Chain the prefetch in parallel with any pending loads, to stay out of
5982     // the way of later optimizations.
5983     PendingLoads.push_back(Result);
5984     Result = getRoot();
5985     DAG.setRoot(Result);
5986     return nullptr;
5987   }
5988   case Intrinsic::lifetime_start:
5989   case Intrinsic::lifetime_end: {
5990     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
5991     // Stack coloring is not enabled in O0, discard region information.
5992     if (TM.getOptLevel() == CodeGenOpt::None)
5993       return nullptr;
5994 
5995     SmallVector<Value *, 4> Allocas;
5996     GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
5997 
5998     for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
5999            E = Allocas.end(); Object != E; ++Object) {
6000       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
6001 
6002       // Could not find an Alloca.
6003       if (!LifetimeObject)
6004         continue;
6005 
6006       // First check that the Alloca is static, otherwise it won't have a
6007       // valid frame index.
6008       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6009       if (SI == FuncInfo.StaticAllocaMap.end())
6010         return nullptr;
6011 
6012       int FI = SI->second;
6013 
6014       SDValue Ops[2];
6015       Ops[0] = getRoot();
6016       Ops[1] =
6017           DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true);
6018       unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
6019 
6020       Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
6021       DAG.setRoot(Res);
6022     }
6023     return nullptr;
6024   }
6025   case Intrinsic::invariant_start:
6026     // Discard region information.
6027     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6028     return nullptr;
6029   case Intrinsic::invariant_end:
6030     // Discard region information.
6031     return nullptr;
6032   case Intrinsic::clear_cache:
6033     return TLI.getClearCacheBuiltinName();
6034   case Intrinsic::donothing:
6035     // ignore
6036     return nullptr;
6037   case Intrinsic::experimental_stackmap:
6038     visitStackmap(I);
6039     return nullptr;
6040   case Intrinsic::experimental_patchpoint_void:
6041   case Intrinsic::experimental_patchpoint_i64:
6042     visitPatchpoint(&I);
6043     return nullptr;
6044   case Intrinsic::experimental_gc_statepoint:
6045     LowerStatepoint(ImmutableStatepoint(&I));
6046     return nullptr;
6047   case Intrinsic::experimental_gc_result:
6048     visitGCResult(cast<GCResultInst>(I));
6049     return nullptr;
6050   case Intrinsic::experimental_gc_relocate:
6051     visitGCRelocate(cast<GCRelocateInst>(I));
6052     return nullptr;
6053   case Intrinsic::instrprof_increment:
6054     llvm_unreachable("instrprof failed to lower an increment");
6055   case Intrinsic::instrprof_value_profile:
6056     llvm_unreachable("instrprof failed to lower a value profiling call");
6057   case Intrinsic::localescape: {
6058     MachineFunction &MF = DAG.getMachineFunction();
6059     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6060 
6061     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6062     // is the same on all targets.
6063     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6064       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6065       if (isa<ConstantPointerNull>(Arg))
6066         continue; // Skip null pointers. They represent a hole in index space.
6067       AllocaInst *Slot = cast<AllocaInst>(Arg);
6068       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6069              "can only escape static allocas");
6070       int FI = FuncInfo.StaticAllocaMap[Slot];
6071       MCSymbol *FrameAllocSym =
6072           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6073               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6074       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6075               TII->get(TargetOpcode::LOCAL_ESCAPE))
6076           .addSym(FrameAllocSym)
6077           .addFrameIndex(FI);
6078     }
6079 
6080     return nullptr;
6081   }
6082 
6083   case Intrinsic::localrecover: {
6084     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6085     MachineFunction &MF = DAG.getMachineFunction();
6086     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
6087 
6088     // Get the symbol that defines the frame offset.
6089     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6090     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6091     unsigned IdxVal =
6092         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6093     MCSymbol *FrameAllocSym =
6094         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6095             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6096 
6097     // Create an MCSymbol for the label to avoid any target lowering
6098     // that would make this PC relative.
6099     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6100     SDValue OffsetVal =
6101         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6102 
6103     // Add the offset to the FP.
6104     Value *FP = I.getArgOperand(1);
6105     SDValue FPVal = getValue(FP);
6106     SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
6107     setValue(&I, Add);
6108 
6109     return nullptr;
6110   }
6111 
6112   case Intrinsic::eh_exceptionpointer:
6113   case Intrinsic::eh_exceptioncode: {
6114     // Get the exception pointer vreg, copy from it, and resize it to fit.
6115     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6116     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6117     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6118     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6119     SDValue N =
6120         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6121     if (Intrinsic == Intrinsic::eh_exceptioncode)
6122       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6123     setValue(&I, N);
6124     return nullptr;
6125   }
6126   case Intrinsic::xray_customevent: {
6127     // Here we want to make sure that the intrinsic behaves as if it has a
6128     // specific calling convention; for now this is only done for x86_64.
6129     // FIXME: Support other platforms later.
6130     const auto &Triple = DAG.getTarget().getTargetTriple();
6131     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6132       return nullptr;
6133 
6134     SDLoc DL = getCurSDLoc();
6135     SmallVector<SDValue, 8> Ops;
6136 
6137     // We want to say that we always want the arguments in registers.
6138     SDValue LogEntryVal = getValue(I.getArgOperand(0));
6139     SDValue StrSizeVal = getValue(I.getArgOperand(1));
6140     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6141     SDValue Chain = getRoot();
6142     Ops.push_back(LogEntryVal);
6143     Ops.push_back(StrSizeVal);
6144     Ops.push_back(Chain);
6145 
6146     // We need to enforce the calling convention for the callsite so that
6147     // arguments are passed in the right order and so that register allocation
6148     // can see which registers may be clobbered and must therefore be
6149     // preserved across the call to the intrinsic.
6150     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6151                                            DL, NodeTys, Ops);
6152     SDValue patchableNode = SDValue(MN, 0);
6153     DAG.setRoot(patchableNode);
6154     setValue(&I, patchableNode);
6155     return nullptr;
6156   }
6157   case Intrinsic::xray_typedevent: {
6158     // Here we want to make sure that the intrinsic behaves as if it has a
6159     // specific calling convention; for now this is only done for x86_64.
6160     // FIXME: Support other platforms later.
6161     const auto &Triple = DAG.getTarget().getTargetTriple();
6162     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6163       return nullptr;
6164 
6165     SDLoc DL = getCurSDLoc();
6166     SmallVector<SDValue, 8> Ops;
6167 
6168     // We want to say that we always want the arguments in registers.
6169     // It's unclear to me how manipulating the selection DAG here forces callers
6170     // to provide arguments in registers instead of on the stack.
6171     SDValue LogTypeId = getValue(I.getArgOperand(0));
6172     SDValue LogEntryVal = getValue(I.getArgOperand(1));
6173     SDValue StrSizeVal = getValue(I.getArgOperand(2));
6174     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6175     SDValue Chain = getRoot();
6176     Ops.push_back(LogTypeId);
6177     Ops.push_back(LogEntryVal);
6178     Ops.push_back(StrSizeVal);
6179     Ops.push_back(Chain);
6180 
6181     // We need to enforce the calling convention for the callsite so that
6182     // arguments are passed in the right order and so that register allocation
6183     // can see which registers may be clobbered and must therefore be
6184     // preserved across the call to the intrinsic.
6185     MachineSDNode *MN = DAG.getMachineNode(
6186         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6187     SDValue patchableNode = SDValue(MN, 0);
6188     DAG.setRoot(patchableNode);
6189     setValue(&I, patchableNode);
6190     return nullptr;
6191   }
6192   case Intrinsic::experimental_deoptimize:
6193     LowerDeoptimizeCall(&I);
6194     return nullptr;
6195 
6196   case Intrinsic::experimental_vector_reduce_fadd:
6197   case Intrinsic::experimental_vector_reduce_fmul:
6198   case Intrinsic::experimental_vector_reduce_add:
6199   case Intrinsic::experimental_vector_reduce_mul:
6200   case Intrinsic::experimental_vector_reduce_and:
6201   case Intrinsic::experimental_vector_reduce_or:
6202   case Intrinsic::experimental_vector_reduce_xor:
6203   case Intrinsic::experimental_vector_reduce_smax:
6204   case Intrinsic::experimental_vector_reduce_smin:
6205   case Intrinsic::experimental_vector_reduce_umax:
6206   case Intrinsic::experimental_vector_reduce_umin:
6207   case Intrinsic::experimental_vector_reduce_fmax:
6208   case Intrinsic::experimental_vector_reduce_fmin:
6209     visitVectorReduce(I, Intrinsic);
6210     return nullptr;
6211 
6212   case Intrinsic::icall_branch_funnel: {
6213     SmallVector<SDValue, 16> Ops;
6214     Ops.push_back(DAG.getRoot());
6215     Ops.push_back(getValue(I.getArgOperand(0)));
6216 
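         // The remaining operands come in pairs: an address into the same
         // GlobalValue as operand 1, whose constant offset keys the entry,
         // and the callee to branch to. The entries are sorted by offset
         // below so the back end can emit a branch tree over the targets.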
6217     int64_t Offset;
6218     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6219         I.getArgOperand(1), Offset, DAG.getDataLayout()));
6220     if (!Base)
6221       report_fatal_error(
6222           "llvm.icall.branch.funnel operand must be a GlobalValue");
6223     Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6224 
6225     struct BranchFunnelTarget {
6226       int64_t Offset;
6227       SDValue Target;
6228     };
6229     SmallVector<BranchFunnelTarget, 8> Targets;
6230 
6231     for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6232       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6233           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6234       if (ElemBase != Base)
6235         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6236                            "to the same GlobalValue");
6237 
6238       SDValue Val = getValue(I.getArgOperand(Op + 1));
6239       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6240       if (!GA)
6241         report_fatal_error(
6242             "llvm.icall.branch.funnel operand must be a GlobalValue");
6243       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6244                                      GA->getGlobal(), getCurSDLoc(),
6245                                      Val.getValueType(), GA->getOffset())});
6246     }
6247     llvm::sort(Targets.begin(), Targets.end(),
6248                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6249                  return T1.Offset < T2.Offset;
6250                });
6251 
6252     for (auto &T : Targets) {
6253       Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6254       Ops.push_back(T.Target);
6255     }
6256 
6257     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6258                                  getCurSDLoc(), MVT::Other, Ops),
6259               0);
6260     DAG.setRoot(N);
6261     setValue(&I, N);
6262     HasTailCall = true;
6263     return nullptr;
6264   }
6265 
6266   case Intrinsic::wasm_landingpad_index: {
6267     // TODO: Store the landing pad index in a map, to be used when generating
6268     // LSDA information.
6269     return nullptr;
6270   }
6271   }
6272 }
6273 
6274 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6275     const ConstrainedFPIntrinsic &FPI) {
6276   SDLoc sdl = getCurSDLoc();
6277   unsigned Opcode;
6278   switch (FPI.getIntrinsicID()) {
6279   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6280   case Intrinsic::experimental_constrained_fadd:
6281     Opcode = ISD::STRICT_FADD;
6282     break;
6283   case Intrinsic::experimental_constrained_fsub:
6284     Opcode = ISD::STRICT_FSUB;
6285     break;
6286   case Intrinsic::experimental_constrained_fmul:
6287     Opcode = ISD::STRICT_FMUL;
6288     break;
6289   case Intrinsic::experimental_constrained_fdiv:
6290     Opcode = ISD::STRICT_FDIV;
6291     break;
6292   case Intrinsic::experimental_constrained_frem:
6293     Opcode = ISD::STRICT_FREM;
6294     break;
6295   case Intrinsic::experimental_constrained_fma:
6296     Opcode = ISD::STRICT_FMA;
6297     break;
6298   case Intrinsic::experimental_constrained_sqrt:
6299     Opcode = ISD::STRICT_FSQRT;
6300     break;
6301   case Intrinsic::experimental_constrained_pow:
6302     Opcode = ISD::STRICT_FPOW;
6303     break;
6304   case Intrinsic::experimental_constrained_powi:
6305     Opcode = ISD::STRICT_FPOWI;
6306     break;
6307   case Intrinsic::experimental_constrained_sin:
6308     Opcode = ISD::STRICT_FSIN;
6309     break;
6310   case Intrinsic::experimental_constrained_cos:
6311     Opcode = ISD::STRICT_FCOS;
6312     break;
6313   case Intrinsic::experimental_constrained_exp:
6314     Opcode = ISD::STRICT_FEXP;
6315     break;
6316   case Intrinsic::experimental_constrained_exp2:
6317     Opcode = ISD::STRICT_FEXP2;
6318     break;
6319   case Intrinsic::experimental_constrained_log:
6320     Opcode = ISD::STRICT_FLOG;
6321     break;
6322   case Intrinsic::experimental_constrained_log10:
6323     Opcode = ISD::STRICT_FLOG10;
6324     break;
6325   case Intrinsic::experimental_constrained_log2:
6326     Opcode = ISD::STRICT_FLOG2;
6327     break;
6328   case Intrinsic::experimental_constrained_rint:
6329     Opcode = ISD::STRICT_FRINT;
6330     break;
6331   case Intrinsic::experimental_constrained_nearbyint:
6332     Opcode = ISD::STRICT_FNEARBYINT;
6333     break;
6334   }
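       // Every constrained FP node produces a chain result in addition to
       // its value, keeping it ordered with respect to other operations that
       // may read or modify the floating-point environment.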
6335   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6336   SDValue Chain = getRoot();
6337   SmallVector<EVT, 4> ValueVTs;
6338   ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
6339   ValueVTs.push_back(MVT::Other); // Out chain
6340 
6341   SDVTList VTs = DAG.getVTList(ValueVTs);
6342   SDValue Result;
6343   if (FPI.isUnaryOp())
6344     Result = DAG.getNode(Opcode, sdl, VTs,
6345                          { Chain, getValue(FPI.getArgOperand(0)) });
6346   else if (FPI.isTernaryOp())
6347     Result = DAG.getNode(Opcode, sdl, VTs,
6348                          { Chain, getValue(FPI.getArgOperand(0)),
6349                                   getValue(FPI.getArgOperand(1)),
6350                                   getValue(FPI.getArgOperand(2)) });
6351   else
6352     Result = DAG.getNode(Opcode, sdl, VTs,
6353                          { Chain, getValue(FPI.getArgOperand(0)),
6354                            getValue(FPI.getArgOperand(1))  });
6355 
6356   assert(Result.getNode()->getNumValues() == 2);
6357   SDValue OutChain = Result.getValue(1);
6358   DAG.setRoot(OutChain);
6359   SDValue FPResult = Result.getValue(0);
6360   setValue(&FPI, FPResult);
6361 }
6362 
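     /// Lower the call described by \p CLI. If \p EHPadBB is non-null the call
     /// site is an invoke: EH labels are emitted around the call and the label
     /// range is recorded so it can be mapped back to the landing pad.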
6363 std::pair<SDValue, SDValue>
6364 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
6365                                     const BasicBlock *EHPadBB) {
6366   MachineFunction &MF = DAG.getMachineFunction();
6367   MachineModuleInfo &MMI = MF.getMMI();
6368   MCSymbol *BeginLabel = nullptr;
6369 
6370   if (EHPadBB) {
6371     // Insert a label before the invoke call to mark the try range.  This can be
6372     // used to detect deletion of the invoke via the MachineModuleInfo.
6373     BeginLabel = MMI.getContext().createTempSymbol();
6374 
6375     // For SjLj, keep track of which landing pads go with which invokes
6376     // so as to maintain the ordering of pads in the LSDA.
6377     unsigned CallSiteIndex = MMI.getCurrentCallSite();
6378     if (CallSiteIndex) {
6379       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
6380       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
6381 
6382       // Now that the call site is handled, stop tracking it.
6383       MMI.setCurrentCallSite(0);
6384     }
6385 
6386     // Both PendingLoads and PendingExports must be flushed here;
6387     // this call might not return.
6388     (void)getRoot();
6389     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
6390 
6391     CLI.setChain(getRoot());
6392   }
6393   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6394   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6395 
6396   assert((CLI.IsTailCall || Result.second.getNode()) &&
6397          "Non-null chain expected with non-tail call!");
6398   assert((Result.second.getNode() || !Result.first.getNode()) &&
6399          "Null value expected with tail call!");
6400 
6401   if (!Result.second.getNode()) {
6402     // As a special case, a null chain means that a tail call has been emitted
6403     // and the DAG root is already updated.
6404     HasTailCall = true;
6405 
6406     // Since there's no actual continuation from this block, nothing can be
6407     // relying on us setting vregs for the call's results.
6408     PendingExports.clear();
6409   } else {
6410     DAG.setRoot(Result.second);
6411   }
6412 
6413   if (EHPadBB) {
6414     // Insert a label at the end of the invoke call to mark the try range.  This
6415     // can be used to detect deletion of the invoke via the MachineModuleInfo.
6416     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
6417     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
6418 
6419     // Inform MachineModuleInfo of range.
6420     auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
6421     // Some platforms (e.g. wasm) use funclet-style IR but do not actually
6422     // emit outlined funclets or funclet-style LSDA info.
6423     if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
6424       assert(CLI.CS);
6425       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
6426       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
6427                                 BeginLabel, EndLabel);
6428     } else {
6429       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
6430     }
6431   }
6432 
6433   return Result;
6434 }
6435 
6436 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
6437                                       bool isTailCall,
6438                                       const BasicBlock *EHPadBB) {
6439   auto &DL = DAG.getDataLayout();
6440   FunctionType *FTy = CS.getFunctionType();
6441   Type *RetTy = CS.getType();
6442 
6443   TargetLowering::ArgListTy Args;
6444   Args.reserve(CS.arg_size());
6445 
6446   const Value *SwiftErrorVal = nullptr;
6447   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6448 
6449   // We can't tail call inside a function with a swifterror argument: lowering
6450   // does not support this yet, as the value would have to be moved into the
6451   // swifterror register before the call.
6452   auto *Caller = CS.getInstruction()->getParent()->getParent();
6453   if (TLI.supportSwiftError() &&
6454       Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
6455     isTailCall = false;
6456 
6457   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
6458        i != e; ++i) {
6459     TargetLowering::ArgListEntry Entry;
6460     const Value *V = *i;
6461 
6462     // Skip empty types
6463     if (V->getType()->isEmptyTy())
6464       continue;
6465 
6466     SDValue ArgNode = getValue(V);
6467     Entry.Node = ArgNode; Entry.Ty = V->getType();
6468 
6469     Entry.setAttributes(&CS, i - CS.arg_begin());
6470 
6471     // Use swifterror virtual register as input to the call.
6472     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
6473       SwiftErrorVal = V;
6474       // Find the virtual register for the actual swifterror argument and
6475       // use it in place of the Value.
6476       Entry.Node = DAG.getRegister(FuncInfo
6477                                        .getOrCreateSwiftErrorVRegUseAt(
6478                                            CS.getInstruction(), FuncInfo.MBB, V)
6479                                        .first,
6480                                    EVT(TLI.getPointerTy(DL)));
6481     }
6482 
6483     Args.push_back(Entry);
6484 
6485     // If we have an explicit sret argument that is an Instruction (i.e., it
6486     // might point to function-local memory), we can't meaningfully tail-call.
6487     if (Entry.IsSRet && isa<Instruction>(V))
6488       isTailCall = false;
6489   }
6490 
6491   // Check if target-independent constraints permit a tail call here.
6492   // Target-dependent constraints are checked within TLI->LowerCallTo.
6493   if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
6494     isTailCall = false;
6495 
6496   // Disable tail calls if there is a swifterror argument, as targets have
6497   // not been updated to support tail calls with swifterror yet.
6498   if (TLI.supportSwiftError() && SwiftErrorVal)
6499     isTailCall = false;
6500 
6501   TargetLowering::CallLoweringInfo CLI(DAG);
6502   CLI.setDebugLoc(getCurSDLoc())
6503       .setChain(getRoot())
6504       .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
6505       .setTailCall(isTailCall)
6506       .setConvergent(CS.isConvergent());
6507   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
6508 
6509   if (Result.first.getNode()) {
6510     const Instruction *Inst = CS.getInstruction();
6511     Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
6512     setValue(Inst, Result.first);
6513   }
6514 
6515   // The last element of CLI.InVals has the SDValue for swifterror return.
6516   // Here we copy it to a virtual register and update SwiftErrorMap for
6517   // book-keeping.
6518   if (SwiftErrorVal && TLI.supportSwiftError()) {
6519     // Get the last element of InVals.
6520     SDValue Src = CLI.InVals.back();
6521     unsigned VReg; bool CreatedVReg;
6522     std::tie(VReg, CreatedVReg) =
6523         FuncInfo.getOrCreateSwiftErrorVRegDefAt(CS.getInstruction());
6524     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
6525     // We update the virtual register for the actual swifterror argument.
6526     if (CreatedVReg)
6527       FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
6528     DAG.setRoot(CopyNode);
6529   }
6530 }
6531 
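     /// Emit a load of \p LoadVT from \p PtrVal for use in memcmp expansion.
     /// The load is constant-folded when the pointee is a constant (e.g. a
     /// string literal); otherwise a (possibly unaligned) load is emitted.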
6532 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
6533                              SelectionDAGBuilder &Builder) {
6534   // Check to see if this load can be trivially constant folded, e.g. if the
6535   // input is from a string literal.
6536   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
6537     // Cast pointer to the type we really want to load.
6538     Type *LoadTy =
6539         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
6540     if (LoadVT.isVector())
6541       LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
6542 
6543     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
6544                                          PointerType::getUnqual(LoadTy));
6545 
6546     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
6547             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
6548       return Builder.getValue(LoadCst);
6549   }
6550 
6551   // Otherwise, we have to emit the load.  If the pointer points to unfoldable
6552   // but still constant memory, the input chain can be the entry node.
6553   SDValue Root;
6554   bool ConstantMemory = false;
6555 
6556   // Do not serialize (non-volatile) loads of constant memory with anything.
6557   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
6558     Root = Builder.DAG.getEntryNode();
6559     ConstantMemory = true;
6560   } else {
6561     // Do not serialize non-volatile loads against each other.
6562     Root = Builder.DAG.getRoot();
6563   }
6564 
6565   SDValue Ptr = Builder.getValue(PtrVal);
6566   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
6567                                         Ptr, MachinePointerInfo(PtrVal),
6568                                         /* Alignment = */ 1);
6569 
6570   if (!ConstantMemory)
6571     Builder.PendingLoads.push_back(LoadVal.getValue(1));
6572   return LoadVal;
6573 }
6574 
6575 /// Record the value for an instruction that produces an integer result,
6576 /// converting the type where necessary.
6577 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
6578                                                   SDValue Value,
6579                                                   bool IsSigned) {
6580   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6581                                                     I.getType(), true);
6582   if (IsSigned)
6583     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
6584   else
6585     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
6586   setValue(&I, Value);
6587 }
6588 
6589 /// See if we can lower a memcmp call into an optimized form. If so, return
6590 /// true and lower it. Otherwise return false, and it will be lowered like a
6591 /// normal call.
6592 /// The caller already checked that \p I calls the appropriate LibFunc with a
6593 /// correct prototype.
6594 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
6595   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
6596   const Value *Size = I.getArgOperand(2);
6597   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
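       // Per the C standard, a zero-length memcmp compares no bytes and always
       // returns 0, so fold it to a constant.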
6598   if (CSize && CSize->getZExtValue() == 0) {
6599     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6600                                                           I.getType(), true);
6601     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
6602     return true;
6603   }
6604 
6605   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6606   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
6607       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
6608       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
6609   if (Res.first.getNode()) {
6610     processIntegerCallValue(I, Res.first, true);
6611     PendingLoads.push_back(Res.second);
6612     return true;
6613   }
6614 
6615   // memcmp(S1,S2,2) != 0 -> (*(short*)S1 != *(short*)S2) != 0
6616   // memcmp(S1,S2,4) != 0 -> (*(int*)S1 != *(int*)S2) != 0
6617   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
6618     return false;
6619 
6620   // If the target has a fast compare for the given size, it will return a
6621   // preferred load type for that size. Require that the load VT is legal and
6622   // that the target supports unaligned loads of that type. Otherwise, return
6623   // INVALID.
6624   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
6625     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6626     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
6627     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
6628       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
6629       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
6630       // TODO: Check alignment of src and dest ptrs.
6631       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
6632       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
6633       if (!TLI.isTypeLegal(LVT) ||
6634           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
6635           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
6636         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
6637     }
6638 
6639     return LVT;
6640   };
6641 
6642   // This turns into unaligned loads. We only do this if the target natively
6643   // supports the MVT we'll be loading or if it is small enough (<= 4 bytes)
6644   // that we'll only produce a small number of byte loads.
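       // For example, assuming the target reports a fast 64-bit equality
       // compare, memcmp(S1,S2,8) != 0 lowers to (*(i64*)S1 != *(i64*)S2).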
6645   MVT LoadVT;
6646   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
6647   switch (NumBitsToCompare) {
6648   default:
6649     return false;
6650   case 16:
6651     LoadVT = MVT::i16;
6652     break;
6653   case 32:
6654     LoadVT = MVT::i32;
6655     break;
6656   case 64:
6657   case 128:
6658   case 256:
6659     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
6660     break;
6661   }
6662 
6663   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
6664     return false;
6665 
6666   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
6667   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
6668 
6669   // Bitcast to a wide integer type if the loads are vectors.
6670   if (LoadVT.isVector()) {
6671     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
6672     LoadL = DAG.getBitcast(CmpVT, LoadL);
6673     LoadR = DAG.getBitcast(CmpVT, LoadR);
6674   }
6675 
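       // The result feeds only zero-equality comparisons, so a single SETNE of
       // the loaded values (zero-extended to the call's return type) suffices.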
6676   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
6677   processIntegerCallValue(I, Cmp, false);
6678   return true;
6679 }
6680 
6681 /// See if we can lower a memchr call into an optimized form. If so, return
6682 /// true and lower it. Otherwise return false, and it will be lowered like a
6683 /// normal call.
6684 /// The caller already checked that \p I calls the appropriate LibFunc with a
6685 /// correct prototype.
6686 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
6687   const Value *Src = I.getArgOperand(0);
6688   const Value *Char = I.getArgOperand(1);
6689   const Value *Length = I.getArgOperand(2);
6690 
6691   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6692   std::pair<SDValue, SDValue> Res =
6693     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
6694                                 getValue(Src), getValue(Char), getValue(Length),
6695                                 MachinePointerInfo(Src));
6696   if (Res.first.getNode()) {
6697     setValue(&I, Res.first);
6698     PendingLoads.push_back(Res.second);
6699     return true;
6700   }
6701 
6702   return false;
6703 }
6704 
6705 /// See if we can lower a mempcpy call into an optimized form. If so, return
6706 /// true and lower it. Otherwise return false, and it will be lowered like a
6707 /// normal call.
6708 /// The caller already checked that \p I calls the appropriate LibFunc with a
6709 /// correct prototype.
6710 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
6711   SDValue Dst = getValue(I.getArgOperand(0));
6712   SDValue Src = getValue(I.getArgOperand(1));
6713   SDValue Size = getValue(I.getArgOperand(2));
6714 
6715   unsigned DstAlign = DAG.InferPtrAlignment(Dst);
6716   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6717   unsigned Align = std::min(DstAlign, SrcAlign);
6718   if (Align == 0) // Alignment of one or both could not be inferred.
6719     Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.
6720 
6721   bool isVol = false;
6722   SDLoc sdl = getCurSDLoc();
6723 
6724   // In the mempcpy context we need to pass in a false value for isTailCall
6725   // because the return pointer needs to be adjusted by the size of
6726   // the copied memory.
6727   SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
6728                              false, /*isTailCall=*/false,
6729                              MachinePointerInfo(I.getArgOperand(0)),
6730                              MachinePointerInfo(I.getArgOperand(1)));
6731   assert(MC.getNode() != nullptr &&
6732          "** memcpy should not be lowered as TailCall in mempcpy context **");
6733   DAG.setRoot(MC);
6734 
6735   // Check if Size needs to be truncated or extended.
6736   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
6737 
6738   // Adjust return pointer to point just past the last dst byte.
6739   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
6740                                     Dst, Size);
6741   setValue(&I, DstPlusSize);
6742   return true;
6743 }
6744 
6745 /// See if we can lower a strcpy call into an optimized form.  If so, return
6746 /// true and lower it, otherwise return false and it will be lowered like a
6747 /// normal call.
6748 /// The caller already checked that \p I calls the appropriate LibFunc with a
6749 /// correct prototype.
6750 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
6751   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6752 
6753   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6754   std::pair<SDValue, SDValue> Res =
6755     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
6756                                 getValue(Arg0), getValue(Arg1),
6757                                 MachinePointerInfo(Arg0),
6758                                 MachinePointerInfo(Arg1), isStpcpy);
6759   if (Res.first.getNode()) {
6760     setValue(&I, Res.first);
6761     DAG.setRoot(Res.second);
6762     return true;
6763   }
6764 
6765   return false;
6766 }
6767 
6768 /// See if we can lower a strcmp call into an optimized form.  If so, return
6769 /// true and lower it, otherwise return false and it will be lowered like a
6770 /// normal call.
6771 /// The caller already checked that \p I calls the appropriate LibFunc with a
6772 /// correct prototype.
6773 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
6774   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6775 
6776   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6777   std::pair<SDValue, SDValue> Res =
6778     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
6779                                 getValue(Arg0), getValue(Arg1),
6780                                 MachinePointerInfo(Arg0),
6781                                 MachinePointerInfo(Arg1));
6782   if (Res.first.getNode()) {
6783     processIntegerCallValue(I, Res.first, true);
6784     PendingLoads.push_back(Res.second);
6785     return true;
6786   }
6787 
6788   return false;
6789 }
6790 
6791 /// See if we can lower a strlen call into an optimized form.  If so, return
6792 /// true and lower it, otherwise return false and it will be lowered like a
6793 /// normal call.
6794 /// The caller already checked that \p I calls the appropriate LibFunc with a
6795 /// correct prototype.
6796 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
6797   const Value *Arg0 = I.getArgOperand(0);
6798 
6799   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6800   std::pair<SDValue, SDValue> Res =
6801     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
6802                                 getValue(Arg0), MachinePointerInfo(Arg0));
6803   if (Res.first.getNode()) {
6804     processIntegerCallValue(I, Res.first, false);
6805     PendingLoads.push_back(Res.second);
6806     return true;
6807   }
6808 
6809   return false;
6810 }
6811 
6812 /// See if we can lower a strnlen call into an optimized form.  If so, return
6813 /// true and lower it, otherwise return false and it will be lowered like a
6814 /// normal call.
6815 /// The caller already checked that \p I calls the appropriate LibFunc with a
6816 /// correct prototype.
6817 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
6818   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6819 
6820   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6821   std::pair<SDValue, SDValue> Res =
6822     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
6823                                  getValue(Arg0), getValue(Arg1),
6824                                  MachinePointerInfo(Arg0));
6825   if (Res.first.getNode()) {
6826     processIntegerCallValue(I, Res.first, false);
6827     PendingLoads.push_back(Res.second);
6828     return true;
6829   }
6830 
6831   return false;
6832 }
6833 
6834 /// See if we can lower a unary floating-point operation into an SDNode with
6835 /// the specified Opcode.  If so, return true and lower it, otherwise return
6836 /// false and it will be lowered like a normal call.
6837 /// The caller already checked that \p I calls the appropriate LibFunc with a
6838 /// correct prototype.
6839 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
6840                                               unsigned Opcode) {
6841   // We already checked this call's prototype; verify it doesn't modify errno.
6842   if (!I.onlyReadsMemory())
6843     return false;
6844 
6845   SDValue Tmp = getValue(I.getArgOperand(0));
6846   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
6847   return true;
6848 }
6849 
6850 /// See if we can lower a binary floating-point operation into an SDNode with
6851 /// the specified Opcode. If so, return true and lower it. Otherwise return
6852 /// false, and it will be lowered like a normal call.
6853 /// The caller already checked that \p I calls the appropriate LibFunc with a
6854 /// correct prototype.
6855 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
6856                                                unsigned Opcode) {
6857   // We already checked this call's prototype; verify it doesn't modify errno.
6858   if (!I.onlyReadsMemory())
6859     return false;
6860 
6861   SDValue Tmp0 = getValue(I.getArgOperand(0));
6862   SDValue Tmp1 = getValue(I.getArgOperand(1));
6863   EVT VT = Tmp0.getValueType();
6864   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
6865   return true;
6866 }
6867 
6868 void SelectionDAGBuilder::visitCall(const CallInst &I) {
6869   // Handle inline assembly differently.
6870   if (isa<InlineAsm>(I.getCalledValue())) {
6871     visitInlineAsm(&I);
6872     return;
6873   }
6874 
6875   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6876   computeUsesVAFloatArgument(I, MMI);
6877 
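       // visitIntrinsicCall returns a non-null function name when the intrinsic
       // should instead be lowered as a call to a renamed library function; the
       // name is turned into an external symbol below.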
6878   const char *RenameFn = nullptr;
6879   if (Function *F = I.getCalledFunction()) {
6880     if (F->isDeclaration()) {
6881       // Is this an LLVM intrinsic or a target-specific intrinsic?
6882       unsigned IID = F->getIntrinsicID();
6883       if (!IID)
6884         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
6885           IID = II->getIntrinsicID(F);
6886 
6887       if (IID) {
6888         RenameFn = visitIntrinsicCall(I, IID);
6889         if (!RenameFn)
6890           return;
6891       }
6892     }
6893 
6894     // Check for well-known libc/libm calls.  If the function is internal, it
6895     // can't be a library call.  Don't do the check if marked as nobuiltin for
6896     // some reason or the call site requires strict floating point semantics.
6897     LibFunc Func;
6898     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
6899         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
6900         LibInfo->hasOptimizedCodeGen(Func)) {
6901       switch (Func) {
6902       default: break;
6903       case LibFunc_copysign:
6904       case LibFunc_copysignf:
6905       case LibFunc_copysignl:
6906         // We already checked this call's prototype; verify it doesn't modify
6907         // errno.
6908         if (I.onlyReadsMemory()) {
6909           SDValue LHS = getValue(I.getArgOperand(0));
6910           SDValue RHS = getValue(I.getArgOperand(1));
6911           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
6912                                    LHS.getValueType(), LHS, RHS));
6913           return;
6914         }
6915         break;
6916       case LibFunc_fabs:
6917       case LibFunc_fabsf:
6918       case LibFunc_fabsl:
6919         if (visitUnaryFloatCall(I, ISD::FABS))
6920           return;
6921         break;
6922       case LibFunc_fmin:
6923       case LibFunc_fminf:
6924       case LibFunc_fminl:
6925         if (visitBinaryFloatCall(I, ISD::FMINNUM))
6926           return;
6927         break;
6928       case LibFunc_fmax:
6929       case LibFunc_fmaxf:
6930       case LibFunc_fmaxl:
6931         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
6932           return;
6933         break;
6934       case LibFunc_sin:
6935       case LibFunc_sinf:
6936       case LibFunc_sinl:
6937         if (visitUnaryFloatCall(I, ISD::FSIN))
6938           return;
6939         break;
6940       case LibFunc_cos:
6941       case LibFunc_cosf:
6942       case LibFunc_cosl:
6943         if (visitUnaryFloatCall(I, ISD::FCOS))
6944           return;
6945         break;
6946       case LibFunc_sqrt:
6947       case LibFunc_sqrtf:
6948       case LibFunc_sqrtl:
6949       case LibFunc_sqrt_finite:
6950       case LibFunc_sqrtf_finite:
6951       case LibFunc_sqrtl_finite:
6952         if (visitUnaryFloatCall(I, ISD::FSQRT))
6953           return;
6954         break;
6955       case LibFunc_floor:
6956       case LibFunc_floorf:
6957       case LibFunc_floorl:
6958         if (visitUnaryFloatCall(I, ISD::FFLOOR))
6959           return;
6960         break;
6961       case LibFunc_nearbyint:
6962       case LibFunc_nearbyintf:
6963       case LibFunc_nearbyintl:
6964         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
6965           return;
6966         break;
6967       case LibFunc_ceil:
6968       case LibFunc_ceilf:
6969       case LibFunc_ceill:
6970         if (visitUnaryFloatCall(I, ISD::FCEIL))
6971           return;
6972         break;
6973       case LibFunc_rint:
6974       case LibFunc_rintf:
6975       case LibFunc_rintl:
6976         if (visitUnaryFloatCall(I, ISD::FRINT))
6977           return;
6978         break;
6979       case LibFunc_round:
6980       case LibFunc_roundf:
6981       case LibFunc_roundl:
6982         if (visitUnaryFloatCall(I, ISD::FROUND))
6983           return;
6984         break;
6985       case LibFunc_trunc:
6986       case LibFunc_truncf:
6987       case LibFunc_truncl:
6988         if (visitUnaryFloatCall(I, ISD::FTRUNC))
6989           return;
6990         break;
6991       case LibFunc_log2:
6992       case LibFunc_log2f:
6993       case LibFunc_log2l:
6994         if (visitUnaryFloatCall(I, ISD::FLOG2))
6995           return;
6996         break;
6997       case LibFunc_exp2:
6998       case LibFunc_exp2f:
6999       case LibFunc_exp2l:
7000         if (visitUnaryFloatCall(I, ISD::FEXP2))
7001           return;
7002         break;
7003       case LibFunc_memcmp:
7004         if (visitMemCmpCall(I))
7005           return;
7006         break;
7007       case LibFunc_mempcpy:
7008         if (visitMemPCpyCall(I))
7009           return;
7010         break;
7011       case LibFunc_memchr:
7012         if (visitMemChrCall(I))
7013           return;
7014         break;
7015       case LibFunc_strcpy:
7016         if (visitStrCpyCall(I, false))
7017           return;
7018         break;
7019       case LibFunc_stpcpy:
7020         if (visitStrCpyCall(I, true))
7021           return;
7022         break;
7023       case LibFunc_strcmp:
7024         if (visitStrCmpCall(I))
7025           return;
7026         break;
7027       case LibFunc_strlen:
7028         if (visitStrLenCall(I))
7029           return;
7030         break;
7031       case LibFunc_strnlen:
7032         if (visitStrNLenCall(I))
7033           return;
7034         break;
7035       }
7036     }
7037   }
7038 
7039   SDValue Callee;
7040   if (!RenameFn)
7041     Callee = getValue(I.getCalledValue());
7042   else
7043     Callee = DAG.getExternalSymbol(
7044         RenameFn,
7045         DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
7046 
7047   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7048   // have to do anything here to lower funclet bundles.
7049   assert(!I.hasOperandBundlesOtherThan(
7050              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
7051          "Cannot lower calls with arbitrary operand bundles!");
7052 
7053   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7054     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7055   else
7056     // Check if we can potentially perform a tail call. More detailed checking
7057     // is done within LowerCallTo, after more information about the call is
7058     // known.
7059     LowerCallTo(&I, Callee, I.isTailCall());
7060 }
7061 
7062 namespace {
7063 
7064 /// AsmOperandInfo - This contains information for each constraint that we are
7065 /// lowering.
7066 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7067 public:
7068   /// CallOperand - If this is the result output operand or a clobber
7069   /// this is null, otherwise it is the incoming operand to the CallInst.
7070   /// This gets modified as the asm is processed.
7071   SDValue CallOperand;
7072 
7073   /// AssignedRegs - If this is a register or register class operand, this
7074   /// contains the set of registers corresponding to the operand.
7075   RegsForValue AssignedRegs;
7076 
7077   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7078     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7079   }
7080 
7081   /// Whether or not this operand accesses memory
7082   bool hasMemory(const TargetLowering &TLI) const {
7083     // Indirect operand accesses touch memory.
7084     if (isIndirect)
7085       return true;
7086 
7087     for (const auto &Code : Codes)
7088       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7089         return true;
7090 
7091     return false;
7092   }
7093 
7094   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7095   /// corresponds to.  If there is no Value* for this operand, it returns
7096   /// MVT::Other.
7097   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7098                            const DataLayout &DL) const {
7099     if (!CallOperandVal) return MVT::Other;
7100 
7101     if (isa<BasicBlock>(CallOperandVal))
7102       return TLI.getPointerTy(DL);
7103 
7104     llvm::Type *OpTy = CallOperandVal->getType();
7105 
7106     // FIXME: code duplicated from TargetLowering::ParseConstraints().
7107     // If this is an indirect operand, the operand is a pointer to the
7108     // accessed type.
7109     if (isIndirect) {
7110       PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7111       if (!PtrTy)
7112         report_fatal_error("Indirect operand for inline asm not a pointer!");
7113       OpTy = PtrTy->getElementType();
7114     }
7115 
7116     // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
7117     if (StructType *STy = dyn_cast<StructType>(OpTy))
7118       if (STy->getNumElements() == 1)
7119         OpTy = STy->getElementType(0);
7120 
7121     // If OpTy is not a single value, it may be a struct/union that we
7122     // can tile with integers.
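         // (For example, a 16-byte struct operand is retyped as an i128.)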
7123     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7124       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7125       switch (BitSize) {
7126       default: break;
7127       case 1:
7128       case 8:
7129       case 16:
7130       case 32:
7131       case 64:
7132       case 128:
7133         OpTy = IntegerType::get(Context, BitSize);
7134         break;
7135       }
7136     }
7137 
7138     return TLI.getValueType(DL, OpTy, true);
7139   }
7140 };
7141 
7142 using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;
7143 
7144 } // end anonymous namespace
7145 
7146 /// Make sure that the output operand \p OpInfo and its corresponding input
7147 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7148 /// out).
7149 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7150                                SDISelAsmOperandInfo &MatchingOpInfo,
7151                                SelectionDAG &DAG) {
7152   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7153     return;
7154 
7155   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7156   const auto &TLI = DAG.getTargetLoweringInfo();
7157 
7158   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7159       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7160                                        OpInfo.ConstraintVT);
7161   std::pair<unsigned, const TargetRegisterClass *> InputRC =
7162       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7163                                        MatchingOpInfo.ConstraintVT);
7164   if ((OpInfo.ConstraintVT.isInteger() !=
7165        MatchingOpInfo.ConstraintVT.isInteger()) ||
7166       (MatchRC.second != InputRC.second)) {
7167     // FIXME: error out in a more elegant fashion
7168     report_fatal_error("Unsupported asm: input constraint"
7169                        " with a matching output constraint of"
7170                        " incompatible type!");
7171   }
7172   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7173 }
7174 
7175 /// Get a direct memory input to behave well as an indirect operand.
7176 /// This may introduce stores, hence the need for a \p Chain.
7177 /// \return The (possibly updated) chain.
7178 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7179                                         SDISelAsmOperandInfo &OpInfo,
7180                                         SelectionDAG &DAG) {
7181   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7182 
7183   // If we don't have an indirect input, put it in the constant pool if we
7184   // can; otherwise spill it to a stack slot.
7185   // TODO: This isn't quite right. We need to handle these according to
7186   // the addressing mode that the constraint wants. Also, this may take
7187   // an additional register for the computation and we don't want that
7188   // either.
7189 
7190   // If the operand is a float, integer, or vector constant, spill to a
7191   // constant pool entry to get its address.
7192   const Value *OpVal = OpInfo.CallOperandVal;
7193   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7194       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7195     OpInfo.CallOperand = DAG.getConstantPool(
7196         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
7197     return Chain;
7198   }
7199 
7200   // Otherwise, create a stack slot and emit a store to it before the asm.
7201   Type *Ty = OpVal->getType();
7202   auto &DL = DAG.getDataLayout();
7203   uint64_t TySize = DL.getTypeAllocSize(Ty);
7204   unsigned Align = DL.getPrefTypeAlignment(Ty);
7205   MachineFunction &MF = DAG.getMachineFunction();
7206   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7207   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
7208   Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7209                        MachinePointerInfo::getFixedStack(MF, SSFI));
7210   OpInfo.CallOperand = StackSlot;
7211 
7212   return Chain;
7213 }
7214 
7215 /// GetRegistersForValue - Assign registers (virtual or physical) for the
7216 /// specified operand.  We prefer to assign virtual registers, to allow the
7217 /// register allocator to handle the assignment process.  However, if the asm
7218 /// uses features that we can't model on machineinstrs, we have SDISel do the
7219 /// allocation.  This produces generally horrible, but correct, code.
7220 ///
7221 ///   OpInfo describes the operand
7222 ///   RefOpInfo describes the matching operand if any, the operand otherwise
7223 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
7224                                  const SDLoc &DL, SDISelAsmOperandInfo &OpInfo,
7225                                  SDISelAsmOperandInfo &RefOpInfo) {
7226   LLVMContext &Context = *DAG.getContext();
7227 
7228   MachineFunction &MF = DAG.getMachineFunction();
7229   SmallVector<unsigned, 4> Regs;
7230   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7231 
7232   // If this is a constraint for a single physreg, or a constraint for a
7233   // register class, find it.
7234   std::pair<unsigned, const TargetRegisterClass *> PhysReg =
7235       TLI.getRegForInlineAsmConstraint(&TRI, RefOpInfo.ConstraintCode,
7236                                        RefOpInfo.ConstraintVT);
7237 
7238   unsigned NumRegs = 1;
7239   if (OpInfo.ConstraintVT != MVT::Other) {
7240     // If this is an FP operand in an integer register (or vice versa), or more
7241     // generally if the operand value disagrees with the register class we plan
7242     // to stick it in, fix the operand type.
7243     //
7244     // If this is an input value, the bitcast to the new type is done now.
7245     // Bitcast for output value is done at the end of visitInlineAsm().
7246     if ((OpInfo.Type == InlineAsm::isOutput ||
7247          OpInfo.Type == InlineAsm::isInput) &&
7248         PhysReg.second &&
7249         !TRI.isTypeLegalForClass(*PhysReg.second, OpInfo.ConstraintVT)) {
7250       // Try to convert to the first EVT that the reg class contains.  If the
7251       // types are identical size, use a bitcast to convert (e.g. two differing
7252       // vector types).  Note: output bitcast is done at the end of
7253       // visitInlineAsm().
7254       MVT RegVT = *TRI.legalclasstypes_begin(*PhysReg.second);
7255       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7256         // Exclude indirect inputs while they are unsupported because the code
7257         // to perform the load is missing and thus OpInfo.CallOperand still
7258         // refers to the input address rather than the pointed-to value.
7259         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
7260           OpInfo.CallOperand =
7261               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7262         OpInfo.ConstraintVT = RegVT;
7263         // If the operand is an FP value and we want it in integer registers,
7264         // use the corresponding integer type. This turns an f64 value into
7265         // i64, which can be passed with two i32 values on a 32-bit machine.
7266       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7267         RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
7268         if (OpInfo.Type == InlineAsm::isInput)
7269           OpInfo.CallOperand =
7270               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7271         OpInfo.ConstraintVT = RegVT;
7272       }
7273     }
7274 
7275     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
7276   }
7277 
7278   // No need to allocate a matching input constraint since the constraint it's
7279   // matching to has already been allocated.
7280   if (OpInfo.isMatchingInputConstraint())
7281     return;
7282 
7283   MVT RegVT;
7284   EVT ValueVT = OpInfo.ConstraintVT;
7285 
7286   // If this is a constraint for a specific physical register, like {r17},
7287   // assign it now.
7288   if (unsigned AssignedReg = PhysReg.first) {
7289     const TargetRegisterClass *RC = PhysReg.second;
7290     if (OpInfo.ConstraintVT == MVT::Other)
7291       ValueVT = *TRI.legalclasstypes_begin(*RC);
7292 
7293     // Get the actual register value type.  This is important, because the user
7294     // may have asked for (e.g.) the AX register in i32 type.  We need to
7295     // remember that AX is actually i16 to get the right extension.
7296     RegVT = *TRI.legalclasstypes_begin(*RC);
7297 
7298     // This is an explicit reference to a physical register.
7299     Regs.push_back(AssignedReg);
7300 
7301     // If this is an expanded reference, add the rest of the regs to Regs.
7302     if (NumRegs != 1) {
7303       TargetRegisterClass::iterator I = RC->begin();
7304       for (; *I != AssignedReg; ++I)
7305         assert(I != RC->end() && "Didn't find reg!");
7306 
7307       // Already added the first reg.
7308       --NumRegs; ++I;
7309       for (; NumRegs; --NumRegs, ++I) {
7310         assert(I != RC->end() && "Ran out of registers to allocate!");
7311         Regs.push_back(*I);
7312       }
7313     }
7314 
7315     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7316     return;
7317   }
7318 
7319   // Otherwise, if this was a reference to an LLVM register class, create vregs
7320   // for this reference.
7321   if (const TargetRegisterClass *RC = PhysReg.second) {
7322     RegVT = *TRI.legalclasstypes_begin(*RC);
7323     if (OpInfo.ConstraintVT == MVT::Other)
7324       ValueVT = RegVT;
7325 
7326     // Create the appropriate number of virtual registers.
7327     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7328     for (; NumRegs; --NumRegs)
7329       Regs.push_back(RegInfo.createVirtualRegister(RC));
7330 
7331     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7332     return;
7333   }
7334 
7335   // Otherwise, we couldn't allocate enough registers for this.
7336 }
7337 
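     /// Scan \p AsmNodeOperands for the flag word of inline asm operand number
     /// \p OperandNo, stepping over each preceding operand group (a flag word
     /// followed by its registers). Returns the index of that flag word.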
7338 static unsigned
7339 findMatchingInlineAsmOperand(unsigned OperandNo,
7340                              const std::vector<SDValue> &AsmNodeOperands) {
7341   // Scan until we find the definition we already emitted of this operand.
7342   unsigned CurOp = InlineAsm::Op_FirstOperand;
7343   for (; OperandNo; --OperandNo) {
7344     // Advance to the next operand.
7345     unsigned OpFlag =
7346         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7347     assert((InlineAsm::isRegDefKind(OpFlag) ||
7348             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
7349             InlineAsm::isMemKind(OpFlag)) &&
7350            "Skipped past definitions?");
7351     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
7352   }
7353   return CurOp;
7354 }
7355 
7356 /// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT.
7357 /// \return true on success, false otherwise.
7358 static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs,
7359                               MVT RegVT, SelectionDAG &DAG) {
7360   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7361   MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
7362   for (unsigned i = 0, e = NumRegs; i != e; ++i) {
7363     if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
7364       Regs.push_back(RegInfo.createVirtualRegister(RC));
7365     else
7366       return false;
7367   }
7368   return true;
7369 }
7370 
7371 namespace {
7372 
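     /// Accumulates the "extra" flag bits of an INLINEASM node (side effects,
     /// alignstack, convergent, asm dialect, may-load/may-store), which are
     /// encoded together into a single immediate operand.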
7373 class ExtraFlags {
7374   unsigned Flags = 0;
7375 
7376 public:
7377   explicit ExtraFlags(ImmutableCallSite CS) {
7378     const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7379     if (IA->hasSideEffects())
7380       Flags |= InlineAsm::Extra_HasSideEffects;
7381     if (IA->isAlignStack())
7382       Flags |= InlineAsm::Extra_IsAlignStack;
7383     if (CS.isConvergent())
7384       Flags |= InlineAsm::Extra_IsConvergent;
7385     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
7386   }
7387 
7388   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
7389     // Ideally, we would only check against memory constraints.  However, the
7390     // meaning of an Other constraint can be target-specific and we can't easily
7391     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
7392     // for Other constraints as well.
7393     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
7394         OpInfo.ConstraintType == TargetLowering::C_Other) {
7395       if (OpInfo.Type == InlineAsm::isInput)
7396         Flags |= InlineAsm::Extra_MayLoad;
7397       else if (OpInfo.Type == InlineAsm::isOutput)
7398         Flags |= InlineAsm::Extra_MayStore;
7399       else if (OpInfo.Type == InlineAsm::isClobber)
7400         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
7401     }
7402   }
7403 
7404   unsigned get() const { return Flags; }
7405 };
7406 
7407 } // end anonymous namespace
7408 
7409 /// visitInlineAsm - Handle a call to an InlineAsm object.
7410 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
7411   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7412 
7413   /// ConstraintOperands - Information about all of the constraints.
7414   SDISelAsmOperandInfoVector ConstraintOperands;
7415 
7416   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7417   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
7418       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
7419 
7420   bool hasMemory = false;
7421 
7422   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore bits.
7423   ExtraFlags ExtraInfo(CS);
7424 
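       // First pass over the constraints: compute each operand's value type,
       // note whether the asm touches memory, and accumulate the extra flag
       // bits for the INLINEASM node.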
7425   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
7426   unsigned ResNo = 0;   // ResNo - The result number of the next output.
7427   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
7428     ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
7429     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
7430 
7431     MVT OpVT = MVT::Other;
7432 
7433     // Compute the value type for each operand.
7434     if (OpInfo.Type == InlineAsm::isInput ||
7435         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
7436       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
7437 
7438       // Process the call argument. BasicBlocks are labels; currently they
7439       // appear only in inline asm.
7440       if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
7441         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
7442       } else {
7443         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
7444       }
7445 
7446       OpVT =
7447           OpInfo
7448               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
7449               .getSimpleVT();
7450     }
7451 
7452     if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
7453       // The return value of the call is this value.  As such, there is no
7454       // corresponding argument.
7455       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7456       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
7457         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
7458                                       STy->getElementType(ResNo));
7459       } else {
7460         assert(ResNo == 0 && "Asm only has one result!");
7461         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
7462       }
7463       ++ResNo;
7464     }
7465 
7466     OpInfo.ConstraintVT = OpVT;
7467 
7468     if (!hasMemory)
7469       hasMemory = OpInfo.hasMemory(TLI);
7470 
7471     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
7472     // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
7473     auto TargetConstraint = TargetConstraints[i];
7474 
7475     // Compute the constraint code and ConstraintType to use.
7476     TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
7477 
7478     ExtraInfo.update(TargetConstraint);
7479   }
7480 
7481   SDValue Chain, Flag;
7482 
7483   // We won't need to flush pending loads if this asm doesn't touch
7484   // memory and is nonvolatile.
7485   if (hasMemory || IA->hasSideEffects())
7486     Chain = getRoot();
7487   else
7488     Chain = DAG.getRoot();
7489 
7490   // Second pass over the constraints: compute which constraint option to use
7491   // and assign registers to constraints that want a specific physreg.
7492   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7493     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7494 
7495     // If this is an output operand with a matching input operand, look up the
7496     // matching input. If their types mismatch, e.g. one is an integer, the
7497     // other is floating point, or their sizes are different, flag it as an
7498     // error.
7499     if (OpInfo.hasMatchingInput()) {
7500       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
7501       patchMatchingInput(OpInfo, Input, DAG);
7502     }
7503 
7504     // Compute the constraint code and ConstraintType to use.
7505     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
7506 
7507     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7508         OpInfo.Type == InlineAsm::isClobber)
7509       continue;
7510 
7511     // If this is a memory input, and if the operand is not indirect, do what we
7512     // need to provide an address for the memory input.
7513     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7514         !OpInfo.isIndirect) {
7515       assert((OpInfo.isMultipleAlternative ||
7516               (OpInfo.Type == InlineAsm::isInput)) &&
7517              "Can only indirectify direct input operands!");
7518 
7519       // Memory operands really want the address of the value.
7520       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
7521 
7522       // There is no longer a Value* corresponding to this operand.
7523       OpInfo.CallOperandVal = nullptr;
7524 
7525       // It is now an indirect operand.
7526       OpInfo.isIndirect = true;
7527     }
7528 
7529     // If this constraint is for a specific register, allocate it before
7530     // anything else.
7531     SDISelAsmOperandInfo &RefOpInfo =
7532         OpInfo.isMatchingInputConstraint()
7533             ? ConstraintOperands[OpInfo.getMatchedOperand()]
7534             : ConstraintOperands[i];
7535     if (RefOpInfo.ConstraintType == TargetLowering::C_Register)
7536       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
7537   }
7538 
7539   // Third pass - Loop over all of the operands, assigning virtual or physregs
7540   // to register class operands.
7541   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7542     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7543     SDISelAsmOperandInfo &RefOpInfo =
7544         OpInfo.isMatchingInputConstraint()
7545             ? ConstraintOperands[OpInfo.getMatchedOperand()]
7546             : ConstraintOperands[i];
7547 
7548     // C_Register operands have already been allocated, Other/Memory don't need
7549     // to be.
7550     if (RefOpInfo.ConstraintType == TargetLowering::C_RegisterClass)
7551       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
7552   }
7553 
7554   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
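       // The fixed operands are: [0] the input chain, [1] the asm string,
       // [2] the (possibly null) !srcloc MDNode, and [3] the extra-info flag
       // bits; every asm operand then adds a flag word followed by its values.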
7555   std::vector<SDValue> AsmNodeOperands;
7556   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
7557   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
7558       IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
7559 
7560   // If we have a !srcloc metadata node associated with it, we want to attach
7561   // this to the ultimately generated inline asm machineinstr.  To do this, we
7562   // pass in the third operand as this (potentially null) inline asm MDNode.
7563   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
7564   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
7565 
7566   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
7567   // bits as operand 3.
7568   AsmNodeOperands.push_back(DAG.getTargetConstant(
7569       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7570 
7571   // Loop over all of the inputs, copying the operand values into the
7572   // appropriate registers and processing the output regs.
7573   RegsForValue RetValRegs;
7574 
7575   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
7576   std::vector<std::pair<RegsForValue, Value *>> IndirectStoresToEmit;
7577 
7578   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7579     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7580 
7581     switch (OpInfo.Type) {
7582     case InlineAsm::isOutput:
7583       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
7584           OpInfo.ConstraintType != TargetLowering::C_Register) {
7585         // Memory output, or 'other' output (e.g. 'X' constraint).
7586         assert(OpInfo.isIndirect && "Memory output must be indirect operand");
7587 
7588         unsigned ConstraintID =
7589             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7590         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7591                "Failed to convert memory constraint code to constraint id.");
7592 
7593         // Add information to the INLINEASM node to know about this output.
7594         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7595         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
7596         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
7597                                                         MVT::i32));
7598         AsmNodeOperands.push_back(OpInfo.CallOperand);
7599         break;
7600       }
7601 
7602       // Otherwise, this is a register or register class output.
7603 
7604       // Copy the output from the appropriate register.  Find a register that
7605       // we can use.
7606       if (OpInfo.AssignedRegs.Regs.empty()) {
7607         emitInlineAsmError(
7608             CS, "couldn't allocate output register for constraint '" +
7609                     Twine(OpInfo.ConstraintCode) + "'");
7610         return;
7611       }
7612 
7613       // If this is an indirect operand, store through the pointer after the
7614       // asm.
7615       if (OpInfo.isIndirect) {
7616         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7617                                                       OpInfo.CallOperandVal));
7618       } else {
7619         // This is the result value of the call.
7620         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7621         // Concatenate this output onto the outputs list.
7622         RetValRegs.append(OpInfo.AssignedRegs);
7623       }
7624 
7625       // Add information to the INLINEASM node to know that this register is
7626       // set.
7627       OpInfo.AssignedRegs
7628           .AddInlineAsmOperands(OpInfo.isEarlyClobber
7629                                     ? InlineAsm::Kind_RegDefEarlyClobber
7630                                     : InlineAsm::Kind_RegDef,
7631                                 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7632       break;
7633 
7634     case InlineAsm::isInput: {
7635       SDValue InOperandVal = OpInfo.CallOperand;
7636 
7637       if (OpInfo.isMatchingInputConstraint()) {
7638         // If this is required to match an output register we have already set,
7639         // just use its register.
7640         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7641                                                   AsmNodeOperands);
7642         unsigned OpFlag =
7643           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7644         if (InlineAsm::isRegDefKind(OpFlag) ||
7645             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7646           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7647           if (OpInfo.isIndirect) {
7648             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7649             emitInlineAsmError(CS, "inline asm not supported yet:"
7650                                    " don't know how to handle tied "
7651                                    "indirect register inputs");
7652             return;
7653           }
7654 
7655           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7656           SmallVector<unsigned, 4> Regs;
7657 
7658           if (!createVirtualRegs(Regs,
7659                                  InlineAsm::getNumOperandRegisters(OpFlag),
7660                                  RegVT, DAG)) {
7661             emitInlineAsmError(CS, "inline asm error: This value type register "
7662                                    "class is not natively supported!");
7663             return;
7664           }
7665 
7666           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
7667 
7668           SDLoc dl = getCurSDLoc();
7669           // Copy the input value into the registers tied to the matched output.
7670           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
7671                                     CS.getInstruction());
7672           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
7673                                            true, OpInfo.getMatchedOperand(), dl,
7674                                            DAG, AsmNodeOperands);
7675           break;
7676         }
7677 
7678         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
7679         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
7680                "Unexpected number of operands");
7681         // Add information to the INLINEASM node to know about this input.
7682         // See InlineAsm.h isUseOperandTiedToDef.
7683         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
7684         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
7685                                                     OpInfo.getMatchedOperand());
7686         AsmNodeOperands.push_back(DAG.getTargetConstant(
7687             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7688         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7689         break;
7690       }
7691 
7692       // Treat indirect 'X' constraint as memory.
7693       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
7694           OpInfo.isIndirect)
7695         OpInfo.ConstraintType = TargetLowering::C_Memory;
7696 
7697       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
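        // Give the target a chance to lower this operand to a target-specific
        // immediate, e.g. an 'i' or 'n' constraint applied to a constant.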
7698         std::vector<SDValue> Ops;
7699         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
7700                                           Ops, DAG);
7701         if (Ops.empty()) {
7702           emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
7703                                      Twine(OpInfo.ConstraintCode) + "'");
7704           return;
7705         }
7706 
7707         // Add information to the INLINEASM node to know about this input.
7708         unsigned ResOpType =
7709           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
7710         AsmNodeOperands.push_back(DAG.getTargetConstant(
7711             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7712         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
7713         break;
7714       }
7715 
7716       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
7717         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
7718         assert(InOperandVal.getValueType() ==
7719                    TLI.getPointerTy(DAG.getDataLayout()) &&
7720                "Memory operands expect pointer values");
7721 
7722         unsigned ConstraintID =
7723             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7724         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7725                "Failed to convert memory constraint code to constraint id.");
7726 
7727         // Add information to the INLINEASM node to know about this input.
7728         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7729         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
7730         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
7731                                                         getCurSDLoc(),
7732                                                         MVT::i32));
7733         AsmNodeOperands.push_back(InOperandVal);
7734         break;
7735       }
7736 
7737       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
7738               OpInfo.ConstraintType == TargetLowering::C_Register) &&
7739              "Unknown constraint type!");
7740 
7741       // TODO: Support this.
7742       if (OpInfo.isIndirect) {
7743         emitInlineAsmError(
7744             CS, "Don't know how to handle indirect register inputs yet "
7745                 "for constraint '" +
7746                     Twine(OpInfo.ConstraintCode) + "'");
7747         return;
7748       }
7749 
7750       // Copy the input into the appropriate registers.
7751       if (OpInfo.AssignedRegs.Regs.empty()) {
7752         emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
7753                                    Twine(OpInfo.ConstraintCode) + "'");
7754         return;
7755       }
7756 
7757       SDLoc dl = getCurSDLoc();
7758 
7759       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7760                                         Chain, &Flag, CS.getInstruction());
7761 
7762       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
7763                                                dl, DAG, AsmNodeOperands);
7764       break;
7765     }
7766     case InlineAsm::isClobber:
7767       // Add the clobbered value to the operand list, so that the register
7768       // allocator is aware that the physreg got clobbered.
7769       if (!OpInfo.AssignedRegs.Regs.empty())
7770         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
7771                                                  false, 0, getCurSDLoc(), DAG,
7772                                                  AsmNodeOperands);
7773       break;
7774     }
7775   }
7776 
7777   // Finish up input operands.  Set the input chain and add the flag last.
7778   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
7779   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
7780 
7781   Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
7782                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
7783   Flag = Chain.getValue(1);
7784 
7785   // If this asm returns a register value, copy the result from that register
7786   // and set it as the value of the call.
7787   if (!RetValRegs.Regs.empty()) {
7788     SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7789                                              Chain, &Flag, CS.getInstruction());
7790 
7791     llvm::Type *CSResultType = CS.getType();
7792     unsigned numRet;
7793     ArrayRef<Type *> ResultTypes;
7794     SmallVector<SDValue, 1> ResultValues(1);
7795     if (CSResultType->isSingleValueType()) {
7796       numRet = 1;
7797       ResultValues[0] = Val;
7798       ResultTypes = makeArrayRef(CSResultType);
7799     } else {
7800       numRet = CSResultType->getNumContainedTypes();
7801       assert(Val->getNumOperands() == numRet &&
7802              "Mismatch in number of output operands in asm result");
7803       ResultTypes = CSResultType->subtypes();
7804       ArrayRef<SDUse> ValueUses = Val->ops();
7805       ResultValues.resize(numRet);
7806       std::transform(ValueUses.begin(), ValueUses.end(), ResultValues.begin(),
7807                      [](const SDUse &u) -> SDValue { return u.get(); });
7808     }
7809     SmallVector<EVT, 1> ResultVTs(numRet);
7810     for (unsigned i = 0; i < numRet; i++) {
7811       EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), ResultTypes[i]);
7812       SDValue Val = ResultValues[i];
7813       assert(ResultTypes[i]->isSized() && "Unexpected unsized type");
7814       // If the type of the inline asm call-site return value differs from the
7815       // asm output's type but has the same size, bitcast it.  One example of
7816       // this is vectors with a different width / number of elements.
7817       // This can happen for register classes that can contain multiple
7818       // different value types.  The preg or vreg allocated may not have the
7819       // same VT as was expected.
7820       //
7821       // This can also happen for a return value that disagrees with the
7822       // register class it is put in, e.g. a double in a general-purpose
7823       // register on a 32-bit machine.
7824       if (ResultVT != Val.getValueType() &&
7825           ResultVT.getSizeInBits() == Val.getValueSizeInBits())
7826         Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, Val);
7827       else if (ResultVT != Val.getValueType() && ResultVT.isInteger() &&
7828                Val.getValueType().isInteger()) {
7829         // If a result value was tied to an input value, the computed result
7830         // may have a wider width than the expected result.  Extract the
7831         // relevant portion.
7832         Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, Val);
7833       }
7834 
7835       assert(ResultVT == Val.getValueType() && "Asm result value mismatch!");
7836       ResultVTs[i] = ResultVT;
7837       ResultValues[i] = Val;
7838     }
7839 
7840     Val = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
7841                       DAG.getVTList(ResultVTs), ResultValues);
7842     setValue(CS.getInstruction(), Val);
7843     // Don't need to use this as a chain in this case.
7844     if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
7845       return;
7846   }
7847 
7848   std::vector<std::pair<SDValue, const Value *>> StoresToEmit;
7849 
7850   // Process indirect outputs, first output all of the flagged copies out of
7851   // physregs.
7852   for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
7853     RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
7854     const Value *Ptr = IndirectStoresToEmit[i].second;
7855     SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7856                                              Chain, &Flag, IA);
7857     StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
7858   }
7859 
7860   // Emit the non-flagged stores from the physregs.
7861   SmallVector<SDValue, 8> OutChains;
7862   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
7863     SDValue Val = DAG.getStore(Chain, getCurSDLoc(), StoresToEmit[i].first,
7864                                getValue(StoresToEmit[i].second),
7865                                MachinePointerInfo(StoresToEmit[i].second));
7866     OutChains.push_back(Val);
7867   }
7868 
7869   if (!OutChains.empty())
7870     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
7871 
7872   DAG.setRoot(Chain);
7873 }
7874 
7875 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
7876                                              const Twine &Message) {
7877   LLVMContext &Ctx = *DAG.getContext();
7878   Ctx.emitError(CS.getInstruction(), Message);
7879 
7880   // Make sure we leave the DAG in a valid state
7881   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7882   SmallVector<EVT, 1> ValueVTs;
7883   ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
7884 
7885   if (ValueVTs.empty())
7886     return;
7887 
7888   SmallVector<SDValue, 1> Ops;
7889   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
7890     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
7891 
7892   setValue(CS.getInstruction(), DAG.getMergeValues(Ops, getCurSDLoc()));
7893 }
7894 
7895 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
7896   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
7897                           MVT::Other, getRoot(),
7898                           getValue(I.getArgOperand(0)),
7899                           DAG.getSrcValue(I.getArgOperand(0))));
7900 }
7901 
7902 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
7903   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7904   const DataLayout &DL = DAG.getDataLayout();
7905   SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7906                            getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
7907                            DAG.getSrcValue(I.getOperand(0)),
7908                            DL.getABITypeAlignment(I.getType()));
7909   setValue(&I, V);
7910   DAG.setRoot(V.getValue(1));
7911 }
7912 
7913 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
7914   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
7915                           MVT::Other, getRoot(),
7916                           getValue(I.getArgOperand(0)),
7917                           DAG.getSrcValue(I.getArgOperand(0))));
7918 }
7919 
7920 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
7921   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
7922                           MVT::Other, getRoot(),
7923                           getValue(I.getArgOperand(0)),
7924                           getValue(I.getArgOperand(1)),
7925                           DAG.getSrcValue(I.getArgOperand(0)),
7926                           DAG.getSrcValue(I.getArgOperand(1))));
7927 }
7928 
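/// Wrap \p Op in an AssertZext when \p I carries !range metadata whose
/// unsigned range starts at zero. For example, !range !{i64 0, i64 256} on an
/// i64 result asserts that only the low 8 bits may be set, letting later
/// combines drop redundant zero-extensions. (Illustrative summary; the body
/// spells out the exact conditions.)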
7929 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
7930                                                     const Instruction &I,
7931                                                     SDValue Op) {
7932   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
7933   if (!Range)
7934     return Op;
7935 
7936   ConstantRange CR = getConstantRangeFromMetadata(*Range);
7937   if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
7938     return Op;
7939 
7940   APInt Lo = CR.getUnsignedMin();
7941   if (!Lo.isMinValue())
7942     return Op;
7943 
7944   APInt Hi = CR.getUnsignedMax();
7945   unsigned Bits = Hi.getActiveBits();
7946 
7947   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7948 
7949   SDLoc SL = getCurSDLoc();
7950 
7951   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
7952                              DAG.getValueType(SmallVT));
7953   unsigned NumVals = Op.getNode()->getNumValues();
7954   if (NumVals == 1)
7955     return ZExt;
7956 
7957   SmallVector<SDValue, 4> Ops;
7958 
7959   Ops.push_back(ZExt);
7960   for (unsigned I = 1; I != NumVals; ++I)
7961     Ops.push_back(Op.getValue(I));
7962 
7963   return DAG.getMergeValues(Ops, SL);
7964 }
7965 
7966 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
7967 /// the call being lowered.
7968 ///
7969 /// This is a helper for lowering intrinsics that follow a target calling
7970 /// convention or require stack pointer adjustment. Only a subset of the
7971 /// intrinsic's operands need to participate in the calling convention.
7972 void SelectionDAGBuilder::populateCallLoweringInfo(
7973     TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS,
7974     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
7975     bool IsPatchPoint) {
7976   TargetLowering::ArgListTy Args;
7977   Args.reserve(NumArgs);
7978 
7979   // Populate the argument list.
7980   // Attributes for args start at offset 1, after the return attribute.
7981   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
7982        ArgI != ArgE; ++ArgI) {
7983     const Value *V = CS->getOperand(ArgI);
7984 
7985     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
7986 
7987     TargetLowering::ArgListEntry Entry;
7988     Entry.Node = getValue(V);
7989     Entry.Ty = V->getType();
7990     Entry.setAttributes(&CS, ArgI);
7991     Args.push_back(Entry);
7992   }
7993 
7994   CLI.setDebugLoc(getCurSDLoc())
7995       .setChain(getRoot())
7996       .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args))
7997       .setDiscardResult(CS->use_empty())
7998       .setIsPatchPoint(IsPatchPoint);
7999 }
8000 
8001 /// Add a stack map intrinsic call's live variable operands to a stackmap
8002 /// or patchpoint target node's operand list.
8003 ///
8004 /// Constants are converted to TargetConstants purely as an optimization to
8005 /// avoid constant materialization and register allocation.
8006 ///
8007 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8008 /// generate address computation nodes, and so ExpandISelPseudo can convert the
8009 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8010 /// address materialization and register allocation, but may also be required
8011 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8012 /// alloca in the entry block, then the runtime may assume that the alloca's
8013 /// StackMap location can be read immediately after compilation and that the
8014 /// location is valid at any point during execution (this is similar to the
8015 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8016 /// only available in a register, then the runtime would need to trap when
8017 /// execution reaches the StackMap in order to read the alloca's location.
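///
/// For example (illustrative), a constant live value 42 is emitted as the
/// operand pair (StackMaps::ConstantOp, 42), while an entry-block alloca
/// becomes a TargetFrameIndex that StackMaps later reports as a
/// DirectMemRefOp location.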
8018 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
8019                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8020                                 SelectionDAGBuilder &Builder) {
8021   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
8022     SDValue OpVal = Builder.getValue(CS.getArgument(i));
8023     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8024       Ops.push_back(
8025         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8026       Ops.push_back(
8027         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8028     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8029       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8030       Ops.push_back(Builder.DAG.getTargetFrameIndex(
8031           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8032     } else
8033       Ops.push_back(OpVal);
8034   }
8035 }
8036 
8037 /// Lower llvm.experimental.stackmap directly to its target opcode.
8038 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8039   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
8040   //                                  [live variables...])
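  //
  // For example (illustrative):
  //   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 123, i32 8,
  //                                                         i32 %x)
  // records the location of %x under ID 123 and asks for 8 bytes of nop
  // shadow.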
8041 
8042   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8043 
8044   SDValue Chain, InFlag, Callee, NullPtr;
8045   SmallVector<SDValue, 32> Ops;
8046 
8047   SDLoc DL = getCurSDLoc();
8048   Callee = getValue(CI.getCalledValue());
8049   NullPtr = DAG.getIntPtrConstant(0, DL, true);
8050 
8051   // The stackmap intrinsic only records the live variables (the arguments
8052   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
8053   // intrinsic, this won't be lowered to a function call. This means we don't
8054   // have to worry about calling conventions and target specific lowering code.
8055   // Instead we perform the call lowering right here.
8056   //
8057   // chain, flag = CALLSEQ_START(chain, 0, 0)
8058   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8059   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8060   //
8061   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8062   InFlag = Chain.getValue(1);
8063 
8064   // Add the <id> and <numBytes> constants.
8065   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8066   Ops.push_back(DAG.getTargetConstant(
8067                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8068   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8069   Ops.push_back(DAG.getTargetConstant(
8070                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8071                   MVT::i32));
8072 
8073   // Push live variables for the stack map.
8074   addStackMapLiveVars(&CI, 2, DL, Ops, *this);
8075 
8076   // We are not pushing any register mask info here on the operands list,
8077   // because the stackmap doesn't clobber anything.
8078 
8079   // Push the chain and the glue flag.
8080   Ops.push_back(Chain);
8081   Ops.push_back(InFlag);
8082 
8083   // Create the STACKMAP node.
8084   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8085   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8086   Chain = SDValue(SM, 0);
8087   InFlag = Chain.getValue(1);
8088 
8089   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8090 
8091   // Stackmaps don't generate values, so nothing goes into the NodeMap.
8092 
8093   // Set the root to the target-lowered call chain.
8094   DAG.setRoot(Chain);
8095 
8096   // Inform the Frame Information that we have a stackmap in this function.
8097   FuncInfo.MF->getFrameInfo().setHasStackMap();
8098 }
8099 
8100 /// Lower llvm.experimental.patchpoint directly to its target opcode.
8101 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
8102                                           const BasicBlock *EHPadBB) {
8103   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8104   //                                                 i32 <numBytes>,
8105   //                                                 i8* <target>,
8106   //                                                 i32 <numArgs>,
8107   //                                                 [Args...],
8108   //                                                 [live variables...])
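  //
  // For example (illustrative):
  //   %r = call i64 (i64, i32, i8*, i32, ...)
  //          @llvm.experimental.patchpoint.i64(i64 7, i32 15, i8* %target,
  //                                            i32 2, i64 %a, i64 %b)
  // calls %target with two arguments inside a 15-byte patchable region and
  // yields the result in %r.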
8109 
8110   CallingConv::ID CC = CS.getCallingConv();
8111   bool IsAnyRegCC = CC == CallingConv::AnyReg;
8112   bool HasDef = !CS->getType()->isVoidTy();
8113   SDLoc dl = getCurSDLoc();
8114   SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
8115 
8116   // Handle immediate and symbolic callees.
8117   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8118     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8119                                    /*isTarget=*/true);
8120   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8121     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8122                                          SDLoc(SymbolicCallee),
8123                                          SymbolicCallee->getValueType(0));
8124 
8125   // Get the real number of arguments participating in the call, <numArgs>.
8126   SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
8127   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8128 
8129   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8130   // Intrinsics include all meta-operands up to but not including CC.
8131   unsigned NumMetaOpers = PatchPointOpers::CCPos;
8132   assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
8133          "Not enough arguments provided to the patchpoint intrinsic");
8134 
8135   // For AnyRegCC the arguments are lowered later on manually.
8136   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8137   Type *ReturnTy =
8138     IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
8139 
8140   TargetLowering::CallLoweringInfo CLI(DAG);
8141   populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
8142                            true);
8143   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8144 
8145   SDNode *CallEnd = Result.second.getNode();
8146   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8147     CallEnd = CallEnd->getOperand(0).getNode();
8148 
8149   // Get a call instruction from the call sequence chain.
8150   // Tail calls are not allowed.
8151   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8152          "Expected a callseq node.");
8153   SDNode *Call = CallEnd->getOperand(0).getNode();
8154   bool HasGlue = Call->getGluedNode();
8155 
8156   // Replace the target specific call node with the patchable intrinsic.
8157   SmallVector<SDValue, 8> Ops;
8158 
8159   // Add the <id> and <numBytes> constants.
8160   SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
8161   Ops.push_back(DAG.getTargetConstant(
8162                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8163   SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
8164   Ops.push_back(DAG.getTargetConstant(
8165                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8166                   MVT::i32));
8167 
8168   // Add the callee.
8169   Ops.push_back(Callee);
8170 
8171   // Adjust <numArgs> to account for any arguments that have been passed on the
8172   // stack instead.
8173   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
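  // (With glue, subtracting 4 removes Chain, Target, RegMask, and Glue,
  // leaving just the register arguments; without glue, subtracting 3.)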
8174   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8175   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8176   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8177 
8178   // Add the calling convention
8179   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8180 
8181   // Add the arguments we omitted previously. The register allocator should
8182   // place these in any free register.
8183   if (IsAnyRegCC)
8184     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8185       Ops.push_back(getValue(CS.getArgument(i)));
8186 
8187   // Push the arguments from the call instruction up to the register mask.
8188   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
8189   Ops.append(Call->op_begin() + 2, e);
8190 
8191   // Push live variables for the stack map.
8192   addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
8193 
8194   // Push the register mask info.
8195   if (HasGlue)
8196     Ops.push_back(*(Call->op_end()-2));
8197   else
8198     Ops.push_back(*(Call->op_end()-1));
8199 
8200   // Push the chain (this is originally the first operand of the call, but
8201   // now becomes the last or second-to-last operand).
8202   Ops.push_back(*(Call->op_begin()));
8203 
8204   // Push the glue flag (last operand).
8205   if (HasGlue)
8206     Ops.push_back(*(Call->op_end()-1));
8207 
8208   SDVTList NodeTys;
8209   if (IsAnyRegCC && HasDef) {
8210     // Create the return types based on the intrinsic definition
8211     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8212     SmallVector<EVT, 3> ValueVTs;
8213     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8214     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
8215 
8216     // There is always a chain and a glue type at the end
8217     ValueVTs.push_back(MVT::Other);
8218     ValueVTs.push_back(MVT::Glue);
8219     NodeTys = DAG.getVTList(ValueVTs);
8220   } else
8221     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8222 
8223   // Replace the target specific call node with a PATCHPOINT node.
8224   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
8225                                          dl, NodeTys, Ops);
8226 
8227   // Update the NodeMap.
8228   if (HasDef) {
8229     if (IsAnyRegCC)
8230       setValue(CS.getInstruction(), SDValue(MN, 0));
8231     else
8232       setValue(CS.getInstruction(), Result.first);
8233   }
8234 
8235   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
8236   // call sequence. Furthermore, the location of the chain and glue can change
8237   // when the AnyReg calling convention is used and the intrinsic returns a
8238   // value.
8239   if (IsAnyRegCC && HasDef) {
8240     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
8241     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
8242     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
8243   } else
8244     DAG.ReplaceAllUsesWith(Call, MN);
8245   DAG.DeleteNode(Call);
8246 
8247   // Inform the Frame Information that we have a patchpoint in this function.
8248   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8249 }
8250 
8251 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
8252                                             unsigned Intrinsic) {
8253   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8254   SDValue Op1 = getValue(I.getArgOperand(0));
8255   SDValue Op2;
8256   if (I.getNumArgOperands() > 1)
8257     Op2 = getValue(I.getArgOperand(1));
8258   SDLoc dl = getCurSDLoc();
8259   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8260   SDValue Res;
8261   FastMathFlags FMF;
8262   if (isa<FPMathOperator>(I))
8263     FMF = I.getFastMathFlags();
8264 
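  // Note: for the fadd/fmul reductions the first argument is the scalar
  // accumulator and the second is the vector to reduce; with fast-math flags
  // set, the relaxed VECREDUCE form reduces only the vector operand and may
  // reassociate freely.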
8265   switch (Intrinsic) {
8266   case Intrinsic::experimental_vector_reduce_fadd:
8267     if (FMF.isFast())
8268       Res = DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2);
8269     else
8270       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
8271     break;
8272   case Intrinsic::experimental_vector_reduce_fmul:
8273     if (FMF.isFast())
8274       Res = DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2);
8275     else
8276       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
8277     break;
8278   case Intrinsic::experimental_vector_reduce_add:
8279     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
8280     break;
8281   case Intrinsic::experimental_vector_reduce_mul:
8282     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
8283     break;
8284   case Intrinsic::experimental_vector_reduce_and:
8285     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
8286     break;
8287   case Intrinsic::experimental_vector_reduce_or:
8288     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
8289     break;
8290   case Intrinsic::experimental_vector_reduce_xor:
8291     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
8292     break;
8293   case Intrinsic::experimental_vector_reduce_smax:
8294     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
8295     break;
8296   case Intrinsic::experimental_vector_reduce_smin:
8297     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
8298     break;
8299   case Intrinsic::experimental_vector_reduce_umax:
8300     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
8301     break;
8302   case Intrinsic::experimental_vector_reduce_umin:
8303     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
8304     break;
8305   case Intrinsic::experimental_vector_reduce_fmax:
8306     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
8307     break;
8308   case Intrinsic::experimental_vector_reduce_fmin:
8309     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
8310     break;
8311   default:
8312     llvm_unreachable("Unhandled vector reduce intrinsic");
8313   }
8314   setValue(&I, Res);
8315 }
8316 
8317 /// Returns an AttributeList representing the attributes applied to the return
8318 /// value of the given call.
8319 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
8320   SmallVector<Attribute::AttrKind, 2> Attrs;
8321   if (CLI.RetSExt)
8322     Attrs.push_back(Attribute::SExt);
8323   if (CLI.RetZExt)
8324     Attrs.push_back(Attribute::ZExt);
8325   if (CLI.IsInReg)
8326     Attrs.push_back(Attribute::InReg);
8327 
8328   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
8329                             Attrs);
8330 }
8331 
8332 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
8333 /// implementation, which just calls LowerCall.
8334 /// FIXME: When all targets are
8335 /// migrated to using LowerCall, this hook should be integrated into SDISel.
8336 std::pair<SDValue, SDValue>
8337 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
8338   // Handle the incoming return values from the call.
8339   CLI.Ins.clear();
8340   Type *OrigRetTy = CLI.RetTy;
8341   SmallVector<EVT, 4> RetTys;
8342   SmallVector<uint64_t, 4> Offsets;
8343   auto &DL = CLI.DAG.getDataLayout();
8344   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
8345 
8346   if (CLI.IsPostTypeLegalization) {
8347     // If we are lowering a libcall after legalization, split the return type.
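    // E.g. an i64 return on a 32-bit target splits into two i32 register
    // values at byte offsets 0 and 4.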
8348     SmallVector<EVT, 4> OldRetTys = std::move(RetTys);
8349     SmallVector<uint64_t, 4> OldOffsets = std::move(Offsets);
8350     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
8351       EVT RetVT = OldRetTys[i];
8352       uint64_t Offset = OldOffsets[i];
8353       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
8354       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
8355       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
8356       RetTys.append(NumRegs, RegisterVT);
8357       for (unsigned j = 0; j != NumRegs; ++j)
8358         Offsets.push_back(Offset + j * RegisterVTByteSZ);
8359     }
8360   }
8361 
8362   SmallVector<ISD::OutputArg, 4> Outs;
8363   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
8364 
8365   bool CanLowerReturn =
8366       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
8367                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
8368 
8369   SDValue DemoteStackSlot;
8370   int DemoteStackIdx = -100;
8371   if (!CanLowerReturn) {
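    // Demote the return value to memory ("sret" demotion): allocate a stack
    // temporary, pass its address as a hidden leading argument, and load the
    // result back out of the slot after the call (see the !CanLowerReturn
    // path further down).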
8372     // FIXME: equivalent assert?
8373     // assert(!CS.hasInAllocaArgument() &&
8374     //        "sret demotion is incompatible with inalloca");
8375     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
8376     unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
8377     MachineFunction &MF = CLI.DAG.getMachineFunction();
8378     DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
8379     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
8380                                               DL.getAllocaAddrSpace());
8381 
8382     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
8383     ArgListEntry Entry;
8384     Entry.Node = DemoteStackSlot;
8385     Entry.Ty = StackSlotPtrType;
8386     Entry.IsSExt = false;
8387     Entry.IsZExt = false;
8388     Entry.IsInReg = false;
8389     Entry.IsSRet = true;
8390     Entry.IsNest = false;
8391     Entry.IsByVal = false;
8392     Entry.IsReturned = false;
8393     Entry.IsSwiftSelf = false;
8394     Entry.IsSwiftError = false;
8395     Entry.Alignment = Align;
8396     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
8397     CLI.NumFixedArgs += 1;
8398     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
8399 
8400     // sret demotion isn't compatible with tail-calls, since the sret argument
8401     // points into the caller's stack frame.
8402     CLI.IsTailCall = false;
8403   } else {
8404     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8405       EVT VT = RetTys[I];
8406       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8407                                                      CLI.CallConv, VT);
8408       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8409                                                        CLI.CallConv, VT);
8410       for (unsigned i = 0; i != NumRegs; ++i) {
8411         ISD::InputArg MyFlags;
8412         MyFlags.VT = RegisterVT;
8413         MyFlags.ArgVT = VT;
8414         MyFlags.Used = CLI.IsReturnValueUsed;
8415         if (CLI.RetSExt)
8416           MyFlags.Flags.setSExt();
8417         if (CLI.RetZExt)
8418           MyFlags.Flags.setZExt();
8419         if (CLI.IsInReg)
8420           MyFlags.Flags.setInReg();
8421         CLI.Ins.push_back(MyFlags);
8422       }
8423     }
8424   }
8425 
8426   // We push in swifterror return as the last element of CLI.Ins.
8427   ArgListTy &Args = CLI.getArgs();
8428   if (supportSwiftError()) {
8429     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8430       if (Args[i].IsSwiftError) {
8431         ISD::InputArg MyFlags;
8432         MyFlags.VT = getPointerTy(DL);
8433         MyFlags.ArgVT = EVT(getPointerTy(DL));
8434         MyFlags.Flags.setSwiftError();
8435         CLI.Ins.push_back(MyFlags);
8436       }
8437     }
8438   }
8439 
8440   // Handle all of the outgoing arguments.
8441   CLI.Outs.clear();
8442   CLI.OutVals.clear();
8443   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8444     SmallVector<EVT, 4> ValueVTs;
8445     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
8446     // FIXME: Split arguments if CLI.IsPostTypeLegalization
8447     Type *FinalType = Args[i].Ty;
8448     if (Args[i].IsByVal)
8449       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
8450     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
8451         FinalType, CLI.CallConv, CLI.IsVarArg);
8452     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
8453          ++Value) {
8454       EVT VT = ValueVTs[Value];
8455       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
8456       SDValue Op = SDValue(Args[i].Node.getNode(),
8457                            Args[i].Node.getResNo() + Value);
8458       ISD::ArgFlagsTy Flags;
8459 
8460       // Certain targets (such as MIPS), may have a different ABI alignment
8461       // for a type depending on the context. Give the target a chance to
8462       // specify the alignment it wants.
8463       unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
8464 
8465       if (Args[i].IsZExt)
8466         Flags.setZExt();
8467       if (Args[i].IsSExt)
8468         Flags.setSExt();
8469       if (Args[i].IsInReg) {
8470         // If we are using the vectorcall calling convention, a structure
8471         // that is passed InReg must be an HVA (homogeneous vector aggregate).
8472         if (CLI.CallConv == CallingConv::X86_VectorCall &&
8473             isa<StructType>(FinalType)) {
8474           // The first value of a structure is marked as the HVA start.
8475           if (0 == Value)
8476             Flags.setHvaStart();
8477           Flags.setHva();
8478         }
8479         // Set InReg Flag
8480         Flags.setInReg();
8481       }
8482       if (Args[i].IsSRet)
8483         Flags.setSRet();
8484       if (Args[i].IsSwiftSelf)
8485         Flags.setSwiftSelf();
8486       if (Args[i].IsSwiftError)
8487         Flags.setSwiftError();
8488       if (Args[i].IsByVal)
8489         Flags.setByVal();
8490       if (Args[i].IsInAlloca) {
8491         Flags.setInAlloca();
8492         // Set the byval flag for CCAssignFn callbacks that don't know about
8493         // inalloca.  This way we can know how many bytes we should've allocated
8494         // and how many bytes a callee cleanup function will pop.  If we port
8495         // inalloca to more targets, we'll have to add custom inalloca handling
8496         // in the various CC lowering callbacks.
8497         Flags.setByVal();
8498       }
8499       if (Args[i].IsByVal || Args[i].IsInAlloca) {
8500         PointerType *Ty = cast<PointerType>(Args[i].Ty);
8501         Type *ElementTy = Ty->getElementType();
8502         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8503         // For ByVal, the alignment should come from the frontend; the backend
8504         // will guess if this info is missing, but can get some cases wrong.
8505         unsigned FrameAlign;
8506         if (Args[i].Alignment)
8507           FrameAlign = Args[i].Alignment;
8508         else
8509           FrameAlign = getByValTypeAlignment(ElementTy, DL);
8510         Flags.setByValAlign(FrameAlign);
8511       }
8512       if (Args[i].IsNest)
8513         Flags.setNest();
8514       if (NeedsRegBlock)
8515         Flags.setInConsecutiveRegs();
8516       Flags.setOrigAlign(OriginalAlignment);
8517 
8518       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8519                                                  CLI.CallConv, VT);
8520       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8521                                                         CLI.CallConv, VT);
8522       SmallVector<SDValue, 4> Parts(NumParts);
8523       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
8524 
8525       if (Args[i].IsSExt)
8526         ExtendKind = ISD::SIGN_EXTEND;
8527       else if (Args[i].IsZExt)
8528         ExtendKind = ISD::ZERO_EXTEND;
8529 
8530       // For now, conservatively only handle 'returned' on non-vector values
8531       // whose return can be lowered.
8532       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
8533           CanLowerReturn) {
8534         assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
8535                "unexpected use of 'returned'");
8536         // Before passing 'returned' to the target lowering code, ensure that
8537         // either the register MVT and the actual EVT are the same size or that
8538         // the return value and argument are extended in the same way; in these
8539         // cases it's safe to pass the argument register value unchanged as the
8540         // return register value (although it's at the target's option whether
8541         // to do so)
8542         // TODO: allow code generation to take advantage of partially preserved
8543         // registers rather than clobbering the entire register when the
8544         // parameter extension method is not compatible with the return
8545         // extension method
8546         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
8547             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
8548              CLI.RetZExt == Args[i].IsZExt))
8549           Flags.setReturned();
8550       }
8551 
8552       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
8553                      CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);
8554 
8555       for (unsigned j = 0; j != NumParts; ++j) {
8556         // If it isn't the first piece, the alignment must be 1.
8557         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
8558                                i < CLI.NumFixedArgs,
8559                                i, j*Parts[j].getValueType().getStoreSize());
8560         if (NumParts > 1 && j == 0)
8561           MyFlags.Flags.setSplit();
8562         else if (j != 0) {
8563           MyFlags.Flags.setOrigAlign(1);
8564           if (j == NumParts - 1)
8565             MyFlags.Flags.setSplitEnd();
8566         }
8567 
8568         CLI.Outs.push_back(MyFlags);
8569         CLI.OutVals.push_back(Parts[j]);
8570       }
8571 
8572       if (NeedsRegBlock && Value == NumValues - 1)
8573         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
8574     }
8575   }
8576 
8577   SmallVector<SDValue, 4> InVals;
8578   CLI.Chain = LowerCall(CLI, InVals);
8579 
8580   // Update CLI.InVals to use outside of this function.
8581   CLI.InVals = InVals;
8582 
8583   // Verify that the target's LowerCall behaved as expected.
8584   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
8585          "LowerCall didn't return a valid chain!");
8586   assert((!CLI.IsTailCall || InVals.empty()) &&
8587          "LowerCall emitted a return value for a tail call!");
8588   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
8589          "LowerCall didn't emit the correct number of values!");
8590 
8591   // For a tail call, the return value is merely live-out and there aren't
8592   // any nodes in the DAG representing it. Return a special value to
8593   // indicate that a tail call has been emitted and no more Instructions
8594   // should be processed in the current block.
8595   if (CLI.IsTailCall) {
8596     CLI.DAG.setRoot(CLI.Chain);
8597     return std::make_pair(SDValue(), SDValue());
8598   }
8599 
8600 #ifndef NDEBUG
8601   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
8602     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
8603     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
8604            "LowerCall emitted a value with the wrong type!");
8605   }
8606 #endif
8607 
8608   SmallVector<SDValue, 4> ReturnValues;
8609   if (!CanLowerReturn) {
8610     // The instruction result is the result of loading from the
8611     // hidden sret parameter.
8612     SmallVector<EVT, 1> PVTs;
8613     Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
8614 
8615     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
8616     assert(PVTs.size() == 1 && "Pointers should fit in one register");
8617     EVT PtrVT = PVTs[0];
8618 
8619     unsigned NumValues = RetTys.size();
8620     ReturnValues.resize(NumValues);
8621     SmallVector<SDValue, 4> Chains(NumValues);
8622 
8623     // An aggregate return value cannot wrap around the address space, so
8624     // offsets to its parts don't wrap either.
8625     SDNodeFlags Flags;
8626     Flags.setNoUnsignedWrap(true);
8627 
8628     for (unsigned i = 0; i < NumValues; ++i) {
8629       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
8630                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
8631                                                         PtrVT), Flags);
8632       SDValue L = CLI.DAG.getLoad(
8633           RetTys[i], CLI.DL, CLI.Chain, Add,
8634           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
8635                                             DemoteStackIdx, Offsets[i]),
8636           /* Alignment = */ 1);
8637       ReturnValues[i] = L;
8638       Chains[i] = L.getValue(1);
8639     }
8640 
8641     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
8642   } else {
8643     // Collect the legal value parts into potentially illegal values
8644     // that correspond to the original function's return values.
8645     Optional<ISD::NodeType> AssertOp;
8646     if (CLI.RetSExt)
8647       AssertOp = ISD::AssertSext;
8648     else if (CLI.RetZExt)
8649       AssertOp = ISD::AssertZext;
8650     unsigned CurReg = 0;
8651     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8652       EVT VT = RetTys[I];
8653       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8654                                                      CLI.CallConv, VT);
8655       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8656                                                        CLI.CallConv, VT);
8657 
8658       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
8659                                               NumRegs, RegisterVT, VT, nullptr,
8660                                               CLI.CallConv, AssertOp));
8661       CurReg += NumRegs;
8662     }
8663 
8664     // For a function returning void, there is no return value. We can't
8665     // create such a node, so we just return a null return value; nothing
8666     // will actually look at the value in that case.
8667     if (ReturnValues.empty())
8668       return std::make_pair(SDValue(), CLI.Chain);
8669   }
8670 
8671   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
8672                                 CLI.DAG.getVTList(RetTys), ReturnValues);
8673   return std::make_pair(Res, CLI.Chain);
8674 }
8675 
8676 void TargetLowering::LowerOperationWrapper(SDNode *N,
8677                                            SmallVectorImpl<SDValue> &Results,
8678                                            SelectionDAG &DAG) const {
8679   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
8680     Results.push_back(Res);
8681 }
8682 
8683 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
8684   llvm_unreachable("LowerOperation not implemented for this target!");
8685 }
8686 
8687 void
8688 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
8689   SDValue Op = getNonRegisterValue(V);
8690   assert((Op.getOpcode() != ISD::CopyFromReg ||
8691           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
8692          "Copy from a reg to the same reg!");
8693   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
8694 
8695   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8696   // If this is an InlineAsm we have to match the registers required, not the
8697   // notional registers required by the type.
8698 
8699   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
8700                    None); // This is not an ABI copy.
8701   SDValue Chain = DAG.getEntryNode();
8702 
8703   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
8704                               FuncInfo.PreferredExtendType.end())
8705                                  ? ISD::ANY_EXTEND
8706                                  : FuncInfo.PreferredExtendType[V];
8707   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
8708   PendingExports.push_back(Chain);
8709 }
8710 
8711 #include "llvm/CodeGen/SelectionDAGISel.h"
8712 
8713 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
8714 /// entry block, return true.  Uses by a switch count as uses outside the
8715 /// entry block, since the switch may expand into multiple basic blocks.
8716 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
8717   // With FastISel active, we may be splitting blocks, so force creation
8718   // of virtual registers for all non-dead arguments.
8719   if (FastISel)
8720     return A->use_empty();
8721 
8722   const BasicBlock &Entry = A->getParent()->front();
8723   for (const User *U : A->users())
8724     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
8725       return false;  // Use not in entry block.
8726 
8727   return true;
8728 }
8729 
8730 using ArgCopyElisionMapTy =
8731     DenseMap<const Argument *,
8732              std::pair<const AllocaInst *, const StoreInst *>>;
8733 
8734 /// Scan the entry block of the function in FuncInfo for arguments that look
8735 /// like copies into a local alloca. Record any copied arguments in
8736 /// ArgCopyElisionCandidates.
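///
/// For example (illustrative), an entry block containing
///   %x.addr = alloca i32
///   store i32 %x, i32* %x.addr
/// makes (%x -> (%x.addr, store)) a candidate, provided nothing else stores
/// to or escapes %x.addr.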
8737 static void
8738 findArgumentCopyElisionCandidates(const DataLayout &DL,
8739                                   FunctionLoweringInfo *FuncInfo,
8740                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
8741   // Record the state of every static alloca used in the entry block. Argument
8742   // allocas are all used in the entry block, so we need approximately as many
8743   // entries as we have arguments.
8744   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
8745   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
8746   unsigned NumArgs = FuncInfo->Fn->arg_size();
8747   StaticAllocas.reserve(NumArgs * 2);
8748 
8749   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
8750     if (!V)
8751       return nullptr;
8752     V = V->stripPointerCasts();
8753     const auto *AI = dyn_cast<AllocaInst>(V);
8754     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
8755       return nullptr;
8756     auto Iter = StaticAllocas.insert({AI, Unknown});
8757     return &Iter.first->second;
8758   };
8759 
8760   // Look for stores of arguments to static allocas. Look through bitcasts and
8761   // GEPs to handle type coercions, as long as the alloca is fully initialized
8762   // by the store. Any non-store use of an alloca escapes it and any subsequent
8763   // unanalyzed store might write it.
8764   // FIXME: Handle structs initialized with multiple stores.
8765   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
8766     // Look for stores, and handle non-store uses conservatively.
8767     const auto *SI = dyn_cast<StoreInst>(&I);
8768     if (!SI) {
8769       // We will look through cast uses, so ignore them completely.
8770       if (I.isCast())
8771         continue;
8772       // Ignore debug info intrinsics, they don't escape or store to allocas.
8773       if (isa<DbgInfoIntrinsic>(I))
8774         continue;
8775       // This is an unknown instruction. Assume it escapes or writes to all
8776       // static alloca operands.
8777       for (const Use &U : I.operands()) {
8778         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
8779           *Info = StaticAllocaInfo::Clobbered;
8780       }
8781       continue;
8782     }
8783 
8784     // If the stored value is a static alloca, mark it as escaped.
8785     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
8786       *Info = StaticAllocaInfo::Clobbered;
8787 
8788     // Check if the destination is a static alloca.
8789     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
8790     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
8791     if (!Info)
8792       continue;
8793     const AllocaInst *AI = cast<AllocaInst>(Dst);
8794 
8795     // Skip allocas that have been initialized or clobbered.
8796     if (*Info != StaticAllocaInfo::Unknown)
8797       continue;
8798 
8799     // Check if the stored value is an argument, and that this store fully
8800     // initializes the alloca. Don't elide copies from the same argument twice.
8801     const Value *Val = SI->getValueOperand()->stripPointerCasts();
8802     const auto *Arg = dyn_cast<Argument>(Val);
8803     if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
8804         Arg->getType()->isEmptyTy() ||
8805         DL.getTypeStoreSize(Arg->getType()) !=
8806             DL.getTypeAllocSize(AI->getAllocatedType()) ||
8807         ArgCopyElisionCandidates.count(Arg)) {
8808       *Info = StaticAllocaInfo::Clobbered;
8809       continue;
8810     }
8811 
8812     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
8813                       << '\n');
8814 
8815     // Mark this alloca and store for argument copy elision.
8816     *Info = StaticAllocaInfo::Elidable;
8817     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
8818 
8819     // Stop scanning if we've seen all arguments. This will happen early in -O0
8820     // builds, which is useful, because -O0 builds have large entry blocks and
8821     // many allocas.
8822     if (ArgCopyElisionCandidates.size() == NumArgs)
8823       break;
8824   }
8825 }
8826 
8827 /// Try to elide argument copies from memory into a local alloca. Succeeds if
8828 /// ArgVal is a load from a suitable fixed stack object.
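///
/// On success, the alloca's frame index is redirected to the fixed stack
/// object already holding the incoming argument and the copying store is
/// marked as elided, so no code is emitted for it.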
8829 static void tryToElideArgumentCopy(
8830     FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
8831     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
8832     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
8833     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
8834     SDValue ArgVal, bool &ArgHasUses) {
8835   // Check if this is a load from a fixed stack object.
8836   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
8837   if (!LNode)
8838     return;
8839   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
8840   if (!FINode)
8841     return;
8842 
8843   // Check that the fixed stack object is the right size and alignment.
8844   // Look at the alignment that the user wrote on the alloca instead of looking
8845   // at the stack object.
8846   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
8847   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
8848   const AllocaInst *AI = ArgCopyIter->second.first;
8849   int FixedIndex = FINode->getIndex();
8850   int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
8851   int OldIndex = AllocaIndex;
8852   MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
8853   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
8854     LLVM_DEBUG(
8855         dbgs() << "  argument copy elision failed due to bad fixed stack "
8856                   "object size\n");
8857     return;
8858   }
8859   unsigned RequiredAlignment = AI->getAlignment();
8860   if (!RequiredAlignment) {
8861     RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
8862         AI->getAllocatedType());
8863   }
8864   if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
8865     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
8866                          "greater than stack argument alignment ("
8867                       << RequiredAlignment << " vs "
8868                       << MFI.getObjectAlignment(FixedIndex) << ")\n");
8869     return;
8870   }
8871 
8872   // Perform the elision. Delete the old stack object and replace its only use
8873   // in the variable info map. Mark the stack object as mutable.
8874   LLVM_DEBUG({
8875     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
8876            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
8877            << '\n';
8878   });
8879   MFI.RemoveStackObject(OldIndex);
8880   MFI.setIsImmutableObjectIndex(FixedIndex, false);
8881   AllocaIndex = FixedIndex;
8882   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
8883   Chains.push_back(ArgVal.getValue(1));
8884 
8885   // Avoid emitting code for the store implementing the copy.
8886   const StoreInst *SI = ArgCopyIter->second.second;
8887   ElidedArgCopyInstrs.insert(SI);
8888 
8889   // Check for uses of the argument again so that we can avoid exporting ArgVal
8890   // if it is't used by anything other than the store.
8891   // if it isn't used by anything other than the store.
8892     if (U != SI) {
8893       ArgHasUses = true;
8894       break;
8895     }
8896   }
8897 }
8898 
8899 void SelectionDAGISel::LowerArguments(const Function &F) {
8900   SelectionDAG &DAG = SDB->DAG;
8901   SDLoc dl = SDB->getCurSDLoc();
8902   const DataLayout &DL = DAG.getDataLayout();
8903   SmallVector<ISD::InputArg, 16> Ins;
8904 
8905   if (!FuncInfo->CanLowerReturn) {
8906     // Put in an sret pointer parameter before all the other parameters.
8907     SmallVector<EVT, 1> ValueVTs;
8908     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8909                     F.getReturnType()->getPointerTo(
8910                         DAG.getDataLayout().getAllocaAddrSpace()),
8911                     ValueVTs);
8912 
8913     // NOTE: Assuming that a pointer will never break down to more than one
8914     // VT or more than one register.
8915     ISD::ArgFlagsTy Flags;
8916     Flags.setSRet();
8917     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
8918     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
8919                          ISD::InputArg::NoArgIndex, 0);
8920     Ins.push_back(RetArg);
8921   }
8922 
8923   // Look for stores of arguments to static allocas. Mark such arguments with a
8924   // flag to ask the target to give us the memory location of that argument if
8925   // available.
8926   ArgCopyElisionMapTy ArgCopyElisionCandidates;
8927   findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
8928 
8929   // Set up the incoming argument description vector.
8930   for (const Argument &Arg : F.args()) {
8931     unsigned ArgNo = Arg.getArgNo();
8932     SmallVector<EVT, 4> ValueVTs;
8933     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8934     bool isArgValueUsed = !Arg.use_empty();
8935     unsigned PartBase = 0;
8936     Type *FinalType = Arg.getType();
8937     if (Arg.hasAttribute(Attribute::ByVal))
8938       FinalType = cast<PointerType>(FinalType)->getElementType();
8939     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
8940         FinalType, F.getCallingConv(), F.isVarArg());
8941     for (unsigned Value = 0, NumValues = ValueVTs.size();
8942          Value != NumValues; ++Value) {
8943       EVT VT = ValueVTs[Value];
8944       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8945       ISD::ArgFlagsTy Flags;
8946 
8947       // Certain targets (such as MIPS), may have a different ABI alignment
8948       // for a type depending on the context. Give the target a chance to
8949       // specify the alignment it wants.
8950       unsigned OriginalAlignment =
8951           TLI->getABIAlignmentForCallingConv(ArgTy, DL);
8952 
8953       if (Arg.hasAttribute(Attribute::ZExt))
8954         Flags.setZExt();
8955       if (Arg.hasAttribute(Attribute::SExt))
8956         Flags.setSExt();
8957       if (Arg.hasAttribute(Attribute::InReg)) {
8958         // If we are using the vectorcall calling convention, a structure
8959         // that is passed InReg is surely an HVA.
8960         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
8961             isa<StructType>(Arg.getType())) {
8962           // The first value of a structure is marked as the start of the HVA.
8963           if (0 == Value)
8964             Flags.setHvaStart();
8965           Flags.setHva();
8966         }
8967         // Set InReg Flag
8968         Flags.setInReg();
8969       }
8970       if (Arg.hasAttribute(Attribute::StructRet))
8971         Flags.setSRet();
8972       if (Arg.hasAttribute(Attribute::SwiftSelf))
8973         Flags.setSwiftSelf();
8974       if (Arg.hasAttribute(Attribute::SwiftError))
8975         Flags.setSwiftError();
8976       if (Arg.hasAttribute(Attribute::ByVal))
8977         Flags.setByVal();
8978       if (Arg.hasAttribute(Attribute::InAlloca)) {
8979         Flags.setInAlloca();
8980         // Set the byval flag for CCAssignFn callbacks that don't know about
8981         // inalloca.  This way we can know how many bytes we should've allocated
8982         // and how many bytes a callee cleanup function will pop.  If we port
8983         // inalloca to more targets, we'll have to add custom inalloca handling
8984         // in the various CC lowering callbacks.
8985         Flags.setByVal();
8986       }
8987       if (F.getCallingConv() == CallingConv::X86_INTR) {
8988         // IA interrupt passes the frame (1st parameter) by value on the stack.
8989         if (ArgNo == 0)
8990           Flags.setByVal();
8991       }
8992       if (Flags.isByVal() || Flags.isInAlloca()) {
8993         PointerType *Ty = cast<PointerType>(Arg.getType());
8994         Type *ElementTy = Ty->getElementType();
8995         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8996         // For ByVal, the alignment should be passed from the frontend; the
8997         // backend will guess if it is absent, but can get some cases wrong.
8998         unsigned FrameAlign;
8999         if (Arg.getParamAlignment())
9000           FrameAlign = Arg.getParamAlignment();
9001         else
9002           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9003         Flags.setByValAlign(FrameAlign);
9004       }
9005       if (Arg.hasAttribute(Attribute::Nest))
9006         Flags.setNest();
9007       if (NeedsRegBlock)
9008         Flags.setInConsecutiveRegs();
9009       Flags.setOrigAlign(OriginalAlignment);
9010       if (ArgCopyElisionCandidates.count(&Arg))
9011         Flags.setCopyElisionCandidate();
9012 
9013       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9014           *CurDAG->getContext(), F.getCallingConv(), VT);
9015       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9016           *CurDAG->getContext(), F.getCallingConv(), VT);
9017       for (unsigned i = 0; i != NumRegs; ++i) {
9018         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9019                               ArgNo, PartBase+i*RegisterVT.getStoreSize());
9020         if (NumRegs > 1 && i == 0)
9021           MyFlags.Flags.setSplit();
9022         // If it isn't the first piece, the alignment must be 1.
9023         else if (i > 0) {
9024           MyFlags.Flags.setOrigAlign(1);
9025           if (i == NumRegs - 1)
9026             MyFlags.Flags.setSplitEnd();
9027         }
9028         Ins.push_back(MyFlags);
9029       }
9030       if (NeedsRegBlock && Value == NumValues - 1)
9031         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9032       PartBase += VT.getStoreSize();
9033     }
9034   }
9035 
9036   // Call the target to set up the argument values.
9037   SmallVector<SDValue, 8> InVals;
9038   SDValue NewRoot = TLI->LowerFormalArguments(
9039       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9040 
9041   // Verify that the target's LowerFormalArguments behaved as expected.
9042   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9043          "LowerFormalArguments didn't return a valid chain!");
9044   assert(InVals.size() == Ins.size() &&
9045          "LowerFormalArguments didn't emit the correct number of values!");
9046   LLVM_DEBUG({
9047     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9048       assert(InVals[i].getNode() &&
9049              "LowerFormalArguments emitted a null value!");
9050       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9051              "LowerFormalArguments emitted a value with the wrong type!");
9052     }
9053   });
9054 
9055   // Update the DAG with the new chain value resulting from argument lowering.
9056   DAG.setRoot(NewRoot);
9057 
9058   // Set up the argument values.
9059   unsigned i = 0;
9060   if (!FuncInfo->CanLowerReturn) {
9061     // Create a virtual register for the sret pointer, and put in a copy
9062     // from the sret argument into it.
9063     SmallVector<EVT, 1> ValueVTs;
9064     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9065                     F.getReturnType()->getPointerTo(
9066                         DAG.getDataLayout().getAllocaAddrSpace()),
9067                     ValueVTs);
9068     MVT VT = ValueVTs[0].getSimpleVT();
9069     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9070     Optional<ISD::NodeType> AssertOp = None;
9071     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9072                                         nullptr, F.getCallingConv(), AssertOp);
9073 
9074     MachineFunction& MF = SDB->DAG.getMachineFunction();
9075     MachineRegisterInfo& RegInfo = MF.getRegInfo();
9076     unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9077     FuncInfo->DemoteRegister = SRetReg;
9078     NewRoot =
9079         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9080     DAG.setRoot(NewRoot);
9081 
9082     // i indexes lowered arguments.  Bump it past the hidden sret argument.
9083     ++i;
9084   }
9085 
9086   SmallVector<SDValue, 4> Chains;
9087   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9088   for (const Argument &Arg : F.args()) {
9089     SmallVector<SDValue, 4> ArgValues;
9090     SmallVector<EVT, 4> ValueVTs;
9091     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9092     unsigned NumValues = ValueVTs.size();
9093     if (NumValues == 0)
9094       continue;
9095 
9096     bool ArgHasUses = !Arg.use_empty();
9097 
9098     // Elide the copying store if the target loaded this argument from a
9099     // suitable fixed stack object.
9100     if (Ins[i].Flags.isCopyElisionCandidate()) {
9101       tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9102                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
9103                              InVals[i], ArgHasUses);
9104     }
9105 
9106     // If this argument is unused, remember its value; it is used to generate
9107     // debugging information.
9108     bool isSwiftErrorArg =
9109         TLI->supportSwiftError() &&
9110         Arg.hasAttribute(Attribute::SwiftError);
9111     if (!ArgHasUses && !isSwiftErrorArg) {
9112       SDB->setUnusedArgValue(&Arg, InVals[i]);
9113 
9114       // Also remember any frame index for use in FastISel.
9115       if (FrameIndexSDNode *FI =
9116           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9117         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9118     }
9119 
9120     for (unsigned Val = 0; Val != NumValues; ++Val) {
9121       EVT VT = ValueVTs[Val];
9122       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9123                                                       F.getCallingConv(), VT);
9124       unsigned NumParts = TLI->getNumRegistersForCallingConv(
9125           *CurDAG->getContext(), F.getCallingConv(), VT);
9126 
9127       // Even an apparent 'unused' swifterror argument needs to be returned. So
9128       // we do generate a copy for it that can be used on return from the
9129       // function.
9130       if (ArgHasUses || isSwiftErrorArg) {
9131         Optional<ISD::NodeType> AssertOp;
9132         if (Arg.hasAttribute(Attribute::SExt))
9133           AssertOp = ISD::AssertSext;
9134         else if (Arg.hasAttribute(Attribute::ZExt))
9135           AssertOp = ISD::AssertZext;
9136 
9137         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9138                                              PartVT, VT, nullptr,
9139                                              F.getCallingConv(), AssertOp));
9140       }
9141 
9142       i += NumParts;
9143     }
9144 
9145     // We don't need to do anything else for unused arguments.
9146     if (ArgValues.empty())
9147       continue;
9148 
9149     // Note down frame index.
9150     if (FrameIndexSDNode *FI =
9151         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9152       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9153 
9154     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9155                                      SDB->getCurSDLoc());
9156 
9157     SDB->setValue(&Arg, Res);
9158     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9159       // We want to associate the argument with the frame index, among
9160       // involved operands, that corresponds to the lowest address. The
9161       // getCopyFromParts function, called earlier, is swapping the order of
9162       // the operands to BUILD_PAIR depending on endianness. The result of
9163       // that swapping is that the least significant bits of the argument will
9164       // be in the first operand of the BUILD_PAIR node, and the most
9165       // significant bits will be in the second operand.
9166       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9167       if (LoadSDNode *LNode =
9168           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9169         if (FrameIndexSDNode *FI =
9170             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9171           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9172     }
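
    // For instance (assumed types, a sketch): an i64 argument reloaded as two
    // i32 parts becomes BUILD_PAIR(lo, hi); on a big-endian target the most
    // significant half sits at the lower address, so operand 1 is the load
    // whose frame index marks the start of the argument.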
9173 
9174     // Update the SwiftErrorVRegDefMap.
9175     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9176       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9177       if (TargetRegisterInfo::isVirtualRegister(Reg))
9178         FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
9179                                            FuncInfo->SwiftErrorArg, Reg);
9180     }
9181 
9182     // If this argument is live outside of the entry block, insert a copy from
9183     // wherever we got it to the vreg that other BB's will reference it as.
9184     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
9185       // If we can, though, try to skip creating an unnecessary vreg.
9186       // FIXME: This isn't very clean... it would be nice to make this more
9187       // general.  It's also subtly incompatible with the hacks FastISel
9188       // uses with vregs.
9189       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9190       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
9191         FuncInfo->ValueMap[&Arg] = Reg;
9192         continue;
9193       }
9194     }
9195     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9196       FuncInfo->InitializeRegForValue(&Arg);
9197       SDB->CopyToExportRegsIfNeeded(&Arg);
9198     }
9199   }
9200 
9201   if (!Chains.empty()) {
9202     Chains.push_back(NewRoot);
9203     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
9204   }
9205 
9206   DAG.setRoot(NewRoot);
9207 
9208   assert(i == InVals.size() && "Argument register count mismatch!");
9209 
9210   // If any argument copy elisions occurred and we have debug info, update the
9211   // stale frame indices used in the dbg.declare variable info table.
9212   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9213   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9214     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9215       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9216       if (I != ArgCopyElisionFrameIndexMap.end())
9217         VI.Slot = I->second;
9218     }
9219   }
9220 
9221   // Finally, if the target has anything special to do, allow it to do so.
9222   EmitFunctionEntryCode();
9223 }
9224 
9225 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
9226 /// ensure constants are generated when needed.  Remember the virtual registers
9227 /// that need to be added to the Machine PHI nodes as input.  We cannot just
9228 /// directly add them, because expansion might result in multiple MBB's for one
9229 /// BB.  As such, the start of the BB might correspond to a different MBB than
9230 /// the end.
9231 void
9232 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
9233   const TerminatorInst *TI = LLVMBB->getTerminator();
9234 
9235   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
9236 
9237   // Check PHI nodes in successors that expect a value to be available from this
9238   // block.
9239   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
9240     const BasicBlock *SuccBB = TI->getSuccessor(succ);
9241     if (!isa<PHINode>(SuccBB->begin())) continue;
9242     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
9243 
9244     // If this terminator has multiple identical successors (common for
9245     // switches), only handle each succ once.
9246     if (!SuccsHandled.insert(SuccMBB).second)
9247       continue;
9248 
9249     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
9250 
9251     // At this point we know that there is a 1-1 correspondence between LLVM PHI
9252     // nodes and Machine PHI nodes, but the incoming operands have not been
9253     // emitted yet.
9254     for (const PHINode &PN : SuccBB->phis()) {
9255       // Ignore dead PHIs.
9256       if (PN.use_empty())
9257         continue;
9258 
9259       // Skip empty types
9260       if (PN.getType()->isEmptyTy())
9261         continue;
9262 
9263       unsigned Reg;
9264       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
9265 
9266       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
9267         unsigned &RegOut = ConstantsOut[C];
9268         if (RegOut == 0) {
9269           RegOut = FuncInfo.CreateRegs(C->getType());
9270           CopyValueToVirtualRegister(C, RegOut);
9271         }
9272         Reg = RegOut;
9273       } else {
9274         DenseMap<const Value *, unsigned>::iterator I =
9275           FuncInfo.ValueMap.find(PHIOp);
9276         if (I != FuncInfo.ValueMap.end())
9277           Reg = I->second;
9278         else {
9279           assert(isa<AllocaInst>(PHIOp) &&
9280                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
9281                  "Didn't codegen value into a register!??");
9282           Reg = FuncInfo.CreateRegs(PHIOp->getType());
9283           CopyValueToVirtualRegister(PHIOp, Reg);
9284         }
9285       }
9286 
9287       // Remember that this register needs to be added to the machine PHI
9288       // node as the input for this MBB.
9289       SmallVector<EVT, 4> ValueVTs;
9290       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9291       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
9292       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
9293         EVT VT = ValueVTs[vti];
9294         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
9295         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
9296           FuncInfo.PHINodesToUpdate.push_back(
9297               std::make_pair(&*MBBI++, Reg + i));
9298         Reg += NumRegisters;
9299       }
9300     }
9301   }
9302 
9303   ConstantsOut.clear();
9304 }
9305 
9306 /// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if
9307 /// SuccMBB is null.
9308 MachineBasicBlock *
9309 SelectionDAGBuilder::StackProtectorDescriptor::
9310 AddSuccessorMBB(const BasicBlock *BB,
9311                 MachineBasicBlock *ParentMBB,
9312                 bool IsLikely,
9313                 MachineBasicBlock *SuccMBB) {
9314   // If SuccBB has not been created yet, create it.
9315   if (!SuccMBB) {
9316     MachineFunction *MF = ParentMBB->getParent();
9317     MachineFunction::iterator BBI(ParentMBB);
9318     SuccMBB = MF->CreateMachineBasicBlock(BB);
9319     MF->insert(++BBI, SuccMBB);
9320   }
9321   // Add it as a successor of ParentMBB.
9322   ParentMBB->addSuccessor(
9323       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
9324   return SuccMBB;
9325 }
9326 
9327 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
9328   MachineFunction::iterator I(MBB);
9329   if (++I == FuncInfo.MF->end())
9330     return nullptr;
9331   return &*I;
9332 }
9333 
9334 /// During lowering new call nodes can be created (such as memset, etc.).
9335 /// Those will become new roots of the current DAG, but complications arise
9336 /// when they are tail calls. In such cases, the call lowering will update
9337 /// the root, but the builder still needs to know that a tail call has been
9338 /// lowered in order to avoid generating an additional return.
9339 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
9340   // If the node is null, we do have a tail call.
9341   if (MaybeTC.getNode() != nullptr)
9342     DAG.setRoot(MaybeTC);
9343   else
9344     HasTailCall = true;
9345 }
9346 
9347 uint64_t
9348 SelectionDAGBuilder::getJumpTableRange(const CaseClusterVector &Clusters,
9349                                        unsigned First, unsigned Last) const {
9350   assert(Last >= First);
9351   const APInt &LowCase = Clusters[First].Low->getValue();
9352   const APInt &HighCase = Clusters[Last].High->getValue();
9353   assert(LowCase.getBitWidth() == HighCase.getBitWidth());
9354 
9355   // FIXME: A range of consecutive cases has 100% density, but only requires one
9356   // comparison to lower. We should discriminate against such consecutive ranges
9357   // in jump tables.
9358 
9359   return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
9360 }
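
// Illustrative sketch (values assumed): for clusters covering case values
// 1..2 and 5..5, LowCase == 1 and HighCase == 5, so the function returns
// (5 - 1) + 1 == 5 -- the full span including the 3..4 gap. The
// getLimitedValue((UINT64_MAX - 1) / 100) clamp presumably keeps later
// density arithmetic of the form Range * 100 from overflowing uint64_t.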
9361 
9362 uint64_t SelectionDAGBuilder::getJumpTableNumCases(
9363     const SmallVectorImpl<unsigned> &TotalCases, unsigned First,
9364     unsigned Last) const {
9365   assert(Last >= First);
9366   assert(TotalCases[Last] >= TotalCases[First]);
9367   uint64_t NumCases =
9368       TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
9369   return NumCases;
9370 }
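
// Worked example (assumed counts, a sketch): if the per-cluster case counts
// are {2, 1, 4}, the inclusive prefix sums are TotalCases == {2, 3, 7}, and a
// query over clusters 1..2 returns 7 - TotalCases[0] == 5 individual cases.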
9371 
9372 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters,
9373                                          unsigned First, unsigned Last,
9374                                          const SwitchInst *SI,
9375                                          MachineBasicBlock *DefaultMBB,
9376                                          CaseCluster &JTCluster) {
9377   assert(First <= Last);
9378 
9379   auto Prob = BranchProbability::getZero();
9380   unsigned NumCmps = 0;
9381   std::vector<MachineBasicBlock*> Table;
9382   DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;
9383 
9384   // Initialize probabilities in JTProbs.
9385   for (unsigned I = First; I <= Last; ++I)
9386     JTProbs[Clusters[I].MBB] = BranchProbability::getZero();
9387 
9388   for (unsigned I = First; I <= Last; ++I) {
9389     assert(Clusters[I].Kind == CC_Range);
9390     Prob += Clusters[I].Prob;
9391     const APInt &Low = Clusters[I].Low->getValue();
9392     const APInt &High = Clusters[I].High->getValue();
9393     NumCmps += (Low == High) ? 1 : 2;
9394     if (I != First) {
9395       // Fill the gap between this and the previous cluster.
9396       const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
9397       assert(PreviousHigh.slt(Low));
9398       uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
9399       for (uint64_t J = 0; J < Gap; J++)
9400         Table.push_back(DefaultMBB);
9401     }
9402     uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
9403     for (uint64_t J = 0; J < ClusterSize; ++J)
9404       Table.push_back(Clusters[I].MBB);
9405     JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
9406   }
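
  // Table-filling sketch (assumed clusters): {[1,2] -> A, [5,5] -> B} yields
  // NumCmps == 2 + 1 and Table == {A, A, Default, Default, B}; the two
  // Default entries plug the uncovered gap 3..4.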
9407 
9408   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9409   unsigned NumDests = JTProbs.size();
9410   if (TLI.isSuitableForBitTests(
9411           NumDests, NumCmps, Clusters[First].Low->getValue(),
9412           Clusters[Last].High->getValue(), DAG.getDataLayout())) {
9413     // Clusters[First..Last] should be lowered as bit tests instead.
9414     return false;
9415   }
9416 
9417   // Create the MBB that will load from and jump through the table.
9418   // Note: We create it here, but it's not inserted into the function yet.
9419   MachineFunction *CurMF = FuncInfo.MF;
9420   MachineBasicBlock *JumpTableMBB =
9421       CurMF->CreateMachineBasicBlock(SI->getParent());
9422 
9423   // Add successors. Note: use table order for determinism.
9424   SmallPtrSet<MachineBasicBlock *, 8> Done;
9425   for (MachineBasicBlock *Succ : Table) {
9426     if (Done.count(Succ))
9427       continue;
9428     addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
9429     Done.insert(Succ);
9430   }
9431   JumpTableMBB->normalizeSuccProbs();
9432 
9433   unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
9434                      ->createJumpTableIndex(Table);
9435 
9436   // Set up the jump table info.
9437   JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
9438   JumpTableHeader JTH(Clusters[First].Low->getValue(),
9439                       Clusters[Last].High->getValue(), SI->getCondition(),
9440                       nullptr, false);
9441   JTCases.emplace_back(std::move(JTH), std::move(JT));
9442 
9443   JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
9444                                      JTCases.size() - 1, Prob);
9445   return true;
9446 }
9447 
9448 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
9449                                          const SwitchInst *SI,
9450                                          MachineBasicBlock *DefaultMBB) {
9451 #ifndef NDEBUG
9452   // Clusters must be non-empty, sorted, and only contain Range clusters.
9453   assert(!Clusters.empty());
9454   for (CaseCluster &C : Clusters)
9455     assert(C.Kind == CC_Range);
9456   for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
9457     assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
9458 #endif
9459 
9460   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9461   if (!TLI.areJTsAllowed(SI->getParent()->getParent()))
9462     return;
9463 
9464   const int64_t N = Clusters.size();
9465   const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
9466   const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
9467 
9468   if (N < 2 || N < MinJumpTableEntries)
9469     return;
9470 
9471   // TotalCases[i]: Total nbr of cases in Clusters[0..i].
9472   SmallVector<unsigned, 8> TotalCases(N);
9473   for (unsigned i = 0; i < N; ++i) {
9474     const APInt &Hi = Clusters[i].High->getValue();
9475     const APInt &Lo = Clusters[i].Low->getValue();
9476     TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
9477     if (i != 0)
9478       TotalCases[i] += TotalCases[i - 1];
9479   }
9480 
9481   // Cheap case: the whole range may be suitable for a jump table.
9482   uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
9483   uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
9484   assert(NumCases < UINT64_MAX / 100);
9485   assert(Range >= NumCases);
9486   if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9487     CaseCluster JTCluster;
9488     if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
9489       Clusters[0] = JTCluster;
9490       Clusters.resize(1);
9491       return;
9492     }
9493   }
9494 
9495   // The algorithm below is not suitable for -O0.
9496   if (TM.getOptLevel() == CodeGenOpt::None)
9497     return;
9498 
9499   // Split Clusters into minimum number of dense partitions. The algorithm uses
9500   // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
9501   // for the Case Statement'" (1994), but builds the MinPartitions array in
9502   // reverse order to make it easier to reconstruct the partitions in ascending
9503   // order. In the choice between two optimal partitionings, it picks the one
9504   // which yields more jump tables.
9505 
9506   // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
9507   SmallVector<unsigned, 8> MinPartitions(N);
9508   // LastElement[i] is the last element of the partition starting at i.
9509   SmallVector<unsigned, 8> LastElement(N);
9510   // PartitionsScore[i] is used to break ties when choosing between two
9511   // partitionings resulting in the same number of partitions.
9512   SmallVector<unsigned, 8> PartitionsScore(N);
9513   // For PartitionsScore, a small number of comparisons is considered as good as
9514   // a jump table and a single comparison is considered better than a jump
9515   // table.
9516   enum PartitionScores : unsigned {
9517     NoTable = 0,
9518     Table = 1,
9519     FewCases = 1,
9520     SingleCase = 2
9521   };
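
  // Hedged example of the tie-break (assumed clusters, and assuming every
  // partition mentioned passes isSuitableForJumpTable): with
  // MinJumpTableEntries == 4 and eight clusters, the partitionings
  // {c0..c3}{c4..c7} and {c0}{c1..c7} both use two partitions, but the
  // second scores SingleCase + Table == 3 against Table + Table == 2, so it
  // wins: one cheap comparison plus one table beats two tables.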
9522 
9523   // Base case: There is only one way to partition Clusters[N-1].
9524   MinPartitions[N - 1] = 1;
9525   LastElement[N - 1] = N - 1;
9526   PartitionsScore[N - 1] = PartitionScores::SingleCase;
9527 
9528   // Note: loop indexes are signed to avoid underflow.
9529   for (int64_t i = N - 2; i >= 0; i--) {
9530     // Find optimal partitioning of Clusters[i..N-1].
9531     // Baseline: Put Clusters[i] into a partition on its own.
9532     MinPartitions[i] = MinPartitions[i + 1] + 1;
9533     LastElement[i] = i;
9534     PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
9535 
9536     // Search for a solution that results in fewer partitions.
9537     for (int64_t j = N - 1; j > i; j--) {
9538       // Try building a partition from Clusters[i..j].
9539       uint64_t Range = getJumpTableRange(Clusters, i, j);
9540       uint64_t NumCases = getJumpTableNumCases(TotalCases, i, j);
9541       assert(NumCases < UINT64_MAX / 100);
9542       assert(Range >= NumCases);
9543       if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9544         unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9545         unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
9546         int64_t NumEntries = j - i + 1;
9547 
9548         if (NumEntries == 1)
9549           Score += PartitionScores::SingleCase;
9550         else if (NumEntries <= SmallNumberOfEntries)
9551           Score += PartitionScores::FewCases;
9552         else if (NumEntries >= MinJumpTableEntries)
9553           Score += PartitionScores::Table;
9554 
9555         // If this leads to fewer partitions, or to the same number of
9556         // partitions with better score, it is a better partitioning.
9557         if (NumPartitions < MinPartitions[i] ||
9558             (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
9559           MinPartitions[i] = NumPartitions;
9560           LastElement[i] = j;
9561           PartitionsScore[i] = Score;
9562         }
9563       }
9564     }
9565   }
9566 
9567   // Iterate over the partitions, replacing some with jump tables in-place.
9568   unsigned DstIndex = 0;
9569   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9570     Last = LastElement[First];
9571     assert(Last >= First);
9572     assert(DstIndex <= First);
9573     unsigned NumClusters = Last - First + 1;
9574 
9575     CaseCluster JTCluster;
9576     if (NumClusters >= MinJumpTableEntries &&
9577         buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
9578       Clusters[DstIndex++] = JTCluster;
9579     } else {
9580       for (unsigned I = First; I <= Last; ++I)
9581         std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
9582     }
9583   }
9584   Clusters.resize(DstIndex);
9585 }
9586 
9587 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
9588                                         unsigned First, unsigned Last,
9589                                         const SwitchInst *SI,
9590                                         CaseCluster &BTCluster) {
9591   assert(First <= Last);
9592   if (First == Last)
9593     return false;
9594 
9595   BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9596   unsigned NumCmps = 0;
9597   for (int64_t I = First; I <= Last; ++I) {
9598     assert(Clusters[I].Kind == CC_Range);
9599     Dests.set(Clusters[I].MBB->getNumber());
9600     NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
9601   }
9602   unsigned NumDests = Dests.count();
9603 
9604   APInt Low = Clusters[First].Low->getValue();
9605   APInt High = Clusters[Last].High->getValue();
9606   assert(Low.slt(High));
9607 
9608   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9609   const DataLayout &DL = DAG.getDataLayout();
9610   if (!TLI.isSuitableForBitTests(NumDests, NumCmps, Low, High, DL))
9611     return false;
9612 
9613   APInt LowBound;
9614   APInt CmpRange;
9615 
9616   const int BitWidth = TLI.getPointerTy(DL).getSizeInBits();
9617   assert(TLI.rangeFitsInWord(Low, High, DL) &&
9618          "Case range must fit in bit mask!");
9619 
9620   // Check if the clusters cover a contiguous range such that no value in the
9621   // range will jump to the default statement.
9622   bool ContiguousRange = true;
9623   for (int64_t I = First + 1; I <= Last; ++I) {
9624     if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
9625       ContiguousRange = false;
9626       break;
9627     }
9628   }
9629 
9630   if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
9631     // Optimize the case where all the case values fit in a word without
9632     // having to subtract minValue; the subtraction can then be elided.
9633     LowBound = APInt::getNullValue(Low.getBitWidth());
9634     CmpRange = High;
9635     ContiguousRange = false;
9636   } else {
9637     LowBound = Low;
9638     CmpRange = High - Low;
9639   }
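
  // For example (assumed case values): for cases {1, 3, 5} on a 64-bit
  // target, Low == 1 is strictly positive and High == 5 slt 64, so LowBound
  // becomes 0 and CmpRange becomes 5; the subtraction is elided, and
  // ContiguousRange is cleared because values 0..Low-1 now fall inside the
  // compared range but still belong to the default destination.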
9640 
9641   CaseBitsVector CBV;
9642   auto TotalProb = BranchProbability::getZero();
9643   for (unsigned i = First; i <= Last; ++i) {
9644     // Find the CaseBits for this destination.
9645     unsigned j;
9646     for (j = 0; j < CBV.size(); ++j)
9647       if (CBV[j].BB == Clusters[i].MBB)
9648         break;
9649     if (j == CBV.size())
9650       CBV.push_back(
9651           CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
9652     CaseBits *CB = &CBV[j];
9653 
9654     // Update Mask, Bits and ExtraProb.
9655     uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
9656     uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
9657     assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
9658     CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
9659     CB->Bits += Hi - Lo + 1;
9660     CB->ExtraProb += Clusters[i].Prob;
9661     TotalProb += Clusters[i].Prob;
9662   }
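
  // Mask sketch (assumed values): a cluster spanning LowBound+2 .. LowBound+4
  // has Lo == 2 and Hi == 4, so the update is
  // (-1ULL >> (63 - (4 - 2))) << 2 == 0b111 << 2 == 0b11100, i.e. bits 2..4.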
9663 
9664   BitTestInfo BTI;
9665   llvm::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
9666     // Sort by probability first, number of bits second, bit mask third.
9667     if (a.ExtraProb != b.ExtraProb)
9668       return a.ExtraProb > b.ExtraProb;
9669     if (a.Bits != b.Bits)
9670       return a.Bits > b.Bits;
9671     return a.Mask < b.Mask;
9672   });
9673 
9674   for (auto &CB : CBV) {
9675     MachineBasicBlock *BitTestBB =
9676         FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
9677     BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
9678   }
9679   BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
9680                             SI->getCondition(), -1U, MVT::Other, false,
9681                             ContiguousRange, nullptr, nullptr, std::move(BTI),
9682                             TotalProb);
9683 
9684   BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
9685                                     BitTestCases.size() - 1, TotalProb);
9686   return true;
9687 }
9688 
9689 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
9690                                               const SwitchInst *SI) {
9691 // Partition Clusters into as few subsets as possible, where each subset has a
9692 // range that fits in a machine word and has <= 3 unique destinations.
9693 
9694 #ifndef NDEBUG
9695   // Clusters must be sorted and contain Range or JumpTable clusters.
9696   assert(!Clusters.empty());
9697   assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
9698   for (const CaseCluster &C : Clusters)
9699     assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
9700   for (unsigned i = 1; i < Clusters.size(); ++i)
9701     assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
9702 #endif
9703 
9704   // The algorithm below is not suitable for -O0.
9705   if (TM.getOptLevel() == CodeGenOpt::None)
9706     return;
9707 
9708   // If target does not have legal shift left, do not emit bit tests at all.
9709   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9710   const DataLayout &DL = DAG.getDataLayout();
9711 
9712   EVT PTy = TLI.getPointerTy(DL);
9713   if (!TLI.isOperationLegal(ISD::SHL, PTy))
9714     return;
9715 
9716   int BitWidth = PTy.getSizeInBits();
9717   const int64_t N = Clusters.size();
9718 
9719   // MinPartitions[i] is the minimum nbr of partitions of Clusters[i..N-1].
9720   SmallVector<unsigned, 8> MinPartitions(N);
9721   // LastElement[i] is the last element of the partition starting at i.
9722   SmallVector<unsigned, 8> LastElement(N);
9723 
9724   // FIXME: This might not be the best algorithm for finding bit test clusters.
9725 
9726   // Base case: There is only one way to partition Clusters[N-1].
9727   MinPartitions[N - 1] = 1;
9728   LastElement[N - 1] = N - 1;
9729 
9730   // Note: loop indexes are signed to avoid underflow.
9731   for (int64_t i = N - 2; i >= 0; --i) {
9732     // Find optimal partitioning of Clusters[i..N-1].
9733     // Baseline: Put Clusters[i] into a partition on its own.
9734     MinPartitions[i] = MinPartitions[i + 1] + 1;
9735     LastElement[i] = i;
9736 
9737     // Search for a solution that results in fewer partitions.
9738     // Note: the search is limited by BitWidth, reducing time complexity.
9739     for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
9740       // Try building a partition from Clusters[i..j].
9741 
9742       // Check the range.
9743       if (!TLI.rangeFitsInWord(Clusters[i].Low->getValue(),
9744                                Clusters[j].High->getValue(), DL))
9745         continue;
9746 
9747       // Check nbr of destinations and cluster types.
9748       // FIXME: This works, but doesn't seem very efficient.
9749       bool RangesOnly = true;
9750       BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9751       for (int64_t k = i; k <= j; k++) {
9752         if (Clusters[k].Kind != CC_Range) {
9753           RangesOnly = false;
9754           break;
9755         }
9756         Dests.set(Clusters[k].MBB->getNumber());
9757       }
9758       if (!RangesOnly || Dests.count() > 3)
9759         break;
9760 
9761       // Check if it's a better partition.
9762       unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9763       if (NumPartitions < MinPartitions[i]) {
9764         // Found a better partition.
9765         MinPartitions[i] = NumPartitions;
9766         LastElement[i] = j;
9767       }
9768     }
9769   }
9770 
9771   // Iterate over the partitions, replacing with bit-test clusters in-place.
9772   unsigned DstIndex = 0;
9773   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9774     Last = LastElement[First];
9775     assert(First <= Last);
9776     assert(DstIndex <= First);
9777 
9778     CaseCluster BitTestCluster;
9779     if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9780       Clusters[DstIndex++] = BitTestCluster;
9781     } else {
9782       size_t NumClusters = Last - First + 1;
9783       std::memmove(&Clusters[DstIndex], &Clusters[First],
9784                    sizeof(Clusters[0]) * NumClusters);
9785       DstIndex += NumClusters;
9786     }
9787   }
9788   Clusters.resize(DstIndex);
9789 }
9790 
9791 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
9792                                         MachineBasicBlock *SwitchMBB,
9793                                         MachineBasicBlock *DefaultMBB) {
9794   MachineFunction *CurMF = FuncInfo.MF;
9795   MachineBasicBlock *NextMBB = nullptr;
9796   MachineFunction::iterator BBI(W.MBB);
9797   if (++BBI != FuncInfo.MF->end())
9798     NextMBB = &*BBI;
9799 
9800   unsigned Size = W.LastCluster - W.FirstCluster + 1;
9801 
9802   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9803 
9804   if (Size == 2 && W.MBB == SwitchMBB) {
9805     // If any two of the cases have the same destination, and if one value
9806     // is the same as the other, but has one bit unset that the other has set,
9807     // use bit manipulation to do two compares at once.  For example:
9808     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
9809     // TODO: This could be extended to merge any 2 cases in switches with 3
9810     // cases.
9811     // TODO: Handle cases where W.CaseBB != SwitchBB.
9812     CaseCluster &Small = *W.FirstCluster;
9813     CaseCluster &Big = *W.LastCluster;
9814 
9815     if (Small.Low == Small.High && Big.Low == Big.High &&
9816         Small.MBB == Big.MBB) {
9817       const APInt &SmallValue = Small.Low->getValue();
9818       const APInt &BigValue = Big.Low->getValue();
9819 
9820       // Check that there is only one bit different.
9821       APInt CommonBit = BigValue ^ SmallValue;
9822       if (CommonBit.isPowerOf2()) {
9823         SDValue CondLHS = getValue(Cond);
9824         EVT VT = CondLHS.getValueType();
9825         SDLoc DL = getCurSDLoc();
9826 
9827         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
9828                                  DAG.getConstant(CommonBit, DL, VT));
9829         SDValue Cond = DAG.getSetCC(
9830             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9831             ISD::SETEQ);
9832 
9833         // Update successor info.
9834         // Both Small and Big will jump to Small.BB, so we sum up the
9835         // probabilities.
9836         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
9837         if (BPI)
9838           addSuccessorWithProb(
9839               SwitchMBB, DefaultMBB,
9840               // The default destination is the first successor in IR.
9841               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
9842         else
9843           addSuccessorWithProb(SwitchMBB, DefaultMBB);
9844 
9845         // Insert the true branch.
9846         SDValue BrCond =
9847             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
9848                         DAG.getBasicBlock(Small.MBB));
9849         // Insert the false branch.
9850         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
9851                              DAG.getBasicBlock(DefaultMBB));
9852 
9853         DAG.setRoot(BrCond);
9854         return;
9855       }
9856     }
9857   }
9858 
9859   if (TM.getOptLevel() != CodeGenOpt::None) {
9860     // Here, we order cases by probability so the most likely case will be
9861     // checked first. However, two clusters can have the same probability in
9862     // which case their relative ordering is non-deterministic. So we use Low
9863     // as a tie-breaker as clusters are guaranteed to never overlap.
9864     llvm::sort(W.FirstCluster, W.LastCluster + 1,
9865                [](const CaseCluster &a, const CaseCluster &b) {
9866       return a.Prob != b.Prob ?
9867              a.Prob > b.Prob :
9868              a.Low->getValue().slt(b.Low->getValue());
9869     });
9870 
9871     // Rearrange the case blocks so that the last one falls through if possible
9872     // without changing the order of probabilities.
9873     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
9874       --I;
9875       if (I->Prob > W.LastCluster->Prob)
9876         break;
9877       if (I->Kind == CC_Range && I->MBB == NextMBB) {
9878         std::swap(*I, *W.LastCluster);
9879         break;
9880       }
9881     }
9882   }
9883 
9884   // Compute total probability.
9885   BranchProbability DefaultProb = W.DefaultProb;
9886   BranchProbability UnhandledProbs = DefaultProb;
9887   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
9888     UnhandledProbs += I->Prob;
9889 
9890   MachineBasicBlock *CurMBB = W.MBB;
9891   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
9892     MachineBasicBlock *Fallthrough;
9893     if (I == W.LastCluster) {
9894       // For the last cluster, fall through to the default destination.
9895       Fallthrough = DefaultMBB;
9896     } else {
9897       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
9898       CurMF->insert(BBI, Fallthrough);
9899       // Put Cond in a virtual register to make it available from the new blocks.
9900       ExportFromCurrentBlock(Cond);
9901     }
9902     UnhandledProbs -= I->Prob;
9903 
9904     switch (I->Kind) {
9905       case CC_JumpTable: {
9906         // FIXME: Optimize away range check based on pivot comparisons.
9907         JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
9908         JumpTable *JT = &JTCases[I->JTCasesIndex].second;
9909 
9910         // The jump block hasn't been inserted yet; insert it here.
9911         MachineBasicBlock *JumpMBB = JT->MBB;
9912         CurMF->insert(BBI, JumpMBB);
9913 
9914         auto JumpProb = I->Prob;
9915         auto FallthroughProb = UnhandledProbs;
9916 
9917         // If the default statement is a target of the jump table, we evenly
9918         // distribute the default probability to successors of CurMBB. Also
9919         // update the probability on the edge from JumpMBB to Fallthrough.
9920         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
9921                                               SE = JumpMBB->succ_end();
9922              SI != SE; ++SI) {
9923           if (*SI == DefaultMBB) {
9924             JumpProb += DefaultProb / 2;
9925             FallthroughProb -= DefaultProb / 2;
9926             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
9927             JumpMBB->normalizeSuccProbs();
9928             break;
9929           }
9930         }
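
        // Redistribution sketch (assumed numbers): with DefaultProb == 1/4
        // and DefaultMBB reachable from the table, the jump edge gains 1/8,
        // the fallthrough edge gives up 1/8, and the JumpMBB -> DefaultMBB
        // edge is set to 1/8 before normalizeSuccProbs() runs.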
9931 
9932         addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
9933         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
9934         CurMBB->normalizeSuccProbs();
9935 
9936         // The jump table header will be inserted in our current block, do the
9937         // range check, and fall through to our fallthrough block.
9938         JTH->HeaderBB = CurMBB;
9939         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
9940 
9941         // If we're in the right place, emit the jump table header right now.
9942         if (CurMBB == SwitchMBB) {
9943           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
9944           JTH->Emitted = true;
9945         }
9946         break;
9947       }
9948       case CC_BitTests: {
9949         // FIXME: Optimize away range check based on pivot comparisons.
9950         BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
9951 
9952         // The bit test blocks haven't been inserted yet; insert them here.
9953         for (BitTestCase &BTC : BTB->Cases)
9954           CurMF->insert(BBI, BTC.ThisBB);
9955 
9956         // Fill in fields of the BitTestBlock.
9957         BTB->Parent = CurMBB;
9958         BTB->Default = Fallthrough;
9959 
9960         BTB->DefaultProb = UnhandledProbs;
9961         // If the cases in the bit test don't form a contiguous range, we
9962         // evenly distribute the probability on the edge to Fallthrough
9963         // between the two successors of CurMBB.
9964         if (!BTB->ContiguousRange) {
9965           BTB->Prob += DefaultProb / 2;
9966           BTB->DefaultProb -= DefaultProb / 2;
9967         }
9968 
9969         // If we're in the right place, emit the bit test header right now.
9970         if (CurMBB == SwitchMBB) {
9971           visitBitTestHeader(*BTB, SwitchMBB);
9972           BTB->Emitted = true;
9973         }
9974         break;
9975       }
9976       case CC_Range: {
9977         const Value *RHS, *LHS, *MHS;
9978         ISD::CondCode CC;
9979         if (I->Low == I->High) {
9980           // Check Cond == I->Low.
9981           CC = ISD::SETEQ;
9982           LHS = Cond;
9983           RHS = I->Low;
9984           MHS = nullptr;
9985         } else {
9986           // Check I->Low <= Cond <= I->High.
9987           CC = ISD::SETLE;
9988           LHS = I->Low;
9989           MHS = Cond;
9990           RHS = I->High;
9991         }
9992 
9993         // The false probability is the sum of all unhandled cases.
9994         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
9995                      getCurSDLoc(), I->Prob, UnhandledProbs);
9996 
9997         if (CurMBB == SwitchMBB)
9998           visitSwitchCase(CB, SwitchMBB);
9999         else
10000           SwitchCases.push_back(CB);
10001 
10002         break;
10003       }
10004     }
10005     CurMBB = Fallthrough;
10006   }
10007 }
10008 
10009 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10010                                               CaseClusterIt First,
10011                                               CaseClusterIt Last) {
10012   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10013     if (X.Prob != CC.Prob)
10014       return X.Prob > CC.Prob;
10015 
10016     // Ties are broken by comparing the case value.
10017     return X.Low->getValue().slt(CC.Low->getValue());
10018   });
10019 }
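
// Rank sketch (assumed data): among three clusters with probabilities
// {0.4, 0.3, 0.3} in ascending case order, the middle cluster has rank 1:
// only the 0.4 cluster sorts ahead of it, since the equal-probability third
// cluster loses the Low-value tie-break.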
10020 
10021 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10022                                         const SwitchWorkListItem &W,
10023                                         Value *Cond,
10024                                         MachineBasicBlock *SwitchMBB) {
10025   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10026          "Clusters not sorted?");
10027 
10028   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10029 
10030   // Balance the tree based on branch probabilities to create a near-optimal (in
10031   // terms of search time given key frequency) binary search tree. See e.g. Kurt
10032   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10033   CaseClusterIt LastLeft = W.FirstCluster;
10034   CaseClusterIt FirstRight = W.LastCluster;
10035   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10036   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10037 
10038   // Move LastLeft and FirstRight towards each other from opposite directions to
10039   // find a partitioning of the clusters which balances the probability on both
10040   // sides. If LeftProb and RightProb are equal, alternate which side is
10041   // taken to ensure 0-probability nodes are distributed evenly.
10042   unsigned I = 0;
10043   while (LastLeft + 1 < FirstRight) {
10044     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10045       LeftProb += (++LastLeft)->Prob;
10046     else
10047       RightProb += (--FirstRight)->Prob;
10048     I++;
10049   }
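
  // Balancing sketch (assumed probabilities): for four clusters weighted
  // {4, 1, 1, 4} with DefaultProb == 2, both sides start at 4 + 1 == 5; the
  // loop alternates once each way and stops with LastLeft on the second
  // cluster and FirstRight on the third, each side carrying weight 6.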
10050 
10051   while (true) {
10052     // Our binary search tree differs from a typical BST in that ours can have up
10053     // to three values in each leaf. The pivot selection above doesn't take that
10054     // into account, which means the tree might require more nodes and be less
10055     // efficient. We compensate for this here.
10056 
10057     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10058     unsigned NumRight = W.LastCluster - FirstRight + 1;
10059 
10060     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10061       // If one side has fewer than 3 clusters, and the other has more than 3,
10062       // consider taking a cluster from the other side.
10063 
10064       if (NumLeft < NumRight) {
10065         // Consider moving the first cluster on the right to the left side.
10066         CaseCluster &CC = *FirstRight;
10067         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10068         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10069         if (LeftSideRank <= RightSideRank) {
10070           // Moving the cluster to the left does not demote it.
10071           ++LastLeft;
10072           ++FirstRight;
10073           continue;
10074         }
10075       } else {
10076         assert(NumRight < NumLeft);
10077         // Consider moving the last element on the left to the right side.
10078         CaseCluster &CC = *LastLeft;
10079         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10080         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10081         if (RightSideRank <= LeftSideRank) {
10082           // Moving the cluster to the right does not demote it.
10083           --LastLeft;
10084           --FirstRight;
10085           continue;
10086         }
10087       }
10088     }
10089     break;
10090   }
10091 
10092   assert(LastLeft + 1 == FirstRight);
10093   assert(LastLeft >= W.FirstCluster);
10094   assert(FirstRight <= W.LastCluster);
10095 
10096   // Use the first element on the right as pivot since we will make less-than
10097   // comparisons against it.
10098   CaseClusterIt PivotCluster = FirstRight;
10099   assert(PivotCluster > W.FirstCluster);
10100   assert(PivotCluster <= W.LastCluster);
10101 
10102   CaseClusterIt FirstLeft = W.FirstCluster;
10103   CaseClusterIt LastRight = W.LastCluster;
10104 
10105   const ConstantInt *Pivot = PivotCluster->Low;
10106 
10107   // New blocks will be inserted immediately after the current one.
10108   MachineFunction::iterator BBI(W.MBB);
10109   ++BBI;
10110 
10111   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10112   // we can branch to its destination directly if it's squeezed exactly in
10113   // between the known lower bound and Pivot - 1.
10114   MachineBasicBlock *LeftMBB;
10115   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10116       FirstLeft->Low == W.GE &&
10117       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
10118     LeftMBB = FirstLeft->MBB;
10119   } else {
10120     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10121     FuncInfo.MF->insert(BBI, LeftMBB);
10122     WorkList.push_back(
10123         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10124     // Put Cond in a virtual register to make it available from the new blocks.
10125     ExportFromCurrentBlock(Cond);
10126   }
10127 
10128   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10129   // single cluster, RHS.Low == Pivot, and we can branch to its destination
10130   // directly if RHS.High equals the current upper bound.
10131   MachineBasicBlock *RightMBB;
10132   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10133       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10134     RightMBB = FirstRight->MBB;
10135   } else {
10136     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10137     FuncInfo.MF->insert(BBI, RightMBB);
10138     WorkList.push_back(
10139         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10140     // Put Cond in a virtual register to make it available from the new blocks.
10141     ExportFromCurrentBlock(Cond);
10142   }
10143 
10144   // Create the CaseBlock record that will be used to lower the branch.
10145   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10146                getCurSDLoc(), LeftProb, RightProb);
10147 
10148   if (W.MBB == SwitchMBB)
10149     visitSwitchCase(CB, SwitchMBB);
10150   else
10151     SwitchCases.push_back(CB);
10152 }
10153 
10154 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
10155 // from the switch statement.
10156 static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
10157                                              BranchProbability PeeledCaseProb) {
  if (PeeledCaseProb == BranchProbability::getOne())
    return BranchProbability::getZero();
  BranchProbability SwitchProb = PeeledCaseProb.getCompl();

  uint32_t Numerator = CaseProb.getNumerator();
  uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
  return BranchProbability(Numerator, std::max(Numerator, Denominator));
}
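
// A worked example of the scaling above (editor's illustration, not from the
// original source): peeling a case with probability 0.8 leaves the remaining
// switch with probability 0.2, so a case that had probability 0.1 of the
// whole switch becomes 0.1 / 0.2 = 0.5 of the peeled remainder. In the
// arithmetic above, Numerator keeps CaseProb's numerator while the
// denominator is scaled down by SwitchProb; std::max guards against rounding
// pushing the numerator past the denominator.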

// Try to peel the top-probability case if its probability exceeds the
// threshold. Return the current MachineBasicBlock for the switch statement
// if peeling does not occur.
// If peeling is performed, return the newly created MachineBasicBlock for
// the peeled switch statement, and update Clusters to remove the peeled
// case. PeeledCaseProb is set to the BranchProbability of the peeled case.
MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
    const SwitchInst &SI, CaseClusterVector &Clusters,
    BranchProbability &PeeledCaseProb) {
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't peel if peeling is disabled, there is no profile information,
  // there is only one cluster, or we are optimizing for size.
  if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
      TM.getOptLevel() == CodeGenOpt::None ||
      SwitchMBB->getParent()->getFunction().optForMinSize())
    return SwitchMBB;

  BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
  unsigned PeeledCaseIndex = 0;
  bool SwitchPeeled = false;
  for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
    CaseCluster &CC = Clusters[Index];
    if (CC.Prob < TopCaseProb)
      continue;
    TopCaseProb = CC.Prob;
    PeeledCaseIndex = Index;
    SwitchPeeled = true;
  }
  if (!SwitchPeeled)
    return SwitchMBB;

  LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
                    << TopCaseProb << "\n");

  // Record the MBB for the peeled switch statement.
  MachineFunction::iterator BBI(SwitchMBB);
  ++BBI;
  MachineBasicBlock *PeeledSwitchMBB =
      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);

  ExportFromCurrentBlock(SI.getCondition());
  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
                          nullptr,   nullptr,      TopCaseProb.getCompl()};
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);

  Clusters.erase(PeeledCaseIt);
  for (CaseCluster &CC : Clusters) {
    LLVM_DEBUG(
        dbgs() << "Scale the probability for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
  }
  PeeledCaseProb = TopCaseProb;
  return PeeledSwitchMBB;
}
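
// Editor's sketch (not from the original source), assuming the default
// SwitchPeelThreshold of 66: a switch whose hottest case carries, say, 90%
// of the profile weight is peeled into its own compare-and-branch ahead of
// the remaining lowering. The leftover clusters are then lowered out of
// PeeledSwitchMBB, with each of their probabilities rescaled by
// scaleCaseProbability against the 10% remainder.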

void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
  // Extract cases from the switch.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
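    // Editor's note: without BPI, each of the N case edges and the default
    // edge is assumed equally likely, e.g. a 3-case switch gives every edge
    // probability 1/4.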
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);
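
  // For illustration (editor's sketch, not in the original source): cases
  // {4 -> A, 0 -> A, 1 -> A, 7 -> B} are sorted and merged into the range
  // clusters {[0,1] -> A, [4,4] -> A, [7,7] -> B}; only numerically adjacent
  // cases with the same destination are combined.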

  if (TM.getOptLevel() != CodeGenOpt::None) {
    // Replace an unreachable default with the most popular destination.
    // FIXME: Exploit unreachable default more aggressively.
    bool UnreachableDefault =
        isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
    if (UnreachableDefault && !Clusters.empty()) {
      DenseMap<const BasicBlock *, unsigned> Popularity;
      unsigned MaxPop = 0;
      const BasicBlock *MaxBB = nullptr;
      for (auto I : SI.cases()) {
        const BasicBlock *BB = I.getCaseSuccessor();
        if (++Popularity[BB] > MaxPop) {
          MaxPop = Popularity[BB];
          MaxBB = BB;
        }
      }
      // Set new default.
      assert(MaxPop > 0 && MaxBB);
      DefaultMBB = FuncInfo.MBBMap[MaxBB];

      // Remove cases that were pointing to the destination that is now the
      // default.
      CaseClusterVector New;
      New.reserve(Clusters.size());
      for (CaseCluster &CC : Clusters) {
        if (CC.MBB != DefaultMBB)
          New.push_back(CC);
      }
      Clusters = std::move(New);
    }
  }
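
  // Editor's illustration (not from the original source): for
  //   switch (x) { case 0: case 1: goto A; case 2: goto B;
  //                default: unreachable; }
  // destination A is the most popular (two cases), so A becomes the new
  // default and the clusters for cases 0 and 1 are dropped, leaving only the
  // cluster for case 2.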

  // The branch probability of the peeled case.
  BranchProbability PeeledCaseProb = BranchProbability::getZero();
  MachineBasicBlock *PeeledSwitchMBB =
      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);

  // If there is only the default destination, jump there directly.
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  if (Clusters.empty()) {
    assert(PeeledSwitchMBB == SwitchMBB);
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
    }
    return;
  }

  findJumpTables(Clusters, &SI, DefaultMBB);
  findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if peeling occurred and
  // DefaultMBB has not been replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
    DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
        !DefaultMBB->getParent()->getFunction().optForMinSize()) {
      // For optimized builds, lower large range as a balanced binary tree.
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
      continue;
    }
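
    // Editor's note (illustration, not from the original source): the split
    // roughly halves each work item, so e.g. 16 clusters are lowered through
    // a tree about four comparisons deep rather than a 16-way linear scan;
    // items of three or fewer clusters fall through to lowerWorkItem below.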

    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
  }
}