//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <numeric>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will disable "
             "this optimization"));
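
// Usage sketch (illustrative, not part of the lowering logic): both options
// above are hidden command-line flags, so they can be set on any tool that
// links this code, e.g.
//   llc -limit-float-precision=12 -switch-peel-threshold=50 input.ll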

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

// Return the calling convention if the Value passed requires ABI mangling,
// i.e. it is a parameter to a function or a return value from a function
// which is not an intrinsic.
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
  if (auto *R = dyn_cast<ReturnInst>(V))
    return R->getParent()->getParent()->getCallingConv();

  if (auto *CI = dyn_cast<CallInst>(V)) {
    const bool IsInlineAsm = CI->isInlineAsm();
    const bool IsIndirectFunctionCall =
        !IsInlineAsm && !CI->getCalledFunction();

    // It is possible that the call instruction is an inline asm statement or
    // an indirect function call, in which case the return value of
    // getCalledFunction() would be nullptr.
    const bool IsIntrinsicCall =
        !IsInlineAsm && !IsIndirectFunctionCall &&
        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;

    if (!IsInlineAsm && !IsIntrinsicCall)
      return CI->getCallingConv();
  }

  return None;
}
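
// For illustration (IR sketch, not taken from this file): given
//   %r = call fastcc i32 @f(i32 %x)         ; direct call: returns Fast
//   %p = call i32 %fptr(i32 %x)             ; indirect call: returns its CC
//   %a = call i32 asm "...", "=r,r"(i32 %x) ; inline asm: returns None
//   %c = call i64 @llvm.ctpop.i64(i64 %x)   ; intrinsic: returns None
// getABIRegCopyCC returns a calling convention only for the first two.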

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<CallingConv::ID> CC = None,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits are known to be
      // zero or sign-extended.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  llvm_unreachable("Unknown mismatch!");
}
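
// Worked example (illustrative only, little-endian): an i64 value held in
// two i32 parts {Lo, Hi} becomes BUILD_PAIR i64, Lo, Hi. An i48 value held
// in three i16 parts is assembled as a round i32 BUILD_PAIR from the first
// two parts, while the odd third part is any-extended to i48, shifted left
// by 32 bits, and OR'ed onto the zero-extended round part.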

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If CallConv is given, the
/// parts are assembled using the vector type breakdown for that calling
/// convention.
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
                         (IntermediateVT.isVector()
                              ? IntermediateVT.getVectorNumElements() * NumParts
                              : NumIntermediates));
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors is the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // of the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back to the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}
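
// Example (illustrative): reassembling a <2 x float> value that was passed
// in one <4 x float> register takes the EXTRACT_SUBVECTOR path above, while
// a <1 x i1> value passed in an i8 part is truncated to i1 and rebuilt with
// getBuildVector at the end of the function.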

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits, DL));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}
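
// Example (illustrative, little-endian): copying an i48 value into three
// i16 parts first splits off the odd high part with an SRL by 32 bits, then
// truncates the remainder to i32 and bisects it with EXTRACT_ELEMENT, so
// Parts[0..2] hold bits [0,16), [16,32) and [32,48) respectively.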

static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    // undef elements.
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}
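
// Example (illustrative): widening a <2 x float> value to a <4 x float>
// part type yields BUILD_VECTOR(Elt0, Elt1, undef, undef).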

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  unsigned IntermediateNumElts = IntermediateVT.isVector() ?
    IntermediateVT.getVectorNumElements() : 1;

  // Convert the vector to the appropriate type if necessary.
  unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
  MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                           DAG.getConstant(i * IntermediateNumElts, DL, IdxVT));
    } else {
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, IdxVT));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}
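
// Example (illustrative): copying a <4 x i32> value into two <2 x i32>
// registers extracts the two <2 x i32> subvectors at element indices 0 and
// 2 with EXTRACT_SUBVECTOR and then copies each one to its part.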

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}
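
// Example (illustrative): on a 32-bit target where i64 is legalized to two
// i32 registers, constructing a RegsForValue for an i64 starting at virtual
// register R records Regs = {R, R+1}, RegVTs = {i32} and RegCount = {2}.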

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Express it explicitly to make it easier for optimizations to
        // kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used, then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // Propagate the fast-math-flags of this IR instruction to the DAG node that
    // maps to this instruction.
    // TODO: We could handle all flags (nsw, etc) here.
    // TODO: If an IR instruction maps to >1 node, only the final node will have
    //       flags set.
    if (SDNode *Node = getNodeForIRValue(&I)) {
      SDNodeFlags IncomingFlags;
      IncomingFlags.copyFMF(*FPMO);
      if (!Node->getFlags().isDefined())
        Node->setFlags(IncomingFlags);
      else
        Node->intersectFlagsWith(IncomingFlags);
    }
  }

  if (!I.isTerminator() && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}
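
// For illustration, the HANDLE_INST expansion above produces cases such as
//   case Instruction::Add:  visitAdd((const BinaryOperator&)I); break;
//   case Instruction::Load: visitLoad((const LoadInst&)I); break;
// (see llvm/IR/Instruction.def for the full opcode/class list).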

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;
    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << " in EmitFuncArgumentDbgValue\n");
    } else
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
  }
  DDIV.clear();
}

/// getCopyFromRegs - If there was a virtual register allocated for the value V,
/// emit a CopyFromReg of the specified type Ty. Otherwise return an empty
/// SDValue().
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;

    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty,
                     None); // This is not an ABI copy.
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                 V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue exists for the given Value.
1224 bool SelectionDAGBuilder::findValue(const Value *V) const {
1225   return (NodeMap.find(V) != NodeMap.end()) ||
1226     (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
1227 }
1228 
1229 /// getNonRegisterValue - Return an SDValue for the given Value, but
1230 /// don't look in FuncInfo.ValueMap for a virtual register.
1231 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1232   // If we already have an SDValue for this value, use it.
1233   SDValue &N = NodeMap[V];
1234   if (N.getNode()) {
1235     if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1236       // Remove the debug location from the node as the node is about to be used
1237       // in a location which may differ from the original debug location.  This
1238       // is relevant to Constant and ConstantFP nodes because they can appear
1239       // as constant expressions inside PHI nodes.
1240       N->setDebugLoc(DebugLoc());
1241     }
1242     return N;
1243   }
1244 
1245   // Otherwise create a new SDValue and remember it.
1246   SDValue Val = getValueImpl(V);
1247   NodeMap[V] = Val;
1248   resolveDanglingDebugInfo(V, Val);
1249   return Val;
1250 }
1251 
1252 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1253 /// Create an SDValue for the given value.
1254 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1255   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1256 
1257   if (const Constant *C = dyn_cast<Constant>(V)) {
1258     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1259 
1260     if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1261       return DAG.getConstant(*CI, getCurSDLoc(), VT);
1262 
1263     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1264       return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1265 
1266     if (isa<ConstantPointerNull>(C)) {
1267       unsigned AS = V->getType()->getPointerAddressSpace();
1268       return DAG.getConstant(0, getCurSDLoc(),
1269                              TLI.getPointerTy(DAG.getDataLayout(), AS));
1270     }
1271 
1272     if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1273       return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1274 
1275     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1276       return DAG.getUNDEF(VT);
1277 
1278     if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1279       visit(CE->getOpcode(), *CE);
1280       SDValue N1 = NodeMap[V];
1281       assert(N1.getNode() && "visit didn't populate the NodeMap!");
1282       return N1;
1283     }
1284 
1285     if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1286       SmallVector<SDValue, 4> Constants;
1287       for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1288            OI != OE; ++OI) {
1289         SDNode *Val = getValue(*OI).getNode();
1290         // If the operand is an empty aggregate, there are no values.
1291         if (!Val) continue;
1292         // Add each leaf value from the operand to the Constants list
1293         // to form a flattened list of all the values.
1294         for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1295           Constants.push_back(SDValue(Val, i));
1296       }
1297 
1298       return DAG.getMergeValues(Constants, getCurSDLoc());
1299     }
1300 
1301     if (const ConstantDataSequential *CDS =
1302           dyn_cast<ConstantDataSequential>(C)) {
1303       SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the element to the Ops list to form a
        // flattened list of all the values.
        for (unsigned j = 0, ee = Val->getNumValues(); j != ee; ++j)
          Ops.push_back(SDValue(Val, j));
1310       }
1311 
1312       if (isa<ArrayType>(CDS->getType()))
1313         return DAG.getMergeValues(Ops, getCurSDLoc());
1314       return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1315     }
1316 
1317     if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1318       assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1319              "Unknown struct or array constant!");
1320 
1321       SmallVector<EVT, 4> ValueVTs;
1322       ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1323       unsigned NumElts = ValueVTs.size();
1324       if (NumElts == 0)
1325         return SDValue(); // empty struct
1326       SmallVector<SDValue, 4> Constants(NumElts);
1327       for (unsigned i = 0; i != NumElts; ++i) {
1328         EVT EltVT = ValueVTs[i];
1329         if (isa<UndefValue>(C))
1330           Constants[i] = DAG.getUNDEF(EltVT);
1331         else if (EltVT.isFloatingPoint())
1332           Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1333         else
1334           Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1335       }
1336 
1337       return DAG.getMergeValues(Constants, getCurSDLoc());
1338     }
1339 
1340     if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1341       return DAG.getBlockAddress(BA, VT);
1342 
1343     VectorType *VecTy = cast<VectorType>(V->getType());
1344     unsigned NumElements = VecTy->getNumElements();
1345 
1346     // Now that we know the number and type of the elements, get that number of
1347     // elements into the Ops array based on what kind of constant it is.
1348     SmallVector<SDValue, 16> Ops;
1349     if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1350       for (unsigned i = 0; i != NumElements; ++i)
1351         Ops.push_back(getValue(CV->getOperand(i)));
1352     } else {
1353       assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
1354       EVT EltVT =
1355           TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1356 
1357       SDValue Op;
1358       if (EltVT.isFloatingPoint())
1359         Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1360       else
1361         Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1362       Ops.assign(NumElements, Op);
1363     }
1364 
1365     // Create a BUILD_VECTOR node.
1366     return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1367   }
1368 
1369   // If this is a static alloca, generate it as the frameindex instead of
1370   // computation.
1371   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1372     DenseMap<const AllocaInst*, int>::iterator SI =
1373       FuncInfo.StaticAllocaMap.find(AI);
1374     if (SI != FuncInfo.StaticAllocaMap.end())
1375       return DAG.getFrameIndex(SI->second,
1376                                TLI.getFrameIndexTy(DAG.getDataLayout()));
1377   }
1378 
1379   // If this is an instruction which fast-isel has deferred, select it now.
1380   if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1381     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1382 
1383     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1384                      Inst->getType(), getABIRegCopyCC(V));
1385     SDValue Chain = DAG.getEntryNode();
1386     return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1387   }
1388 
1389   llvm_unreachable("Can't get register for value!");
1390 }
1391 
1392 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1393   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1394   bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1395   bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1396   bool IsSEH = isAsynchronousEHPersonality(Pers);
1397   bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
1398   MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1399   if (!IsSEH)
1400     CatchPadMBB->setIsEHScopeEntry();
1401   // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1402   if (IsMSVCCXX || IsCoreCLR)
1403     CatchPadMBB->setIsEHFuncletEntry();
1404   // Wasm does not need catchpads anymore
1405   if (!IsWasmCXX)
1406     DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
1407                             getControlRoot()));
1408 }
1409 
1410 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1411   // Update machine-CFG edge.
1412   MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1413   FuncInfo.MBB->addSuccessor(TargetMBB);
1414 
1415   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1416   bool IsSEH = isAsynchronousEHPersonality(Pers);
1417   if (IsSEH) {
1418     // If this is not a fall-through branch or optimizations are switched off,
1419     // emit the branch.
1420     if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1421         TM.getOptLevel() == CodeGenOpt::None)
1422       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1423                               getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1424     return;
1425   }
1426 
1427   // Figure out the funclet membership for the catchret's successor.
1428   // This will be used by the FuncletLayout pass to determine how to order the
  // BBs.
1430   // A 'catchret' returns to the outer scope's color.
1431   Value *ParentPad = I.getCatchSwitchParentPad();
1432   const BasicBlock *SuccessorColor;
1433   if (isa<ConstantTokenNone>(ParentPad))
1434     SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1435   else
1436     SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1437   assert(SuccessorColor && "No parent funclet for catchret!");
1438   MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1439   assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1440 
1441   // Create the terminator node.
1442   SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1443                             getControlRoot(), DAG.getBasicBlock(TargetMBB),
1444                             DAG.getBasicBlock(SuccessorColorMBB));
1445   DAG.setRoot(Ret);
1446 }
1447 
1448 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1449   // Don't emit any special code for the cleanuppad instruction. It just marks
1450   // the start of an EH scope/funclet.
1451   FuncInfo.MBB->setIsEHScopeEntry();
1452   auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1453   if (Pers != EHPersonality::Wasm_CXX) {
1454     FuncInfo.MBB->setIsEHFuncletEntry();
1455     FuncInfo.MBB->setIsCleanupFuncletEntry();
1456   }
1457 }
1458 
1459 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1460 /// many places it could ultimately go. In the IR, we have a single unwind
1461 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1462 /// This function skips over imaginary basic blocks that hold catchswitch
1463 /// instructions, and finds all the "real" machine
1464 /// basic block destinations. As those destinations may not be successors of
1465 /// EHPadBB, here we also calculate the edge probability to those destinations.
1466 /// The passed-in Prob is the edge probability to EHPadBB.
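///
/// For example, an invoke that unwinds to a catchswitch with two catchpad
/// handlers yields both handler blocks as destinations, and if the
/// catchswitch itself unwinds further, that chain is followed as well.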
1467 static void findUnwindDestinations(
1468     FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1469     BranchProbability Prob,
1470     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1471         &UnwindDests) {
1472   EHPersonality Personality =
1473     classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1474   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1475   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1476   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1477   bool IsSEH = isAsynchronousEHPersonality(Personality);
1478 
1479   while (EHPadBB) {
1480     const Instruction *Pad = EHPadBB->getFirstNonPHI();
1481     BasicBlock *NewEHPadBB = nullptr;
1482     if (isa<LandingPadInst>(Pad)) {
1483       // Stop on landingpads. They are not funclets.
1484       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1485       break;
1486     } else if (isa<CleanupPadInst>(Pad)) {
1487       // Stop on cleanup pads. Cleanups are always funclet entries for all known
1488       // personalities.
1489       UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1490       UnwindDests.back().first->setIsEHScopeEntry();
1491       if (!IsWasmCXX)
1492         UnwindDests.back().first->setIsEHFuncletEntry();
1493       break;
1494     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1495       // Add the catchpad handlers to the possible destinations.
1496       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1497         UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1498         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1499         if (IsMSVCCXX || IsCoreCLR)
1500           UnwindDests.back().first->setIsEHFuncletEntry();
1501         if (!IsSEH)
1502           UnwindDests.back().first->setIsEHScopeEntry();
1503       }
1504       NewEHPadBB = CatchSwitch->getUnwindDest();
1505     } else {
1506       continue;
1507     }
1508 
1509     BranchProbabilityInfo *BPI = FuncInfo.BPI;
1510     if (BPI && NewEHPadBB)
1511       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1512     EHPadBB = NewEHPadBB;
1513   }
1514 }
1515 
1516 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1517   // Update successor info.
1518   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1519   auto UnwindDest = I.getUnwindDest();
1520   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1521   BranchProbability UnwindDestProb =
1522       (BPI && UnwindDest)
1523           ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1524           : BranchProbability::getZero();
1525   findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1526   for (auto &UnwindDest : UnwindDests) {
1527     UnwindDest.first->setIsEHPad();
1528     addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1529   }
1530   FuncInfo.MBB->normalizeSuccProbs();
1531 
1532   // Create the terminator node.
1533   SDValue Ret =
1534       DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1535   DAG.setRoot(Ret);
1536 }
1537 
1538 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1539   report_fatal_error("visitCatchSwitch not yet implemented!");
1540 }
1541 
1542 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1543   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1544   auto &DL = DAG.getDataLayout();
1545   SDValue Chain = getControlRoot();
1546   SmallVector<ISD::OutputArg, 8> Outs;
1547   SmallVector<SDValue, 8> OutVals;
1548 
1549   // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1550   // lower
1551   //
1552   //   %val = call <ty> @llvm.experimental.deoptimize()
1553   //   ret <ty> %val
1554   //
1555   // differently.
1556   if (I.getParent()->getTerminatingDeoptimizeCall()) {
1557     LowerDeoptimizingReturn();
1558     return;
1559   }
1560 
1561   if (!FuncInfo.CanLowerReturn) {
1562     unsigned DemoteReg = FuncInfo.DemoteRegister;
1563     const Function *F = I.getParent()->getParent();
1564 
1565     // Emit a store of the return value through the virtual register.
1566     // Leave Outs empty so that LowerReturn won't try to load return
1567     // registers the usual way.
1568     SmallVector<EVT, 1> PtrValueVTs;
1569     ComputeValueVTs(TLI, DL,
1570                     F->getReturnType()->getPointerTo(
1571                         DAG.getDataLayout().getAllocaAddrSpace()),
1572                     PtrValueVTs);
1573 
1574     SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1575                                         DemoteReg, PtrValueVTs[0]);
1576     SDValue RetOp = getValue(I.getOperand(0));
1577 
1578     SmallVector<EVT, 4> ValueVTs;
1579     SmallVector<uint64_t, 4> Offsets;
1580     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1581     unsigned NumValues = ValueVTs.size();
1582 
1583     SmallVector<SDValue, 4> Chains(NumValues);
1584     for (unsigned i = 0; i != NumValues; ++i) {
1585       // An aggregate return value cannot wrap around the address space, so
1586       // offsets to its parts don't wrap either.
1587       SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1588       Chains[i] = DAG.getStore(
1589           Chain, getCurSDLoc(), SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1590           // FIXME: better loc info would be nice.
1591           Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
1592     }
1593 
1594     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1595                         MVT::Other, Chains);
1596   } else if (I.getNumOperands() != 0) {
1597     SmallVector<EVT, 4> ValueVTs;
1598     ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1599     unsigned NumValues = ValueVTs.size();
1600     if (NumValues) {
1601       SDValue RetOp = getValue(I.getOperand(0));
1602 
1603       const Function *F = I.getParent()->getParent();
1604 
1605       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1606       if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1607                                           Attribute::SExt))
1608         ExtendKind = ISD::SIGN_EXTEND;
1609       else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1610                                                Attribute::ZExt))
1611         ExtendKind = ISD::ZERO_EXTEND;
1612 
1613       LLVMContext &Context = F->getContext();
1614       bool RetInReg = F->getAttributes().hasAttribute(
1615           AttributeList::ReturnIndex, Attribute::InReg);
1616 
1617       for (unsigned j = 0; j != NumValues; ++j) {
1618         EVT VT = ValueVTs[j];
1619 
1620         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1621           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1622 
1623         CallingConv::ID CC = F->getCallingConv();
1624 
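        // The value may need to be split across several registers for this
        // calling convention; e.g., returning an i64 on a typical 32-bit
        // target produces two i32 parts.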
1625         unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1626         MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1627         SmallVector<SDValue, 4> Parts(NumParts);
1628         getCopyToParts(DAG, getCurSDLoc(),
1629                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1630                        &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1631 
1632         // 'inreg' on function refers to return value
1633         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1634         if (RetInReg)
1635           Flags.setInReg();
1636 
1637         // Propagate extension type if any
1638         if (ExtendKind == ISD::SIGN_EXTEND)
1639           Flags.setSExt();
1640         else if (ExtendKind == ISD::ZERO_EXTEND)
1641           Flags.setZExt();
1642 
1643         for (unsigned i = 0; i < NumParts; ++i) {
1644           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1645                                         VT, /*isfixed=*/true, 0, 0));
1646           OutVals.push_back(Parts[i]);
1647         }
1648       }
1649     }
1650   }
1651 
  // Push the swifterror virtual register in as the last element of Outs. This
  // makes sure the swifterror virtual register will be returned in the
  // swifterror physical register.
1655   const Function *F = I.getParent()->getParent();
1656   if (TLI.supportSwiftError() &&
1657       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1658     assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1659     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1660     Flags.setSwiftError();
1661     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1662                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1663                                   true /*isfixed*/, 1 /*origidx*/,
1664                                   0 /*partOffs*/));
1665     // Create SDNode for the swifterror virtual register.
1666     OutVals.push_back(
1667         DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt(
1668                             &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first,
1669                         EVT(TLI.getPointerTy(DL))));
1670   }
1671 
1672   bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1673   CallingConv::ID CallConv =
1674     DAG.getMachineFunction().getFunction().getCallingConv();
1675   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1676       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1677 
1678   // Verify that the target's LowerReturn behaved as expected.
1679   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1680          "LowerReturn didn't return a valid chain!");
1681 
1682   // Update the DAG with the new chain value resulting from return lowering.
1683   DAG.setRoot(Chain);
1684 }
1685 
1686 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1687 /// created for it, emit nodes to copy the value into the virtual
1688 /// registers.
1689 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1690   // Skip empty types
1691   if (V->getType()->isEmptyTy())
1692     return;
1693 
1694   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1695   if (VMI != FuncInfo.ValueMap.end()) {
1696     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1697     CopyValueToVirtualRegister(V, VMI->second);
1698   }
1699 }
1700 
1701 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1702 /// the current basic block, add it to ValueMap now so that we'll get a
1703 /// CopyTo/FromReg.
1704 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1705   // No need to export constants.
1706   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1707 
1708   // Already exported?
1709   if (FuncInfo.isExportedInst(V)) return;
1710 
1711   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1712   CopyValueToVirtualRegister(V, Reg);
1713 }
1714 
1715 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1716                                                      const BasicBlock *FromBB) {
1717   // The operands of the setcc have to be in this block.  We don't know
1718   // how to export them from some other block.
1719   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1720     // Can export from current BB.
1721     if (VI->getParent() == FromBB)
1722       return true;
1723 
1724     // Is already exported, noop.
1725     return FuncInfo.isExportedInst(V);
1726   }
1727 
1728   // If this is an argument, we can export it if the BB is the entry block or
1729   // if it is already exported.
1730   if (isa<Argument>(V)) {
1731     if (FromBB == &FromBB->getParent()->getEntryBlock())
1732       return true;
1733 
1734     // Otherwise, can only export this if it is already exported.
1735     return FuncInfo.isExportedInst(V);
1736   }
1737 
1738   // Otherwise, constants can always be exported.
1739   return true;
1740 }
1741 
1742 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1743 BranchProbability
1744 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1745                                         const MachineBasicBlock *Dst) const {
1746   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1747   const BasicBlock *SrcBB = Src->getBasicBlock();
1748   const BasicBlock *DstBB = Dst->getBasicBlock();
1749   if (!BPI) {
1750     // If BPI is not available, set the default probability as 1 / N, where N is
1751     // the number of successors.
1752     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
1753     return BranchProbability(1, SuccSize);
1754   }
1755   return BPI->getEdgeProbability(SrcBB, DstBB);
1756 }
1757 
1758 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1759                                                MachineBasicBlock *Dst,
1760                                                BranchProbability Prob) {
1761   if (!FuncInfo.BPI)
1762     Src->addSuccessorWithoutProb(Dst);
1763   else {
1764     if (Prob.isUnknown())
1765       Prob = getEdgeProbability(Src, Dst);
1766     Src->addSuccessor(Dst, Prob);
1767   }
1768 }
1769 
1770 static bool InBlock(const Value *V, const BasicBlock *BB) {
1771   if (const Instruction *I = dyn_cast<Instruction>(V))
1772     return I->getParent() == BB;
1773   return true;
1774 }
1775 
1776 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1777 /// This function emits a branch and is used at the leaves of an OR or an
1778 /// AND operator tree.
1779 void
1780 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1781                                                   MachineBasicBlock *TBB,
1782                                                   MachineBasicBlock *FBB,
1783                                                   MachineBasicBlock *CurBB,
1784                                                   MachineBasicBlock *SwitchBB,
1785                                                   BranchProbability TProb,
1786                                                   BranchProbability FProb,
1787                                                   bool InvertCond) {
1788   const BasicBlock *BB = CurBB->getBasicBlock();
1789 
1790   // If the leaf of the tree is a comparison, merge the condition into
1791   // the caseblock.
1792   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1793     // The operands of the cmp have to be in this block.  We don't know
1794     // how to export them from some other block.  If this is the first block
1795     // of the sequence, no exporting is needed.
1796     if (CurBB == SwitchBB ||
1797         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1798          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1799       ISD::CondCode Condition;
1800       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1801         ICmpInst::Predicate Pred =
1802             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1803         Condition = getICmpCondCode(Pred);
1804       } else {
1805         const FCmpInst *FC = cast<FCmpInst>(Cond);
1806         FCmpInst::Predicate Pred =
1807             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1808         Condition = getFCmpCondCode(Pred);
1809         if (TM.Options.NoNaNsFPMath)
1810           Condition = getFCmpCodeWithoutNaN(Condition);
1811       }
1812 
1813       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1814                    TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1815       SwitchCases.push_back(CB);
1816       return;
1817     }
1818   }
1819 
1820   // Create a CaseBlock record representing this branch.
1821   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1822   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1823                nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
1824   SwitchCases.push_back(CB);
1825 }
1826 
/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// emit it as a sequence of branches on the subexpressions rather than
/// materializing the boolean value with setcc and and/or nodes.
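///
/// For example, "br (or icmpA, icmpB)" becomes a branch to the true block on
/// icmpA, falling through to a second block that branches on icmpB (see the
/// "Codegen X | Y" comment in the body below).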
1828 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1829                                                MachineBasicBlock *TBB,
1830                                                MachineBasicBlock *FBB,
1831                                                MachineBasicBlock *CurBB,
1832                                                MachineBasicBlock *SwitchBB,
1833                                                Instruction::BinaryOps Opc,
1834                                                BranchProbability TProb,
1835                                                BranchProbability FProb,
1836                                                bool InvertCond) {
  // Skip over a NOT that is not itself part of the tree, and remember to
  // invert the opcode and operands at the next level.
1839   if (BinaryOperator::isNot(Cond) && Cond->hasOneUse()) {
1840     const Value *CondOp = BinaryOperator::getNotArgument(Cond);
1841     if (InBlock(CondOp, CurBB->getBasicBlock())) {
1842       FindMergedConditions(CondOp, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1843                            !InvertCond);
1844       return;
1845     }
1846   }
1847 
1848   const Instruction *BOp = dyn_cast<Instruction>(Cond);
1849   // Compute the effective opcode for Cond, taking into account whether it needs
1850   // to be inverted, e.g.
1851   //   and (not (or A, B)), C
1852   // gets lowered as
1853   //   and (and (not A, not B), C)
1854   unsigned BOpc = 0;
1855   if (BOp) {
1856     BOpc = BOp->getOpcode();
1857     if (InvertCond) {
1858       if (BOpc == Instruction::And)
1859         BOpc = Instruction::Or;
1860       else if (BOpc == Instruction::Or)
1861         BOpc = Instruction::And;
1862     }
1863   }
1864 
1865   // If this node is not part of the or/and tree, emit it as a branch.
1866   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1867       BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
1868       BOp->getParent() != CurBB->getBasicBlock() ||
1869       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1870       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1871     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1872                                  TProb, FProb, InvertCond);
1873     return;
1874   }
1875 
1876   //  Create TmpBB after CurBB.
1877   MachineFunction::iterator BBI(CurBB);
1878   MachineFunction &MF = DAG.getMachineFunction();
1879   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1880   CurBB->getParent()->insert(++BBI, TmpBB);
1881 
1882   if (Opc == Instruction::Or) {
1883     // Codegen X | Y as:
1884     // BB1:
1885     //   jmp_if_X TBB
1886     //   jmp TmpBB
1887     // TmpBB:
1888     //   jmp_if_Y TBB
1889     //   jmp FBB
1890     //
1891 
1892     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1893     // The requirement is that
1894     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1895     //     = TrueProb for original BB.
1896     // Assuming the original probabilities are A and B, one choice is to set
1897     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
1898     // A/(1+B) and 2B/(1+B). This choice assumes that
1899     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1900     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1901     // TmpBB, but the math is more complicated.
1902 
1903     auto NewTrueProb = TProb / 2;
1904     auto NewFalseProb = TProb / 2 + FProb;
1905     // Emit the LHS condition.
1906     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1907                          NewTrueProb, NewFalseProb, InvertCond);
1908 
1909     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
1910     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
1911     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1912     // Emit the RHS condition into TmpBB.
1913     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1914                          Probs[0], Probs[1], InvertCond);
1915   } else {
1916     assert(Opc == Instruction::And && "Unknown merge op!");
1917     // Codegen X & Y as:
1918     // BB1:
1919     //   jmp_if_X TmpBB
1920     //   jmp FBB
1921     // TmpBB:
1922     //   jmp_if_Y TBB
1923     //   jmp FBB
1924     //
1925     //  This requires creation of TmpBB after CurBB.
1926 
1927     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1928     // The requirement is that
1929     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1930     //     = FalseProb for original BB.
1931     // Assuming the original probabilities are A and B, one choice is to set
1932     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
1933     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
1934     // TrueProb for BB1 * FalseProb for TmpBB.
1935 
1936     auto NewTrueProb = TProb + FProb / 2;
1937     auto NewFalseProb = FProb / 2;
1938     // Emit the LHS condition.
1939     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1940                          NewTrueProb, NewFalseProb, InvertCond);
1941 
1942     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
1943     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
1944     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1945     // Emit the RHS condition into TmpBB.
1946     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1947                          Probs[0], Probs[1], InvertCond);
1948   }
1949 }
1950 
1951 /// If the set of cases should be emitted as a series of branches, return true.
1952 /// If we should emit this as a bunch of and/or'd together conditions, return
1953 /// false.
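/// For example, "(X == null) & (Y == null)" is better emitted as the single
/// test "(X|Y) == 0" than as two separate branch blocks.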
1954 bool
1955 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1956   if (Cases.size() != 2) return true;
1957 
1958   // If this is two comparisons of the same values or'd or and'd together, they
1959   // will get folded into a single comparison, so don't emit two blocks.
1960   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1961        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1962       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1963        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1964     return false;
1965   }
1966 
1967   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1968   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1969   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1970       Cases[0].CC == Cases[1].CC &&
1971       isa<Constant>(Cases[0].CmpRHS) &&
1972       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1973     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1974       return false;
1975     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1976       return false;
1977   }
1978 
1979   return true;
1980 }
1981 
1982 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1983   MachineBasicBlock *BrMBB = FuncInfo.MBB;
1984 
1985   // Update machine-CFG edges.
1986   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1987 
1988   if (I.isUnconditional()) {
1989     // Update machine-CFG edges.
1990     BrMBB->addSuccessor(Succ0MBB);
1991 
1992     // If this is not a fall-through branch or optimizations are switched off,
1993     // emit the branch.
1994     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
1995       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1996                               MVT::Other, getControlRoot(),
1997                               DAG.getBasicBlock(Succ0MBB)));
1998 
1999     return;
2000   }
2001 
2002   // If this condition is one of the special cases we handle, do special stuff
2003   // now.
2004   const Value *CondVal = I.getCondition();
2005   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2006 
2007   // If this is a series of conditions that are or'd or and'd together, emit
2008   // this as a sequence of branches instead of setcc's with and/or operations.
2009   // As long as jumps are not expensive, this should improve performance.
2010   // For example, instead of something like:
2011   //     cmp A, B
2012   //     C = seteq
2013   //     cmp D, E
2014   //     F = setle
2015   //     or C, F
2016   //     jnz foo
2017   // Emit:
2018   //     cmp A, B
2019   //     je foo
2020   //     cmp D, E
2021   //     jle foo
2022   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2023     Instruction::BinaryOps Opcode = BOp->getOpcode();
2024     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2025         !I.getMetadata(LLVMContext::MD_unpredictable) &&
2026         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
2027       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2028                            Opcode,
2029                            getEdgeProbability(BrMBB, Succ0MBB),
2030                            getEdgeProbability(BrMBB, Succ1MBB),
2031                            /*InvertCond=*/false);
2032       // If the compares in later blocks need to use values not currently
2033       // exported from this block, export them now.  This block should always
2034       // be the first entry.
2035       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2036 
2037       // Allow some cases to be rejected.
2038       if (ShouldEmitAsBranches(SwitchCases)) {
2039         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
2040           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
2041           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
2042         }
2043 
2044         // Emit the branch for this block.
2045         visitSwitchCase(SwitchCases[0], BrMBB);
2046         SwitchCases.erase(SwitchCases.begin());
2047         return;
2048       }
2049 
2050       // Okay, we decided not to do this, remove any inserted MBB's and clear
2051       // SwitchCases.
2052       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
2053         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
2054 
2055       SwitchCases.clear();
2056     }
2057   }
2058 
2059   // Create a CaseBlock record representing this branch.
2060   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2061                nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2062 
2063   // Use visitSwitchCase to actually insert the fast branch sequence for this
2064   // cond branch.
2065   visitSwitchCase(CB, BrMBB);
2066 }
2067 
2068 /// visitSwitchCase - Emits the necessary code to represent a single node in
2069 /// the binary search tree resulting from lowering a switch instruction.
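/// A range case [Low, High] is emitted as a single unsigned comparison on the
/// rebased value: "(X - Low) <=u (High - Low)".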
2070 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2071                                           MachineBasicBlock *SwitchBB) {
2072   SDValue Cond;
2073   SDValue CondLHS = getValue(CB.CmpLHS);
2074   SDLoc dl = CB.DL;
2075 
2076   // Build the setcc now.
2077   if (!CB.CmpMHS) {
2078     // Fold "(X == true)" to X and "(X == false)" to !X to
2079     // handle common cases produced by branch lowering.
2080     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2081         CB.CC == ISD::SETEQ)
2082       Cond = CondLHS;
2083     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2084              CB.CC == ISD::SETEQ) {
2085       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2086       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2087     } else
2088       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
2089   } else {
2090     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2091 
2092     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2093     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2094 
2095     SDValue CmpOp = getValue(CB.CmpMHS);
2096     EVT VT = CmpOp.getValueType();
2097 
2098     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2099       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2100                           ISD::SETLE);
2101     } else {
2102       SDValue SUB = DAG.getNode(ISD::SUB, dl,
2103                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2104       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2105                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2106     }
2107   }
2108 
2109   // Update successor info
2110   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2111   // TrueBB and FalseBB are always different unless the incoming IR is
2112   // degenerate. This only happens when running llc on weird IR.
2113   if (CB.TrueBB != CB.FalseBB)
2114     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2115   SwitchBB->normalizeSuccProbs();
2116 
2117   // If the lhs block is the next block, invert the condition so that we can
2118   // fall through to the lhs instead of the rhs block.
2119   if (CB.TrueBB == NextBlock(SwitchBB)) {
2120     std::swap(CB.TrueBB, CB.FalseBB);
2121     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2122     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2123   }
2124 
2125   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2126                                MVT::Other, getControlRoot(), Cond,
2127                                DAG.getBasicBlock(CB.TrueBB));
2128 
2129   // Insert the false branch. Do this even if it's a fall through branch,
2130   // this makes it easier to do DAG optimizations which require inverting
2131   // the branch condition.
2132   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2133                        DAG.getBasicBlock(CB.FalseBB));
2134 
2135   DAG.setRoot(BrCond);
2136 }
2137 
/// visitJumpTable - Emit the JumpTable node in the current MBB.
2139 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
2140   // Emit the code for the jump table
2141   assert(JT.Reg != -1U && "Should lower JT Header first!");
2142   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2143   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2144                                      JT.Reg, PTy);
2145   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2146   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2147                                     MVT::Other, Index.getValue(1),
2148                                     Table, Index);
2149   DAG.setRoot(BrJumpTable);
2150 }
2151 
/// visitJumpTableHeader - This function emits the necessary code to produce
/// an index into the JumpTable from the value being switched on.
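/// For example, for cases 10..14 this emits "Idx = X - 10" and a conditional
/// branch to the default block when "Idx >u 4"; Idx is left in a virtual
/// register for use by the jump-table block.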
2154 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
2155                                                JumpTableHeader &JTH,
2156                                                MachineBasicBlock *SwitchBB) {
2157   SDLoc dl = getCurSDLoc();
2158 
2159   // Subtract the lowest switch case value from the value being switched on and
2160   // conditional branch to default mbb if the result is greater than the
2161   // difference between smallest and largest cases.
2162   SDValue SwitchOp = getValue(JTH.SValue);
2163   EVT VT = SwitchOp.getValueType();
2164   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2165                             DAG.getConstant(JTH.First, dl, VT));
2166 
2167   // The SDNode we just created, which holds the value being switched on minus
2168   // the smallest case value, needs to be copied to a virtual register so it
2169   // can be used as an index into the jump table in a subsequent basic block.
2170   // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
2172   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2173   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2174 
2175   unsigned JumpTableReg =
2176       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2177   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2178                                     JumpTableReg, SwitchOp);
2179   JT.Reg = JumpTableReg;
2180 
2181   // Emit the range check for the jump table, and branch to the default block
2182   // for the switch statement if the value being switched on exceeds the largest
2183   // case in the switch.
2184   SDValue CMP = DAG.getSetCC(
2185       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2186                                  Sub.getValueType()),
2187       Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2188 
2189   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2190                                MVT::Other, CopyTo, CMP,
2191                                DAG.getBasicBlock(JT.Default));
2192 
2193   // Avoid emitting unnecessary branches to the next block.
2194   if (JT.MBB != NextBlock(SwitchBB))
2195     BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2196                          DAG.getBasicBlock(JT.MBB));
2197 
2198   DAG.setRoot(BrCond);
2199 }
2200 
2201 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
2202 /// variable if there exists one.
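/// The attached memory operand marks the load as invariant and
/// dereferenceable, so later passes may treat it accordingly.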
2203 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2204                                  SDValue &Chain) {
2205   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2206   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2207   MachineFunction &MF = DAG.getMachineFunction();
2208   Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2209   MachineSDNode *Node =
2210       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2211   if (Global) {
2212     MachinePointerInfo MPInfo(Global);
2213     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2214                  MachineMemOperand::MODereferenceable;
2215     MachineMemOperand *MemRef = MF.getMachineMemOperand(
2216         MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
2217     DAG.setNodeMemRefs(Node, {MemRef});
2218   }
2219   return SDValue(Node, 0);
2220 }
2221 
2222 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2223 /// tail spliced into a stack protector check success bb.
2224 ///
2225 /// For a high level explanation of how this fits into the stack protector
2226 /// generation see the comment on the declaration of class
2227 /// StackProtectorDescriptor.
2228 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2229                                                   MachineBasicBlock *ParentBB) {
2231   // First create the loads to the guard/stack slot for the comparison.
2232   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2233   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2234 
2235   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2236   int FI = MFI.getStackProtectorIndex();
2237 
2238   SDValue Guard;
2239   SDLoc dl = getCurSDLoc();
2240   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2241   const Module &M = *ParentBB->getParent()->getFunction().getParent();
2242   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2243 
2244   // Generate code to load the content of the guard slot.
2245   SDValue GuardVal = DAG.getLoad(
2246       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
2247       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2248       MachineMemOperand::MOVolatile);
2249 
2250   if (TLI.useStackGuardXorFP())
2251     GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2252 
2253   // Retrieve guard check function, nullptr if instrumentation is inlined.
2254   if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
2255     // The target provides a guard check function to validate the guard value.
2256     // Generate a call to that function with the content of the guard slot as
2257     // argument.
2258     auto *Fn = cast<Function>(GuardCheck);
2259     FunctionType *FnTy = Fn->getFunctionType();
2260     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2261 
2262     TargetLowering::ArgListTy Args;
2263     TargetLowering::ArgListEntry Entry;
2264     Entry.Node = GuardVal;
2265     Entry.Ty = FnTy->getParamType(0);
2266     if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2267       Entry.IsInReg = true;
2268     Args.push_back(Entry);
2269 
2270     TargetLowering::CallLoweringInfo CLI(DAG);
2271     CLI.setDebugLoc(getCurSDLoc())
2272       .setChain(DAG.getEntryNode())
2273       .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2274                  getValue(GuardCheck), std::move(Args));
2275 
2276     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2277     DAG.setRoot(Result.second);
2278     return;
2279   }
2280 
2281   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2282   // Otherwise, emit a volatile load to retrieve the stack guard value.
2283   SDValue Chain = DAG.getEntryNode();
2284   if (TLI.useLoadStackGuardNode()) {
2285     Guard = getLoadStackGuard(DAG, dl, Chain);
2286   } else {
2287     const Value *IRGuard = TLI.getSDagStackGuard(M);
2288     SDValue GuardPtr = getValue(IRGuard);
2289 
2290     Guard =
2291         DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2292                     Align, MachineMemOperand::MOVolatile);
2293   }
2294 
2295   // Perform the comparison via a subtract/getsetcc.
2296   EVT VT = Guard.getValueType();
2297   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);
2298 
2299   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2300                                                         *DAG.getContext(),
2301                                                         Sub.getValueType()),
2302                              Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2303 
2304   // If the sub is not 0, then we know the guard/stackslot do not equal, so
2305   // branch to failure MBB.
2306   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2307                                MVT::Other, GuardVal.getOperand(0),
2308                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2309   // Otherwise branch to success MBB.
2310   SDValue Br = DAG.getNode(ISD::BR, dl,
2311                            MVT::Other, BrCond,
2312                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2313 
2314   DAG.setRoot(Br);
2315 }
2316 
2317 /// Codegen the failure basic block for a stack protector check.
2318 ///
2319 /// A failure stack protector machine basic block consists simply of a call to
2320 /// __stack_chk_fail().
2321 ///
2322 /// For a high level explanation of how this fits into the stack protector
2323 /// generation see the comment on the declaration of class
2324 /// StackProtectorDescriptor.
2325 void
2326 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2327   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2328   SDValue Chain =
2329       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2330                       None, false, getCurSDLoc(), false, false).second;
2331   DAG.setRoot(Chain);
2332 }
2333 
/// visitBitTestHeader - This function emits the necessary code to produce a
/// value suitable for "bit tests".
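/// The switched-on value is rebased against the smallest case value and
/// range-checked, so out-of-range inputs branch straight to the default
/// block before any bit test runs.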
2336 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2337                                              MachineBasicBlock *SwitchBB) {
2338   SDLoc dl = getCurSDLoc();
2339 
2340   // Subtract the minimum value
2341   SDValue SwitchOp = getValue(B.SValue);
2342   EVT VT = SwitchOp.getValueType();
2343   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2344                             DAG.getConstant(B.First, dl, VT));
2345 
2346   // Check range
2347   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2348   SDValue RangeCmp = DAG.getSetCC(
2349       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2350                                  Sub.getValueType()),
2351       Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2352 
2353   // Determine the type of the test operands.
2354   bool UsePtrType = false;
2355   if (!TLI.isTypeLegal(VT))
2356     UsePtrType = true;
2357   else {
2358     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2359       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch case ranges are encoded into a series of masks.
        // Just use the pointer type; it's guaranteed to fit.
2362         UsePtrType = true;
2363         break;
2364       }
2365   }
2366   if (UsePtrType) {
2367     VT = TLI.getPointerTy(DAG.getDataLayout());
2368     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2369   }
2370 
2371   B.RegVT = VT.getSimpleVT();
2372   B.Reg = FuncInfo.CreateReg(B.RegVT);
2373   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2374 
2375   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2376 
2377   addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2378   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2379   SwitchBB->normalizeSuccProbs();
2380 
2381   SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2382                                 MVT::Other, CopyTo, RangeCmp,
2383                                 DAG.getBasicBlock(B.Default));
2384 
2385   // Avoid emitting unnecessary branches to the next block.
2386   if (MBB != NextBlock(SwitchBB))
2387     BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2388                           DAG.getBasicBlock(MBB));
2389 
2390   DAG.setRoot(BrRange);
2391 }
2392 
/// visitBitTestCase - This function produces one "bit test".
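/// In the general case the rebased switch value is used as a shift amount and
/// "(1 << Val) & Mask" is tested for zero; masks with a single set (or a
/// single clear) bit are instead handled with a direct compare of the shift
/// amount.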
2394 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2395                                            MachineBasicBlock* NextMBB,
2396                                            BranchProbability BranchProbToNext,
2397                                            unsigned Reg,
2398                                            BitTestCase &B,
2399                                            MachineBasicBlock *SwitchBB) {
2400   SDLoc dl = getCurSDLoc();
2401   MVT VT = BB.RegVT;
2402   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2403   SDValue Cmp;
2404   unsigned PopCount = countPopulation(B.Mask);
2405   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2406   if (PopCount == 1) {
2407     // Testing for a single bit; just compare the shift count with what it
2408     // would need to be to shift a 1 bit in that position.
2409     Cmp = DAG.getSetCC(
2410         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2411         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2412         ISD::SETEQ);
2413   } else if (PopCount == BB.Range) {
2414     // There is only one zero bit in the range, test for it directly.
2415     Cmp = DAG.getSetCC(
2416         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2417         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2418         ISD::SETNE);
2419   } else {
2420     // Make desired shift
2421     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2422                                     DAG.getConstant(1, dl, VT), ShiftOp);
2423 
2424     // Emit bit tests and jumps
2425     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2426                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2427     Cmp = DAG.getSetCC(
2428         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2429         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2430   }
2431 
2432   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2433   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2434   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2435   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2436   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
2437   // one as they are relative probabilities (and thus work more like weights),
2438   // and hence we need to normalize them to let the sum of them become one.
2439   SwitchBB->normalizeSuccProbs();
2440 
2441   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2442                               MVT::Other, getControlRoot(),
2443                               Cmp, DAG.getBasicBlock(B.TargetBB));
2444 
2445   // Avoid emitting unnecessary branches to the next block.
2446   if (NextMBB != NextBlock(SwitchBB))
2447     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2448                         DAG.getBasicBlock(NextMBB));
2449 
2450   DAG.setRoot(BrAnd);
2451 }
2452 
2453 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2454   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2455 
  // Retrieve the successors, looking through artificial IR-level blocks such
  // as catchswitch.
2458   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2459   const BasicBlock *EHPadBB = I.getSuccessor(1);
2460 
2461   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2462   // have to do anything here to lower funclet bundles.
2463   assert(!I.hasOperandBundlesOtherThan(
2464              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2465          "Cannot lower invokes with arbitrary operand bundles yet!");
2466 
2467   const Value *Callee(I.getCalledValue());
2468   const Function *Fn = dyn_cast<Function>(Callee);
2469   if (isa<InlineAsm>(Callee))
2470     visitInlineAsm(&I);
2471   else if (Fn && Fn->isIntrinsic()) {
2472     switch (Fn->getIntrinsicID()) {
2473     default:
2474       llvm_unreachable("Cannot invoke this intrinsic");
2475     case Intrinsic::donothing:
2476       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2477       break;
2478     case Intrinsic::experimental_patchpoint_void:
2479     case Intrinsic::experimental_patchpoint_i64:
2480       visitPatchpoint(&I, EHPadBB);
2481       break;
2482     case Intrinsic::experimental_gc_statepoint:
2483       LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2484       break;
2485     }
2486   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2487     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2488     // Eventually we will support lowering the @llvm.experimental.deoptimize
2489     // intrinsic, and right now there are no plans to support other intrinsics
2490     // with deopt state.
2491     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2492   } else {
2493     LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2494   }
2495 
2496   // If the value of the invoke is used outside of its defining block, make it
2497   // available as a virtual register.
2498   // We already took care of the exported value for the statepoint instruction
  // during the call to LowerStatepoint.
2500   if (!isStatepoint(I)) {
2501     CopyToExportRegsIfNeeded(&I);
2502   }
2503 
2504   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2505   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2506   BranchProbability EHPadBBProb =
2507       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2508           : BranchProbability::getZero();
2509   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2510 
2511   // Update successor info.
2512   addSuccessorWithProb(InvokeMBB, Return);
2513   for (auto &UnwindDest : UnwindDests) {
2514     UnwindDest.first->setIsEHPad();
2515     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2516   }
2517   InvokeMBB->normalizeSuccProbs();
2518 
2519   // Drop into normal successor.
2520   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2521                           MVT::Other, getControlRoot(),
2522                           DAG.getBasicBlock(Return)));
2523 }
2524 
2525 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2526   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2527 }
2528 
2529 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2530   assert(FuncInfo.MBB->isEHPad() &&
2531          "Call to landingpad not in landing pad!");
2532 
2533   MachineBasicBlock *MBB = FuncInfo.MBB;
2534   addLandingPadInfo(LP, *MBB);
2535 
2536   // If there aren't registers to copy the values into (e.g., during SjLj
2537   // exceptions), then don't bother to create these DAG nodes.
2538   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2539   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2540   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2541       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2542     return;
2543 
2544   // If the landingpad's return type is a token type, we don't create DAG nodes
2545   // for its exception pointer and selector value. The extraction of exception
2546   // pointer or selector value from token type landingpads is not currently
2547   // supported.
2548   if (LP.getType()->isTokenTy())
2549     return;
2550 
2551   SmallVector<EVT, 2> ValueVTs;
2552   SDLoc dl = getCurSDLoc();
2553   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2554   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2555 
2556   // Get the two live-in registers as SDValues. The physregs have already been
2557   // copied into virtual registers.
2558   SDValue Ops[2];
2559   if (FuncInfo.ExceptionPointerVirtReg) {
2560     Ops[0] = DAG.getZExtOrTrunc(
2561         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2562                            FuncInfo.ExceptionPointerVirtReg,
2563                            TLI.getPointerTy(DAG.getDataLayout())),
2564         dl, ValueVTs[0]);
2565   } else {
2566     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2567   }
2568   Ops[1] = DAG.getZExtOrTrunc(
2569       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2570                          FuncInfo.ExceptionSelectorVirtReg,
2571                          TLI.getPointerTy(DAG.getDataLayout())),
2572       dl, ValueVTs[1]);
2573 
2574   // Merge into one.
2575   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2576                             DAG.getVTList(ValueVTs), Ops);
2577   setValue(&LP, Res);
2578 }
2579 
2580 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2581 #ifndef NDEBUG
2582   for (const CaseCluster &CC : Clusters)
2583     assert(CC.Low == CC.High && "Input clusters must be single-case");
2584 #endif
2585 
2586   llvm::sort(Clusters.begin(), Clusters.end(),
2587              [](const CaseCluster &a, const CaseCluster &b) {
2588     return a.Low->getValue().slt(b.Low->getValue());
2589   });
2590 
2591   // Merge adjacent clusters with the same destination.
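       // Illustrative example (hypothetical input): the single-case clusters
       // {1 -> BB0, 2 -> BB0, 5 -> BB1} become {[1,2] -> BB0, [5,5] -> BB1};
       // 1 and 2 merge because they are numerically adjacent and share a
       // destination.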
2592   const unsigned N = Clusters.size();
2593   unsigned DstIndex = 0;
2594   for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2595     CaseCluster &CC = Clusters[SrcIndex];
2596     const ConstantInt *CaseVal = CC.Low;
2597     MachineBasicBlock *Succ = CC.MBB;
2598 
2599     if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2600         (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2601       // If this case has the same successor and is a neighbour, merge it into
2602       // the previous cluster.
2603       Clusters[DstIndex - 1].High = CaseVal;
2604       Clusters[DstIndex - 1].Prob += CC.Prob;
2605     } else {
2606       std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2607                    sizeof(Clusters[SrcIndex]));
2608     }
2609   }
2610   Clusters.resize(DstIndex);
2611 }
2612 
2613 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2614                                            MachineBasicBlock *Last) {
2615   // Update JTCases.
2616   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2617     if (JTCases[i].first.HeaderBB == First)
2618       JTCases[i].first.HeaderBB = Last;
2619 
2620   // Update BitTestCases.
2621   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2622     if (BitTestCases[i].Parent == First)
2623       BitTestCases[i].Parent = Last;
2624 }
2625 
2626 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2627   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2628 
2629   // Update machine-CFG edges with unique successors.
2630   SmallSet<BasicBlock*, 32> Done;
2631   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2632     BasicBlock *BB = I.getSuccessor(i);
2633     bool Inserted = Done.insert(BB).second;
2634     if (!Inserted)
2635       continue;
2636 
2637     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2638     addSuccessorWithProb(IndirectBrMBB, Succ);
2639   }
2640   IndirectBrMBB->normalizeSuccProbs();
2641 
2642   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2643                           MVT::Other, getControlRoot(),
2644                           getValue(I.getAddress())));
2645 }
2646 
2647 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2648   if (!DAG.getTarget().Options.TrapUnreachable)
2649     return;
2650 
2651   // We may be able to ignore unreachable behind a noreturn call.
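       // For example (hypothetical IR), with NoTrapAfterNoreturn set no trap
       // needs to be emitted for:
       //   call void @abort() noreturn
       //   unreachable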
2652   if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
2653     const BasicBlock &BB = *I.getParent();
2654     if (&I != &BB.front()) {
2655       BasicBlock::const_iterator PredI =
2656         std::prev(BasicBlock::const_iterator(&I));
2657       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2658         if (Call->doesNotReturn())
2659           return;
2660       }
2661     }
2662   }
2663 
2664   DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2665 }
2666 
2667 void SelectionDAGBuilder::visitFSub(const User &I) {
2668   // -0.0 - X --> fneg
2669   Type *Ty = I.getType();
2670   if (isa<Constant>(I.getOperand(0)) &&
2671       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2672     SDValue Op2 = getValue(I.getOperand(1));
2673     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2674                              Op2.getValueType(), Op2));
2675     return;
2676   }
2677 
2678   visitBinary(I, ISD::FSUB);
2679 }
2680 
2681 /// Checks if the given instruction performs a vector reduction, in which case
2682 /// we have the freedom to alter the elements in the result as long as the
2683 /// reduction of them stays unchanged.
2684 static bool isVectorReductionOp(const User *I) {
2685   const Instruction *Inst = dyn_cast<Instruction>(I);
2686   if (!Inst || !Inst->getType()->isVectorTy())
2687     return false;
2688 
2689   auto OpCode = Inst->getOpcode();
2690   switch (OpCode) {
2691   case Instruction::Add:
2692   case Instruction::Mul:
2693   case Instruction::And:
2694   case Instruction::Or:
2695   case Instruction::Xor:
2696     break;
2697   case Instruction::FAdd:
2698   case Instruction::FMul:
2699     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2700       if (FPOp->getFastMathFlags().isFast())
2701         break;
2702     LLVM_FALLTHROUGH;
2703   default:
2704     return false;
2705   }
2706 
2707   unsigned ElemNum = Inst->getType()->getVectorNumElements();
2708   // Ensure the reduction size is a power of 2.
2709   if (!isPowerOf2_32(ElemNum))
2710     return false;
2711 
2712   unsigned ElemNumToReduce = ElemNum;
2713 
2714   // Do a DFS on the def-use chain from the given instruction. We only
2715   // allow four kinds of operations during the search until we reach the
2716   // instruction that extracts the first element from the vector:
2717   //
2718   //   1. The reduction operation of the same opcode as the given instruction.
2719   //
2720   //   2. PHI node.
2721   //
2722   //   3. ShuffleVector instruction together with a reduction operation that
2723   //      does a partial reduction.
2724   //
2725   //   4. ExtractElement that extracts the first element from the vector, and we
2726   //      stop searching the def-use chain here.
2727   //
2728   // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2729   // from 1-3 to the stack to continue the DFS. The given instruction is not
2730   // a reduction operation if we meet any instructions other than those
2731   // listed above.
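       //
       // As an illustrative (hypothetical) example, starting from %v the
       // search accepts this log2 shuffle reduction of a <4 x i32> vector:
       //
       //   %v  = add <4 x i32> %a, %b            ; the instruction checked
       //   %s1 = shufflevector <4 x i32> %v, <4 x i32> undef,
       //                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
       //   %r1 = add <4 x i32> %v, %s1
       //   %s2 = shufflevector <4 x i32> %r1, <4 x i32> undef,
       //                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
       //   %r2 = add <4 x i32> %r1, %s2
       //   %res = extractelement <4 x i32> %r2, i32 0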
2732 
2733   SmallVector<const User *, 16> UsersToVisit{Inst};
2734   SmallPtrSet<const User *, 16> Visited;
2735   bool ReduxExtracted = false;
2736 
2737   while (!UsersToVisit.empty()) {
2738     auto User = UsersToVisit.back();
2739     UsersToVisit.pop_back();
2740     if (!Visited.insert(User).second)
2741       continue;
2742 
2743     for (const auto &U : User->users()) {
2744       auto Inst = dyn_cast<Instruction>(U);
2745       if (!Inst)
2746         return false;
2747 
2748       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2749         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2750           if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
2751             return false;
2752         UsersToVisit.push_back(U);
2753       } else if (const ShuffleVectorInst *ShufInst =
2754                      dyn_cast<ShuffleVectorInst>(U)) {
2755         // Detect the following pattern: a ShuffleVector instruction together
2756         // with a reduction operation that does a partial reduction on the
2757         // first and second ElemNumToReduce / 2 elements, and stores the
2758         // result in the first ElemNumToReduce / 2 elements of another vector.
2759 
2760         unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2761         if (ResultElements < ElemNum)
2762           return false;
2763 
2764         if (ElemNumToReduce == 1)
2765           return false;
2766         if (!isa<UndefValue>(U->getOperand(1)))
2767           return false;
2768         for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2769           if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2770             return false;
2771         for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2772           if (ShufInst->getMaskValue(i) != -1)
2773             return false;
2774 
2775         // There is only one user of this ShuffleVector instruction, which
2776         // must be a reduction operation.
2777         if (!U->hasOneUse())
2778           return false;
2779 
2780         auto U2 = dyn_cast<Instruction>(*U->user_begin());
2781         if (!U2 || U2->getOpcode() != OpCode)
2782           return false;
2783 
2784         // Check operands of the reduction operation.
2785         if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2786             (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2787           UsersToVisit.push_back(U2);
2788           ElemNumToReduce /= 2;
2789         } else
2790           return false;
2791       } else if (isa<ExtractElementInst>(U)) {
2792         // At this moment we should have reduced all elements in the vector.
2793         if (ElemNumToReduce != 1)
2794           return false;
2795 
2796         const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
2797         if (!Val || !Val->isZero())
2798           return false;
2799 
2800         ReduxExtracted = true;
2801       } else
2802         return false;
2803     }
2804   }
2805   return ReduxExtracted;
2806 }
2807 
2808 void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
2809   SDNodeFlags Flags;
2810   if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
2811     Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
2812     Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
2813   }
2814   if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
2815     Flags.setExact(ExactOp->isExact());
2816   }
2817   if (isVectorReductionOp(&I)) {
2818     Flags.setVectorReduction(true);
2819     LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
2820   }
2821 
2822   SDValue Op1 = getValue(I.getOperand(0));
2823   SDValue Op2 = getValue(I.getOperand(1));
2824   SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
2825                                      Op1, Op2, Flags);
2826   setValue(&I, BinNodeValue);
2827 }
2828 
2829 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2830   SDValue Op1 = getValue(I.getOperand(0));
2831   SDValue Op2 = getValue(I.getOperand(1));
2832 
2833   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
2834       Op2.getValueType(), DAG.getDataLayout());
2835 
2836   // Coerce the shift amount to the right type if we can.
2837   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2838     unsigned ShiftSize = ShiftTy.getSizeInBits();
2839     unsigned Op2Size = Op2.getValueSizeInBits();
2840     SDLoc DL = getCurSDLoc();
2841 
2842     // If the operand is smaller than the shift count type, promote it.
2843     if (ShiftSize > Op2Size)
2844       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2845 
2846     // If the operand is larger than the shift count type but the shift
2847     // count type has enough bits to represent any shift value, truncate
2848     // it now. This is a common case and it exposes the truncate to
2849     // optimization early.
2850     else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
2851       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2852     // Otherwise we'll need to temporarily settle for some other convenient
2853     // type.  Type legalization will make adjustments once the shiftee is split.
2854     else
2855       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2856   }
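       // For example (hypothetical types): an i8 shift amount is zero-extended
       // when the target's shift-amount type is i32, while an i128 amount is
       // truncated, since i32 can hold any in-range shift count.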
2857 
2858   bool nuw = false;
2859   bool nsw = false;
2860   bool exact = false;
2861 
2862   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2864     if (const OverflowingBinaryOperator *OFBinOp =
2865             dyn_cast<const OverflowingBinaryOperator>(&I)) {
2866       nuw = OFBinOp->hasNoUnsignedWrap();
2867       nsw = OFBinOp->hasNoSignedWrap();
2868     }
2869     if (const PossiblyExactOperator *ExactOp =
2870             dyn_cast<const PossiblyExactOperator>(&I))
2871       exact = ExactOp->isExact();
2872   }
2873   SDNodeFlags Flags;
2874   Flags.setExact(exact);
2875   Flags.setNoSignedWrap(nsw);
2876   Flags.setNoUnsignedWrap(nuw);
2877   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2878                             Flags);
2879   setValue(&I, Res);
2880 }
2881 
2882 void SelectionDAGBuilder::visitSDiv(const User &I) {
2883   SDValue Op1 = getValue(I.getOperand(0));
2884   SDValue Op2 = getValue(I.getOperand(1));
2885 
2886   SDNodeFlags Flags;
2887   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2888                  cast<PossiblyExactOperator>(&I)->isExact());
2889   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2890                            Op2, Flags));
2891 }
2892 
2893 void SelectionDAGBuilder::visitICmp(const User &I) {
2894   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2895   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2896     predicate = IC->getPredicate();
2897   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2898     predicate = ICmpInst::Predicate(IC->getPredicate());
2899   SDValue Op1 = getValue(I.getOperand(0));
2900   SDValue Op2 = getValue(I.getOperand(1));
2901   ISD::CondCode Opcode = getICmpCondCode(predicate);
2902 
2903   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2904                                                         I.getType());
2905   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2906 }
2907 
2908 void SelectionDAGBuilder::visitFCmp(const User &I) {
2909   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2910   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2911     predicate = FC->getPredicate();
2912   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2913     predicate = FCmpInst::Predicate(FC->getPredicate());
2914   SDValue Op1 = getValue(I.getOperand(0));
2915   SDValue Op2 = getValue(I.getOperand(1));
2916 
2917   ISD::CondCode Condition = getFCmpCondCode(predicate);
2918   auto *FPMO = dyn_cast<FPMathOperator>(&I);
2919   if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
2920     Condition = getFCmpCodeWithoutNaN(Condition);
2921 
2922   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2923                                                         I.getType());
2924   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2925 }
2926 
2927 // Check that every user of the select's condition is itself a select; only
2928 // then can the compare be folded away when the select is lowered to min/max.
2929 static bool hasOnlySelectUsers(const Value *Cond) {
2930   return llvm::all_of(Cond->users(), [](const Value *V) {
2931     return isa<SelectInst>(V);
2932   });
2933 }
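     // For illustration (hypothetical IR): given
     //   %c = icmp slt i32 %a, %b
     //   %m = select i1 %c, i32 %a, i32 %b
     // visitSelect below can lower %m to ISD::SMIN, but only if
     // hasOnlySelectUsers holds for %c, so the dead compare can be removed.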
2934 
2935 void SelectionDAGBuilder::visitSelect(const User &I) {
2936   SmallVector<EVT, 4> ValueVTs;
2937   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2938                   ValueVTs);
2939   unsigned NumValues = ValueVTs.size();
2940   if (NumValues == 0) return;
2941 
2942   SmallVector<SDValue, 4> Values(NumValues);
2943   SDValue Cond     = getValue(I.getOperand(0));
2944   SDValue LHSVal   = getValue(I.getOperand(1));
2945   SDValue RHSVal   = getValue(I.getOperand(2));
2946   auto BaseOps = {Cond};
2947   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2948     ISD::VSELECT : ISD::SELECT;
2949 
2950   // Min/max matching is only viable if all output VTs are the same.
2951   if (is_splat(ValueVTs)) {
2952     EVT VT = ValueVTs[0];
2953     LLVMContext &Ctx = *DAG.getContext();
2954     auto &TLI = DAG.getTargetLoweringInfo();
2955 
2956     // We care about the legality of the operation after it has been type
2957     // legalized.
2958     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2959            VT != TLI.getTypeToTransformTo(Ctx, VT))
2960       VT = TLI.getTypeToTransformTo(Ctx, VT);
2961 
2962     // If the vselect is legal, assume we want to leave this as a vector setcc +
2963     // vselect. Otherwise, if this is going to be scalarized, we want to see if
2964     // min/max is legal on the scalar type.
2965     bool UseScalarMinMax = VT.isVector() &&
2966       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2967 
2968     Value *LHS, *RHS;
2969     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2970     ISD::NodeType Opc = ISD::DELETED_NODE;
2971     switch (SPR.Flavor) {
2972     case SPF_UMAX:    Opc = ISD::UMAX; break;
2973     case SPF_UMIN:    Opc = ISD::UMIN; break;
2974     case SPF_SMAX:    Opc = ISD::SMAX; break;
2975     case SPF_SMIN:    Opc = ISD::SMIN; break;
2976     case SPF_FMINNUM:
2977       switch (SPR.NaNBehavior) {
2978       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2979       case SPNB_RETURNS_NAN:   Opc = ISD::FMINNAN; break;
2980       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2981       case SPNB_RETURNS_ANY: {
2982         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
2983           Opc = ISD::FMINNUM;
2984         else if (TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT))
2985           Opc = ISD::FMINNAN;
2986         else if (UseScalarMinMax)
2987           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
2988             ISD::FMINNUM : ISD::FMINNAN;
2989         break;
2990       }
2991       }
2992       break;
2993     case SPF_FMAXNUM:
2994       switch (SPR.NaNBehavior) {
2995       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2996       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXNAN; break;
2997       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
2998       case SPNB_RETURNS_ANY:
3000         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3001           Opc = ISD::FMAXNUM;
3002         else if (TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT))
3003           Opc = ISD::FMAXNAN;
3004         else if (UseScalarMinMax)
3005           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3006             ISD::FMAXNUM : ISD::FMAXNAN;
3007         break;
3008       }
3009       break;
3010     default: break;
3011     }
3012 
3013     if (Opc != ISD::DELETED_NODE &&
3014         (TLI.isOperationLegalOrCustom(Opc, VT) ||
3015          (UseScalarMinMax &&
3016           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3017         // If the underlying comparison instruction is used by any other
3018         // instruction, the consumed instructions won't be destroyed, so it is
3019         // not profitable to convert to a min/max.
3020         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3021       OpCode = Opc;
3022       LHSVal = getValue(LHS);
3023       RHSVal = getValue(RHS);
3024       BaseOps = {};
3025     }
3026   }
3027 
3028   for (unsigned i = 0; i != NumValues; ++i) {
3029     SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3030     Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3031     Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3032     Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
3033                             LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
3034                             Ops);
3035   }
3036 
3037   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3038                            DAG.getVTList(ValueVTs), Values));
3039 }
3040 
3041 void SelectionDAGBuilder::visitTrunc(const User &I) {
3042   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3043   SDValue N = getValue(I.getOperand(0));
3044   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3045                                                         I.getType());
3046   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3047 }
3048 
3049 void SelectionDAGBuilder::visitZExt(const User &I) {
3050   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3051   // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
3052   SDValue N = getValue(I.getOperand(0));
3053   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3054                                                         I.getType());
3055   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3056 }
3057 
3058 void SelectionDAGBuilder::visitSExt(const User &I) {
3059   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3060   // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
3061   SDValue N = getValue(I.getOperand(0));
3062   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3063                                                         I.getType());
3064   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3065 }
3066 
3067 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3068   // FPTrunc is never a no-op cast, no need to check
3069   SDValue N = getValue(I.getOperand(0));
3070   SDLoc dl = getCurSDLoc();
3071   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3072   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3073   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3074                            DAG.getTargetConstant(
3075                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3076 }
3077 
3078 void SelectionDAGBuilder::visitFPExt(const User &I) {
3079   // FPExt is never a no-op cast, no need to check
3080   SDValue N = getValue(I.getOperand(0));
3081   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3082                                                         I.getType());
3083   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3084 }
3085 
3086 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3087   // FPToUI is never a no-op cast, no need to check
3088   SDValue N = getValue(I.getOperand(0));
3089   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3090                                                         I.getType());
3091   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3092 }
3093 
3094 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3095   // FPToSI is never a no-op cast, no need to check
3096   SDValue N = getValue(I.getOperand(0));
3097   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3098                                                         I.getType());
3099   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3100 }
3101 
3102 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3103   // UIToFP is never a no-op cast, no need to check
3104   SDValue N = getValue(I.getOperand(0));
3105   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3106                                                         I.getType());
3107   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3108 }
3109 
3110 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3111   // SIToFP is never a no-op cast, no need to check
3112   SDValue N = getValue(I.getOperand(0));
3113   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3114                                                         I.getType());
3115   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3116 }
3117 
3118 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3119   // What to do depends on the size of the integer and the size of the pointer.
3120   // We can either truncate, zero extend, or no-op, accordingly.
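       // e.g., with 64-bit pointers (hypothetical target): ptrtoint to i32
       // truncates, to i128 zero-extends, and to i64 is a no-op.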
3121   SDValue N = getValue(I.getOperand(0));
3122   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3123                                                         I.getType());
3124   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3125 }
3126 
3127 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3128   // What to do depends on the size of the integer and the size of the pointer.
3129   // We can either truncate, zero extend, or no-op, accordingly.
3130   SDValue N = getValue(I.getOperand(0));
3131   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3132                                                         I.getType());
3133   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3134 }
3135 
3136 void SelectionDAGBuilder::visitBitCast(const User &I) {
3137   SDValue N = getValue(I.getOperand(0));
3138   SDLoc dl = getCurSDLoc();
3139   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3140                                                         I.getType());
3141 
3142   // BitCast assures us that source and destination are the same size so this is
3143   // either a BITCAST or a no-op.
3144   if (DestVT != N.getValueType())
3145     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3146                              DestVT, N)); // convert types.
3147   // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3148   // might fold any kind of constant expression to an integer constant and that
3149   // is not what we are looking for. Only recognize a bitcast of a genuine
3150   // constant integer as an opaque constant.
3151   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3152     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3153                                  /*isOpaque*/true));
3154   else
3155     setValue(&I, N);            // noop cast.
3156 }
3157 
3158 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3159   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3160   const Value *SV = I.getOperand(0);
3161   SDValue N = getValue(SV);
3162   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3163 
3164   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3165   unsigned DestAS = I.getType()->getPointerAddressSpace();
3166 
3167   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3168     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3169 
3170   setValue(&I, N);
3171 }
3172 
3173 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3174   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3175   SDValue InVec = getValue(I.getOperand(0));
3176   SDValue InVal = getValue(I.getOperand(1));
3177   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3178                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3179   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3180                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3181                            InVec, InVal, InIdx));
3182 }
3183 
3184 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3185   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3186   SDValue InVec = getValue(I.getOperand(0));
3187   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3188                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
3189   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3190                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
3191                            InVec, InIdx));
3192 }
3193 
3194 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3195   SDValue Src1 = getValue(I.getOperand(0));
3196   SDValue Src2 = getValue(I.getOperand(1));
3197   SDLoc DL = getCurSDLoc();
3198 
3199   SmallVector<int, 8> Mask;
3200   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3201   unsigned MaskNumElts = Mask.size();
3202 
3203   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3204   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3205   EVT SrcVT = Src1.getValueType();
3206   unsigned SrcNumElts = SrcVT.getVectorNumElements();
3207 
3208   if (SrcNumElts == MaskNumElts) {
3209     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3210     return;
3211   }
3212 
3213   // Normalize the shuffle vector since mask and vector length don't match.
3214   if (SrcNumElts < MaskNumElts) {
3215     // The mask is longer than the source vectors. We can use CONCAT_VECTORS to
3216     // make the mask and vector lengths match.
3217 
3218     if (MaskNumElts % SrcNumElts == 0) {
3219       // Mask length is a multiple of the source vector length.
3220       // Check if the shuffle is some kind of concatenation of the input
3221       // vectors.
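           // For example (hypothetical): with two <2 x i32> sources, mask
           // <0,1,2,3> is exactly concat(Src1, Src2) and mask <2,3,2,3> is
           // concat(Src2, Src2).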
3222       unsigned NumConcat = MaskNumElts / SrcNumElts;
3223       bool IsConcat = true;
3224       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3225       for (unsigned i = 0; i != MaskNumElts; ++i) {
3226         int Idx = Mask[i];
3227         if (Idx < 0)
3228           continue;
3229         // Ensure the indices in each SrcVT sized piece are sequential and that
3230         // the same source is used for the whole piece.
3231         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3232             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3233              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3234           IsConcat = false;
3235           break;
3236         }
3237         // Remember which source this index came from.
3238         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3239       }
3240 
3241       // The shuffle is concatenating multiple vectors together. Just emit
3242       // a CONCAT_VECTORS operation.
3243       if (IsConcat) {
3244         SmallVector<SDValue, 8> ConcatOps;
3245         for (auto Src : ConcatSrcs) {
3246           if (Src < 0)
3247             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3248           else if (Src == 0)
3249             ConcatOps.push_back(Src1);
3250           else
3251             ConcatOps.push_back(Src2);
3252         }
3253         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3254         return;
3255       }
3256     }
3257 
3258     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3259     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3260     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3261                                     PaddedMaskNumElts);
3262 
3263     // Pad both vectors with undefs to make them the same length as the mask.
3264     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3265 
3266     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3267     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3268     MOps1[0] = Src1;
3269     MOps2[0] = Src2;
3270 
3271     Src1 = Src1.isUndef()
3272                ? DAG.getUNDEF(PaddedVT)
3273                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3274     Src2 = Src2.isUndef()
3275                ? DAG.getUNDEF(PaddedVT)
3276                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3277 
3278     // Readjust mask for new input vector length.
3279     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3280     for (unsigned i = 0; i != MaskNumElts; ++i) {
3281       int Idx = Mask[i];
3282       if (Idx >= (int)SrcNumElts)
3283         Idx -= SrcNumElts - PaddedMaskNumElts;
3284       MappedOps[i] = Idx;
3285     }
3286 
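         // For example (hypothetical): SrcNumElts == 2 and MaskNumElts == 3
         // give PaddedMaskNumElts == 4; each source is widened to four
         // elements with undefs, and mask indices into Src2 shift up by 2.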
3287     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3288 
3289     // If the concatenated vector was padded, extract a subvector with the
3290     // correct number of elements.
3291     if (MaskNumElts != PaddedMaskNumElts)
3292       Result = DAG.getNode(
3293           ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3294           DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3295 
3296     setValue(&I, Result);
3297     return;
3298   }
3299 
3300   if (SrcNumElts > MaskNumElts) {
3301     // Analyze the access pattern of the vector to see if we can extract
3302     // two subvectors and do the shuffle.
3303     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3304     bool CanExtract = true;
3305     for (int Idx : Mask) {
3306       unsigned Input = 0;
3307       if (Idx < 0)
3308         continue;
3309 
3310       if (Idx >= (int)SrcNumElts) {
3311         Input = 1;
3312         Idx -= SrcNumElts;
3313       }
3314 
3315       // If all the indices come from the same MaskNumElts sized portion of
3316       // the sources we can use extract. Also make sure the extract wouldn't
3317       // extract past the end of the source.
3318       int NewStartIdx = alignDown(Idx, MaskNumElts);
3319       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3320           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3321         CanExtract = false;
3322       // Make sure we always update StartIdx as we use it to track if all
3323       // elements are undef.
3324       StartIdx[Input] = NewStartIdx;
3325     }
3326 
3327     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3328       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3329       return;
3330     }
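         // For example (hypothetical): <8 x i32> sources with result mask
         // <4,5,6,7> extract the subvector of Src1 starting at element 4 and
         // shuffle it with the adjusted mask <0,1,2,3>.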
3331     if (CanExtract) {
3332       // Extract appropriate subvector and generate a vector shuffle
3333       for (unsigned Input = 0; Input < 2; ++Input) {
3334         SDValue &Src = Input == 0 ? Src1 : Src2;
3335         if (StartIdx[Input] < 0)
3336           Src = DAG.getUNDEF(VT);
3337         else {
3338           Src = DAG.getNode(
3339               ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3340               DAG.getConstant(StartIdx[Input], DL,
3341                               TLI.getVectorIdxTy(DAG.getDataLayout())));
3342         }
3343       }
3344 
3345       // Calculate new mask.
3346       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3347       for (int &Idx : MappedOps) {
3348         if (Idx >= (int)SrcNumElts)
3349           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3350         else if (Idx >= 0)
3351           Idx -= StartIdx[0];
3352       }
3353 
3354       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3355       return;
3356     }
3357   }
3358 
3359   // We can't use either concat vectors or extract subvectors, so fall back
3360   // to replacing the shuffle with extract and build vector.
3362   EVT EltVT = VT.getVectorElementType();
3363   EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3364   SmallVector<SDValue,8> Ops;
3365   for (int Idx : Mask) {
3366     SDValue Res;
3367 
3368     if (Idx < 0) {
3369       Res = DAG.getUNDEF(EltVT);
3370     } else {
3371       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3372       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3373 
3374       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3375                         EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3376     }
3377 
3378     Ops.push_back(Res);
3379   }
3380 
3381   setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3382 }
3383 
3384 void SelectionDAGBuilder::visitInsertValue(const User &I) {
3385   ArrayRef<unsigned> Indices;
3386   if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3387     Indices = IV->getIndices();
3388   else
3389     Indices = cast<ConstantExpr>(&I)->getIndices();
3390 
3391   const Value *Op0 = I.getOperand(0);
3392   const Value *Op1 = I.getOperand(1);
3393   Type *AggTy = I.getType();
3394   Type *ValTy = Op1->getType();
3395   bool IntoUndef = isa<UndefValue>(Op0);
3396   bool FromUndef = isa<UndefValue>(Op1);
3397 
3398   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3399 
3400   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3401   SmallVector<EVT, 4> AggValueVTs;
3402   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3403   SmallVector<EVT, 4> ValValueVTs;
3404   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3405 
3406   unsigned NumAggValues = AggValueVTs.size();
3407   unsigned NumValValues = ValValueVTs.size();
3408   SmallVector<SDValue, 4> Values(NumAggValues);
3409 
3410   // Ignore an insertvalue that produces an empty object
3411   if (!NumAggValues) {
3412     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3413     return;
3414   }
3415 
3416   SDValue Agg = getValue(Op0);
3417   unsigned i = 0;
3418   // Copy the beginning value(s) from the original aggregate.
3419   for (; i != LinearIndex; ++i)
3420     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3421                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3422   // Copy values from the inserted value(s).
3423   if (NumValValues) {
3424     SDValue Val = getValue(Op1);
3425     for (; i != LinearIndex + NumValValues; ++i)
3426       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3427                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3428   }
3429   // Copy remaining value(s) from the original aggregate.
3430   for (; i != NumAggValues; ++i)
3431     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3432                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3433 
3434   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3435                            DAG.getVTList(AggValueVTs), Values));
3436 }
3437 
3438 void SelectionDAGBuilder::visitExtractValue(const User &I) {
3439   ArrayRef<unsigned> Indices;
3440   if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3441     Indices = EV->getIndices();
3442   else
3443     Indices = cast<ConstantExpr>(&I)->getIndices();
3444 
3445   const Value *Op0 = I.getOperand(0);
3446   Type *AggTy = Op0->getType();
3447   Type *ValTy = I.getType();
3448   bool OutOfUndef = isa<UndefValue>(Op0);
3449 
3450   unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3451 
3452   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3453   SmallVector<EVT, 4> ValValueVTs;
3454   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3455 
3456   unsigned NumValValues = ValValueVTs.size();
3457 
3458   // Ignore an extractvalue that produces an empty object
3459   if (!NumValValues) {
3460     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3461     return;
3462   }
3463 
3464   SmallVector<SDValue, 4> Values(NumValValues);
3465 
3466   SDValue Agg = getValue(Op0);
3467   // Copy out the selected value(s).
3468   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3469     Values[i - LinearIndex] =
3470       OutOfUndef ?
3471         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3472         SDValue(Agg.getNode(), Agg.getResNo() + i);
3473 
3474   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3475                            DAG.getVTList(ValValueVTs), Values));
3476 }
3477 
3478 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3479   Value *Op0 = I.getOperand(0);
3480   // Note that the pointer operand may be a vector of pointers. Take the scalar
3481   // element which holds a pointer.
3482   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3483   SDValue N = getValue(Op0);
3484   SDLoc dl = getCurSDLoc();
3485 
3486   // Normalize Vector GEP - all scalar operands should be converted to the
3487   // splat vector.
3488   unsigned VectorWidth = I.getType()->isVectorTy() ?
3489     cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3490 
3491   if (VectorWidth && !N.getValueType().isVector()) {
3492     LLVMContext &Context = *DAG.getContext();
3493     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3494     N = DAG.getSplatBuildVector(VT, dl, N);
3495   }
3496 
3497   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3498        GTI != E; ++GTI) {
3499     const Value *Idx = GTI.getOperand();
3500     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3501       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3502       if (Field) {
3503         // N = N + Offset
3504         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3505 
3506         // In an inbounds GEP with an offset that is nonnegative even when
3507         // interpreted as signed, assume there is no unsigned overflow.
3508         SDNodeFlags Flags;
3509         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3510           Flags.setNoUnsignedWrap(true);
3511 
3512         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3513                         DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3514       }
3515     } else {
3516       unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3517       MVT IdxTy = MVT::getIntegerVT(IdxSize);
3518       APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3519 
3520       // If this is a scalar constant or a splat vector of constants,
3521       // handle it quickly.
3522       const auto *CI = dyn_cast<ConstantInt>(Idx);
3523       if (!CI && isa<ConstantDataVector>(Idx) &&
3524           cast<ConstantDataVector>(Idx)->getSplatValue())
3525         CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3526 
3527       if (CI) {
3528         if (CI->isZero())
3529           continue;
3530         APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
3531         LLVMContext &Context = *DAG.getContext();
3532         SDValue OffsVal = VectorWidth ?
3533           DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) :
3534           DAG.getConstant(Offs, dl, IdxTy);
3535 
3536         // In an inbounds GEP with an offset that is nonnegative even when
3537         // interpreted as signed, assume there is no unsigned overflow.
3538         SDNodeFlags Flags;
3539         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3540           Flags.setNoUnsignedWrap(true);
3541 
3542         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3543         continue;
3544       }
3545 
3546       // N = N + Idx * ElementSize;
3547       SDValue IdxN = getValue(Idx);
3548 
3549       if (!IdxN.getValueType().isVector() && VectorWidth) {
3550         EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
                                       VectorWidth);
3551         IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3552       }
3553 
3554       // If the index is smaller or larger than intptr_t, truncate or extend
3555       // it.
3556       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3557 
3558       // If this is a multiply by a power of two, turn it into a shl
3559       // immediately.  This is a very common case.
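           // For example (hypothetical): indexing an array of i64 gives
           // ElementSize == 8, so IdxN * 8 becomes IdxN << 3.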
3560       if (ElementSize != 1) {
3561         if (ElementSize.isPowerOf2()) {
3562           unsigned Amt = ElementSize.logBase2();
3563           IdxN = DAG.getNode(ISD::SHL, dl,
3564                              N.getValueType(), IdxN,
3565                              DAG.getConstant(Amt, dl, IdxN.getValueType()));
3566         } else {
3567           SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3568           IdxN = DAG.getNode(ISD::MUL, dl,
3569                              N.getValueType(), IdxN, Scale);
3570         }
3571       }
3572 
3573       N = DAG.getNode(ISD::ADD, dl,
3574                       N.getValueType(), N, IdxN);
3575     }
3576   }
3577 
3578   setValue(&I, N);
3579 }
3580 
3581 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3582   // If this is a fixed sized alloca in the entry block of the function,
3583   // allocate it statically on the stack.
3584   if (FuncInfo.StaticAllocaMap.count(&I))
3585     return;   // getValue will auto-populate this.
3586 
3587   SDLoc dl = getCurSDLoc();
3588   Type *Ty = I.getAllocatedType();
3589   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3590   auto &DL = DAG.getDataLayout();
3591   uint64_t TySize = DL.getTypeAllocSize(Ty);
3592   unsigned Align =
3593       std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3594 
3595   SDValue AllocSize = getValue(I.getArraySize());
3596 
3597   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
3598   if (AllocSize.getValueType() != IntPtr)
3599     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3600 
3601   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3602                           AllocSize,
3603                           DAG.getConstant(TySize, dl, IntPtr));
3604 
3605   // Handle alignment.  If the requested alignment is less than or equal to
3606   // the stack alignment, ignore it.  If the requested alignment is greater
3607   // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3608   unsigned StackAlign =
3609       DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3610   if (Align <= StackAlign)
3611     Align = 0;
3612 
3613   // Round the size of the allocation up to the stack alignment size
3614   // by adding SA-1 to the size. This doesn't overflow because we're computing
3615   // an address inside an alloca.
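       // For example, with StackAlign == 16 the two nodes below compute
       // (AllocSize + 15) & ~15, the next multiple of 16.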
3616   SDNodeFlags Flags;
3617   Flags.setNoUnsignedWrap(true);
3618   AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
3619                           DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
3620 
3621   // Mask out the low bits for alignment purposes.
3622   AllocSize =
3623       DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
3624                   DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
3625 
3626   SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
3627   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3628   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3629   setValue(&I, DSA);
3630   DAG.setRoot(DSA.getValue(1));
3631 
3632   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3633 }
3634 
3635 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3636   if (I.isAtomic())
3637     return visitAtomicLoad(I);
3638 
3639   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3640   const Value *SV = I.getOperand(0);
3641   if (TLI.supportSwiftError()) {
3642     // Swifterror values can come from either a function parameter with
3643     // swifterror attribute or an alloca with swifterror attribute.
3644     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3645       if (Arg->hasSwiftErrorAttr())
3646         return visitLoadFromSwiftError(I);
3647     }
3648 
3649     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3650       if (Alloca->isSwiftError())
3651         return visitLoadFromSwiftError(I);
3652     }
3653   }
3654 
3655   SDValue Ptr = getValue(SV);
3656 
3657   Type *Ty = I.getType();
3658 
3659   bool isVolatile = I.isVolatile();
3660   bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3661   bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3662   bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3663   unsigned Alignment = I.getAlignment();
3664 
3665   AAMDNodes AAInfo;
3666   I.getAAMetadata(AAInfo);
3667   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3668 
3669   SmallVector<EVT, 4> ValueVTs;
3670   SmallVector<uint64_t, 4> Offsets;
3671   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3672   unsigned NumValues = ValueVTs.size();
3673   if (NumValues == 0)
3674     return;
3675 
3676   SDValue Root;
3677   bool ConstantMemory = false;
3678   if (isVolatile || NumValues > MaxParallelChains)
3679     // Serialize volatile loads with other side effects.
3680     Root = getRoot();
3681   else if (AA && AA->pointsToConstantMemory(MemoryLocation(
3682                SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
3683     // Do not serialize (non-volatile) loads of constant memory with anything.
3684     Root = DAG.getEntryNode();
3685     ConstantMemory = true;
3686   } else {
3687     // Do not serialize non-volatile loads against each other.
3688     Root = DAG.getRoot();
3689   }
3690 
3691   SDLoc dl = getCurSDLoc();
3692 
3693   if (isVolatile)
3694     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3695 
3696   // An aggregate load cannot wrap around the address space, so offsets to its
3697   // parts don't wrap either.
3698   SDNodeFlags Flags;
3699   Flags.setNoUnsignedWrap(true);
3700 
3701   SmallVector<SDValue, 4> Values(NumValues);
3702   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3703   EVT PtrVT = Ptr.getValueType();
3704   unsigned ChainI = 0;
3705   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3706     // Serializing loads here may result in excessive register pressure, and
3707     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3708     // could recover a bit by hoisting nodes upward in the chain by recognizing
3709     // they are side-effect free or do not alias. The optimizer should really
3710     // avoid this case by converting large object/array copies to llvm.memcpy
3711     // (MaxParallelChains should always remain as a failsafe).
3712     if (ChainI == MaxParallelChains) {
3713       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3714       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3715                                   makeArrayRef(Chains.data(), ChainI));
3716       Root = Chain;
3717       ChainI = 0;
3718     }
3719     SDValue A = DAG.getNode(ISD::ADD, dl,
3720                             PtrVT, Ptr,
3721                             DAG.getConstant(Offsets[i], dl, PtrVT),
3722                             Flags);
3723     auto MMOFlags = MachineMemOperand::MONone;
3724     if (isVolatile)
3725       MMOFlags |= MachineMemOperand::MOVolatile;
3726     if (isNonTemporal)
3727       MMOFlags |= MachineMemOperand::MONonTemporal;
3728     if (isInvariant)
3729       MMOFlags |= MachineMemOperand::MOInvariant;
3730     if (isDereferenceable)
3731       MMOFlags |= MachineMemOperand::MODereferenceable;
3732     MMOFlags |= TLI.getMMOFlags(I);
3733 
3734     SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3735                             MachinePointerInfo(SV, Offsets[i]), Alignment,
3736                             MMOFlags, AAInfo, Ranges);
3737 
3738     Values[i] = L;
3739     Chains[ChainI] = L.getValue(1);
3740   }
3741 
3742   if (!ConstantMemory) {
3743     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3744                                 makeArrayRef(Chains.data(), ChainI));
3745     if (isVolatile)
3746       DAG.setRoot(Chain);
3747     else
3748       PendingLoads.push_back(Chain);
3749   }
3750 
3751   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3752                            DAG.getVTList(ValueVTs), Values));
3753 }
3754 
3755 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
3756   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3757          "call visitStoreToSwiftError when backend supports swifterror");
3758 
3759   SmallVector<EVT, 4> ValueVTs;
3760   SmallVector<uint64_t, 4> Offsets;
3761   const Value *SrcV = I.getOperand(0);
3762   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3763                   SrcV->getType(), ValueVTs, &Offsets);
3764   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3765          "expect a single EVT for swifterror");
3766 
3767   SDValue Src = getValue(SrcV);
3768   // Get or create the swifterror virtual register, then copy the value into it.
3769   unsigned VReg; bool CreatedVReg;
3770   std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
3771   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
3772   // Chain can be getRoot or getControlRoot.
3773   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
3774                                       SDValue(Src.getNode(), Src.getResNo()));
3775   DAG.setRoot(CopyNode);
3776   if (CreatedVReg)
3777     FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
3778 }
3779 
3780 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
3781   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3782          "call visitLoadFromSwiftError when backend supports swifterror");
3783 
3784   assert(!I.isVolatile() &&
3785          I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
3786          I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
3787          "load_from_swift_error does not support volatile/non-temporal/invariant");
3788 
3789   const Value *SV = I.getOperand(0);
3790   Type *Ty = I.getType();
3791   AAMDNodes AAInfo;
3792   I.getAAMetadata(AAInfo);
3793   assert((!AA || !AA->pointsToConstantMemory(MemoryLocation(
3794              SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) &&
3795          "load_from_swift_error should not be constant memory");
3796 
3797   SmallVector<EVT, 4> ValueVTs;
3798   SmallVector<uint64_t, 4> Offsets;
3799   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
3800                   ValueVTs, &Offsets);
3801   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3802          "expect a single EVT for swifterror");
3803 
3804   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
3805   SDValue L = DAG.getCopyFromReg(
3806       getRoot(), getCurSDLoc(),
3807       FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
3808       ValueVTs[0]);
3809 
3810   setValue(&I, L);
3811 }
3812 
3813 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3814   if (I.isAtomic())
3815     return visitAtomicStore(I);
3816 
3817   const Value *SrcV = I.getOperand(0);
3818   const Value *PtrV = I.getOperand(1);
3819 
3820   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3821   if (TLI.supportSwiftError()) {
3822     // Swifterror values can come from either a function parameter with
3823     // swifterror attribute or an alloca with swifterror attribute.
3824     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
3825       if (Arg->hasSwiftErrorAttr())
3826         return visitStoreToSwiftError(I);
3827     }
3828 
3829     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3830       if (Alloca->isSwiftError())
3831         return visitStoreToSwiftError(I);
3832     }
3833   }
3834 
3835   SmallVector<EVT, 4> ValueVTs;
3836   SmallVector<uint64_t, 4> Offsets;
3837   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3838                   SrcV->getType(), ValueVTs, &Offsets);
3839   unsigned NumValues = ValueVTs.size();
3840   if (NumValues == 0)
3841     return;
3842 
3843   // Get the lowered operands. Note that we do this after
3844   // checking if NumValues is zero, because with zero values
3845   // the operands won't have values in the map.
3846   SDValue Src = getValue(SrcV);
3847   SDValue Ptr = getValue(PtrV);
3848 
3849   SDValue Root = getRoot();
3850   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3851   SDLoc dl = getCurSDLoc();
3852   EVT PtrVT = Ptr.getValueType();
3853   unsigned Alignment = I.getAlignment();
3854   AAMDNodes AAInfo;
3855   I.getAAMetadata(AAInfo);
3856 
3857   auto MMOFlags = MachineMemOperand::MONone;
3858   if (I.isVolatile())
3859     MMOFlags |= MachineMemOperand::MOVolatile;
3860   if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
3861     MMOFlags |= MachineMemOperand::MONonTemporal;
3862   MMOFlags |= TLI.getMMOFlags(I);
3863 
  // An aggregate store cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
3866   SDNodeFlags Flags;
3867   Flags.setNoUnsignedWrap(true);
3868 
3869   unsigned ChainI = 0;
3870   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // See the matching logic in visitLoad: once MaxParallelChains chains are
    // pending, fold them into a TokenFactor and start a new batch.
3872     if (ChainI == MaxParallelChains) {
3873       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3874                                   makeArrayRef(Chains.data(), ChainI));
3875       Root = Chain;
3876       ChainI = 0;
3877     }
3878     SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3879                               DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
3880     SDValue St = DAG.getStore(
3881         Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
3882         MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
3883     Chains[ChainI] = St;
3884   }
3885 
3886   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3887                                   makeArrayRef(Chains.data(), ChainI));
3888   DAG.setRoot(StoreNode);
3889 }
3890 
3891 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
3892                                            bool IsCompressing) {
3893   SDLoc sdl = getCurSDLoc();
3894 
3895   auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3896                            unsigned& Alignment) {
3897     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3898     Src0 = I.getArgOperand(0);
3899     Ptr = I.getArgOperand(1);
3900     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3901     Mask = I.getArgOperand(3);
3902   };
3903   auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3904                            unsigned& Alignment) {
3905     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
3906     Src0 = I.getArgOperand(0);
3907     Ptr = I.getArgOperand(1);
3908     Mask = I.getArgOperand(2);
3909     Alignment = 0;
3910   };
3911 
3912   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3913   unsigned Alignment;
3914   if (IsCompressing)
3915     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3916   else
3917     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3918 
3919   SDValue Ptr = getValue(PtrOperand);
3920   SDValue Src0 = getValue(Src0Operand);
3921   SDValue Mask = getValue(MaskOperand);
3922 
3923   EVT VT = Src0.getValueType();
3924   if (!Alignment)
3925     Alignment = DAG.getEVTAlignment(VT);
3926 
3927   AAMDNodes AAInfo;
3928   I.getAAMetadata(AAInfo);
3929 
3930   MachineMemOperand *MMO =
3931     DAG.getMachineFunction().
3932     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3933                           MachineMemOperand::MOStore,  VT.getStoreSize(),
3934                           Alignment, AAInfo);
3935   SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3936                                          MMO, false /* Truncating */,
3937                                          IsCompressing);
3938   DAG.setRoot(StoreNode);
3939   setValue(&I, StoreNode);
3940 }
3941 
3942 // Get a uniform base for the Gather/Scatter intrinsic.
3943 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
3944 // We try to represent it as a base pointer + vector of indices.
3945 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
// The first operand of the GEP may be a single pointer or a vector of
// pointers.
3947 // Example:
3948 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
3949 //  or
3950 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
3951 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
3952 //
// When the first GEP operand is a single pointer, it is the uniform base we
// are looking for. If the first operand of the GEP is a splat vector, we
// extract the splat value and use it as the uniform base.
3956 // In all other cases the function returns 'false'.
3957 static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
3958                            SDValue &Scale, SelectionDAGBuilder* SDB) {
3959   SelectionDAG& DAG = SDB->DAG;
3960   LLVMContext &Context = *DAG.getContext();
3961 
  assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
3963   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
3964   if (!GEP)
3965     return false;
3966 
3967   const Value *GEPPtr = GEP->getPointerOperand();
3968   if (!GEPPtr->getType()->isVectorTy())
3969     Ptr = GEPPtr;
3970   else if (!(Ptr = getSplatValue(GEPPtr)))
3971     return false;
3972 
3973   unsigned FinalIndex = GEP->getNumOperands() - 1;
3974   Value *IndexVal = GEP->getOperand(FinalIndex);
3975 
3976   // Ensure all the other indices are 0.
3977   for (unsigned i = 1; i < FinalIndex; ++i) {
3978     auto *C = dyn_cast<ConstantInt>(GEP->getOperand(i));
3979     if (!C || !C->isZero())
3980       return false;
3981   }
3982 
3983   // The operands of the GEP may be defined in another basic block.
  // In that case we will not find nodes for the operands.
3985   if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
3986     return false;
3987 
3988   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3989   const DataLayout &DL = DAG.getDataLayout();
3990   Scale = DAG.getTargetConstant(DL.getTypeAllocSize(GEP->getResultElementType()),
3991                                 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
3992   Base = SDB->getValue(Ptr);
3993   Index = SDB->getValue(IndexVal);
3994 
3995   if (!Index.getValueType().isVector()) {
3996     unsigned GEPWidth = GEP->getType()->getVectorNumElements();
3997     EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
3998     Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
3999   }
4000   return true;
4001 }
4002 
4003 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4004   SDLoc sdl = getCurSDLoc();
4005 
  // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4007   const Value *Ptr = I.getArgOperand(1);
4008   SDValue Src0 = getValue(I.getArgOperand(0));
4009   SDValue Mask = getValue(I.getArgOperand(3));
4010   EVT VT = Src0.getValueType();
4011   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
4012   if (!Alignment)
4013     Alignment = DAG.getEVTAlignment(VT);
4014   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4015 
4016   AAMDNodes AAInfo;
4017   I.getAAMetadata(AAInfo);
4018 
4019   SDValue Base;
4020   SDValue Index;
4021   SDValue Scale;
4022   const Value *BasePtr = Ptr;
4023   bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4024 
4025   const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
4026   MachineMemOperand *MMO = DAG.getMachineFunction().
4027     getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
4028                          MachineMemOperand::MOStore,  VT.getStoreSize(),
4029                          Alignment, AAInfo);
4030   if (!UniformBase) {
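    // No uniform base was found: fall back to a zero base, the original
    // vector of pointers as the index, and a unit scale.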
4031     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4032     Index = getValue(Ptr);
4033     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4034   }
4035   SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index, Scale };
4036   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4037                                          Ops, MMO);
4038   DAG.setRoot(Scatter);
4039   setValue(&I, Scatter);
4040 }
4041 
4042 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4043   SDLoc sdl = getCurSDLoc();
4044 
4045   auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4046                            unsigned& Alignment) {
4047     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4048     Ptr = I.getArgOperand(0);
4049     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
4050     Mask = I.getArgOperand(2);
4051     Src0 = I.getArgOperand(3);
4052   };
4053   auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4054                            unsigned& Alignment) {
4055     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4056     Ptr = I.getArgOperand(0);
4057     Alignment = 0;
4058     Mask = I.getArgOperand(1);
4059     Src0 = I.getArgOperand(2);
4060   };
4061 
4062   Value  *PtrOperand, *MaskOperand, *Src0Operand;
4063   unsigned Alignment;
4064   if (IsExpanding)
4065     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4066   else
4067     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4068 
4069   SDValue Ptr = getValue(PtrOperand);
4070   SDValue Src0 = getValue(Src0Operand);
4071   SDValue Mask = getValue(MaskOperand);
4072 
4073   EVT VT = Src0.getValueType();
4074   if (!Alignment)
4075     Alignment = DAG.getEVTAlignment(VT);
4076 
4077   AAMDNodes AAInfo;
4078   I.getAAMetadata(AAInfo);
4079   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4080 
4081   // Do not serialize masked loads of constant memory with anything.
4082   bool AddToChain = !AA || !AA->pointsToConstantMemory(MemoryLocation(
4083       PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
4084   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4085 
4086   MachineMemOperand *MMO =
4087     DAG.getMachineFunction().
4088     getMachineMemOperand(MachinePointerInfo(PtrOperand),
4089                           MachineMemOperand::MOLoad,  VT.getStoreSize(),
4090                           Alignment, AAInfo, Ranges);
4091 
4092   SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
4093                                    ISD::NON_EXTLOAD, IsExpanding);
4094   if (AddToChain)
4095     PendingLoads.push_back(Load.getValue(1));
4096   setValue(&I, Load);
4097 }
4098 
4099 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4100   SDLoc sdl = getCurSDLoc();
4101 
4102   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4103   const Value *Ptr = I.getArgOperand(0);
4104   SDValue Src0 = getValue(I.getArgOperand(3));
4105   SDValue Mask = getValue(I.getArgOperand(2));
4106 
4107   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4108   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4109   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
4110   if (!Alignment)
4111     Alignment = DAG.getEVTAlignment(VT);
4112 
4113   AAMDNodes AAInfo;
4114   I.getAAMetadata(AAInfo);
4115   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4116 
4117   SDValue Root = DAG.getRoot();
4118   SDValue Base;
4119   SDValue Index;
4120   SDValue Scale;
4121   const Value *BasePtr = Ptr;
4122   bool UniformBase = getUniformBase(BasePtr, Base, Index, Scale, this);
4123   bool ConstantMemory = false;
4124   if (UniformBase &&
4125       AA && AA->pointsToConstantMemory(MemoryLocation(
4126           BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
4127           AAInfo))) {
4128     // Do not serialize (non-volatile) loads of constant memory with anything.
4129     Root = DAG.getEntryNode();
4130     ConstantMemory = true;
4131   }
4132 
4133   MachineMemOperand *MMO =
4134     DAG.getMachineFunction().
4135     getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
4136                          MachineMemOperand::MOLoad,  VT.getStoreSize(),
4137                          Alignment, AAInfo, Ranges);
4138 
4139   if (!UniformBase) {
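    // As in the scatter case: fall back to a zero base, the pointer vector as
    // the index, and a unit scale.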
4140     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4141     Index = getValue(Ptr);
4142     Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4143   }
4144   SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4145   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4146                                        Ops, MMO);
4147 
4148   SDValue OutChain = Gather.getValue(1);
4149   if (!ConstantMemory)
4150     PendingLoads.push_back(OutChain);
4151   setValue(&I, Gather);
4152 }
4153 
4154 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4155   SDLoc dl = getCurSDLoc();
4156   AtomicOrdering SuccessOrder = I.getSuccessOrdering();
4157   AtomicOrdering FailureOrder = I.getFailureOrdering();
4158   SyncScope::ID SSID = I.getSyncScopeID();
4159 
4160   SDValue InChain = getRoot();
4161 
4162   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4163   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4164   SDValue L = DAG.getAtomicCmpSwap(
4165       ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
4166       getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
4167       getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
4168       /*Alignment=*/ 0, SuccessOrder, FailureOrder, SSID);
4169 
4170   SDValue OutChain = L.getValue(2);
4171 
4172   setValue(&I, L);
4173   DAG.setRoot(OutChain);
4174 }
4175 
4176 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4177   SDLoc dl = getCurSDLoc();
4178   ISD::NodeType NT;
4179   switch (I.getOperation()) {
4180   default: llvm_unreachable("Unknown atomicrmw operation");
4181   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4182   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4183   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4184   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4185   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4186   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4187   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4188   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4189   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4190   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4191   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4192   }
4193   AtomicOrdering Order = I.getOrdering();
4194   SyncScope::ID SSID = I.getSyncScopeID();
4195 
4196   SDValue InChain = getRoot();
4197 
4198   SDValue L =
4199     DAG.getAtomic(NT, dl,
4200                   getValue(I.getValOperand()).getSimpleValueType(),
4201                   InChain,
4202                   getValue(I.getPointerOperand()),
4203                   getValue(I.getValOperand()),
4204                   I.getPointerOperand(),
4205                   /* Alignment=*/ 0, Order, SSID);
4206 
4207   SDValue OutChain = L.getValue(1);
4208 
4209   setValue(&I, L);
4210   DAG.setRoot(OutChain);
4211 }
4212 
4213 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4214   SDLoc dl = getCurSDLoc();
4215   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4216   SDValue Ops[3];
4217   Ops[0] = getRoot();
4218   Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
4219                            TLI.getFenceOperandTy(DAG.getDataLayout()));
4220   Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
4221                            TLI.getFenceOperandTy(DAG.getDataLayout()));
4222   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4223 }
4224 
4225 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4226   SDLoc dl = getCurSDLoc();
4227   AtomicOrdering Order = I.getOrdering();
4228   SyncScope::ID SSID = I.getSyncScopeID();
4229 
4230   SDValue InChain = getRoot();
4231 
4232   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4233   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4234 
4235   if (!TLI.supportsUnalignedAtomics() &&
4236       I.getAlignment() < VT.getStoreSize())
4237     report_fatal_error("Cannot generate unaligned atomic load");
4238 
4239   MachineMemOperand *MMO =
4240       DAG.getMachineFunction().
4241       getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4242                            MachineMemOperand::MOVolatile |
4243                            MachineMemOperand::MOLoad,
4244                            VT.getStoreSize(),
4245                            I.getAlignment() ? I.getAlignment() :
4246                                               DAG.getEVTAlignment(VT),
4247                            AAMDNodes(), nullptr, SSID, Order);
4248 
4249   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4250   SDValue L =
4251       DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
4252                     getValue(I.getPointerOperand()), MMO);
4253 
4254   SDValue OutChain = L.getValue(1);
4255 
4256   setValue(&I, L);
4257   DAG.setRoot(OutChain);
4258 }
4259 
4260 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4261   SDLoc dl = getCurSDLoc();
4262 
4263   AtomicOrdering Order = I.getOrdering();
4264   SyncScope::ID SSID = I.getSyncScopeID();
4265 
4266   SDValue InChain = getRoot();
4267 
4268   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4269   EVT VT =
4270       TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4271 
4272   if (I.getAlignment() < VT.getStoreSize())
4273     report_fatal_error("Cannot generate unaligned atomic store");
4274 
4275   SDValue OutChain =
4276     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
4277                   InChain,
4278                   getValue(I.getPointerOperand()),
4279                   getValue(I.getValueOperand()),
4280                   I.getPointerOperand(), I.getAlignment(),
4281                   Order, SSID);
4282 
4283   DAG.setRoot(OutChain);
4284 }
4285 
4286 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4287 /// node.
4288 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4289                                                unsigned Intrinsic) {
4290   // Ignore the callsite's attributes. A specific call site may be marked with
4291   // readnone, but the lowering code will expect the chain based on the
4292   // definition.
4293   const Function *F = I.getCalledFunction();
4294   bool HasChain = !F->doesNotAccessMemory();
4295   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4296 
4297   // Build the operand list.
4298   SmallVector<SDValue, 8> Ops;
4299   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4300     if (OnlyLoad) {
4301       // We don't need to serialize loads against other loads.
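      // (DAG.getRoot() returns the current root without flushing
      // PendingLoads, so this node stays parallel to other pending loads.)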
4302       Ops.push_back(DAG.getRoot());
4303     } else {
4304       Ops.push_back(getRoot());
4305     }
4306   }
4307 
  // Info is set by getTgtMemIntrinsic.
4309   TargetLowering::IntrinsicInfo Info;
4310   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4311   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4312                                                DAG.getMachineFunction(),
4313                                                Intrinsic);
4314 
4315   // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4316   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4317       Info.opc == ISD::INTRINSIC_W_CHAIN)
4318     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4319                                         TLI.getPointerTy(DAG.getDataLayout())));
4320 
4321   // Add all operands of the call to the operand list.
4322   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4323     SDValue Op = getValue(I.getArgOperand(i));
4324     Ops.push_back(Op);
4325   }
4326 
4327   SmallVector<EVT, 4> ValueVTs;
4328   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4329 
4330   if (HasChain)
4331     ValueVTs.push_back(MVT::Other);
4332 
4333   SDVTList VTs = DAG.getVTList(ValueVTs);
4334 
4335   // Create the node.
4336   SDValue Result;
4337   if (IsTgtIntrinsic) {
    // This is a target intrinsic that touches memory.
4339     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs,
4340       Ops, Info.memVT,
4341       MachinePointerInfo(Info.ptrVal, Info.offset), Info.align,
4342       Info.flags, Info.size);
4343   } else if (!HasChain) {
4344     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4345   } else if (!I.getType()->isVoidTy()) {
4346     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4347   } else {
4348     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4349   }
4350 
4351   if (HasChain) {
4352     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4353     if (OnlyLoad)
4354       PendingLoads.push_back(Chain);
4355     else
4356       DAG.setRoot(Chain);
4357   }
4358 
4359   if (!I.getType()->isVoidTy()) {
4360     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4361       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4362       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4363     } else
4364       Result = lowerRangeToAssertZExt(DAG, I, Result);
4365 
4366     setValue(&I, Result);
4367   }
4368 }
4369 
4370 /// GetSignificand - Get the significand and build it into a floating-point
4371 /// number with exponent of 1:
4372 ///
4373 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4374 ///
/// where Op is the i32 bit pattern of the floating-point value.
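///
/// For example, Op = 0x41200000 (10.0f) yields 0x3fa00000 (1.25f), the
/// significand of 10.0f scaled into [1,2).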
4376 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4377   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4378                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4379   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4380                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4381   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4382 }
4383 
4384 /// GetExponent - Get the exponent:
4385 ///
4386 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4387 ///
/// where Op is the i32 bit pattern of the floating-point value.
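///
/// For example, Op = 0x41200000 (10.0f) has a biased exponent field of 130,
/// giving (float)(130 - 127) = 3.0f.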
4389 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4390                            const TargetLowering &TLI, const SDLoc &dl) {
4391   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4392                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4393   SDValue t1 = DAG.getNode(
4394       ISD::SRL, dl, MVT::i32, t0,
4395       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4396   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4397                            DAG.getConstant(127, dl, MVT::i32));
4398   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4399 }
4400 
/// getF32Constant - Get a 32-bit floating-point constant.
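/// The Flt argument is the IEEE-754 bit pattern; e.g. 0x3f800000 is 1.0f.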
4402 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4403                               const SDLoc &dl) {
4404   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4405                            MVT::f32);
4406 }
4407 
4408 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4409                                        SelectionDAG &DAG) {
4410   // TODO: What fast-math-flags should be set on the floating-point nodes?
4411 
  //   IntegerPartOfX = (int32_t)t0;
4413   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4414 
4415   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4416   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4417   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4418 
4419   //   IntegerPartOfX <<= 23;
4420   IntegerPartOfX = DAG.getNode(
4421       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4422       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4423                                   DAG.getDataLayout())));
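  // The shifted integer part is later added to the bit pattern of
  // 2^FractionalPartOfX; since the shifted value has zeros in the mantissa
  // bits, this just bumps the exponent field, scaling the result by
  // 2^IntegerPartOfX (e.g. t0 = 3.5 gives X = 0.5 and a final scale of 2^3).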
4424 
4425   SDValue TwoToFractionalPartOfX;
4426   if (LimitFloatPrecision <= 6) {
4427     // For floating-point precision of 6:
4428     //
4429     //   TwoToFractionalPartOfX =
4430     //     0.997535578f +
4431     //       (0.735607626f + 0.252464424f * x) * x;
4432     //
4433     // error 0.0144103317, which is 6 bits
4434     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4435                              getF32Constant(DAG, 0x3e814304, dl));
4436     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4437                              getF32Constant(DAG, 0x3f3c50c8, dl));
4438     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4439     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4440                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4441   } else if (LimitFloatPrecision <= 12) {
4442     // For floating-point precision of 12:
4443     //
4444     //   TwoToFractionalPartOfX =
4445     //     0.999892986f +
4446     //       (0.696457318f +
4447     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4448     //
4449     // error 0.000107046256, which is 13 to 14 bits
4450     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4451                              getF32Constant(DAG, 0x3da235e3, dl));
4452     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4453                              getF32Constant(DAG, 0x3e65b8f3, dl));
4454     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4455     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4456                              getF32Constant(DAG, 0x3f324b07, dl));
4457     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4458     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4459                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4460   } else { // LimitFloatPrecision <= 18
4461     // For floating-point precision of 18:
4462     //
4463     //   TwoToFractionalPartOfX =
4464     //     0.999999982f +
4465     //       (0.693148872f +
4466     //         (0.240227044f +
4467     //           (0.554906021e-1f +
4468     //             (0.961591928e-2f +
4469     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4470     // error 2.47208000*10^(-7), which is better than 18 bits
4471     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4472                              getF32Constant(DAG, 0x3924b03e, dl));
4473     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4474                              getF32Constant(DAG, 0x3ab24b87, dl));
4475     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4476     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4477                              getF32Constant(DAG, 0x3c1d8c17, dl));
4478     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4479     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4480                              getF32Constant(DAG, 0x3d634a1d, dl));
4481     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4482     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4483                              getF32Constant(DAG, 0x3e75fe14, dl));
4484     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4485     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4486                               getF32Constant(DAG, 0x3f317234, dl));
4487     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4488     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4489                                          getF32Constant(DAG, 0x3f800000, dl));
4490   }
4491 
4492   // Add the exponent into the result in integer domain.
4493   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4494   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4495                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4496 }
4497 
4498 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4499 /// limited-precision mode.
4500 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4501                          const TargetLowering &TLI) {
4502   if (Op.getValueType() == MVT::f32 &&
4503       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4504 
4505     // Put the exponent in the right bit position for later addition to the
4506     // final result:
4507     //
4508     //   #define LOG2OFe 1.4426950f
4509     //   t0 = Op * LOG2OFe
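    //
    // (This uses exp(x) = 2^(x * log2(e)) so the exp2 expansion is reusable.)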
4510 
4511     // TODO: What fast-math-flags should be set here?
4512     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4513                              getF32Constant(DAG, 0x3fb8aa3b, dl));
4514     return getLimitedPrecisionExp2(t0, dl, DAG);
4515   }
4516 
4517   // No special expansion.
4518   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4519 }
4520 
4521 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4522 /// limited-precision mode.
4523 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4524                          const TargetLowering &TLI) {
4525   // TODO: What fast-math-flags should be set on the floating-point nodes?
4526 
4527   if (Op.getValueType() == MVT::f32 &&
4528       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4529     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4530 
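    // With x = 2^n * m, m in [1,2): log(x) = n*log(2) + log(m).
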
4531     // Scale the exponent by log(2) [0.69314718f].
4532     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4533     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4534                                         getF32Constant(DAG, 0x3f317218, dl));
4535 
4536     // Get the significand and build it into a floating-point number with
4537     // exponent of 1.
4538     SDValue X = GetSignificand(DAG, Op1, dl);
4539 
4540     SDValue LogOfMantissa;
4541     if (LimitFloatPrecision <= 6) {
4542       // For floating-point precision of 6:
4543       //
      //   LogOfMantissa =
4545       //     -1.1609546f +
4546       //       (1.4034025f - 0.23903021f * x) * x;
4547       //
4548       // error 0.0034276066, which is better than 8 bits
4549       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4550                                getF32Constant(DAG, 0xbe74c456, dl));
4551       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4552                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4553       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4554       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4555                                   getF32Constant(DAG, 0x3f949a29, dl));
4556     } else if (LimitFloatPrecision <= 12) {
4557       // For floating-point precision of 12:
4558       //
4559       //   LogOfMantissa =
4560       //     -1.7417939f +
4561       //       (2.8212026f +
4562       //         (-1.4699568f +
4563       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4564       //
4565       // error 0.000061011436, which is 14 bits
4566       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4567                                getF32Constant(DAG, 0xbd67b6d6, dl));
4568       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4569                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4570       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4571       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4572                                getF32Constant(DAG, 0x3fbc278b, dl));
4573       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4574       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4575                                getF32Constant(DAG, 0x40348e95, dl));
4576       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4577       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4578                                   getF32Constant(DAG, 0x3fdef31a, dl));
4579     } else { // LimitFloatPrecision <= 18
4580       // For floating-point precision of 18:
4581       //
4582       //   LogOfMantissa =
4583       //     -2.1072184f +
4584       //       (4.2372794f +
4585       //         (-3.7029485f +
4586       //           (2.2781945f +
4587       //             (-0.87823314f +
4588       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4589       //
4590       // error 0.0000023660568, which is better than 18 bits
4591       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4592                                getF32Constant(DAG, 0xbc91e5ac, dl));
4593       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4594                                getF32Constant(DAG, 0x3e4350aa, dl));
4595       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4596       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4597                                getF32Constant(DAG, 0x3f60d3e3, dl));
4598       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4599       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4600                                getF32Constant(DAG, 0x4011cdf0, dl));
4601       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4602       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4603                                getF32Constant(DAG, 0x406cfd1c, dl));
4604       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4605       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4606                                getF32Constant(DAG, 0x408797cb, dl));
4607       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4608       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4609                                   getF32Constant(DAG, 0x4006dcab, dl));
4610     }
4611 
4612     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4613   }
4614 
4615   // No special expansion.
4616   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4617 }
4618 
4619 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4620 /// limited-precision mode.
4621 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4622                           const TargetLowering &TLI) {
4623   // TODO: What fast-math-flags should be set on the floating-point nodes?
4624 
4625   if (Op.getValueType() == MVT::f32 &&
4626       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4627     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4628 
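    // With x = 2^n * m, m in [1,2): log2(x) = n + log2(m).
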
4629     // Get the exponent.
4630     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4631 
4632     // Get the significand and build it into a floating-point number with
4633     // exponent of 1.
4634     SDValue X = GetSignificand(DAG, Op1, dl);
4635 
4636     // Different possible minimax approximations of significand in
4637     // floating-point for various degrees of accuracy over [1,2].
4638     SDValue Log2ofMantissa;
4639     if (LimitFloatPrecision <= 6) {
4640       // For floating-point precision of 6:
4641       //
      //   Log2ofMantissa = -1.6749035f + (2.0246817f - 0.34484768f * x) * x;
4643       //
4644       // error 0.0049451742, which is more than 7 bits
4645       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4646                                getF32Constant(DAG, 0xbeb08fe0, dl));
4647       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4648                                getF32Constant(DAG, 0x40019463, dl));
4649       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4650       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4651                                    getF32Constant(DAG, 0x3fd6633d, dl));
4652     } else if (LimitFloatPrecision <= 12) {
4653       // For floating-point precision of 12:
4654       //
4655       //   Log2ofMantissa =
4656       //     -2.51285454f +
4657       //       (4.07009056f +
4658       //         (-2.12067489f +
4659       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4660       //
4661       // error 0.0000876136000, which is better than 13 bits
4662       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4663                                getF32Constant(DAG, 0xbda7262e, dl));
4664       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4665                                getF32Constant(DAG, 0x3f25280b, dl));
4666       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4667       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4668                                getF32Constant(DAG, 0x4007b923, dl));
4669       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4670       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4671                                getF32Constant(DAG, 0x40823e2f, dl));
4672       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4673       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4674                                    getF32Constant(DAG, 0x4020d29c, dl));
4675     } else { // LimitFloatPrecision <= 18
4676       // For floating-point precision of 18:
4677       //
4678       //   Log2ofMantissa =
4679       //     -3.0400495f +
4680       //       (6.1129976f +
4681       //         (-5.3420409f +
4682       //           (3.2865683f +
4683       //             (-1.2669343f +
4684       //               (0.27515199f -
4685       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4686       //
4687       // error 0.0000018516, which is better than 18 bits
4688       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4689                                getF32Constant(DAG, 0xbcd2769e, dl));
4690       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4691                                getF32Constant(DAG, 0x3e8ce0b9, dl));
4692       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4693       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4694                                getF32Constant(DAG, 0x3fa22ae7, dl));
4695       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4696       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4697                                getF32Constant(DAG, 0x40525723, dl));
4698       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4699       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4700                                getF32Constant(DAG, 0x40aaf200, dl));
4701       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4702       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4703                                getF32Constant(DAG, 0x40c39dad, dl));
4704       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4705       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4706                                    getF32Constant(DAG, 0x4042902c, dl));
4707     }
4708 
4709     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4710   }
4711 
4712   // No special expansion.
4713   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4714 }
4715 
4716 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4717 /// limited-precision mode.
4718 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4719                            const TargetLowering &TLI) {
4720   // TODO: What fast-math-flags should be set on the floating-point nodes?
4721 
4722   if (Op.getValueType() == MVT::f32 &&
4723       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4724     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4725 
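    // With x = 2^n * m, m in [1,2): log10(x) = n*log10(2) + log10(m).
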
4726     // Scale the exponent by log10(2) [0.30102999f].
4727     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4728     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4729                                         getF32Constant(DAG, 0x3e9a209a, dl));
4730 
4731     // Get the significand and build it into a floating-point number with
4732     // exponent of 1.
4733     SDValue X = GetSignificand(DAG, Op1, dl);
4734 
4735     SDValue Log10ofMantissa;
4736     if (LimitFloatPrecision <= 6) {
4737       // For floating-point precision of 6:
4738       //
4739       //   Log10ofMantissa =
4740       //     -0.50419619f +
4741       //       (0.60948995f - 0.10380950f * x) * x;
4742       //
4743       // error 0.0014886165, which is 6 bits
4744       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4745                                getF32Constant(DAG, 0xbdd49a13, dl));
4746       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4747                                getF32Constant(DAG, 0x3f1c0789, dl));
4748       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4749       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4750                                     getF32Constant(DAG, 0x3f011300, dl));
4751     } else if (LimitFloatPrecision <= 12) {
4752       // For floating-point precision of 12:
4753       //
4754       //   Log10ofMantissa =
4755       //     -0.64831180f +
4756       //       (0.91751397f +
4757       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4758       //
4759       // error 0.00019228036, which is better than 12 bits
4760       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4761                                getF32Constant(DAG, 0x3d431f31, dl));
4762       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4763                                getF32Constant(DAG, 0x3ea21fb2, dl));
4764       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4765       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4766                                getF32Constant(DAG, 0x3f6ae232, dl));
4767       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4768       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4769                                     getF32Constant(DAG, 0x3f25f7c3, dl));
4770     } else { // LimitFloatPrecision <= 18
4771       // For floating-point precision of 18:
4772       //
4773       //   Log10ofMantissa =
4774       //     -0.84299375f +
4775       //       (1.5327582f +
4776       //         (-1.0688956f +
4777       //           (0.49102474f +
4778       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4779       //
4780       // error 0.0000037995730, which is better than 18 bits
4781       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4782                                getF32Constant(DAG, 0x3c5d51ce, dl));
4783       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4784                                getF32Constant(DAG, 0x3e00685a, dl));
4785       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4786       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4787                                getF32Constant(DAG, 0x3efb6798, dl));
4788       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4789       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4790                                getF32Constant(DAG, 0x3f88d192, dl));
4791       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4792       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4793                                getF32Constant(DAG, 0x3fc4316c, dl));
4794       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4795       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4796                                     getF32Constant(DAG, 0x3f57ce70, dl));
4797     }
4798 
4799     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4800   }
4801 
4802   // No special expansion.
4803   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4804 }
4805 
4806 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4807 /// limited-precision mode.
4808 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4809                           const TargetLowering &TLI) {
4810   if (Op.getValueType() == MVT::f32 &&
4811       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4812     return getLimitedPrecisionExp2(Op, dl, DAG);
4813 
4814   // No special expansion.
4815   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4816 }
4817 
/// expandPow - Lower a pow intrinsic. Handles the special sequence for
/// limited-precision mode when the base is exactly 10.0f.
4820 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4821                          SelectionDAG &DAG, const TargetLowering &TLI) {
4822   bool IsExp10 = false;
4823   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4824       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4825     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4826       APFloat Ten(10.0f);
4827       IsExp10 = LHSC->isExactlyValue(Ten);
4828     }
4829   }
4830 
4831   // TODO: What fast-math-flags should be set on the FMUL node?
4832   if (IsExp10) {
4833     // Put the exponent in the right bit position for later addition to the
4834     // final result:
4835     //
4836     //   #define LOG2OF10 3.3219281f
4837     //   t0 = Op * LOG2OF10;
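    //
    // (This uses 10^x = 2^(x * log2(10)) so the exp2 expansion is reusable.)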
4838     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4839                              getF32Constant(DAG, 0x40549a78, dl));
4840     return getLimitedPrecisionExp2(t0, dl, DAG);
4841   }
4842 
4843   // No special expansion.
4844   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4845 }
4846 
4847 /// ExpandPowI - Expand a llvm.powi intrinsic.
4848 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4849                           SelectionDAG &DAG) {
4850   // If RHS is a constant, we can expand this out to a multiplication tree,
4851   // otherwise we end up lowering to a call to __powidf2 (for example).  When
4852   // optimizing for size, we only want to do this if the expansion would produce
4853   // a small number of multiplies, otherwise we do the full expansion.
4854   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4855     // Get the exponent as a positive value.
4856     unsigned Val = RHSC->getSExtValue();
4857     if ((int)Val < 0) Val = -Val;
4858 
4859     // powi(x, 0) -> 1.0
4860     if (Val == 0)
4861       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4862 
4863     const Function &F = DAG.getMachineFunction().getFunction();
4864     if (!F.optForSize() ||
4865         // If optimizing for size, don't insert too many multiplies.
4866         // This inserts up to 5 multiplies.
4867         countPopulation(Val) + Log2_32(Val) < 7) {
4868       // We use the simple binary decomposition method to generate the multiply
4869       // sequence.  There are more optimal ways to do this (for example,
4870       // powi(x,15) generates one more multiply than it should), but this has
4871       // the benefit of being both really simple and much better than a libcall.
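      // For example, powi(x, 13) with 13 = 0b1101 accumulates
      // Res = x * x^4 * x^8 = x^13 from the successive squarings of x.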
4872       SDValue Res;  // Logically starts equal to 1.0
4873       SDValue CurSquare = LHS;
4874       // TODO: Intrinsics should have fast-math-flags that propagate to these
4875       // nodes.
4876       while (Val) {
4877         if (Val & 1) {
4878           if (Res.getNode())
4879             Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
4880           else
4881             Res = CurSquare;  // 1.0*CurSquare.
4882         }
4883 
4884         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4885                                 CurSquare, CurSquare);
4886         Val >>= 1;
4887       }
4888 
4889       // If the original was negative, invert the result, producing 1/(x*x*x).
4890       if (RHSC->getSExtValue() < 0)
4891         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4892                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
4893       return Res;
4894     }
4895   }
4896 
4897   // Otherwise, expand to a libcall.
4898   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4899 }
4900 
4901 // getUnderlyingArgReg - Find underlying register used for a truncated or
4902 // bitcasted argument.
4903 static unsigned getUnderlyingArgReg(const SDValue &N) {
4904   switch (N.getOpcode()) {
4905   case ISD::CopyFromReg:
4906     return cast<RegisterSDNode>(N.getOperand(1))->getReg();
4907   case ISD::BITCAST:
4908   case ISD::AssertZext:
4909   case ISD::AssertSext:
4910   case ISD::TRUNCATE:
4911     return getUnderlyingArgReg(N.getOperand(0));
4912   default:
4913     return 0;
4914   }
4915 }
4916 
4917 /// If the DbgValueInst is a dbg_value of a function argument, create the
4918 /// corresponding DBG_VALUE machine instruction for it now.  At the end of
/// instruction selection, they will be inserted into the entry BB.
4920 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4921     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
4922     DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
4923   const Argument *Arg = dyn_cast<Argument>(V);
4924   if (!Arg)
4925     return false;
4926 
4927   MachineFunction &MF = DAG.getMachineFunction();
4928   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4929 
4930   bool IsIndirect = false;
4931   Optional<MachineOperand> Op;
  // Some arguments have their frame index recorded during argument lowering.
4933   int FI = FuncInfo.getArgumentFrameIndex(Arg);
4934   if (FI != std::numeric_limits<int>::max())
4935     Op = MachineOperand::CreateFI(FI);
4936 
4937   if (!Op && N.getNode()) {
4938     unsigned Reg = getUnderlyingArgReg(N);
4939     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4940       MachineRegisterInfo &RegInfo = MF.getRegInfo();
4941       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4942       if (PR)
4943         Reg = PR;
4944     }
4945     if (Reg) {
4946       Op = MachineOperand::CreateReg(Reg, false);
4947       IsIndirect = IsDbgDeclare;
4948     }
4949   }
4950 
4951   if (!Op && N.getNode())
4952     // Check if frame index is available.
4953     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4954       if (FrameIndexSDNode *FINode =
4955           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4956         Op = MachineOperand::CreateFI(FINode->getIndex());
4957 
4958   if (!Op) {
4959     // Check if ValueMap has reg number.
4960     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4961     if (VMI != FuncInfo.ValueMap.end()) {
4962       const auto &TLI = DAG.getTargetLoweringInfo();
4963       RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
4964                        V->getType(), getABIRegCopyCC(V));
4965       if (RFV.occupiesMultipleRegs()) {
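        // The value is split across several registers: emit one DBG_VALUE per
        // register, each with a fragment expression describing which bits of
        // the variable that register holds.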
4966         unsigned Offset = 0;
4967         for (auto RegAndSize : RFV.getRegsAndSizes()) {
4968           Op = MachineOperand::CreateReg(RegAndSize.first, false);
4969           auto FragmentExpr = DIExpression::createFragmentExpression(
4970               Expr, Offset, RegAndSize.second);
4971           if (!FragmentExpr)
4972             continue;
4973           FuncInfo.ArgDbgValues.push_back(
4974               BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
4975                       Op->getReg(), Variable, *FragmentExpr));
4976           Offset += RegAndSize.second;
4977         }
4978         return true;
4979       }
4980       Op = MachineOperand::CreateReg(VMI->second, false);
4981       IsIndirect = IsDbgDeclare;
4982     }
4983   }
4984 
4985   if (!Op)
4986     return false;
4987 
4988   assert(Variable->isValidLocationForIntrinsic(DL) &&
4989          "Expected inlined-at fields to agree");
4990   IsIndirect = (Op->isReg()) ? IsIndirect : true;
4991   FuncInfo.ArgDbgValues.push_back(
4992       BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
4993               *Op, Variable, Expr));
4994 
4995   return true;
4996 }
4997 
4998 /// Return the appropriate SDDbgValue based on N.
4999 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5000                                              DILocalVariable *Variable,
5001                                              DIExpression *Expr,
5002                                              const DebugLoc &dl,
5003                                              unsigned DbgSDNodeOrder) {
5004   if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5005     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5006     // stack slot locations.
5007     //
5008     // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5009     // debug values here after optimization:
5010     //
5011     //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5012     //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5013     //
5014     // Both describe the direct values of their associated variables.
5015     return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5016                                      /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5017   }
5018   return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5019                          /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5020 }
5021 
// Visual Studio defines setjmp as _setjmp.
5023 #if defined(_MSC_VER) && defined(setjmp) && \
5024                          !defined(setjmp_undefined_for_msvc)
5025 #  pragma push_macro("setjmp")
5026 #  undef setjmp
5027 #  define setjmp_undefined_for_msvc
5028 #endif
5029 
5030 /// Lower the call to the specified intrinsic function. If we want to emit this
5031 /// as a call to a named external function, return the name. Otherwise, lower it
5032 /// and return null.
5033 const char *
5034 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
5035   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5036   SDLoc sdl = getCurSDLoc();
5037   DebugLoc dl = getCurDebugLoc();
5038   SDValue Res;
5039 
5040   switch (Intrinsic) {
5041   default:
5042     // By default, turn this into a target intrinsic node.
5043     visitTargetIntrinsic(I, Intrinsic);
5044     return nullptr;
5045   case Intrinsic::vastart:  visitVAStart(I); return nullptr;
5046   case Intrinsic::vaend:    visitVAEnd(I); return nullptr;
5047   case Intrinsic::vacopy:   visitVACopy(I); return nullptr;
5048   case Intrinsic::returnaddress:
5049     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5050                              TLI.getPointerTy(DAG.getDataLayout()),
5051                              getValue(I.getArgOperand(0))));
5052     return nullptr;
5053   case Intrinsic::addressofreturnaddress:
5054     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5055                              TLI.getPointerTy(DAG.getDataLayout())));
5056     return nullptr;
5057   case Intrinsic::frameaddress:
5058     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5059                              TLI.getPointerTy(DAG.getDataLayout()),
5060                              getValue(I.getArgOperand(0))));
5061     return nullptr;
5062   case Intrinsic::read_register: {
5063     Value *Reg = I.getArgOperand(0);
5064     SDValue Chain = getRoot();
5065     SDValue RegName =
5066         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5067     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5068     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5069       DAG.getVTList(VT, MVT::Other), Chain, RegName);
5070     setValue(&I, Res);
5071     DAG.setRoot(Res.getValue(1));
5072     return nullptr;
5073   }
5074   case Intrinsic::write_register: {
5075     Value *Reg = I.getArgOperand(0);
5076     Value *RegValue = I.getArgOperand(1);
5077     SDValue Chain = getRoot();
5078     SDValue RegName =
5079         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5080     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5081                             RegName, getValue(RegValue)));
5082     return nullptr;
5083   }
5084   case Intrinsic::setjmp:
5085     return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
5086   case Intrinsic::longjmp:
5087     return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
5088   case Intrinsic::memcpy: {
5089     const auto &MCI = cast<MemCpyInst>(I);
5090     SDValue Op1 = getValue(I.getArgOperand(0));
5091     SDValue Op2 = getValue(I.getArgOperand(1));
5092     SDValue Op3 = getValue(I.getArgOperand(2));
5093     // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5094     unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1);
5095     unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1);
5096     unsigned Align = MinAlign(DstAlign, SrcAlign);
5097     bool isVol = MCI.isVolatile();
5098     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5099     // FIXME: Support passing different dest/src alignments to the memcpy DAG
5100     // node.
5101     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5102                                false, isTC,
5103                                MachinePointerInfo(I.getArgOperand(0)),
5104                                MachinePointerInfo(I.getArgOperand(1)));
5105     updateDAGForMaybeTailCall(MC);
5106     return nullptr;
5107   }
5108   case Intrinsic::memset: {
5109     const auto &MSI = cast<MemSetInst>(I);
5110     SDValue Op1 = getValue(I.getArgOperand(0));
5111     SDValue Op2 = getValue(I.getArgOperand(1));
5112     SDValue Op3 = getValue(I.getArgOperand(2));
5113     // @llvm.memset defines 0 and 1 to both mean no alignment.
5114     unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
5115     bool isVol = MSI.isVolatile();
5116     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5117     SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5118                                isTC, MachinePointerInfo(I.getArgOperand(0)));
5119     updateDAGForMaybeTailCall(MS);
5120     return nullptr;
5121   }
5122   case Intrinsic::memmove: {
5123     const auto &MMI = cast<MemMoveInst>(I);
5124     SDValue Op1 = getValue(I.getArgOperand(0));
5125     SDValue Op2 = getValue(I.getArgOperand(1));
5126     SDValue Op3 = getValue(I.getArgOperand(2));
5127     // @llvm.memmove defines 0 and 1 to both mean no alignment.
5128     unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1);
5129     unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1);
5130     unsigned Align = MinAlign(DstAlign, SrcAlign);
5131     bool isVol = MMI.isVolatile();
5132     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5133     // FIXME: Support passing different dest/src alignments to the memmove DAG
5134     // node.
5135     SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
5136                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
5137                                 MachinePointerInfo(I.getArgOperand(1)));
5138     updateDAGForMaybeTailCall(MM);
5139     return nullptr;
5140   }
5141   case Intrinsic::memcpy_element_unordered_atomic: {
5142     const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5143     SDValue Dst = getValue(MI.getRawDest());
5144     SDValue Src = getValue(MI.getRawSource());
5145     SDValue Length = getValue(MI.getLength());
5146 
5147     unsigned DstAlign = MI.getDestAlignment();
5148     unsigned SrcAlign = MI.getSourceAlignment();
5149     Type *LengthTy = MI.getLength()->getType();
5150     unsigned ElemSz = MI.getElementSizeInBytes();
5151     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5152     SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5153                                      SrcAlign, Length, LengthTy, ElemSz, isTC,
5154                                      MachinePointerInfo(MI.getRawDest()),
5155                                      MachinePointerInfo(MI.getRawSource()));
5156     updateDAGForMaybeTailCall(MC);
5157     return nullptr;
5158   }
5159   case Intrinsic::memmove_element_unordered_atomic: {
5160     auto &MI = cast<AtomicMemMoveInst>(I);
5161     SDValue Dst = getValue(MI.getRawDest());
5162     SDValue Src = getValue(MI.getRawSource());
5163     SDValue Length = getValue(MI.getLength());
5164 
5165     unsigned DstAlign = MI.getDestAlignment();
5166     unsigned SrcAlign = MI.getSourceAlignment();
5167     Type *LengthTy = MI.getLength()->getType();
5168     unsigned ElemSz = MI.getElementSizeInBytes();
5169     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5170     SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5171                                       SrcAlign, Length, LengthTy, ElemSz, isTC,
5172                                       MachinePointerInfo(MI.getRawDest()),
5173                                       MachinePointerInfo(MI.getRawSource()));
5174     updateDAGForMaybeTailCall(MC);
5175     return nullptr;
5176   }
5177   case Intrinsic::memset_element_unordered_atomic: {
5178     auto &MI = cast<AtomicMemSetInst>(I);
5179     SDValue Dst = getValue(MI.getRawDest());
5180     SDValue Val = getValue(MI.getValue());
5181     SDValue Length = getValue(MI.getLength());
5182 
5183     unsigned DstAlign = MI.getDestAlignment();
5184     Type *LengthTy = MI.getLength()->getType();
5185     unsigned ElemSz = MI.getElementSizeInBytes();
5186     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5187     SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5188                                      LengthTy, ElemSz, isTC,
5189                                      MachinePointerInfo(MI.getRawDest()));
5190     updateDAGForMaybeTailCall(MC);
5191     return nullptr;
5192   }
5193   case Intrinsic::dbg_addr:
5194   case Intrinsic::dbg_declare: {
5195     const auto &DI = cast<DbgVariableIntrinsic>(I);
5196     DILocalVariable *Variable = DI.getVariable();
5197     DIExpression *Expression = DI.getExpression();
5198     dropDanglingDebugInfo(Variable, Expression);
5199     assert(Variable && "Missing variable");
5200 
5201     // Check whether the address is missing, undef, or an unused non-argument.
5202     const Value *Address = DI.getVariableLocation();
5203     if (!Address || isa<UndefValue>(Address) ||
5204         (Address->use_empty() && !isa<Argument>(Address))) {
5205       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5206       return nullptr;
5207     }
5208 
5209     bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5210 
5211     // Check if this variable can be described by a frame index, typically
5212     // either as a static alloca or a byval parameter.
5213     int FI = std::numeric_limits<int>::max();
5214     if (const auto *AI =
5215             dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5216       if (AI->isStaticAlloca()) {
5217         auto I = FuncInfo.StaticAllocaMap.find(AI);
5218         if (I != FuncInfo.StaticAllocaMap.end())
5219           FI = I->second;
5220       }
5221     } else if (const auto *Arg = dyn_cast<Argument>(
5222                    Address->stripInBoundsConstantOffsets())) {
5223       FI = FuncInfo.getArgumentFrameIndex(Arg);
5224     }
5225 
5226     // llvm.dbg.addr is control dependent and always generates indirect
5227     // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5228     // the MachineFunction variable table.
5229     if (FI != std::numeric_limits<int>::max()) {
5230       if (Intrinsic == Intrinsic::dbg_addr) {
5231         SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5232             Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5233         DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5234       }
5235       return nullptr;
5236     }
5237 
5238     SDValue &N = NodeMap[Address];
5239     if (!N.getNode() && isa<Argument>(Address))
5240       // Check unused arguments map.
5241       N = UnusedArgNodeMap[Address];
5242     SDDbgValue *SDV;
5243     if (N.getNode()) {
5244       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5245         Address = BCI->getOperand(0);
5246       // Parameters are handled specially.
5247       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5248       if (isParameter && FINode) {
5249         // Byval parameter. We have a frame index at this point.
5250         SDV =
5251             DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5252                                       /*IsIndirect*/ true, dl, SDNodeOrder);
5253       } else if (isa<Argument>(Address)) {
5254         // Address is an argument, so try to emit its dbg value using
5255         // virtual register info from the FuncInfo.ValueMap.
5256         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5257         return nullptr;
5258       } else {
5259         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5260                               true, dl, SDNodeOrder);
5261       }
5262       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5263     } else {
5264       // If Address is an argument then try to emit its dbg value using
5265       // virtual register info from the FuncInfo.ValueMap.
5266       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5267                                     N)) {
5268         LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5269       }
5270     }
5271     return nullptr;
5272   }
5273   case Intrinsic::dbg_label: {
5274     const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5275     DILabel *Label = DI.getLabel();
5276     assert(Label && "Missing label");
5277 
5278     SDDbgLabel *SDV;
5279     SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5280     DAG.AddDbgLabel(SDV);
5281     return nullptr;
5282   }
5283   case Intrinsic::dbg_value: {
5284     const DbgValueInst &DI = cast<DbgValueInst>(I);
5285     assert(DI.getVariable() && "Missing variable");
5286 
5287     DILocalVariable *Variable = DI.getVariable();
5288     DIExpression *Expression = DI.getExpression();
5289     dropDanglingDebugInfo(Variable, Expression);
5290     const Value *V = DI.getValue();
5291     if (!V)
5292       return nullptr;
5293 
5294     SDDbgValue *SDV;
5295     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
5296       SDV = DAG.getConstantDbgValue(Variable, Expression, V, dl, SDNodeOrder);
5297       DAG.AddDbgValue(SDV, nullptr, false);
5298       return nullptr;
5299     }
5300 
5301     // Do not use getValue() in here; we don't want to generate code at
5302     // this point if it hasn't been done yet.
5303     SDValue N = NodeMap[V];
5304     if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
5305       N = UnusedArgNodeMap[V];
5306     if (N.getNode()) {
5307       if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, false, N))
5308         return nullptr;
5309       SDV = getDbgValue(N, Variable, Expression, dl, SDNodeOrder);
5310       DAG.AddDbgValue(SDV, N.getNode(), false);
5311       return nullptr;
5312     }
5313 
5314     // PHI nodes have already been selected, so we should know which VReg
5315     // the value is assigned to already.
5316     if (isa<PHINode>(V)) {
5317       auto VMI = FuncInfo.ValueMap.find(V);
5318       if (VMI != FuncInfo.ValueMap.end()) {
5319         unsigned Reg = VMI->second;
5320         // The PHI node may be split up into several MI PHI nodes (in
5321         // FunctionLoweringInfo::set).
5322         RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
5323                          V->getType(), None);
5324         if (RFV.occupiesMultipleRegs()) {
5325           unsigned Offset = 0;
5326           unsigned BitsToDescribe = 0;
5327           if (auto VarSize = Variable->getSizeInBits())
5328             BitsToDescribe = *VarSize;
5329           if (auto Fragment = Expression->getFragmentInfo())
5330             BitsToDescribe = Fragment->SizeInBits;
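               // Emit one fragment per register; e.g. (hypothetical) an i128
               // variable split across two 64-bit registers gets fragment
               // expressions covering bits [0, 64) and [64, 128).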
5331           for (auto RegAndSize : RFV.getRegsAndSizes()) {
5332             unsigned RegisterSize = RegAndSize.second;
5333             // Bail out if all bits are described already.
5334             if (Offset >= BitsToDescribe)
5335               break;
5336             unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
5337                 ? BitsToDescribe - Offset
5338                 : RegisterSize;
5339             auto FragmentExpr = DIExpression::createFragmentExpression(
5340                 Expression, Offset, FragmentSize);
5341             if (!FragmentExpr)
5342               continue;
5343             SDV = DAG.getVRegDbgValue(Variable, *FragmentExpr, RegAndSize.first,
5344                                       false, dl, SDNodeOrder);
5345             DAG.AddDbgValue(SDV, nullptr, false);
5346             Offset += RegisterSize;
5347           }
5348         } else {
5349           SDV = DAG.getVRegDbgValue(Variable, Expression, Reg, false, dl,
5350                                     SDNodeOrder);
5351           DAG.AddDbgValue(SDV, nullptr, false);
5352         }
5353         return nullptr;
5354       }
5355     }
5356 
5357     // TODO: When we get here we will either drop the dbg.value completely,
5358     // or try to move it forward by letting it dangle for a while. So we
5359     // should probably add an extra DbgValue to the DAG here, with a
5360     // reference to "noreg", to indicate that we have lost the debug
5361     // location for the variable.
5362 
5363     if (!V->use_empty()) {
5364       // Do not call getValue(V) yet, as we don't want to generate code.
5365       // Remember it for later.
5366       DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5367       return nullptr;
5368     }
5369 
5370     LLVM_DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
5371     LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
5372     return nullptr;
5373   }
5374 
5375   case Intrinsic::eh_typeid_for: {
5376     // Find the type id for the given typeinfo.
5377     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5378     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5379     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5380     setValue(&I, Res);
5381     return nullptr;
5382   }
5383 
5384   case Intrinsic::eh_return_i32:
5385   case Intrinsic::eh_return_i64:
5386     DAG.getMachineFunction().setCallsEHReturn(true);
5387     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5388                             MVT::Other,
5389                             getControlRoot(),
5390                             getValue(I.getArgOperand(0)),
5391                             getValue(I.getArgOperand(1))));
5392     return nullptr;
5393   case Intrinsic::eh_unwind_init:
5394     DAG.getMachineFunction().setCallsUnwindInit(true);
5395     return nullptr;
5396   case Intrinsic::eh_dwarf_cfa:
5397     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5398                              TLI.getPointerTy(DAG.getDataLayout()),
5399                              getValue(I.getArgOperand(0))));
5400     return nullptr;
5401   case Intrinsic::eh_sjlj_callsite: {
5402     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5403     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5404     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5405     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5406 
5407     MMI.setCurrentCallSite(CI->getZExtValue());
5408     return nullptr;
5409   }
5410   case Intrinsic::eh_sjlj_functioncontext: {
5411     // Get and store the index of the function context.
5412     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5413     AllocaInst *FnCtx =
5414       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5415     int FI = FuncInfo.StaticAllocaMap[FnCtx];
5416     MFI.setFunctionContextIndex(FI);
5417     return nullptr;
5418   }
5419   case Intrinsic::eh_sjlj_setjmp: {
5420     SDValue Ops[2];
5421     Ops[0] = getRoot();
5422     Ops[1] = getValue(I.getArgOperand(0));
5423     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5424                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
5425     setValue(&I, Op.getValue(0));
5426     DAG.setRoot(Op.getValue(1));
5427     return nullptr;
5428   }
5429   case Intrinsic::eh_sjlj_longjmp:
5430     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5431                             getRoot(), getValue(I.getArgOperand(0))));
5432     return nullptr;
5433   case Intrinsic::eh_sjlj_setup_dispatch:
5434     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5435                             getRoot()));
5436     return nullptr;
5437   case Intrinsic::masked_gather:
5438     visitMaskedGather(I);
5439     return nullptr;
5440   case Intrinsic::masked_load:
5441     visitMaskedLoad(I);
5442     return nullptr;
5443   case Intrinsic::masked_scatter:
5444     visitMaskedScatter(I);
5445     return nullptr;
5446   case Intrinsic::masked_store:
5447     visitMaskedStore(I);
5448     return nullptr;
5449   case Intrinsic::masked_expandload:
5450     visitMaskedLoad(I, true /* IsExpanding */);
5451     return nullptr;
5452   case Intrinsic::masked_compressstore:
5453     visitMaskedStore(I, true /* IsCompressing */);
5454     return nullptr;
5455   case Intrinsic::x86_mmx_pslli_w:
5456   case Intrinsic::x86_mmx_pslli_d:
5457   case Intrinsic::x86_mmx_pslli_q:
5458   case Intrinsic::x86_mmx_psrli_w:
5459   case Intrinsic::x86_mmx_psrli_d:
5460   case Intrinsic::x86_mmx_psrli_q:
5461   case Intrinsic::x86_mmx_psrai_w:
5462   case Intrinsic::x86_mmx_psrai_d: {
5463     SDValue ShAmt = getValue(I.getArgOperand(1));
5464     if (isa<ConstantSDNode>(ShAmt)) {
5465       visitTargetIntrinsic(I, Intrinsic);
5466       return nullptr;
5467     }
5468     unsigned NewIntrinsic = 0;
5469     EVT ShAmtVT = MVT::v2i32;
5470     switch (Intrinsic) {
5471     case Intrinsic::x86_mmx_pslli_w:
5472       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5473       break;
5474     case Intrinsic::x86_mmx_pslli_d:
5475       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5476       break;
5477     case Intrinsic::x86_mmx_pslli_q:
5478       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5479       break;
5480     case Intrinsic::x86_mmx_psrli_w:
5481       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5482       break;
5483     case Intrinsic::x86_mmx_psrli_d:
5484       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5485       break;
5486     case Intrinsic::x86_mmx_psrli_q:
5487       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5488       break;
5489     case Intrinsic::x86_mmx_psrai_w:
5490       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5491       break;
5492     case Intrinsic::x86_mmx_psrai_d:
5493       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5494       break;
5495     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5496     }
5497 
5498     // The vector shift intrinsics with scalar shift amounts use 32-bit
5499     // values, but the SSE2/MMX shift instructions read 64 bits. Set the
5500     // upper 32 bits to zero.
5501     // We must do this early because v2i32 is not a legal type.
5502     SDValue ShOps[2];
5503     ShOps[0] = ShAmt;
5504     ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5505     ShAmt = DAG.getBuildVector(ShAmtVT, sdl, ShOps);
5506     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5507     ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5508     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5509                        DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5510                        getValue(I.getArgOperand(0)), ShAmt);
5511     setValue(&I, Res);
5512     return nullptr;
5513   }
5514   case Intrinsic::powi:
5515     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5516                             getValue(I.getArgOperand(1)), DAG));
5517     return nullptr;
5518   case Intrinsic::log:
5519     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5520     return nullptr;
5521   case Intrinsic::log2:
5522     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5523     return nullptr;
5524   case Intrinsic::log10:
5525     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5526     return nullptr;
5527   case Intrinsic::exp:
5528     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5529     return nullptr;
5530   case Intrinsic::exp2:
5531     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5532     return nullptr;
5533   case Intrinsic::pow:
5534     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5535                            getValue(I.getArgOperand(1)), DAG, TLI));
5536     return nullptr;
5537   case Intrinsic::sqrt:
5538   case Intrinsic::fabs:
5539   case Intrinsic::sin:
5540   case Intrinsic::cos:
5541   case Intrinsic::floor:
5542   case Intrinsic::ceil:
5543   case Intrinsic::trunc:
5544   case Intrinsic::rint:
5545   case Intrinsic::nearbyint:
5546   case Intrinsic::round:
5547   case Intrinsic::canonicalize: {
5548     unsigned Opcode;
5549     switch (Intrinsic) {
5550     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5551     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
5552     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
5553     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
5554     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
5555     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
5556     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
5557     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
5558     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
5559     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5560     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
5561     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5562     }
5563 
5564     setValue(&I, DAG.getNode(Opcode, sdl,
5565                              getValue(I.getArgOperand(0)).getValueType(),
5566                              getValue(I.getArgOperand(0))));
5567     return nullptr;
5568   }
5569   case Intrinsic::minnum: {
5570     auto VT = getValue(I.getArgOperand(0)).getValueType();
5571     unsigned Opc =
5572         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT)
5573             ? ISD::FMINNAN
5574             : ISD::FMINNUM;
5575     setValue(&I, DAG.getNode(Opc, sdl, VT,
5576                              getValue(I.getArgOperand(0)),
5577                              getValue(I.getArgOperand(1))));
5578     return nullptr;
5579   }
5580   case Intrinsic::maxnum: {
5581     auto VT = getValue(I.getArgOperand(0)).getValueType();
5582     unsigned Opc =
5583         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT)
5584             ? ISD::FMAXNAN
5585             : ISD::FMAXNUM;
5586     setValue(&I, DAG.getNode(Opc, sdl, VT,
5587                              getValue(I.getArgOperand(0)),
5588                              getValue(I.getArgOperand(1))));
5589     return nullptr;
5590   }
5591   case Intrinsic::copysign:
5592     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5593                              getValue(I.getArgOperand(0)).getValueType(),
5594                              getValue(I.getArgOperand(0)),
5595                              getValue(I.getArgOperand(1))));
5596     return nullptr;
5597   case Intrinsic::fma:
5598     setValue(&I, DAG.getNode(ISD::FMA, sdl,
5599                              getValue(I.getArgOperand(0)).getValueType(),
5600                              getValue(I.getArgOperand(0)),
5601                              getValue(I.getArgOperand(1)),
5602                              getValue(I.getArgOperand(2))));
5603     return nullptr;
5604   case Intrinsic::experimental_constrained_fadd:
5605   case Intrinsic::experimental_constrained_fsub:
5606   case Intrinsic::experimental_constrained_fmul:
5607   case Intrinsic::experimental_constrained_fdiv:
5608   case Intrinsic::experimental_constrained_frem:
5609   case Intrinsic::experimental_constrained_fma:
5610   case Intrinsic::experimental_constrained_sqrt:
5611   case Intrinsic::experimental_constrained_pow:
5612   case Intrinsic::experimental_constrained_powi:
5613   case Intrinsic::experimental_constrained_sin:
5614   case Intrinsic::experimental_constrained_cos:
5615   case Intrinsic::experimental_constrained_exp:
5616   case Intrinsic::experimental_constrained_exp2:
5617   case Intrinsic::experimental_constrained_log:
5618   case Intrinsic::experimental_constrained_log10:
5619   case Intrinsic::experimental_constrained_log2:
5620   case Intrinsic::experimental_constrained_rint:
5621   case Intrinsic::experimental_constrained_nearbyint:
5622     visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
5623     return nullptr;
5624   case Intrinsic::fmuladd: {
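         // fmuladd may legally be lowered either to a single fused FMA node
         // or, when fusion is disallowed or unprofitable, to separate
         // FMUL + FADD with intermediate rounding; the intrinsic permits both.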
5625     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5626     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5627         TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5628       setValue(&I, DAG.getNode(ISD::FMA, sdl,
5629                                getValue(I.getArgOperand(0)).getValueType(),
5630                                getValue(I.getArgOperand(0)),
5631                                getValue(I.getArgOperand(1)),
5632                                getValue(I.getArgOperand(2))));
5633     } else {
5634       // TODO: Intrinsic calls should have fast-math-flags.
5635       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5636                                 getValue(I.getArgOperand(0)).getValueType(),
5637                                 getValue(I.getArgOperand(0)),
5638                                 getValue(I.getArgOperand(1)));
5639       SDValue Add = DAG.getNode(ISD::FADD, sdl,
5640                                 getValue(I.getArgOperand(0)).getValueType(),
5641                                 Mul,
5642                                 getValue(I.getArgOperand(2)));
5643       setValue(&I, Add);
5644     }
5645     return nullptr;
5646   }
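       // @llvm.convert.to.fp16 returns the raw i16 storage bits of the half
       // value, so the operand is rounded to f16 and then bitcast to i16 (the
       // inverse applies for @llvm.convert.from.fp16 below).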
5647   case Intrinsic::convert_to_fp16:
5648     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5649                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5650                                          getValue(I.getArgOperand(0)),
5651                                          DAG.getTargetConstant(0, sdl,
5652                                                                MVT::i32))));
5653     return nullptr;
5654   case Intrinsic::convert_from_fp16:
5655     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5656                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5657                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5658                                          getValue(I.getArgOperand(0)))));
5659     return nullptr;
5660   case Intrinsic::pcmarker: {
5661     SDValue Tmp = getValue(I.getArgOperand(0));
5662     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5663     return nullptr;
5664   }
5665   case Intrinsic::readcyclecounter: {
5666     SDValue Op = getRoot();
5667     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5668                       DAG.getVTList(MVT::i64, MVT::Other), Op);
5669     setValue(&I, Res);
5670     DAG.setRoot(Res.getValue(1));
5671     return nullptr;
5672   }
5673   case Intrinsic::bitreverse:
5674     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5675                              getValue(I.getArgOperand(0)).getValueType(),
5676                              getValue(I.getArgOperand(0))));
5677     return nullptr;
5678   case Intrinsic::bswap:
5679     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5680                              getValue(I.getArgOperand(0)).getValueType(),
5681                              getValue(I.getArgOperand(0))));
5682     return nullptr;
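       // For cttz/ctlz the constant i1 second operand states whether a zero
       // input is undefined, e.g. @llvm.cttz.i32(i32 %x, i1 true) selects the
       // *_ZERO_UNDEF form, which targets can often lower more cheaply.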
5683   case Intrinsic::cttz: {
5684     SDValue Arg = getValue(I.getArgOperand(0));
5685     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5686     EVT Ty = Arg.getValueType();
5687     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5688                              sdl, Ty, Arg));
5689     return nullptr;
5690   }
5691   case Intrinsic::ctlz: {
5692     SDValue Arg = getValue(I.getArgOperand(0));
5693     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5694     EVT Ty = Arg.getValueType();
5695     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5696                              sdl, Ty, Arg));
5697     return nullptr;
5698   }
5699   case Intrinsic::ctpop: {
5700     SDValue Arg = getValue(I.getArgOperand(0));
5701     EVT Ty = Arg.getValueType();
5702     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5703     return nullptr;
5704   }
5705   case Intrinsic::fshl:
5706   case Intrinsic::fshr: {
5707     bool IsFSHL = Intrinsic == Intrinsic::fshl;
5708     SDValue X = getValue(I.getArgOperand(0));
5709     SDValue Y = getValue(I.getArgOperand(1));
5710     SDValue Z = getValue(I.getArgOperand(2));
5711     EVT VT = X.getValueType();
5712     SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
5713     SDValue Zero = DAG.getConstant(0, sdl, VT);
5714     SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
5715 
5716     // When X == Y, this is a rotate. If the data type has a power-of-2 size, we
5717     // avoid the select that is necessary in the general case to filter out
5718     // the 0-shift possibility that leads to UB.
5719     if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
5720       // TODO: This should also be done if the operation is custom, but we have
5721       // to make sure targets are handling the modulo shift amount as expected.
5722       auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
5723       if (TLI.isOperationLegal(RotateOpcode, VT)) {
5724         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
5725         return nullptr;
5726       }
5727 
5728       // Some targets only rotate one way. Try the opposite direction.
5729       RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
5730       if (TLI.isOperationLegal(RotateOpcode, VT)) {
5731         // Negate the shift amount because it is safe to ignore the high bits.
5732         SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5733         setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
5734         return nullptr;
5735       }
5736 
5737       // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
5738       // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
5739       SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
5740       SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
5741       SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
5742       SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
5743       setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
5744       return nullptr;
5745     }
5746 
5747     // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
5748     // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
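         // For example, fshl on i8 operands with Z = 3 computes
         // (X << 3) | (Y >> 5), i.e. the top 8 bits of the 16-bit
         // concatenation X:Y shifted left by 3.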
5749     SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
5750     SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
5751     SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
5752     SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
5753 
5754     // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
5755     // and that is undefined. We must compare and select to avoid UB.
5756     EVT CCVT = MVT::i1;
5757     if (VT.isVector())
5758       CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
5759 
5760     // For fshl, 0-shift returns the 1st arg (X).
5761     // For fshr, 0-shift returns the 2nd arg (Y).
5762     SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
5763     setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
5764     return nullptr;
5765   }
5766   case Intrinsic::stacksave: {
5767     SDValue Op = getRoot();
5768     Res = DAG.getNode(
5769         ISD::STACKSAVE, sdl,
5770         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
5771     setValue(&I, Res);
5772     DAG.setRoot(Res.getValue(1));
5773     return nullptr;
5774   }
5775   case Intrinsic::stackrestore:
5776     Res = getValue(I.getArgOperand(0));
5777     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5778     return nullptr;
5779   case Intrinsic::get_dynamic_area_offset: {
5780     SDValue Op = getRoot();
5781     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5782     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
5783     // The result type of @llvm.get.dynamic.area.offset must match the
5784     // target's pointer type.
5785     if (PtrTy != ResTy)
5786       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
5787                          " intrinsic!");
5788     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
5789                       Op);
5790     DAG.setRoot(Op);
5791     setValue(&I, Res);
5792     return nullptr;
5793   }
5794   case Intrinsic::stackguard: {
5795     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5796     MachineFunction &MF = DAG.getMachineFunction();
5797     const Module &M = *MF.getFunction().getParent();
5798     SDValue Chain = getRoot();
5799     if (TLI.useLoadStackGuardNode()) {
5800       Res = getLoadStackGuard(DAG, sdl, Chain);
5801     } else {
5802       const Value *Global = TLI.getSDagStackGuard(M);
5803       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
5804       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
5805                         MachinePointerInfo(Global, 0), Align,
5806                         MachineMemOperand::MOVolatile);
5807     }
5808     if (TLI.useStackGuardXorFP())
5809       Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
5810     DAG.setRoot(Chain);
5811     setValue(&I, Res);
5812     return nullptr;
5813   }
5814   case Intrinsic::stackprotector: {
5815     // Emit code into the DAG to store the stack guard onto the stack.
5816     MachineFunction &MF = DAG.getMachineFunction();
5817     MachineFrameInfo &MFI = MF.getFrameInfo();
5818     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5819     SDValue Src, Chain = getRoot();
5820 
5821     if (TLI.useLoadStackGuardNode())
5822       Src = getLoadStackGuard(DAG, sdl, Chain);
5823     else
5824       Src = getValue(I.getArgOperand(0));   // The guard's value.
5825 
5826     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5827 
5828     int FI = FuncInfo.StaticAllocaMap[Slot];
5829     MFI.setStackProtectorIndex(FI);
5830 
5831     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5832 
5833     // Store the stack protector onto the stack.
5834     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
5835                                                  DAG.getMachineFunction(), FI),
5836                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
5837     setValue(&I, Res);
5838     DAG.setRoot(Res);
5839     return nullptr;
5840   }
5841   case Intrinsic::objectsize: {
5842     // If we don't know by now, we're never going to know.
5843     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5844 
5845     assert(CI && "Non-constant type in __builtin_object_size?");
5846 
5847     SDValue Arg = getValue(I.getCalledValue());
5848     EVT Ty = Arg.getValueType();
5849 
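         // The second argument is the 'min' flag: false (zero) selects the
         // "unknown maximum" answer -1, true selects the conservative
         // minimum 0.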
5850     if (CI->isZero())
5851       Res = DAG.getConstant(-1ULL, sdl, Ty);
5852     else
5853       Res = DAG.getConstant(0, sdl, Ty);
5854 
5855     setValue(&I, Res);
5856     return nullptr;
5857   }
5858   case Intrinsic::annotation:
5859   case Intrinsic::ptr_annotation:
5860   case Intrinsic::launder_invariant_group:
5861   case Intrinsic::strip_invariant_group:
5862     // Drop the intrinsic, but forward the value
5863     setValue(&I, getValue(I.getOperand(0)));
5864     return nullptr;
5865   case Intrinsic::assume:
5866   case Intrinsic::var_annotation:
5867   case Intrinsic::sideeffect:
5868     // Discard annotate attributes, assumptions, and artificial side-effects.
5869     return nullptr;
5870 
5871   case Intrinsic::codeview_annotation: {
5872     // Emit a label associated with this metadata.
5873     MachineFunction &MF = DAG.getMachineFunction();
5874     MCSymbol *Label =
5875         MF.getMMI().getContext().createTempSymbol("annotation", true);
5876     Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
5877     MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
5878     Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
5879     DAG.setRoot(Res);
5880     return nullptr;
5881   }
5882 
5883   case Intrinsic::init_trampoline: {
5884     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5885 
5886     SDValue Ops[6];
5887     Ops[0] = getRoot();
5888     Ops[1] = getValue(I.getArgOperand(0));
5889     Ops[2] = getValue(I.getArgOperand(1));
5890     Ops[3] = getValue(I.getArgOperand(2));
5891     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5892     Ops[5] = DAG.getSrcValue(F);
5893 
5894     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5895 
5896     DAG.setRoot(Res);
5897     return nullptr;
5898   }
5899   case Intrinsic::adjust_trampoline:
5900     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5901                              TLI.getPointerTy(DAG.getDataLayout()),
5902                              getValue(I.getArgOperand(0))));
5903     return nullptr;
5904   case Intrinsic::gcroot: {
5905     assert(DAG.getMachineFunction().getFunction().hasGC() &&
5906            "only valid in functions with gc specified, enforced by Verifier");
5907     assert(GFI && "implied by previous");
5908     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5909     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5910 
5911     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5912     GFI->addStackRoot(FI->getIndex(), TypeMap);
5913     return nullptr;
5914   }
5915   case Intrinsic::gcread:
5916   case Intrinsic::gcwrite:
5917     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5918   case Intrinsic::flt_rounds:
5919     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
5920     return nullptr;
5921 
5922   case Intrinsic::expect:
5923     // Just replace __builtin_expect(exp, c) with EXP.
5924     setValue(&I, getValue(I.getArgOperand(0)));
5925     return nullptr;
5926 
5927   case Intrinsic::debugtrap:
5928   case Intrinsic::trap: {
5929     StringRef TrapFuncName =
5930         I.getAttributes()
5931             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
5932             .getValueAsString();
5933     if (TrapFuncName.empty()) {
5934       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5935         ISD::TRAP : ISD::DEBUGTRAP;
5936       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
5937       return nullptr;
5938     }
5939     TargetLowering::ArgListTy Args;
5940 
5941     TargetLowering::CallLoweringInfo CLI(DAG);
5942     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
5943         CallingConv::C, I.getType(),
5944         DAG.getExternalSymbol(TrapFuncName.data(),
5945                               TLI.getPointerTy(DAG.getDataLayout())),
5946         std::move(Args));
5947 
5948     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5949     DAG.setRoot(Result.second);
5950     return nullptr;
5951   }
5952 
5953   case Intrinsic::uadd_with_overflow:
5954   case Intrinsic::sadd_with_overflow:
5955   case Intrinsic::usub_with_overflow:
5956   case Intrinsic::ssub_with_overflow:
5957   case Intrinsic::umul_with_overflow:
5958   case Intrinsic::smul_with_overflow: {
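         // Each of these returns a {result, overflow} pair; e.g.
         // @llvm.uadd.with.overflow.i32 yields {i32, i1}, matching the
         // (VT, i1) value list built below.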
5959     ISD::NodeType Op;
5960     switch (Intrinsic) {
5961     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5962     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5963     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5964     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5965     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5966     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5967     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5968     }
5969     SDValue Op1 = getValue(I.getArgOperand(0));
5970     SDValue Op2 = getValue(I.getArgOperand(1));
5971 
5972     SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
5973     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
5974     return nullptr;
5975   }
5976   case Intrinsic::prefetch: {
5977     SDValue Ops[5];
5978     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
5979     auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
5980     Ops[0] = DAG.getRoot();
5981     Ops[1] = getValue(I.getArgOperand(0));
5982     Ops[2] = getValue(I.getArgOperand(1));
5983     Ops[3] = getValue(I.getArgOperand(2));
5984     Ops[4] = getValue(I.getArgOperand(3));
5985     SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5986                                              DAG.getVTList(MVT::Other), Ops,
5987                                              EVT::getIntegerVT(*Context, 8),
5988                                              MachinePointerInfo(I.getArgOperand(0)),
5989                                              0, /* align */
5990                                              Flags);
5991 
5992     // Chain the prefetch in parallel with any pending loads, to stay out
5993     // of the way of later optimizations.
5994     PendingLoads.push_back(Result);
5995     Result = getRoot();
5996     DAG.setRoot(Result);
5997     return nullptr;
5998   }
5999   case Intrinsic::lifetime_start:
6000   case Intrinsic::lifetime_end: {
6001     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6002     // Stack coloring is not enabled at -O0; discard region information.
6003     if (TM.getOptLevel() == CodeGenOpt::None)
6004       return nullptr;
6005 
6006     SmallVector<Value *, 4> Allocas;
6007     GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
6008 
6009     for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
6010            E = Allocas.end(); Object != E; ++Object) {
6011       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
6012 
6013       // Could not find an Alloca.
6014       if (!LifetimeObject)
6015         continue;
6016 
6017       // First check that the Alloca is static, otherwise it won't have a
6018       // valid frame index.
6019       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6020       if (SI == FuncInfo.StaticAllocaMap.end())
6021         return nullptr;
6022 
6023       int FI = SI->second;
6024 
6025       SDValue Ops[2];
6026       Ops[0] = getRoot();
6027       Ops[1] =
6028           DAG.getFrameIndex(FI, TLI.getFrameIndexTy(DAG.getDataLayout()), true);
6029       unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
6030 
6031       Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
6032       DAG.setRoot(Res);
6033     }
6034     return nullptr;
6035   }
6036   case Intrinsic::invariant_start:
6037     // Discard region information.
6038     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6039     return nullptr;
6040   case Intrinsic::invariant_end:
6041     // Discard region information.
6042     return nullptr;
6043   case Intrinsic::clear_cache:
6044     return TLI.getClearCacheBuiltinName();
6045   case Intrinsic::donothing:
6046     // ignore
6047     return nullptr;
6048   case Intrinsic::experimental_stackmap:
6049     visitStackmap(I);
6050     return nullptr;
6051   case Intrinsic::experimental_patchpoint_void:
6052   case Intrinsic::experimental_patchpoint_i64:
6053     visitPatchpoint(&I);
6054     return nullptr;
6055   case Intrinsic::experimental_gc_statepoint:
6056     LowerStatepoint(ImmutableStatepoint(&I));
6057     return nullptr;
6058   case Intrinsic::experimental_gc_result:
6059     visitGCResult(cast<GCResultInst>(I));
6060     return nullptr;
6061   case Intrinsic::experimental_gc_relocate:
6062     visitGCRelocate(cast<GCRelocateInst>(I));
6063     return nullptr;
6064   case Intrinsic::instrprof_increment:
6065     llvm_unreachable("instrprof failed to lower an increment");
6066   case Intrinsic::instrprof_value_profile:
6067     llvm_unreachable("instrprof failed to lower a value profiling call");
6068   case Intrinsic::localescape: {
6069     MachineFunction &MF = DAG.getMachineFunction();
6070     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6071 
6072     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6073     // is the same on all targets.
6074     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6075       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6076       if (isa<ConstantPointerNull>(Arg))
6077         continue; // Skip null pointers. They represent a hole in index space.
6078       AllocaInst *Slot = cast<AllocaInst>(Arg);
6079       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6080              "can only escape static allocas");
6081       int FI = FuncInfo.StaticAllocaMap[Slot];
6082       MCSymbol *FrameAllocSym =
6083           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6084               GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6085       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6086               TII->get(TargetOpcode::LOCAL_ESCAPE))
6087           .addSym(FrameAllocSym)
6088           .addFrameIndex(FI);
6089     }
6090 
6091     return nullptr;
6092   }
6093 
6094   case Intrinsic::localrecover: {
6095     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6096     MachineFunction &MF = DAG.getMachineFunction();
6097     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
6098 
6099     // Get the symbol that defines the frame offset.
6100     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6101     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6102     unsigned IdxVal =
6103         unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6104     MCSymbol *FrameAllocSym =
6105         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6106             GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6107 
6108     // Create a MCSymbol for the label to avoid any target lowering
6109     // that would make this PC relative.
6110     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6111     SDValue OffsetVal =
6112         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6113 
6114     // Add the offset to the FP.
6115     Value *FP = I.getArgOperand(1);
6116     SDValue FPVal = getValue(FP);
6117     SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
6118     setValue(&I, Add);
6119 
6120     return nullptr;
6121   }
6122 
6123   case Intrinsic::eh_exceptionpointer:
6124   case Intrinsic::eh_exceptioncode: {
6125     // Get the exception pointer vreg, copy from it, and resize it to fit.
6126     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6127     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6128     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6129     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6130     SDValue N =
6131         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6132     if (Intrinsic == Intrinsic::eh_exceptioncode)
6133       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6134     setValue(&I, N);
6135     return nullptr;
6136   }
6137   case Intrinsic::xray_customevent: {
6138     // Here we want to make sure that the intrinsic behaves as if it had a
6139     // specific calling convention; for now this is only supported on x86_64.
6140     // FIXME: Support other platforms later.
6141     const auto &Triple = DAG.getTarget().getTargetTriple();
6142     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6143       return nullptr;
6144 
6145     SDLoc DL = getCurSDLoc();
6146     SmallVector<SDValue, 8> Ops;
6147 
6148     // We want to say that we always want the arguments in registers.
6149     SDValue LogEntryVal = getValue(I.getArgOperand(0));
6150     SDValue StrSizeVal = getValue(I.getArgOperand(1));
6151     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6152     SDValue Chain = getRoot();
6153     Ops.push_back(LogEntryVal);
6154     Ops.push_back(StrSizeVal);
6155     Ops.push_back(Chain);
6156 
6157     // We need to enforce the calling convention for the callsite so that
6158     // argument ordering is enforced correctly, and so that register
6159     // allocation can see which registers are assumed clobbered and must be
6160     // preserved across calls to the intrinsic.
6161     MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6162                                            DL, NodeTys, Ops);
6163     SDValue patchableNode = SDValue(MN, 0);
6164     DAG.setRoot(patchableNode);
6165     setValue(&I, patchableNode);
6166     return nullptr;
6167   }
6168   case Intrinsic::xray_typedevent: {
6169     // Here we want to make sure that the intrinsic behaves as if it had a
6170     // specific calling convention; for now this is only supported on x86_64.
6171     // FIXME: Support other platforms later.
6172     const auto &Triple = DAG.getTarget().getTargetTriple();
6173     if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6174       return nullptr;
6175 
6176     SDLoc DL = getCurSDLoc();
6177     SmallVector<SDValue, 8> Ops;
6178 
6179     // We want to say that we always want the arguments in registers.
6180     // It's unclear to me how manipulating the selection DAG here forces callers
6181     // to provide arguments in registers instead of on the stack.
6182     SDValue LogTypeId = getValue(I.getArgOperand(0));
6183     SDValue LogEntryVal = getValue(I.getArgOperand(1));
6184     SDValue StrSizeVal = getValue(I.getArgOperand(2));
6185     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6186     SDValue Chain = getRoot();
6187     Ops.push_back(LogTypeId);
6188     Ops.push_back(LogEntryVal);
6189     Ops.push_back(StrSizeVal);
6190     Ops.push_back(Chain);
6191 
6192     // We need to enforce the calling convention for the callsite so that
6193     // argument ordering is enforced correctly, and so that register
6194     // allocation can see which registers are assumed clobbered and must be
6195     // preserved across calls to the intrinsic.
6196     MachineSDNode *MN = DAG.getMachineNode(
6197         TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6198     SDValue patchableNode = SDValue(MN, 0);
6199     DAG.setRoot(patchableNode);
6200     setValue(&I, patchableNode);
6201     return nullptr;
6202   }
6203   case Intrinsic::experimental_deoptimize:
6204     LowerDeoptimizeCall(&I);
6205     return nullptr;
6206 
6207   case Intrinsic::experimental_vector_reduce_fadd:
6208   case Intrinsic::experimental_vector_reduce_fmul:
6209   case Intrinsic::experimental_vector_reduce_add:
6210   case Intrinsic::experimental_vector_reduce_mul:
6211   case Intrinsic::experimental_vector_reduce_and:
6212   case Intrinsic::experimental_vector_reduce_or:
6213   case Intrinsic::experimental_vector_reduce_xor:
6214   case Intrinsic::experimental_vector_reduce_smax:
6215   case Intrinsic::experimental_vector_reduce_smin:
6216   case Intrinsic::experimental_vector_reduce_umax:
6217   case Intrinsic::experimental_vector_reduce_umin:
6218   case Intrinsic::experimental_vector_reduce_fmax:
6219   case Intrinsic::experimental_vector_reduce_fmin:
6220     visitVectorReduce(I, Intrinsic);
6221     return nullptr;
6222 
6223   case Intrinsic::icall_branch_funnel: {
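         // The ICALL_BRANCH_FUNNEL node built below takes the chain, the
         // value to compare, the common base global, and then (constant
         // offset, target address) pairs sorted in ascending offset order.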
6224     SmallVector<SDValue, 16> Ops;
6225     Ops.push_back(DAG.getRoot());
6226     Ops.push_back(getValue(I.getArgOperand(0)));
6227 
6228     int64_t Offset;
6229     auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6230         I.getArgOperand(1), Offset, DAG.getDataLayout()));
6231     if (!Base)
6232       report_fatal_error(
6233           "llvm.icall.branch.funnel operand must be a GlobalValue");
6234     Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6235 
6236     struct BranchFunnelTarget {
6237       int64_t Offset;
6238       SDValue Target;
6239     };
6240     SmallVector<BranchFunnelTarget, 8> Targets;
6241 
6242     for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6243       auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6244           I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6245       if (ElemBase != Base)
6246         report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6247                            "to the same GlobalValue");
6248 
6249       SDValue Val = getValue(I.getArgOperand(Op + 1));
6250       auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6251       if (!GA)
6252         report_fatal_error(
6253             "llvm.icall.branch.funnel operand must be a GlobalValue");
6254       Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6255                                      GA->getGlobal(), getCurSDLoc(),
6256                                      Val.getValueType(), GA->getOffset())});
6257     }
6258     llvm::sort(Targets.begin(), Targets.end(),
6259                [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6260                  return T1.Offset < T2.Offset;
6261                });
6262 
6263     for (auto &T : Targets) {
6264       Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6265       Ops.push_back(T.Target);
6266     }
6267 
6268     SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6269                                  getCurSDLoc(), MVT::Other, Ops),
6270               0);
6271     DAG.setRoot(N);
6272     setValue(&I, N);
6273     HasTailCall = true;
6274     return nullptr;
6275   }
6276 
6277   case Intrinsic::wasm_landingpad_index: {
6278     // TODO: Store the landing pad index in a map, to be used later when
6279     // generating LSDA information.
6280     return nullptr;
6281   }
6282   }
6283 }
6284 
6285 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6286     const ConstrainedFPIntrinsic &FPI) {
6287   SDLoc sdl = getCurSDLoc();
6288   unsigned Opcode;
6289   switch (FPI.getIntrinsicID()) {
6290   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6291   case Intrinsic::experimental_constrained_fadd:
6292     Opcode = ISD::STRICT_FADD;
6293     break;
6294   case Intrinsic::experimental_constrained_fsub:
6295     Opcode = ISD::STRICT_FSUB;
6296     break;
6297   case Intrinsic::experimental_constrained_fmul:
6298     Opcode = ISD::STRICT_FMUL;
6299     break;
6300   case Intrinsic::experimental_constrained_fdiv:
6301     Opcode = ISD::STRICT_FDIV;
6302     break;
6303   case Intrinsic::experimental_constrained_frem:
6304     Opcode = ISD::STRICT_FREM;
6305     break;
6306   case Intrinsic::experimental_constrained_fma:
6307     Opcode = ISD::STRICT_FMA;
6308     break;
6309   case Intrinsic::experimental_constrained_sqrt:
6310     Opcode = ISD::STRICT_FSQRT;
6311     break;
6312   case Intrinsic::experimental_constrained_pow:
6313     Opcode = ISD::STRICT_FPOW;
6314     break;
6315   case Intrinsic::experimental_constrained_powi:
6316     Opcode = ISD::STRICT_FPOWI;
6317     break;
6318   case Intrinsic::experimental_constrained_sin:
6319     Opcode = ISD::STRICT_FSIN;
6320     break;
6321   case Intrinsic::experimental_constrained_cos:
6322     Opcode = ISD::STRICT_FCOS;
6323     break;
6324   case Intrinsic::experimental_constrained_exp:
6325     Opcode = ISD::STRICT_FEXP;
6326     break;
6327   case Intrinsic::experimental_constrained_exp2:
6328     Opcode = ISD::STRICT_FEXP2;
6329     break;
6330   case Intrinsic::experimental_constrained_log:
6331     Opcode = ISD::STRICT_FLOG;
6332     break;
6333   case Intrinsic::experimental_constrained_log10:
6334     Opcode = ISD::STRICT_FLOG10;
6335     break;
6336   case Intrinsic::experimental_constrained_log2:
6337     Opcode = ISD::STRICT_FLOG2;
6338     break;
6339   case Intrinsic::experimental_constrained_rint:
6340     Opcode = ISD::STRICT_FRINT;
6341     break;
6342   case Intrinsic::experimental_constrained_nearbyint:
6343     Opcode = ISD::STRICT_FNEARBYINT;
6344     break;
6345   }
6346   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6347   SDValue Chain = getRoot();
6348   SmallVector<EVT, 4> ValueVTs;
6349   ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
6350   ValueVTs.push_back(MVT::Other); // Out chain
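       // Strict FP nodes produce a (value, chain) pair; the chain keeps them
       // ordered with respect to other operations that may read or modify the
       // floating-point environment. For example,
       // @llvm.experimental.constrained.fadd becomes a STRICT_FADD whose
       // second result is the out-chain installed as the new root below.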
6351 
6352   SDVTList VTs = DAG.getVTList(ValueVTs);
6353   SDValue Result;
6354   if (FPI.isUnaryOp())
6355     Result = DAG.getNode(Opcode, sdl, VTs,
6356                          { Chain, getValue(FPI.getArgOperand(0)) });
6357   else if (FPI.isTernaryOp())
6358     Result = DAG.getNode(Opcode, sdl, VTs,
6359                          { Chain, getValue(FPI.getArgOperand(0)),
6360                                   getValue(FPI.getArgOperand(1)),
6361                                   getValue(FPI.getArgOperand(2)) });
6362   else
6363     Result = DAG.getNode(Opcode, sdl, VTs,
6364                          { Chain, getValue(FPI.getArgOperand(0)),
6365                            getValue(FPI.getArgOperand(1))  });
6366 
6367   assert(Result.getNode()->getNumValues() == 2);
6368   SDValue OutChain = Result.getValue(1);
6369   DAG.setRoot(OutChain);
6370   SDValue FPResult = Result.getValue(0);
6371   setValue(&FPI, FPResult);
6372 }
6373 
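// Sketch of the EH bracketing performed below (an illustration, not part of
// the original source): for
//   invoke void @f() to label %cont unwind label %lpad
// the lowered call ends up between two EH_LABEL nodes,
//   EH_LABEL <BeginLabel>; call @f; EH_LABEL <EndLabel>
// and the [BeginLabel, EndLabel) range is recorded (via MF.addInvoke or
// WinEHFuncInfo::addIPToStateRange) so the unwinder can map the call site
// back to %lpad.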
6374 std::pair<SDValue, SDValue>
6375 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
6376                                     const BasicBlock *EHPadBB) {
6377   MachineFunction &MF = DAG.getMachineFunction();
6378   MachineModuleInfo &MMI = MF.getMMI();
6379   MCSymbol *BeginLabel = nullptr;
6380 
6381   if (EHPadBB) {
6382     // Insert a label before the invoke call to mark the try range.  This can be
6383     // used to detect deletion of the invoke via the MachineModuleInfo.
6384     BeginLabel = MMI.getContext().createTempSymbol();
6385 
6386     // For SjLj, keep track of which landing pads go with which invokes
6387     // so as to maintain the ordering of pads in the LSDA.
6388     unsigned CallSiteIndex = MMI.getCurrentCallSite();
6389     if (CallSiteIndex) {
6390       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
6391       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
6392 
6393       // Now that the call site is handled, stop tracking it.
6394       MMI.setCurrentCallSite(0);
6395     }
6396 
6397     // Both PendingLoads and PendingExports must be flushed here;
6398     // this call might not return.
6399     (void)getRoot();
6400     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
6401 
6402     CLI.setChain(getRoot());
6403   }
6404   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6405   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6406 
6407   assert((CLI.IsTailCall || Result.second.getNode()) &&
6408          "Non-null chain expected with non-tail call!");
6409   assert((Result.second.getNode() || !Result.first.getNode()) &&
6410          "Null value expected with tail call!");
6411 
6412   if (!Result.second.getNode()) {
6413     // As a special case, a null chain means that a tail call has been emitted
6414     // and the DAG root is already updated.
6415     HasTailCall = true;
6416 
6417     // Since there's no actual continuation from this block, nothing can be
6418     // relying on us setting vregs for them.
6419     PendingExports.clear();
6420   } else {
6421     DAG.setRoot(Result.second);
6422   }
6423 
6424   if (EHPadBB) {
6425     // Insert a label at the end of the invoke call to mark the try range.  This
6426     // can be used to detect deletion of the invoke via the MachineModuleInfo.
6427     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
6428     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
6429 
6430     // Inform MachineModuleInfo of range.
6431     auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
6432     // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
6433     // outlined funclets or their LSDA info style.
6434     if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
6435       assert(CLI.CS);
6436       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
6437       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
6438                                 BeginLabel, EndLabel);
6439     } else {
6440       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
6441     }
6442   }
6443 
6444   return Result;
6445 }
6446 
6447 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
6448                                       bool isTailCall,
6449                                       const BasicBlock *EHPadBB) {
6450   auto &DL = DAG.getDataLayout();
6451   FunctionType *FTy = CS.getFunctionType();
6452   Type *RetTy = CS.getType();
6453 
6454   TargetLowering::ArgListTy Args;
6455   Args.reserve(CS.arg_size());
6456 
6457   const Value *SwiftErrorVal = nullptr;
6458   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6459 
6460   // We can't tail call inside a function with a swifterror argument. Lowering
6461   // does not support this yet. The value would have to be moved into the
6462   // swifterror register before the call.
6463   auto *Caller = CS.getInstruction()->getParent()->getParent();
6464   if (TLI.supportSwiftError() &&
6465       Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
6466     isTailCall = false;
6467 
6468   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
6469        i != e; ++i) {
6470     TargetLowering::ArgListEntry Entry;
6471     const Value *V = *i;
6472 
6473     // Skip empty types
6474     if (V->getType()->isEmptyTy())
6475       continue;
6476 
6477     SDValue ArgNode = getValue(V);
6478     Entry.Node = ArgNode; Entry.Ty = V->getType();
6479 
6480     Entry.setAttributes(&CS, i - CS.arg_begin());
6481 
6482     // Use swifterror virtual register as input to the call.
6483     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
6484       SwiftErrorVal = V;
6485       // Find the virtual register for the actual swifterror argument
6486       // and use it instead of the Value.
6487       Entry.Node = DAG.getRegister(FuncInfo
6488                                        .getOrCreateSwiftErrorVRegUseAt(
6489                                            CS.getInstruction(), FuncInfo.MBB, V)
6490                                        .first,
6491                                    EVT(TLI.getPointerTy(DL)));
6492     }
6493 
6494     Args.push_back(Entry);
6495 
6496     // If we have an explicit sret argument that is an Instruction (i.e., it
6497     // might point to function-local memory), we can't meaningfully tail-call.
6498     if (Entry.IsSRet && isa<Instruction>(V))
6499       isTailCall = false;
6500   }
6501 
6502   // Check if target-independent constraints permit a tail call here.
6503   // Target-dependent constraints are checked within TLI->LowerCallTo.
6504   if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
6505     isTailCall = false;
6506 
6507   // Disable tail calls if there is a swifterror argument. Targets have not
6508   // been updated to support tail calls.
6509   if (TLI.supportSwiftError() && SwiftErrorVal)
6510     isTailCall = false;
6511 
6512   TargetLowering::CallLoweringInfo CLI(DAG);
6513   CLI.setDebugLoc(getCurSDLoc())
6514       .setChain(getRoot())
6515       .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
6516       .setTailCall(isTailCall)
6517       .setConvergent(CS.isConvergent());
6518   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
6519 
6520   if (Result.first.getNode()) {
6521     const Instruction *Inst = CS.getInstruction();
6522     Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
6523     setValue(Inst, Result.first);
6524   }
6525 
6526   // The last element of CLI.InVals has the SDValue for swifterror return.
6527   // Here we copy it to a virtual register and update SwiftErrorMap for
6528   // book-keeping.
6529   if (SwiftErrorVal && TLI.supportSwiftError()) {
6530     // Get the last element of InVals.
6531     SDValue Src = CLI.InVals.back();
6532     unsigned VReg; bool CreatedVReg;
6533     std::tie(VReg, CreatedVReg) =
6534         FuncInfo.getOrCreateSwiftErrorVRegDefAt(CS.getInstruction());
6535     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
6536     // We update the virtual register for the actual swifterror argument.
6537     if (CreatedVReg)
6538       FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
6539     DAG.setRoot(CopyNode);
6540   }
6541 }
6542 
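// Illustrative example (assumed input, not from the source): in
//   memcmp(p, "ab", 2)
// the second pointer is a constant global, so the i16 load emitted below for
// it folds to a constant and only the load of %p reaches the DAG.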
6543 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
6544                              SelectionDAGBuilder &Builder) {
6545   // Check to see if this load can be trivially constant folded, e.g. if the
6546   // input is from a string literal.
6547   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
6548     // Cast pointer to the type we really want to load.
6549     Type *LoadTy =
6550         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
6551     if (LoadVT.isVector())
6552       LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
6553 
6554     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
6555                                          PointerType::getUnqual(LoadTy));
6556 
6557     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
6558             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
6559       return Builder.getValue(LoadCst);
6560   }
6561 
6562   // Otherwise, we have to emit the load.  If the pointer points to memory
6563   // that is constant but not foldable, the input chain can be the entry node.
6564   SDValue Root;
6565   bool ConstantMemory = false;
6566 
6567   // Do not serialize (non-volatile) loads of constant memory with anything.
6568   if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
6569     Root = Builder.DAG.getEntryNode();
6570     ConstantMemory = true;
6571   } else {
6572     // Do not serialize non-volatile loads against each other.
6573     Root = Builder.DAG.getRoot();
6574   }
6575 
6576   SDValue Ptr = Builder.getValue(PtrVal);
6577   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
6578                                         Ptr, MachinePointerInfo(PtrVal),
6579                                         /* Alignment = */ 1);
6580 
6581   if (!ConstantMemory)
6582     Builder.PendingLoads.push_back(LoadVal.getValue(1));
6583   return LoadVal;
6584 }
6585 
6586 /// Record the value for an instruction that produces an integer result,
6587 /// converting the type where necessary.
6588 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
6589                                                   SDValue Value,
6590                                                   bool IsSigned) {
6591   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6592                                                     I.getType(), true);
6593   if (IsSigned)
6594     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
6595   else
6596     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
6597   setValue(&I, Value);
6598 }
6599 
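// Illustrative pattern (assumed IR, not from the source) that the lowering
// below targets:
//   %c = call i32 @memcmp(i8* %p, i8* %q, i64 4)
//   %b = icmp eq i32 %c, 0
// Since only the ==0 result is used, the call can become two i32 loads
// compared with SETNE instead of a libcall.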
6600 /// See if we can lower a memcmp call into an optimized form. If so, return
6601 /// true and lower it. Otherwise return false, and it will be lowered like a
6602 /// normal call.
6603 /// The caller already checked that \p I calls the appropriate LibFunc with a
6604 /// correct prototype.
6605 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
6606   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
6607   const Value *Size = I.getArgOperand(2);
6608   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
6609   if (CSize && CSize->getZExtValue() == 0) {
6610     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6611                                                           I.getType(), true);
6612     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
6613     return true;
6614   }
6615 
6616   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6617   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
6618       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
6619       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
6620   if (Res.first.getNode()) {
6621     processIntegerCallValue(I, Res.first, true);
6622     PendingLoads.push_back(Res.second);
6623     return true;
6624   }
6625 
6626   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
6627   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
6628   if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
6629     return false;
6630 
6631   // If the target has a fast compare for the given size, it will return a
6632   // preferred load type for that size. Require that the load VT is legal and
6633   // that the target supports unaligned loads of that type. Otherwise, return
6634   // INVALID.
6635   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
6636     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6637     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
6638     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
6639       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
6640       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
6641       // TODO: Check alignment of src and dest ptrs.
6642       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
6643       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
6644       if (!TLI.isTypeLegal(LVT) ||
6645           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
6646           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
6647         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
6648     }
6649 
6650     return LVT;
6651   };
6652 
6653   // This turns into unaligned loads. We only do this if the target natively
6654   // supports the MVT we'll be loading or if it is small enough (<= 4) that
6655   // we'll only produce a small number of byte loads.
6656   MVT LoadVT;
6657   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
6658   switch (NumBitsToCompare) {
6659   default:
6660     return false;
6661   case 16:
6662     LoadVT = MVT::i16;
6663     break;
6664   case 32:
6665     LoadVT = MVT::i32;
6666     break;
6667   case 64:
6668   case 128:
6669   case 256:
6670     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
6671     break;
6672   }
6673 
6674   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
6675     return false;
6676 
6677   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
6678   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
6679 
6680   // Bitcast to a wide integer type if the loads are vectors.
6681   if (LoadVT.isVector()) {
6682     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
6683     LoadL = DAG.getBitcast(CmpVT, LoadL);
6684     LoadR = DAG.getBitcast(CmpVT, LoadR);
6685   }
6686 
6687   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
6688   processIntegerCallValue(I, Cmp, false);
6689   return true;
6690 }
6691 
6692 /// See if we can lower a memchr call into an optimized form. If so, return
6693 /// true and lower it. Otherwise return false, and it will be lowered like a
6694 /// normal call.
6695 /// The caller already checked that \p I calls the appropriate LibFunc with a
6696 /// correct prototype.
6697 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
6698   const Value *Src = I.getArgOperand(0);
6699   const Value *Char = I.getArgOperand(1);
6700   const Value *Length = I.getArgOperand(2);
6701 
6702   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6703   std::pair<SDValue, SDValue> Res =
6704     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
6705                                 getValue(Src), getValue(Char), getValue(Length),
6706                                 MachinePointerInfo(Src));
6707   if (Res.first.getNode()) {
6708     setValue(&I, Res.first);
6709     PendingLoads.push_back(Res.second);
6710     return true;
6711   }
6712 
6713   return false;
6714 }
6715 
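// For reference (illustrative, not from the source): mempcpy(d, s, n) copies
// like memcpy but returns d + n rather than d, which is why the lowering
// below emits a non-tail memcpy followed by an ADD of Dst and Size.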
6716 /// See if we can lower a mempcpy call into an optimized form. If so, return
6717 /// true and lower it. Otherwise return false, and it will be lowered like a
6718 /// normal call.
6719 /// The caller already checked that \p I calls the appropriate LibFunc with a
6720 /// correct prototype.
6721 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
6722   SDValue Dst = getValue(I.getArgOperand(0));
6723   SDValue Src = getValue(I.getArgOperand(1));
6724   SDValue Size = getValue(I.getArgOperand(2));
6725 
6726   unsigned DstAlign = DAG.InferPtrAlignment(Dst);
6727   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6728   unsigned Align = std::min(DstAlign, SrcAlign);
6729   if (Align == 0) // Alignment of one or both could not be inferred.
6730     Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.
6731 
6732   bool isVol = false;
6733   SDLoc sdl = getCurSDLoc();
6734 
6735   // In the mempcpy context we need to pass in a false value for isTailCall
6736   // because the return pointer needs to be adjusted by the size of
6737   // the copied memory.
6738   SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
6739                              false, /*isTailCall=*/false,
6740                              MachinePointerInfo(I.getArgOperand(0)),
6741                              MachinePointerInfo(I.getArgOperand(1)));
6742   assert(MC.getNode() != nullptr &&
6743          "** memcpy should not be lowered as TailCall in mempcpy context **");
6744   DAG.setRoot(MC);
6745 
6746   // Check if Size needs to be truncated or extended.
6747   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
6748 
6749   // Adjust return pointer to point just past the last dst byte.
6750   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
6751                                     Dst, Size);
6752   setValue(&I, DstPlusSize);
6753   return true;
6754 }
6755 
6756 /// See if we can lower a strcpy call into an optimized form.  If so, return
6757 /// true and lower it, otherwise return false and it will be lowered like a
6758 /// normal call.
6759 /// The caller already checked that \p I calls the appropriate LibFunc with a
6760 /// correct prototype.
6761 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
6762   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6763 
6764   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6765   std::pair<SDValue, SDValue> Res =
6766     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
6767                                 getValue(Arg0), getValue(Arg1),
6768                                 MachinePointerInfo(Arg0),
6769                                 MachinePointerInfo(Arg1), isStpcpy);
6770   if (Res.first.getNode()) {
6771     setValue(&I, Res.first);
6772     DAG.setRoot(Res.second);
6773     return true;
6774   }
6775 
6776   return false;
6777 }
6778 
6779 /// See if we can lower a strcmp call into an optimized form.  If so, return
6780 /// true and lower it, otherwise return false and it will be lowered like a
6781 /// normal call.
6782 /// The caller already checked that \p I calls the appropriate LibFunc with a
6783 /// correct prototype.
6784 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
6785   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6786 
6787   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6788   std::pair<SDValue, SDValue> Res =
6789     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
6790                                 getValue(Arg0), getValue(Arg1),
6791                                 MachinePointerInfo(Arg0),
6792                                 MachinePointerInfo(Arg1));
6793   if (Res.first.getNode()) {
6794     processIntegerCallValue(I, Res.first, true);
6795     PendingLoads.push_back(Res.second);
6796     return true;
6797   }
6798 
6799   return false;
6800 }
6801 
6802 /// See if we can lower a strlen call into an optimized form.  If so, return
6803 /// true and lower it, otherwise return false and it will be lowered like a
6804 /// normal call.
6805 /// The caller already checked that \p I calls the appropriate LibFunc with a
6806 /// correct prototype.
6807 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
6808   const Value *Arg0 = I.getArgOperand(0);
6809 
6810   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6811   std::pair<SDValue, SDValue> Res =
6812     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
6813                                 getValue(Arg0), MachinePointerInfo(Arg0));
6814   if (Res.first.getNode()) {
6815     processIntegerCallValue(I, Res.first, false);
6816     PendingLoads.push_back(Res.second);
6817     return true;
6818   }
6819 
6820   return false;
6821 }
6822 
6823 /// See if we can lower a strnlen call into an optimized form.  If so, return
6824 /// true and lower it, otherwise return false and it will be lowered like a
6825 /// normal call.
6826 /// The caller already checked that \p I calls the appropriate LibFunc with a
6827 /// correct prototype.
6828 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
6829   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6830 
6831   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6832   std::pair<SDValue, SDValue> Res =
6833     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
6834                                  getValue(Arg0), getValue(Arg1),
6835                                  MachinePointerInfo(Arg0));
6836   if (Res.first.getNode()) {
6837     processIntegerCallValue(I, Res.first, false);
6838     PendingLoads.push_back(Res.second);
6839     return true;
6840   }
6841 
6842   return false;
6843 }
6844 
6845 /// See if we can lower a unary floating-point operation into an SDNode with
6846 /// the specified Opcode.  If so, return true and lower it, otherwise return
6847 /// false and it will be lowered like a normal call.
6848 /// The caller already checked that \p I calls the appropriate LibFunc with a
6849 /// correct prototype.
6850 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
6851                                               unsigned Opcode) {
6852   // We already checked this call's prototype; verify it doesn't modify errno.
6853   if (!I.onlyReadsMemory())
6854     return false;
6855 
6856   SDValue Tmp = getValue(I.getArgOperand(0));
6857   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
6858   return true;
6859 }
6860 
6861 /// See if we can lower a binary floating-point operation into an SDNode with
6862 /// the specified Opcode. If so, return true and lower it. Otherwise return
6863 /// false, and it will be lowered like a normal call.
6864 /// The caller already checked that \p I calls the appropriate LibFunc with a
6865 /// correct prototype.
6866 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
6867                                                unsigned Opcode) {
6868   // We already checked this call's prototype; verify it doesn't modify errno.
6869   if (!I.onlyReadsMemory())
6870     return false;
6871 
6872   SDValue Tmp0 = getValue(I.getArgOperand(0));
6873   SDValue Tmp1 = getValue(I.getArgOperand(1));
6874   EVT VT = Tmp0.getValueType();
6875   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
6876   return true;
6877 }
6878 
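// Dispatch sketch (illustrative): a sqrt() call marked as only reading memory
// (so it cannot set errno) is turned into an FSQRT node via
// visitUnaryFloatCall() below, while an unrecognized external call falls
// through to LowerCallTo().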
6879 void SelectionDAGBuilder::visitCall(const CallInst &I) {
6880   // Handle inline assembly differently.
6881   if (isa<InlineAsm>(I.getCalledValue())) {
6882     visitInlineAsm(&I);
6883     return;
6884   }
6885 
6886   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6887   computeUsesVAFloatArgument(I, MMI);
6888 
6889   const char *RenameFn = nullptr;
6890   if (Function *F = I.getCalledFunction()) {
6891     if (F->isDeclaration()) {
6892       // Is this an LLVM intrinsic or a target-specific intrinsic?
6893       unsigned IID = F->getIntrinsicID();
6894       if (!IID)
6895         if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
6896           IID = II->getIntrinsicID(F);
6897 
6898       if (IID) {
6899         RenameFn = visitIntrinsicCall(I, IID);
6900         if (!RenameFn)
6901           return;
6902       }
6903     }
6904 
6905     // Check for well-known libc/libm calls.  If the function is internal, it
6906     // can't be a library call.  Don't do the check if marked as nobuiltin for
6907     // some reason or the call site requires strict floating point semantics.
6908     LibFunc Func;
6909     if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
6910         F->hasName() && LibInfo->getLibFunc(*F, Func) &&
6911         LibInfo->hasOptimizedCodeGen(Func)) {
6912       switch (Func) {
6913       default: break;
6914       case LibFunc_copysign:
6915       case LibFunc_copysignf:
6916       case LibFunc_copysignl:
6917         // We already checked this call's prototype; verify it doesn't modify
6918         // errno.
6919         if (I.onlyReadsMemory()) {
6920           SDValue LHS = getValue(I.getArgOperand(0));
6921           SDValue RHS = getValue(I.getArgOperand(1));
6922           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
6923                                    LHS.getValueType(), LHS, RHS));
6924           return;
6925         }
6926         break;
6927       case LibFunc_fabs:
6928       case LibFunc_fabsf:
6929       case LibFunc_fabsl:
6930         if (visitUnaryFloatCall(I, ISD::FABS))
6931           return;
6932         break;
6933       case LibFunc_fmin:
6934       case LibFunc_fminf:
6935       case LibFunc_fminl:
6936         if (visitBinaryFloatCall(I, ISD::FMINNUM))
6937           return;
6938         break;
6939       case LibFunc_fmax:
6940       case LibFunc_fmaxf:
6941       case LibFunc_fmaxl:
6942         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
6943           return;
6944         break;
6945       case LibFunc_sin:
6946       case LibFunc_sinf:
6947       case LibFunc_sinl:
6948         if (visitUnaryFloatCall(I, ISD::FSIN))
6949           return;
6950         break;
6951       case LibFunc_cos:
6952       case LibFunc_cosf:
6953       case LibFunc_cosl:
6954         if (visitUnaryFloatCall(I, ISD::FCOS))
6955           return;
6956         break;
6957       case LibFunc_sqrt:
6958       case LibFunc_sqrtf:
6959       case LibFunc_sqrtl:
6960       case LibFunc_sqrt_finite:
6961       case LibFunc_sqrtf_finite:
6962       case LibFunc_sqrtl_finite:
6963         if (visitUnaryFloatCall(I, ISD::FSQRT))
6964           return;
6965         break;
6966       case LibFunc_floor:
6967       case LibFunc_floorf:
6968       case LibFunc_floorl:
6969         if (visitUnaryFloatCall(I, ISD::FFLOOR))
6970           return;
6971         break;
6972       case LibFunc_nearbyint:
6973       case LibFunc_nearbyintf:
6974       case LibFunc_nearbyintl:
6975         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
6976           return;
6977         break;
6978       case LibFunc_ceil:
6979       case LibFunc_ceilf:
6980       case LibFunc_ceill:
6981         if (visitUnaryFloatCall(I, ISD::FCEIL))
6982           return;
6983         break;
6984       case LibFunc_rint:
6985       case LibFunc_rintf:
6986       case LibFunc_rintl:
6987         if (visitUnaryFloatCall(I, ISD::FRINT))
6988           return;
6989         break;
6990       case LibFunc_round:
6991       case LibFunc_roundf:
6992       case LibFunc_roundl:
6993         if (visitUnaryFloatCall(I, ISD::FROUND))
6994           return;
6995         break;
6996       case LibFunc_trunc:
6997       case LibFunc_truncf:
6998       case LibFunc_truncl:
6999         if (visitUnaryFloatCall(I, ISD::FTRUNC))
7000           return;
7001         break;
7002       case LibFunc_log2:
7003       case LibFunc_log2f:
7004       case LibFunc_log2l:
7005         if (visitUnaryFloatCall(I, ISD::FLOG2))
7006           return;
7007         break;
7008       case LibFunc_exp2:
7009       case LibFunc_exp2f:
7010       case LibFunc_exp2l:
7011         if (visitUnaryFloatCall(I, ISD::FEXP2))
7012           return;
7013         break;
7014       case LibFunc_memcmp:
7015         if (visitMemCmpCall(I))
7016           return;
7017         break;
7018       case LibFunc_mempcpy:
7019         if (visitMemPCpyCall(I))
7020           return;
7021         break;
7022       case LibFunc_memchr:
7023         if (visitMemChrCall(I))
7024           return;
7025         break;
7026       case LibFunc_strcpy:
7027         if (visitStrCpyCall(I, false))
7028           return;
7029         break;
7030       case LibFunc_stpcpy:
7031         if (visitStrCpyCall(I, true))
7032           return;
7033         break;
7034       case LibFunc_strcmp:
7035         if (visitStrCmpCall(I))
7036           return;
7037         break;
7038       case LibFunc_strlen:
7039         if (visitStrLenCall(I))
7040           return;
7041         break;
7042       case LibFunc_strnlen:
7043         if (visitStrNLenCall(I))
7044           return;
7045         break;
7046       }
7047     }
7048   }
7049 
7050   SDValue Callee;
7051   if (!RenameFn)
7052     Callee = getValue(I.getCalledValue());
7053   else
7054     Callee = DAG.getExternalSymbol(
7055         RenameFn,
7056         DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
7057 
7058   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7059   // have to do anything here to lower funclet bundles.
7060   assert(!I.hasOperandBundlesOtherThan(
7061              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
7062          "Cannot lower calls with arbitrary operand bundles!");
7063 
7064   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7065     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7066   else
7067     // Check if we can potentially perform a tail call. More detailed checking
7068     // is done within LowerCallTo, after more information about the call is
7069     // known.
7070     LowerCallTo(&I, Callee, I.isTailCall());
7071 }
7072 
7073 namespace {
7074 
7075 /// AsmOperandInfo - This contains information for each constraint that we are
7076 /// lowering.
7077 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7078 public:
7079   /// CallOperand - If this is the result output operand or a clobber
7080   /// this is null, otherwise it is the incoming operand to the CallInst.
7081   /// This gets modified as the asm is processed.
7082   SDValue CallOperand;
7083 
7084   /// AssignedRegs - If this is a register or register class operand, this
7085   /// contains the set of registers corresponding to the operand.
7086   RegsForValue AssignedRegs;
7087 
7088   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7089     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7090   }
7091 
7092   /// Whether or not this operand accesses memory
7093   bool hasMemory(const TargetLowering &TLI) const {
7094     // Indirect operand accesses access memory.
7095     if (isIndirect)
7096       return true;
7097 
7098     for (const auto &Code : Codes)
7099       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7100         return true;
7101 
7102     return false;
7103   }
7104 
7105   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7106   /// corresponds to.  If there is no Value* for this operand, it returns
7107   /// MVT::Other.
7108   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7109                            const DataLayout &DL) const {
7110     if (!CallOperandVal) return MVT::Other;
7111 
7112     if (isa<BasicBlock>(CallOperandVal))
7113       return TLI.getPointerTy(DL);
7114 
7115     llvm::Type *OpTy = CallOperandVal->getType();
7116 
7117     // FIXME: code duplicated from TargetLowering::ParseConstraints().
7118     // If this is an indirect operand, the operand is a pointer to the
7119     // accessed type.
7120     if (isIndirect) {
7121       PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7122       if (!PtrTy)
7123         report_fatal_error("Indirect operand for inline asm not a pointer!");
7124       OpTy = PtrTy->getElementType();
7125     }
7126 
7127     // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
7128     if (StructType *STy = dyn_cast<StructType>(OpTy))
7129       if (STy->getNumElements() == 1)
7130         OpTy = STy->getElementType(0);
7131 
7132     // If OpTy is not a single value, it may be a struct/union that we
7133     // can tile with integers.
7134     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7135       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7136       switch (BitSize) {
7137       default: break;
7138       case 1:
7139       case 8:
7140       case 16:
7141       case 32:
7142       case 64:
7143       case 128:
7144         OpTy = IntegerType::get(Context, BitSize);
7145         break;
7146       }
7147     }
7148 
7149     return TLI.getValueType(DL, OpTy, true);
7150   }
7151 };
7152 
7153 using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;
7154 
7155 } // end anonymous namespace
7156 
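// Illustrative case (an assumption, not from the source): in
//   asm("mov %1, %0" : "=r"(out) : "0"(in));
// the "0" input is tied to output operand 0, so the two must agree on a
// constraint VT; patchMatchingInput() below errors out when their register
// classes or integer/FP kinds are incompatible.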
7157 /// Make sure that the output operand \p OpInfo and its corresponding input
7158 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7159 /// out).
7160 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7161                                SDISelAsmOperandInfo &MatchingOpInfo,
7162                                SelectionDAG &DAG) {
7163   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7164     return;
7165 
7166   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7167   const auto &TLI = DAG.getTargetLoweringInfo();
7168 
7169   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7170       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7171                                        OpInfo.ConstraintVT);
7172   std::pair<unsigned, const TargetRegisterClass *> InputRC =
7173       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7174                                        MatchingOpInfo.ConstraintVT);
7175   if ((OpInfo.ConstraintVT.isInteger() !=
7176        MatchingOpInfo.ConstraintVT.isInteger()) ||
7177       (MatchRC.second != InputRC.second)) {
7178     // FIXME: error out in a more elegant fashion
7179     report_fatal_error("Unsupported asm: input constraint"
7180                        " with a matching output constraint of"
7181                        " incompatible type!");
7182   }
7183   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7184 }
7185 
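// For instance (illustrative): a direct "m"-constrained input fed the
// constant 42 has no address of its own; the code below materializes one by
// placing the constant in the constant pool, or by storing a non-constant
// value to a fresh stack slot.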
7186 /// Get a direct memory input to behave well as an indirect operand.
7187 /// This may introduce stores, hence the need for a \p Chain.
7188 /// \return The (possibly updated) chain.
7189 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7190                                         SDISelAsmOperandInfo &OpInfo,
7191                                         SelectionDAG &DAG) {
7192   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7193 
7194   // If we don't have an indirect input, put it in the constant pool if we
7195   // can, otherwise spill it to a stack slot.
7196   // TODO: This isn't quite right. We need to handle these according to
7197   // the addressing mode that the constraint wants. Also, this may take
7198   // an additional register for the computation and we don't want that
7199   // either.
7200 
7201   // If the operand is a float, integer, or vector constant, spill to a
7202   // constant pool entry to get its address.
7203   const Value *OpVal = OpInfo.CallOperandVal;
7204   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7205       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7206     OpInfo.CallOperand = DAG.getConstantPool(
7207         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
7208     return Chain;
7209   }
7210 
7211   // Otherwise, create a stack slot and emit a store to it before the asm.
7212   Type *Ty = OpVal->getType();
7213   auto &DL = DAG.getDataLayout();
7214   uint64_t TySize = DL.getTypeAllocSize(Ty);
7215   unsigned Align = DL.getPrefTypeAlignment(Ty);
7216   MachineFunction &MF = DAG.getMachineFunction();
7217   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7218   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
7219   Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7220                        MachinePointerInfo::getFixedStack(MF, SSFI));
7221   OpInfo.CallOperand = StackSlot;
7222 
7223   return Chain;
7224 }
7225 
7226 /// GetRegistersForValue - Assign registers (virtual or physical) for the
7227 /// specified operand.  We prefer to assign virtual registers, to allow the
7228 /// register allocator to handle the assignment process.  However, if the asm
7229 /// uses features that we can't model on machineinstrs, we have SDISel do the
7230 /// allocation.  This produces generally horrible, but correct, code.
7231 ///
7232 ///   OpInfo describes the operand
7233 ///   RefOpInfo describes the matching operand if any, the operand otherwise
7234 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
7235                                  const SDLoc &DL, SDISelAsmOperandInfo &OpInfo,
7236                                  SDISelAsmOperandInfo &RefOpInfo) {
7237   LLVMContext &Context = *DAG.getContext();
7238 
7239   MachineFunction &MF = DAG.getMachineFunction();
7240   SmallVector<unsigned, 4> Regs;
7241   const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7242 
7243   // If this is a constraint for a single physreg, or a constraint for a
7244   // register class, find it.
7245   std::pair<unsigned, const TargetRegisterClass *> PhysReg =
7246       TLI.getRegForInlineAsmConstraint(&TRI, RefOpInfo.ConstraintCode,
7247                                        RefOpInfo.ConstraintVT);
7248 
7249   unsigned NumRegs = 1;
7250   if (OpInfo.ConstraintVT != MVT::Other) {
7251     // If this is an FP operand in an integer register (or vice versa), or more
7252     // generally if the operand value disagrees with the register class we plan
7253     // to stick it in, fix the operand type.
7254     //
7255     // If this is an input value, the bitcast to the new type is done now.
7256     // Bitcast for output value is done at the end of visitInlineAsm().
7257     if ((OpInfo.Type == InlineAsm::isOutput ||
7258          OpInfo.Type == InlineAsm::isInput) &&
7259         PhysReg.second &&
7260         !TRI.isTypeLegalForClass(*PhysReg.second, OpInfo.ConstraintVT)) {
7261       // Try to convert to the first EVT that the reg class contains.  If the
7262       // types are identical size, use a bitcast to convert (e.g. two differing
7263       // vector types).  Note: output bitcast is done at the end of
7264       // visitInlineAsm().
7265       MVT RegVT = *TRI.legalclasstypes_begin(*PhysReg.second);
7266       if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7267         // Exclude indirect inputs while they are unsupported because the code
7268         // to perform the load is missing and thus OpInfo.CallOperand still
7269         // refers to the input address rather than the pointed-to value.
7270         if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
7271           OpInfo.CallOperand =
7272               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7273         OpInfo.ConstraintVT = RegVT;
7274         // If the operand is an FP value and we want it in integer registers,
7275         // use the corresponding integer type. This turns an f64 value into
7276         // i64, which can be passed with two i32 values on a 32-bit machine.
7277       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7278         RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
7279         if (OpInfo.Type == InlineAsm::isInput)
7280           OpInfo.CallOperand =
7281               DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7282         OpInfo.ConstraintVT = RegVT;
7283       }
7284     }
7285 
7286     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
7287   }
7288 
7289   // No need to allocate a matching input constraint since the constraint it's
7290   // matching to has already been allocated.
7291   if (OpInfo.isMatchingInputConstraint())
7292     return;
7293 
7294   MVT RegVT;
7295   EVT ValueVT = OpInfo.ConstraintVT;
7296 
7297   // If this is a constraint for a specific physical register, like {r17},
7298   // assign it now.
7299   if (unsigned AssignedReg = PhysReg.first) {
7300     const TargetRegisterClass *RC = PhysReg.second;
7301     if (OpInfo.ConstraintVT == MVT::Other)
7302       ValueVT = *TRI.legalclasstypes_begin(*RC);
7303 
7304     // Get the actual register value type.  This is important, because the user
7305     // may have asked for (e.g.) the AX register in i32 type.  We need to
7306     // remember that AX is actually i16 to get the right extension.
7307     RegVT = *TRI.legalclasstypes_begin(*RC);
7308 
7309     // This is an explicit reference to a physical register.
7310     Regs.push_back(AssignedReg);
7311 
7312     // If this is an expanded reference, add the rest of the regs to Regs.
7313     if (NumRegs != 1) {
7314       TargetRegisterClass::iterator I = RC->begin();
7315       for (; *I != AssignedReg; ++I)
7316         assert(I != RC->end() && "Didn't find reg!");
7317 
7318       // Already added the first reg.
7319       --NumRegs; ++I;
7320       for (; NumRegs; --NumRegs, ++I) {
7321         assert(I != RC->end() && "Ran out of registers to allocate!");
7322         Regs.push_back(*I);
7323       }
7324     }
7325 
7326     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7327     return;
7328   }
7329 
7330   // Otherwise, if this was a reference to an LLVM register class, create vregs
7331   // for this reference.
7332   if (const TargetRegisterClass *RC = PhysReg.second) {
7333     RegVT = *TRI.legalclasstypes_begin(*RC);
7334     if (OpInfo.ConstraintVT == MVT::Other)
7335       ValueVT = RegVT;
7336 
7337     // Create the appropriate number of virtual registers.
7338     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7339     for (; NumRegs; --NumRegs)
7340       Regs.push_back(RegInfo.createVirtualRegister(RC));
7341 
7342     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
7343     return;
7344   }
7345 
7346   // Otherwise, we couldn't allocate enough registers for this.
7347 }
7348 
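// Operand layout being walked here (per InlineAsm's encoding; illustrative):
// each emitted definition is one flag-word constant followed by its
// registers, so the scan below advances by getNumOperandRegisters(flag) + 1
// per definition until it reaches definition number OperandNo.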
7349 static unsigned
7350 findMatchingInlineAsmOperand(unsigned OperandNo,
7351                              const std::vector<SDValue> &AsmNodeOperands) {
7352   // Scan until we find the definition we already emitted of this operand.
7353   unsigned CurOp = InlineAsm::Op_FirstOperand;
7354   for (; OperandNo; --OperandNo) {
7355     // Advance to the next operand.
7356     unsigned OpFlag =
7357         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7358     assert((InlineAsm::isRegDefKind(OpFlag) ||
7359             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
7360             InlineAsm::isMemKind(OpFlag)) &&
7361            "Skipped past definitions?");
7362     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
7363   }
7364   return CurOp;
7365 }
7366 
7367 /// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT
7368 /// \return true if it has succeeded, false otherwise
7369 static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs,
7370                               MVT RegVT, SelectionDAG &DAG) {
7371   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7372   MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
7373   for (unsigned i = 0, e = NumRegs; i != e; ++i) {
7374     if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
7375       Regs.push_back(RegInfo.createVirtualRegister(RC));
7376     else
7377       return false;
7378   }
7379   return true;
7380 }
7381 
7382 namespace {
7383 
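// Sketch of the encoding built below (illustrative; flag constants are from
// llvm/IR/InlineAsm.h): the extra-info operand is a bitmask, e.g. a volatile
// Intel-dialect asm yields
//   Extra_HasSideEffects | (InlineAsm::AD_Intel * Extra_AsmDialect)
// before the per-operand MayLoad/MayStore bits are OR'd in by update().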
7384 class ExtraFlags {
7385   unsigned Flags = 0;
7386 
7387 public:
7388   explicit ExtraFlags(ImmutableCallSite CS) {
7389     const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7390     if (IA->hasSideEffects())
7391       Flags |= InlineAsm::Extra_HasSideEffects;
7392     if (IA->isAlignStack())
7393       Flags |= InlineAsm::Extra_IsAlignStack;
7394     if (CS.isConvergent())
7395       Flags |= InlineAsm::Extra_IsConvergent;
7396     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
7397   }
7398 
7399   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
7400     // Ideally, we would only check against memory constraints.  However, the
7401     // meaning of an Other constraint can be target-specific and we can't easily
7402     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
7403     // for Other constraints as well.
7404     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
7405         OpInfo.ConstraintType == TargetLowering::C_Other) {
7406       if (OpInfo.Type == InlineAsm::isInput)
7407         Flags |= InlineAsm::Extra_MayLoad;
7408       else if (OpInfo.Type == InlineAsm::isOutput)
7409         Flags |= InlineAsm::Extra_MayStore;
7410       else if (OpInfo.Type == InlineAsm::isClobber)
7411         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
7412     }
7413   }
7414 
7415   unsigned get() const { return Flags; }
7416 };
7417 
7418 } // end anonymous namespace
7419 
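// End-to-end sketch (illustrative, not from the source): for
//   %v = call i32 asm "add $1, $0", "=r,r"(i32 %x)
// the passes below (1) parse both constraints, (2) assign register classes
// for "=r" and "r", and (3) build an ISD::INLINEASM node whose operands are
// the chain, the asm string, the !srcloc MDNode, the extra-info flags, and a
// flag-word plus registers for each constraint.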
7420 /// visitInlineAsm - Handle a call to an InlineAsm object.
7421 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
7422   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
7423 
7424   /// ConstraintOperands - Information about all of the constraints.
7425   SDISelAsmOperandInfoVector ConstraintOperands;
7426 
7427   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7428   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
7429       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
7430 
7431   bool hasMemory = false;
7432 
7433   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
7434   ExtraFlags ExtraInfo(CS);
7435 
7436   unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
7437   unsigned ResNo = 0;   // ResNo - The result number of the next output.
7438   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
7439     ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
7440     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
7441 
7442     MVT OpVT = MVT::Other;
7443 
7444     // Compute the value type for each operand.
7445     if (OpInfo.Type == InlineAsm::isInput ||
7446         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
7447       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
7448 
7449       // Process the call argument. BasicBlocks are labels, currently appearing
7450       // only in inline asm.
7451       if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
7452         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
7453       } else {
7454         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
7455       }
7456 
7457       OpVT =
7458           OpInfo
7459               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
7460               .getSimpleVT();
7461     }
7462 
7463     if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
7464       // The return value of the call is this value.  As such, there is no
7465       // corresponding argument.
7466       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7467       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
7468         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
7469                                       STy->getElementType(ResNo));
7470       } else {
7471         assert(ResNo == 0 && "Asm only has one result!");
7472         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
7473       }
7474       ++ResNo;
7475     }
7476 
7477     OpInfo.ConstraintVT = OpVT;
7478 
7479     if (!hasMemory)
7480       hasMemory = OpInfo.hasMemory(TLI);
7481 
7482     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
7483     // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
7484     auto TargetConstraint = TargetConstraints[i];
7485 
7486     // Compute the constraint code and ConstraintType to use.
7487     TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
7488 
7489     ExtraInfo.update(TargetConstraint);
7490   }
7491 
7492   SDValue Chain, Flag;
7493 
7494   // We won't need to flush pending loads if this asm doesn't touch
7495   // memory and is nonvolatile.
7496   if (hasMemory || IA->hasSideEffects())
7497     Chain = getRoot();
7498   else
7499     Chain = DAG.getRoot();
7500 
7501   // Second pass over the constraints: compute which constraint option to use
7502   // and assign registers to constraints that want a specific physreg.
7503   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7504     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7505 
7506     // If this is an output operand with a matching input operand, look up the
7507     // matching input. If their types mismatch, e.g. one is an integer, the
7508     // other is floating point, or their sizes are different, flag it as an
7509     // error.
7510     if (OpInfo.hasMatchingInput()) {
7511       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
7512       patchMatchingInput(OpInfo, Input, DAG);
7513     }
7514 
7515     // Compute the constraint code and ConstraintType to use.
7516     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
7517 
7518     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7519         OpInfo.Type == InlineAsm::isClobber)
7520       continue;
7521 
7522     // If this is a memory input, and if the operand is not indirect, do what we
7523     // need to provide an address for the memory input.
7524     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
7525         !OpInfo.isIndirect) {
7526       assert((OpInfo.isMultipleAlternative ||
7527               (OpInfo.Type == InlineAsm::isInput)) &&
7528              "Can only indirectify direct input operands!");
7529 
7530       // Memory operands really want the address of the value.
7531       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
7532 
7533       // There is no longer a Value* corresponding to this operand.
7534       OpInfo.CallOperandVal = nullptr;
7535 
7536       // It is now an indirect operand.
7537       OpInfo.isIndirect = true;
7538     }
7539 
7540     // If this constraint is for a specific register, allocate it before
7541     // anything else.
7542     SDISelAsmOperandInfo &RefOpInfo =
7543         OpInfo.isMatchingInputConstraint()
7544             ? ConstraintOperands[OpInfo.getMatchedOperand()]
7545             : ConstraintOperands[i];
7546     if (RefOpInfo.ConstraintType == TargetLowering::C_Register)
7547       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
7548   }
7549 
7550   // Third pass - Loop over all of the operands, assigning virtual or physregs
7551   // to register class operands.
7552   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7553     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7554     SDISelAsmOperandInfo &RefOpInfo =
7555         OpInfo.isMatchingInputConstraint()
7556             ? ConstraintOperands[OpInfo.getMatchedOperand()]
7557             : ConstraintOperands[i];
7558 
7559     // C_Register operands have already been allocated, Other/Memory don't need
7560     // to be.
7561     if (RefOpInfo.ConstraintType == TargetLowering::C_RegisterClass)
7562       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo, RefOpInfo);
7563   }
7564 
7565   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
7566   std::vector<SDValue> AsmNodeOperands;
7567   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
7568   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
7569       IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
7570 
7571   // If we have a !srcloc metadata node associated with it, we want to attach
7572   // this to the ultimately generated inline asm machineinstr.  To do this, we
7573   // pass in the third operand as this (potentially null) inline asm MDNode.
7574   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
7575   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
7576 
7577   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
7578   // bits as operand 3.
7579   AsmNodeOperands.push_back(DAG.getTargetConstant(
7580       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7581 
7582   // Loop over all of the inputs, copying the operand values into the
7583   // appropriate registers and processing the output regs.
7584   RegsForValue RetValRegs;
7585 
7586   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
7587   std::vector<std::pair<RegsForValue, Value *>> IndirectStoresToEmit;
7588 
7589   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
7590     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
7591 
7592     switch (OpInfo.Type) {
7593     case InlineAsm::isOutput:
7594       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
7595           OpInfo.ConstraintType != TargetLowering::C_Register) {
7596         // Memory output, or 'other' output (e.g. 'X' constraint).
7597         assert(OpInfo.isIndirect && "Memory output must be indirect operand");
7598 
7599         unsigned ConstraintID =
7600             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7601         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7602                "Failed to convert memory constraint code to constraint id.");
7603 
7604         // Add information to the INLINEASM node to know about this output.
7605         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7606         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
7607         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
7608                                                         MVT::i32));
7609         AsmNodeOperands.push_back(OpInfo.CallOperand);
7610         break;
7611       }
7612 
7613       // Otherwise, this is a register or register class output.
7614 
7615       // Copy the output from the appropriate register.  Find a register that
7616       // we can use.
7617       if (OpInfo.AssignedRegs.Regs.empty()) {
7618         emitInlineAsmError(
7619             CS, "couldn't allocate output register for constraint '" +
7620                     Twine(OpInfo.ConstraintCode) + "'");
7621         return;
7622       }
7623 
7624       // If this is an indirect operand, store through the pointer after the
7625       // asm.
7626       if (OpInfo.isIndirect) {
7627         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7628                                                       OpInfo.CallOperandVal));
7629       } else {
7630         // This is the result value of the call.
7631         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7632         // Concatenate this output onto the outputs list.
7633         RetValRegs.append(OpInfo.AssignedRegs);
7634       }
7635 
7636       // Add information to the INLINEASM node to know that this register is
7637       // set.
7638       OpInfo.AssignedRegs
7639           .AddInlineAsmOperands(OpInfo.isEarlyClobber
7640                                     ? InlineAsm::Kind_RegDefEarlyClobber
7641                                     : InlineAsm::Kind_RegDef,
7642                                 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7643       break;
7644 
7645     case InlineAsm::isInput: {
7646       SDValue InOperandVal = OpInfo.CallOperand;
7647 
7648       if (OpInfo.isMatchingInputConstraint()) {
7649         // If this is required to match an output register we have already set,
7650         // just use its register.
7651         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7652                                                   AsmNodeOperands);
7653         unsigned OpFlag =
7654           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7655         if (InlineAsm::isRegDefKind(OpFlag) ||
7656             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7657           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7658           if (OpInfo.isIndirect) {
7659             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7660             emitInlineAsmError(CS, "inline asm not supported yet:"
7661                                    " don't know how to handle tied "
7662                                    "indirect register inputs");
7663             return;
7664           }
7665 
7666           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7667           SmallVector<unsigned, 4> Regs;
7668 
7669           if (!createVirtualRegs(Regs,
7670                                  InlineAsm::getNumOperandRegisters(OpFlag),
7671                                  RegVT, DAG)) {
7672             emitInlineAsmError(CS, "inline asm error: This value type register "
7673                                    "class is not natively supported!");
7674             return;
7675           }
7676 
7677           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
7678 
7679           SDLoc dl = getCurSDLoc();
7680           // Use the produced MatchedRegs object to copy the input value
               // into the matched virtual registers.
7681           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
7682                                     CS.getInstruction());
7683           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
7684                                            true, OpInfo.getMatchedOperand(), dl,
7685                                            DAG, AsmNodeOperands);
7686           break;
7687         }
7688 
7689         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
7690         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
7691                "Unexpected number of operands");
7692         // Add information to the INLINEASM node to know about this input.
7693         // See InlineAsm.h isUseOperandTiedToDef.
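             // For example (illustrative), an input constrained with "0" that
             // matches a memory output is re-encoded here as a matching
             // operand rather than a plain Kind_Mem operand, so the tie to
             // the output remains visible to later passes.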
7694         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
7695         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
7696                                                     OpInfo.getMatchedOperand());
7697         AsmNodeOperands.push_back(DAG.getTargetConstant(
7698             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7699         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7700         break;
7701       }
7702 
7703       // Treat indirect 'X' constraint as memory.
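           // (Illustrative: an "X" operand whose value is passed indirectly
           // through a pointer, i.e. a "*X" constraint string, is then
           // lowered below exactly like an "m" operand.)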
7704       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
7705           OpInfo.isIndirect)
7706         OpInfo.ConstraintType = TargetLowering::C_Memory;
7707 
7708       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
7709         std::vector<SDValue> Ops;
7710         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
7711                                           Ops, DAG);
7712         if (Ops.empty()) {
7713           emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
7714                                      Twine(OpInfo.ConstraintCode) + "'");
7715           return;
7716         }
7717 
7718         // Add information to the INLINEASM node to know about this input.
7719         unsigned ResOpType =
7720           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
7721         AsmNodeOperands.push_back(DAG.getTargetConstant(
7722             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7723         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
7724         break;
7725       }
7726 
7727       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
7728         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
7729         assert(InOperandVal.getValueType() ==
7730                    TLI.getPointerTy(DAG.getDataLayout()) &&
7731                "Memory operands expect pointer values");
7732 
7733         unsigned ConstraintID =
7734             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7735         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7736                "Failed to convert memory constraint code to constraint id.");
7737 
7738         // Add information to the INLINEASM node to know about this input.
7739         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7740         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
7741         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
7742                                                         getCurSDLoc(),
7743                                                         MVT::i32));
7744         AsmNodeOperands.push_back(InOperandVal);
7745         break;
7746       }
7747 
7748       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
7749               OpInfo.ConstraintType == TargetLowering::C_Register) &&
7750              "Unknown constraint type!");
7751 
7752       // TODO: Support this.
7753       if (OpInfo.isIndirect) {
7754         emitInlineAsmError(
7755             CS, "Don't know how to handle indirect register inputs yet "
7756                 "for constraint '" +
7757                     Twine(OpInfo.ConstraintCode) + "'");
7758         return;
7759       }
7760 
7761       // Copy the input into the appropriate registers.
7762       if (OpInfo.AssignedRegs.Regs.empty()) {
7763         emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
7764                                    Twine(OpInfo.ConstraintCode) + "'");
7765         return;
7766       }
7767 
7768       SDLoc dl = getCurSDLoc();
7769 
7770       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7771                                         Chain, &Flag, CS.getInstruction());
7772 
7773       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
7774                                                dl, DAG, AsmNodeOperands);
7775       break;
7776     }
7777     case InlineAsm::isClobber:
7778       // Add the clobbered value to the operand list, so that the register
7779       // allocator is aware that the physreg got clobbered.
7780       if (!OpInfo.AssignedRegs.Regs.empty())
7781         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
7782                                                  false, 0, getCurSDLoc(), DAG,
7783                                                  AsmNodeOperands);
7784       break;
7785     }
7786   }
7787 
7788   // Finish up input operands.  Set the input chain and add the flag last.
7789   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
7790   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
7791 
7792   Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
7793                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
7794   Flag = Chain.getValue(1);
7795 
7796   // If this asm returns a register value, copy the result from that register
7797   // and set it as the value of the call.
7798   if (!RetValRegs.Regs.empty()) {
7799     SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7800                                              Chain, &Flag, CS.getInstruction());
7801 
7802     llvm::Type *CSResultType = CS.getType();
7803     unsigned numRet;
7804     ArrayRef<Type *> ResultTypes;
7805     SmallVector<SDValue, 1> ResultValues(1);
7806     if (CSResultType->isSingleValueType()) {
7807       numRet = 1;
7808       ResultValues[0] = Val;
7809       ResultTypes = makeArrayRef(CSResultType);
7810     } else {
7811       numRet = CSResultType->getNumContainedTypes();
7812       assert(Val->getNumOperands() == numRet &&
7813              "Mismatch in number of output operands in asm result");
7814       ResultTypes = CSResultType->subtypes();
7815       ArrayRef<SDUse> ValueUses = Val->ops();
7816       ResultValues.resize(numRet);
7817       std::transform(ValueUses.begin(), ValueUses.end(), ResultValues.begin(),
7818                      [](const SDUse &u) -> SDValue { return u.get(); });
7819     }
7820     SmallVector<EVT, 1> ResultVTs(numRet);
7821     for (unsigned i = 0; i < numRet; i++) {
7822       EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), ResultTypes[i]);
7823       SDValue Val = ResultValues[i];
7824       assert(ResultTypes[i]->isSized() && "Unexpected unsized type");
7825       // If the type of the inline asm call site return value is different but
7826       // has the same size as the type of the asm output, bitcast it.  One example
7827       // of this is for vectors with different width / number of elements.
7828       // This can happen for register classes that can contain multiple
7829       // different value types.  The preg or vreg allocated may not have the
7830       // same VT as was expected.
7831       //
7832       // This can also happen for a return value that disagrees with the
7833       // register class it is put in, e.g. a double in a general-purpose
7834       // register on a 32-bit machine.
7835       if (ResultVT != Val.getValueType() &&
7836           ResultVT.getSizeInBits() == Val.getValueSizeInBits())
7837         Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, Val);
7838       else if (ResultVT != Val.getValueType() && ResultVT.isInteger() &&
7839                Val.getValueType().isInteger()) {
7840         // If a result value was tied to an input value, the computed result
7841         // may have a wider width than the expected result.  Extract the
7842         // relevant portion.
7843         Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, Val);
7844       }
7845 
7846       assert(ResultVT == Val.getValueType() && "Asm result value mismatch!");
7847       ResultVTs[i] = ResultVT;
7848       ResultValues[i] = Val;
7849     }
7850 
7851     Val = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
7852                       DAG.getVTList(ResultVTs), ResultValues);
7853     setValue(CS.getInstruction(), Val);
7854     // Don't need to use this as a chain in this case.
7855     if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
7856       return;
7857   }
7858 
7859   std::vector<std::pair<SDValue, const Value *>> StoresToEmit;
7860 
7861   // Process indirect outputs, first output all of the flagged copies out of
7862   // physregs.
7863   for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
7864     RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
7865     const Value *Ptr = IndirectStoresToEmit[i].second;
7866     SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7867                                              Chain, &Flag, IA);
7868     StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
7869   }
7870 
7871   // Emit the non-flagged stores from the physregs.
7872   SmallVector<SDValue, 8> OutChains;
7873   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
7874     SDValue Val = DAG.getStore(Chain, getCurSDLoc(), StoresToEmit[i].first,
7875                                getValue(StoresToEmit[i].second),
7876                                MachinePointerInfo(StoresToEmit[i].second));
7877     OutChains.push_back(Val);
7878   }
7879 
7880   if (!OutChains.empty())
7881     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
7882 
7883   DAG.setRoot(Chain);
7884 }
7885 
7886 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
7887                                              const Twine &Message) {
7888   LLVMContext &Ctx = *DAG.getContext();
7889   Ctx.emitError(CS.getInstruction(), Message);
7890 
7891   // Make sure we leave the DAG in a valid state
7892   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7893   SmallVector<EVT, 1> ValueVTs;
7894   ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
7895 
7896   if (ValueVTs.empty())
7897     return;
7898 
7899   SmallVector<SDValue, 1> Ops;
7900   for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
7901     Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
7902 
7903   setValue(CS.getInstruction(), DAG.getMergeValues(Ops, getCurSDLoc()));
7904 }
7905 
7906 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
7907   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
7908                           MVT::Other, getRoot(),
7909                           getValue(I.getArgOperand(0)),
7910                           DAG.getSrcValue(I.getArgOperand(0))));
7911 }
7912 
7913 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
7914   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7915   const DataLayout &DL = DAG.getDataLayout();
7916   SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7917                            getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
7918                            DAG.getSrcValue(I.getOperand(0)),
7919                            DL.getABITypeAlignment(I.getType()));
7920   setValue(&I, V);
7921   DAG.setRoot(V.getValue(1));
7922 }
7923 
7924 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
7925   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
7926                           MVT::Other, getRoot(),
7927                           getValue(I.getArgOperand(0)),
7928                           DAG.getSrcValue(I.getArgOperand(0))));
7929 }
7930 
7931 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
7932   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
7933                           MVT::Other, getRoot(),
7934                           getValue(I.getArgOperand(0)),
7935                           getValue(I.getArgOperand(1)),
7936                           DAG.getSrcValue(I.getArgOperand(0)),
7937                           DAG.getSrcValue(I.getArgOperand(1))));
7938 }
7939 
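     /// Wrap \p Op in an AssertZext if \p I carries !range metadata that
     /// proves the value fits in fewer bits than its type provides. For
     /// example (illustrative), an i32 call result annotated with
     /// !range !{i32 0, i32 256} can only have its low 8 bits set, so it is
     /// wrapped in an AssertZext with an i8 value type.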
7940 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
7941                                                     const Instruction &I,
7942                                                     SDValue Op) {
7943   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
7944   if (!Range)
7945     return Op;
7946 
7947   ConstantRange CR = getConstantRangeFromMetadata(*Range);
7948   if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
7949     return Op;
7950 
7951   APInt Lo = CR.getUnsignedMin();
7952   if (!Lo.isMinValue())
7953     return Op;
7954 
7955   APInt Hi = CR.getUnsignedMax();
7956   unsigned Bits = Hi.getActiveBits();
7957 
7958   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7959 
7960   SDLoc SL = getCurSDLoc();
7961 
7962   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
7963                              DAG.getValueType(SmallVT));
7964   unsigned NumVals = Op.getNode()->getNumValues();
7965   if (NumVals == 1)
7966     return ZExt;
7967 
7968   SmallVector<SDValue, 4> Ops;
7969 
7970   Ops.push_back(ZExt);
7971   for (unsigned I = 1; I != NumVals; ++I)
7972     Ops.push_back(Op.getValue(I));
7973 
7974   return DAG.getMergeValues(Ops, SL);
7975 }
7976 
7977 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
7978 /// the call being lowered.
7979 ///
7980 /// This is a helper for lowering intrinsics that follow a target calling
7981 /// convention or require stack pointer adjustment. Only a subset of the
7982 /// intrinsic's operands need to participate in the calling convention.
7983 void SelectionDAGBuilder::populateCallLoweringInfo(
7984     TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS,
7985     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
7986     bool IsPatchPoint) {
7987   TargetLowering::ArgListTy Args;
7988   Args.reserve(NumArgs);
7989 
7990   // Populate the argument list.
7991   // Attributes for args start at offset 1, after the return attribute.
7992   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
7993        ArgI != ArgE; ++ArgI) {
7994     const Value *V = CS->getOperand(ArgI);
7995 
7996     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
7997 
7998     TargetLowering::ArgListEntry Entry;
7999     Entry.Node = getValue(V);
8000     Entry.Ty = V->getType();
8001     Entry.setAttributes(&CS, ArgI);
8002     Args.push_back(Entry);
8003   }
8004 
8005   CLI.setDebugLoc(getCurSDLoc())
8006       .setChain(getRoot())
8007       .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args))
8008       .setDiscardResult(CS->use_empty())
8009       .setIsPatchPoint(IsPatchPoint);
8010 }
8011 
8012 /// Add a stack map intrinsic call's live variable operands to a stackmap
8013 /// or patchpoint target node's operand list.
8014 ///
8015 /// Constants are converted to TargetConstants purely as an optimization to
8016 /// avoid constant materialization and register allocation.
8017 ///
8018 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8019 /// generate address computation nodes, and so ExpandISelPseudo can convert the
8020 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8021 /// address materialization and register allocation, but may also be required
8022 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8023 /// alloca in the entry block, then the runtime may assume that the alloca's
8024 /// StackMap location can be read immediately after compilation and that the
8025 /// location is valid at any point during execution (this is similar to the
8026 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8027 /// only available in a register, then the runtime would need to trap when
8028 /// execution reaches the StackMap in order to read the alloca's location.
8029 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
8030                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8031                                 SelectionDAGBuilder &Builder) {
8032   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
8033     SDValue OpVal = Builder.getValue(CS.getArgument(i));
8034     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8035       Ops.push_back(
8036         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8037       Ops.push_back(
8038         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8039     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8040       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8041       Ops.push_back(Builder.DAG.getTargetFrameIndex(
8042           FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8043     } else
8044       Ops.push_back(OpVal);
8045   }
8046 }
8047 
8048 /// Lower llvm.experimental.stackmap directly to its target opcode.
8049 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8050   // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
8051   //                                  [live variables...])
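       //
       // For example (illustrative):
       //   call void (i64, i32, ...)
       //       @llvm.experimental.stackmap(i64 12345, i32 8, i32 %x)
       // records where %x lives at this point and reserves 8 bytes of shadow
       // encoding space (typically nops) at the call site.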
8052 
8053   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8054 
8055   SDValue Chain, InFlag, Callee, NullPtr;
8056   SmallVector<SDValue, 32> Ops;
8057 
8058   SDLoc DL = getCurSDLoc();
8059   Callee = getValue(CI.getCalledValue());
8060   NullPtr = DAG.getIntPtrConstant(0, DL, true);
8061 
8062   // The stackmap intrinsic only records the live variables (the arguments
8063   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
8064   // intrinsic, this won't be lowered to a function call. This means we don't
8065   // have to worry about calling conventions and target specific lowering code.
8066   // Instead we perform the call lowering right here.
8067   //
8068   // chain, flag = CALLSEQ_START(chain, 0, 0)
8069   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8070   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8071   //
8072   Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8073   InFlag = Chain.getValue(1);
8074 
8075   // Add the <id> and <numBytes> constants.
8076   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8077   Ops.push_back(DAG.getTargetConstant(
8078                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8079   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8080   Ops.push_back(DAG.getTargetConstant(
8081                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8082                   MVT::i32));
8083 
8084   // Push live variables for the stack map.
8085   addStackMapLiveVars(&CI, 2, DL, Ops, *this);
8086 
8087   // We do not push any register mask info on the operand list here,
8088   // because the stackmap doesn't clobber anything.
8089 
8090   // Push the chain and the glue flag.
8091   Ops.push_back(Chain);
8092   Ops.push_back(InFlag);
8093 
8094   // Create the STACKMAP node.
8095   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8096   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8097   Chain = SDValue(SM, 0);
8098   InFlag = Chain.getValue(1);
8099 
8100   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8101 
8102   // Stackmaps don't generate values, so nothing goes into the NodeMap.
8103 
8104   // Set the root to the target-lowered call chain.
8105   DAG.setRoot(Chain);
8106 
8107   // Inform the Frame Information that we have a stackmap in this function.
8108   FuncInfo.MF->getFrameInfo().setHasStackMap();
8109 }
8110 
8111 /// Lower llvm.experimental.patchpoint directly to its target opcode.
8112 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
8113                                           const BasicBlock *EHPadBB) {
8114   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8115   //                                                 i32 <numBytes>,
8116   //                                                 i8* <target>,
8117   //                                                 i32 <numArgs>,
8118   //                                                 [Args...],
8119   //                                                 [live variables...])
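       //
       // For example (illustrative):
       //   call void (i64, i32, i8*, i32, ...)
       //       @llvm.experimental.patchpoint.void(i64 42, i32 15, i8* %target,
       //                                          i32 2, i64 %a, i64 %b)
       // calls %target with two arguments and reserves 15 bytes for patching.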
8120 
8121   CallingConv::ID CC = CS.getCallingConv();
8122   bool IsAnyRegCC = CC == CallingConv::AnyReg;
8123   bool HasDef = !CS->getType()->isVoidTy();
8124   SDLoc dl = getCurSDLoc();
8125   SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
8126 
8127   // Handle immediate and symbolic callees.
8128   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8129     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8130                                    /*isTarget=*/true);
8131   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8132     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8133                                          SDLoc(SymbolicCallee),
8134                                          SymbolicCallee->getValueType(0));
8135 
8136   // Get the real number of arguments participating in the call <numArgs>
8137   SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
8138   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8139 
8140   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8141   // Intrinsics include all meta-operands up to but not including CC.
8142   unsigned NumMetaOpers = PatchPointOpers::CCPos;
8143   assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
8144          "Not enough arguments provided to the patchpoint intrinsic");
8145 
8146   // For AnyRegCC the arguments are lowered later on manually.
8147   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8148   Type *ReturnTy =
8149     IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
8150 
8151   TargetLowering::CallLoweringInfo CLI(DAG);
8152   populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
8153                            true);
8154   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8155 
8156   SDNode *CallEnd = Result.second.getNode();
8157   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8158     CallEnd = CallEnd->getOperand(0).getNode();
8159 
8160   // Get a call instruction from the call sequence chain.
8161   // Tail calls are not allowed.
8162   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8163          "Expected a callseq node.");
8164   SDNode *Call = CallEnd->getOperand(0).getNode();
8165   bool HasGlue = Call->getGluedNode();
8166 
8167   // Replace the target specific call node with the patchable intrinsic.
8168   SmallVector<SDValue, 8> Ops;
8169 
8170   // Add the <id> and <numBytes> constants.
8171   SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
8172   Ops.push_back(DAG.getTargetConstant(
8173                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8174   SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
8175   Ops.push_back(DAG.getTargetConstant(
8176                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8177                   MVT::i32));
8178 
8179   // Add the callee.
8180   Ops.push_back(Callee);
8181 
8182   // Adjust <numArgs> to account for any arguments that have been passed on the
8183   // stack instead.
8184   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
8185   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8186   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8187   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8188 
8189   // Add the calling convention
8190   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8191 
8192   // Add the arguments we omitted previously. The register allocator should
8193   // place these in any free register.
8194   if (IsAnyRegCC)
8195     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8196       Ops.push_back(getValue(CS.getArgument(i)));
8197 
8198   // Push the arguments from the call instruction up to the register mask.
8199   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
8200   Ops.append(Call->op_begin() + 2, e);
8201 
8202   // Push live variables for the stack map.
8203   addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
8204 
8205   // Push the register mask info.
8206   if (HasGlue)
8207     Ops.push_back(*(Call->op_end()-2));
8208   else
8209     Ops.push_back(*(Call->op_end()-1));
8210 
8211   // Push the chain (this is originally the first operand of the call, but
8212   // now becomes the last or second-to-last operand).
8213   Ops.push_back(*(Call->op_begin()));
8214 
8215   // Push the glue flag (last operand).
8216   if (HasGlue)
8217     Ops.push_back(*(Call->op_end()-1));
8218 
8219   SDVTList NodeTys;
8220   if (IsAnyRegCC && HasDef) {
8221     // Create the return types based on the intrinsic definition
8222     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8223     SmallVector<EVT, 3> ValueVTs;
8224     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8225     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
8226 
8227     // There is always a chain and a glue type at the end
8228     ValueVTs.push_back(MVT::Other);
8229     ValueVTs.push_back(MVT::Glue);
8230     NodeTys = DAG.getVTList(ValueVTs);
8231   } else
8232     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8233 
8234   // Replace the target specific call node with a PATCHPOINT node.
8235   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
8236                                          dl, NodeTys, Ops);
8237 
8238   // Update the NodeMap.
8239   if (HasDef) {
8240     if (IsAnyRegCC)
8241       setValue(CS.getInstruction(), SDValue(MN, 0));
8242     else
8243       setValue(CS.getInstruction(), Result.first);
8244   }
8245 
8246   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
8247   // call sequence. Furthermore the location of the chain and glue can change
8248   // when the AnyReg calling convention is used and the intrinsic returns a
8249   // value.
8250   if (IsAnyRegCC && HasDef) {
8251     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
8252     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
8253     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
8254   } else
8255     DAG.ReplaceAllUsesWith(Call, MN);
8256   DAG.DeleteNode(Call);
8257 
8258   // Inform the Frame Information that we have a patchpoint in this function.
8259   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8260 }
8261 
8262 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
8263                                             unsigned Intrinsic) {
8264   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8265   SDValue Op1 = getValue(I.getArgOperand(0));
8266   SDValue Op2;
8267   if (I.getNumArgOperands() > 1)
8268     Op2 = getValue(I.getArgOperand(1));
8269   SDLoc dl = getCurSDLoc();
8270   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8271   SDValue Res;
8272   FastMathFlags FMF;
8273   if (isa<FPMathOperator>(I))
8274     FMF = I.getFastMathFlags();
8275 
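       // For the fadd/fmul reductions the first argument is the scalar
       // accumulator and the second is the vector to reduce; the relaxed
       // (fast-math) forms below ignore the accumulator.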
8276   switch (Intrinsic) {
8277   case Intrinsic::experimental_vector_reduce_fadd:
8278     if (FMF.isFast())
8279       Res = DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2);
8280     else
8281       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
8282     break;
8283   case Intrinsic::experimental_vector_reduce_fmul:
8284     if (FMF.isFast())
8285       Res = DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2);
8286     else
8287       Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
8288     break;
8289   case Intrinsic::experimental_vector_reduce_add:
8290     Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
8291     break;
8292   case Intrinsic::experimental_vector_reduce_mul:
8293     Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
8294     break;
8295   case Intrinsic::experimental_vector_reduce_and:
8296     Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
8297     break;
8298   case Intrinsic::experimental_vector_reduce_or:
8299     Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
8300     break;
8301   case Intrinsic::experimental_vector_reduce_xor:
8302     Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
8303     break;
8304   case Intrinsic::experimental_vector_reduce_smax:
8305     Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
8306     break;
8307   case Intrinsic::experimental_vector_reduce_smin:
8308     Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
8309     break;
8310   case Intrinsic::experimental_vector_reduce_umax:
8311     Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
8312     break;
8313   case Intrinsic::experimental_vector_reduce_umin:
8314     Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
8315     break;
8316   case Intrinsic::experimental_vector_reduce_fmax:
8317     Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
8318     break;
8319   case Intrinsic::experimental_vector_reduce_fmin:
8320     Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
8321     break;
8322   default:
8323     llvm_unreachable("Unhandled vector reduce intrinsic");
8324   }
8325   setValue(&I, Res);
8326 }
8327 
8328 /// Returns an AttributeList representing the attributes applied to the return
8329 /// value of the given call.
8330 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
8331   SmallVector<Attribute::AttrKind, 2> Attrs;
8332   if (CLI.RetSExt)
8333     Attrs.push_back(Attribute::SExt);
8334   if (CLI.RetZExt)
8335     Attrs.push_back(Attribute::ZExt);
8336   if (CLI.IsInReg)
8337     Attrs.push_back(Attribute::InReg);
8338 
8339   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
8340                             Attrs);
8341 }
8342 
8343 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
8344 /// implementation, which just calls LowerCall.
8345 /// FIXME: When all targets are
8346 /// migrated to using LowerCall, this hook should be integrated into SDISel.
8347 std::pair<SDValue, SDValue>
8348 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
8349   // Handle the incoming return values from the call.
8350   CLI.Ins.clear();
8351   Type *OrigRetTy = CLI.RetTy;
8352   SmallVector<EVT, 4> RetTys;
8353   SmallVector<uint64_t, 4> Offsets;
8354   auto &DL = CLI.DAG.getDataLayout();
8355   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
8356 
8357   if (CLI.IsPostTypeLegalization) {
8358     // If we are lowering a libcall after legalization, split the return type.
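         // For example (illustrative), an i128 libcall return on a 64-bit
         // target is split into two i64 pieces at byte offsets 0 and 8.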
8359     SmallVector<EVT, 4> OldRetTys = std::move(RetTys);
8360     SmallVector<uint64_t, 4> OldOffsets = std::move(Offsets);
8361     for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
8362       EVT RetVT = OldRetTys[i];
8363       uint64_t Offset = OldOffsets[i];
8364       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
8365       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
8366       unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
8367       RetTys.append(NumRegs, RegisterVT);
8368       for (unsigned j = 0; j != NumRegs; ++j)
8369         Offsets.push_back(Offset + j * RegisterVTByteSZ);
8370     }
8371   }
8372 
8373   SmallVector<ISD::OutputArg, 4> Outs;
8374   GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
8375 
8376   bool CanLowerReturn =
8377       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
8378                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
8379 
8380   SDValue DemoteStackSlot;
8381   int DemoteStackIdx = -100;
8382   if (!CanLowerReturn) {
8383     // FIXME: equivalent assert?
8384     // assert(!CS.hasInAllocaArgument() &&
8385     //        "sret demotion is incompatible with inalloca");
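         // Demote the return value: allocate a stack slot for it and pass its
         // address to the callee as a hidden sret argument (e.g.,
         // illustratively, a struct too large to be returned in registers).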
8386     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
8387     unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
8388     MachineFunction &MF = CLI.DAG.getMachineFunction();
8389     DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
8390     Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
8391                                               DL.getAllocaAddrSpace());
8392 
8393     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
8394     ArgListEntry Entry;
8395     Entry.Node = DemoteStackSlot;
8396     Entry.Ty = StackSlotPtrType;
8397     Entry.IsSExt = false;
8398     Entry.IsZExt = false;
8399     Entry.IsInReg = false;
8400     Entry.IsSRet = true;
8401     Entry.IsNest = false;
8402     Entry.IsByVal = false;
8403     Entry.IsReturned = false;
8404     Entry.IsSwiftSelf = false;
8405     Entry.IsSwiftError = false;
8406     Entry.Alignment = Align;
8407     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
8408     CLI.NumFixedArgs += 1;
8409     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
8410 
8411     // sret demotion isn't compatible with tail-calls, since the sret argument
8412     // points into the callers stack frame.
8413     CLI.IsTailCall = false;
8414   } else {
8415     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8416       EVT VT = RetTys[I];
8417       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8418                                                      CLI.CallConv, VT);
8419       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8420                                                        CLI.CallConv, VT);
8421       for (unsigned i = 0; i != NumRegs; ++i) {
8422         ISD::InputArg MyFlags;
8423         MyFlags.VT = RegisterVT;
8424         MyFlags.ArgVT = VT;
8425         MyFlags.Used = CLI.IsReturnValueUsed;
8426         if (CLI.RetSExt)
8427           MyFlags.Flags.setSExt();
8428         if (CLI.RetZExt)
8429           MyFlags.Flags.setZExt();
8430         if (CLI.IsInReg)
8431           MyFlags.Flags.setInReg();
8432         CLI.Ins.push_back(MyFlags);
8433       }
8434     }
8435   }
8436 
8437   // We push in swifterror return as the last element of CLI.Ins.
8438   ArgListTy &Args = CLI.getArgs();
8439   if (supportSwiftError()) {
8440     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8441       if (Args[i].IsSwiftError) {
8442         ISD::InputArg MyFlags;
8443         MyFlags.VT = getPointerTy(DL);
8444         MyFlags.ArgVT = EVT(getPointerTy(DL));
8445         MyFlags.Flags.setSwiftError();
8446         CLI.Ins.push_back(MyFlags);
8447       }
8448     }
8449   }
8450 
8451   // Handle all of the outgoing arguments.
8452   CLI.Outs.clear();
8453   CLI.OutVals.clear();
8454   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
8455     SmallVector<EVT, 4> ValueVTs;
8456     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
8457     // FIXME: Split arguments if CLI.IsPostTypeLegalization
8458     Type *FinalType = Args[i].Ty;
8459     if (Args[i].IsByVal)
8460       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
8461     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
8462         FinalType, CLI.CallConv, CLI.IsVarArg);
8463     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
8464          ++Value) {
8465       EVT VT = ValueVTs[Value];
8466       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
8467       SDValue Op = SDValue(Args[i].Node.getNode(),
8468                            Args[i].Node.getResNo() + Value);
8469       ISD::ArgFlagsTy Flags;
8470 
8471       // Certain targets (such as MIPS), may have a different ABI alignment
8472       // for a type depending on the context. Give the target a chance to
8473       // specify the alignment it wants.
8474       unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);
8475 
8476       if (Args[i].IsZExt)
8477         Flags.setZExt();
8478       if (Args[i].IsSExt)
8479         Flags.setSExt();
8480       if (Args[i].IsInReg) {
8481         // If we are using vectorcall calling convention, a structure that is
8482         // passed InReg - is surely an HVA
8483         if (CLI.CallConv == CallingConv::X86_VectorCall &&
8484             isa<StructType>(FinalType)) {
8485           // The first value of a structure is marked
8486           if (0 == Value)
8487             Flags.setHvaStart();
8488           Flags.setHva();
8489         }
8490         // Set InReg Flag
8491         Flags.setInReg();
8492       }
8493       if (Args[i].IsSRet)
8494         Flags.setSRet();
8495       if (Args[i].IsSwiftSelf)
8496         Flags.setSwiftSelf();
8497       if (Args[i].IsSwiftError)
8498         Flags.setSwiftError();
8499       if (Args[i].IsByVal)
8500         Flags.setByVal();
8501       if (Args[i].IsInAlloca) {
8502         Flags.setInAlloca();
8503         // Set the byval flag for CCAssignFn callbacks that don't know about
8504         // inalloca.  This way we can know how many bytes we should've allocated
8505         // and how many bytes a callee cleanup function will pop.  If we port
8506         // inalloca to more targets, we'll have to add custom inalloca handling
8507         // in the various CC lowering callbacks.
8508         Flags.setByVal();
8509       }
8510       if (Args[i].IsByVal || Args[i].IsInAlloca) {
8511         PointerType *Ty = cast<PointerType>(Args[i].Ty);
8512         Type *ElementTy = Ty->getElementType();
8513         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8514         // For ByVal, alignment should come from FE.  BE will guess if this
8515         // info is not there but there are cases it cannot get right.
8516         unsigned FrameAlign;
8517         if (Args[i].Alignment)
8518           FrameAlign = Args[i].Alignment;
8519         else
8520           FrameAlign = getByValTypeAlignment(ElementTy, DL);
8521         Flags.setByValAlign(FrameAlign);
8522       }
8523       if (Args[i].IsNest)
8524         Flags.setNest();
8525       if (NeedsRegBlock)
8526         Flags.setInConsecutiveRegs();
8527       Flags.setOrigAlign(OriginalAlignment);
8528 
8529       MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8530                                                  CLI.CallConv, VT);
8531       unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8532                                                         CLI.CallConv, VT);
8533       SmallVector<SDValue, 4> Parts(NumParts);
8534       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
8535 
8536       if (Args[i].IsSExt)
8537         ExtendKind = ISD::SIGN_EXTEND;
8538       else if (Args[i].IsZExt)
8539         ExtendKind = ISD::ZERO_EXTEND;
8540 
8541       // Conservatively only handle 'returned' on non-vectors that can be
8542       // lowered, for now.
8543       if (Args[i].IsReturned && !Op.getValueType().isVector() &&
8544           CanLowerReturn) {
8545         assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
8546                "unexpected use of 'returned'");
8547         // Before passing 'returned' to the target lowering code, ensure that
8548         // either the register MVT and the actual EVT are the same size or that
8549         // the return value and argument are extended in the same way; in these
8550         // cases it's safe to pass the argument register value unchanged as the
8551         // return register value (although it's at the target's option whether
8552         // to do so)
8553         // TODO: allow code generation to take advantage of partially preserved
8554         // registers rather than clobbering the entire register when the
8555         // parameter extension method is not compatible with the return
8556         // extension method
8557         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
8558             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
8559              CLI.RetZExt == Args[i].IsZExt))
8560           Flags.setReturned();
8561       }
8562 
8563       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
8564                      CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);
8565 
8566       for (unsigned j = 0; j != NumParts; ++j) {
8567         // If this isn't the first piece, its alignment must be 1.
8568         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
8569                                i < CLI.NumFixedArgs,
8570                                i, j*Parts[j].getValueType().getStoreSize());
8571         if (NumParts > 1 && j == 0)
8572           MyFlags.Flags.setSplit();
8573         else if (j != 0) {
8574           MyFlags.Flags.setOrigAlign(1);
8575           if (j == NumParts - 1)
8576             MyFlags.Flags.setSplitEnd();
8577         }
8578 
8579         CLI.Outs.push_back(MyFlags);
8580         CLI.OutVals.push_back(Parts[j]);
8581       }
8582 
8583       if (NeedsRegBlock && Value == NumValues - 1)
8584         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
8585     }
8586   }
8587 
8588   SmallVector<SDValue, 4> InVals;
8589   CLI.Chain = LowerCall(CLI, InVals);
8590 
8591   // Update CLI.InVals to use outside of this function.
8592   CLI.InVals = InVals;
8593 
8594   // Verify that the target's LowerCall behaved as expected.
8595   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
8596          "LowerCall didn't return a valid chain!");
8597   assert((!CLI.IsTailCall || InVals.empty()) &&
8598          "LowerCall emitted a return value for a tail call!");
8599   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
8600          "LowerCall didn't emit the correct number of values!");
8601 
8602   // For a tail call, the return value is merely live-out and there aren't
8603   // any nodes in the DAG representing it. Return a special value to
8604   // indicate that a tail call has been emitted and no more Instructions
8605   // should be processed in the current block.
8606   if (CLI.IsTailCall) {
8607     CLI.DAG.setRoot(CLI.Chain);
8608     return std::make_pair(SDValue(), SDValue());
8609   }
8610 
8611 #ifndef NDEBUG
8612   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
8613     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
8614     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
8615            "LowerCall emitted a value with the wrong type!");
8616   }
8617 #endif
8618 
8619   SmallVector<SDValue, 4> ReturnValues;
8620   if (!CanLowerReturn) {
8621     // The instruction result is the result of loading from the
8622     // hidden sret parameter.
8623     SmallVector<EVT, 1> PVTs;
8624     Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
8625 
8626     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
8627     assert(PVTs.size() == 1 && "Pointers should fit in one register");
8628     EVT PtrVT = PVTs[0];
8629 
8630     unsigned NumValues = RetTys.size();
8631     ReturnValues.resize(NumValues);
8632     SmallVector<SDValue, 4> Chains(NumValues);
8633 
8634     // An aggregate return value cannot wrap around the address space, so
8635     // offsets to its parts don't wrap either.
8636     SDNodeFlags Flags;
8637     Flags.setNoUnsignedWrap(true);
8638 
8639     for (unsigned i = 0; i < NumValues; ++i) {
8640       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
8641                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
8642                                                         PtrVT), Flags);
8643       SDValue L = CLI.DAG.getLoad(
8644           RetTys[i], CLI.DL, CLI.Chain, Add,
8645           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
8646                                             DemoteStackIdx, Offsets[i]),
8647           /* Alignment = */ 1);
8648       ReturnValues[i] = L;
8649       Chains[i] = L.getValue(1);
8650     }
8651 
8652     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
8653   } else {
8654     // Collect the legal value parts into potentially illegal values
8655     // that correspond to the original function's return values.
8656     Optional<ISD::NodeType> AssertOp;
8657     if (CLI.RetSExt)
8658       AssertOp = ISD::AssertSext;
8659     else if (CLI.RetZExt)
8660       AssertOp = ISD::AssertZext;
8661     unsigned CurReg = 0;
8662     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
8663       EVT VT = RetTys[I];
8664       MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
8665                                                      CLI.CallConv, VT);
8666       unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
8667                                                        CLI.CallConv, VT);
8668 
8669       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
8670                                               NumRegs, RegisterVT, VT, nullptr,
8671                                               CLI.CallConv, AssertOp));
8672       CurReg += NumRegs;
8673     }
8674 
8675     // For a function returning void, there is no return value. We can't create
8676     // such a node, so we just return a null return value in that case. In
8677     // that case, nothing will actually look at the value.
8678     if (ReturnValues.empty())
8679       return std::make_pair(SDValue(), CLI.Chain);
8680   }
8681 
8682   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
8683                                 CLI.DAG.getVTList(RetTys), ReturnValues);
8684   return std::make_pair(Res, CLI.Chain);
8685 }
8686 
8687 void TargetLowering::LowerOperationWrapper(SDNode *N,
8688                                            SmallVectorImpl<SDValue> &Results,
8689                                            SelectionDAG &DAG) const {
8690   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
8691     Results.push_back(Res);
8692 }
8693 
8694 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
8695   llvm_unreachable("LowerOperation not implemented for this target!");
8696 }
8697 
8698 void
8699 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
8700   SDValue Op = getNonRegisterValue(V);
8701   assert((Op.getOpcode() != ISD::CopyFromReg ||
8702           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
8703          "Copy from a reg to the same reg!");
8704   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
8705 
8706   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8707   // If this is an InlineAsm we have to match the registers required, not the
8708   // notional registers required by the type.
8709 
8710   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
8711                    None); // This is not an ABI copy.
8712   SDValue Chain = DAG.getEntryNode();
8713 
8714   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
8715                               FuncInfo.PreferredExtendType.end())
8716                                  ? ISD::ANY_EXTEND
8717                                  : FuncInfo.PreferredExtendType[V];
8718   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
8719   PendingExports.push_back(Chain);
8720 }
8721 
8722 #include "llvm/CodeGen/SelectionDAGISel.h"
8723 
8724 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
8725 /// entry block, return true.  This includes arguments used by switches, since
8726 /// the switch may expand into multiple basic blocks.
8727 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
8728   // With FastISel active, we may be splitting blocks, so force creation
8729   // of virtual registers for all non-dead arguments.
8730   if (FastISel)
8731     return A->use_empty();
8732 
8733   const BasicBlock &Entry = A->getParent()->front();
8734   for (const User *U : A->users())
8735     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
8736       return false;  // Use not in entry block.
8737 
8738   return true;
8739 }
8740 
8741 using ArgCopyElisionMapTy =
8742     DenseMap<const Argument *,
8743              std::pair<const AllocaInst *, const StoreInst *>>;
8744 
8745 /// Scan the entry block of the function in FuncInfo for arguments that look
8746 /// like copies into a local alloca. Record any copied arguments in
8747 /// ArgCopyElisionCandidates.
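     ///
     /// For example (illustrative), the pattern recognized here is:
     ///   %x.addr = alloca i32
     ///   store i32 %x, i32* %x.addr
     /// where the store is the sole initialization of the alloca from the
     /// argument %x.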
8748 static void
8749 findArgumentCopyElisionCandidates(const DataLayout &DL,
8750                                   FunctionLoweringInfo *FuncInfo,
8751                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
8752   // Record the state of every static alloca used in the entry block. Argument
8753   // allocas are all used in the entry block, so we need approximately as many
8754   // entries as we have arguments.
8755   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
8756   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
8757   unsigned NumArgs = FuncInfo->Fn->arg_size();
8758   StaticAllocas.reserve(NumArgs * 2);
8759 
8760   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
8761     if (!V)
8762       return nullptr;
8763     V = V->stripPointerCasts();
8764     const auto *AI = dyn_cast<AllocaInst>(V);
8765     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
8766       return nullptr;
8767     auto Iter = StaticAllocas.insert({AI, Unknown});
8768     return &Iter.first->second;
8769   };
8770 
8771   // Look for stores of arguments to static allocas. Look through bitcasts and
8772   // GEPs to handle type coercions, as long as the alloca is fully initialized
8773   // by the store. Any non-store use of an alloca escapes it and any subsequent
8774   // unanalyzed store might write it.
8775   // FIXME: Handle structs initialized with multiple stores.
8776   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
8777     // Look for stores, and handle non-store uses conservatively.
8778     const auto *SI = dyn_cast<StoreInst>(&I);
8779     if (!SI) {
8780       // We will look through cast uses, so ignore them completely.
8781       if (I.isCast())
8782         continue;
8783       // Ignore debug info intrinsics, they don't escape or store to allocas.
8784       if (isa<DbgInfoIntrinsic>(I))
8785         continue;
8786       // This is an unknown instruction. Assume it escapes or writes to all
8787       // static alloca operands.
8788       for (const Use &U : I.operands()) {
8789         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
8790           *Info = StaticAllocaInfo::Clobbered;
8791       }
8792       continue;
8793     }
8794 
8795     // If the stored value is a static alloca, mark it as escaped.
8796     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
8797       *Info = StaticAllocaInfo::Clobbered;
8798 
8799     // Check if the destination is a static alloca.
8800     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
8801     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
8802     if (!Info)
8803       continue;
8804     const AllocaInst *AI = cast<AllocaInst>(Dst);
8805 
8806     // Skip allocas that have been initialized or clobbered.
8807     if (*Info != StaticAllocaInfo::Unknown)
8808       continue;
8809 
8810     // Check if the stored value is an argument, and that this store fully
8811     // initializes the alloca. Don't elide copies from the same argument twice.
8812     const Value *Val = SI->getValueOperand()->stripPointerCasts();
8813     const auto *Arg = dyn_cast<Argument>(Val);
8814     if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
8815         Arg->getType()->isEmptyTy() ||
8816         DL.getTypeStoreSize(Arg->getType()) !=
8817             DL.getTypeAllocSize(AI->getAllocatedType()) ||
8818         ArgCopyElisionCandidates.count(Arg)) {
8819       *Info = StaticAllocaInfo::Clobbered;
8820       continue;
8821     }
8822 
8823     LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
8824                       << '\n');
8825 
8826     // Mark this alloca and store for argument copy elision.
8827     *Info = StaticAllocaInfo::Elidable;
8828     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
8829 
8830     // Stop scanning if we've seen all arguments. This will happen early in -O0
8831     // builds, which is useful, because -O0 builds have large entry blocks and
8832     // many allocas.
8833     if (ArgCopyElisionCandidates.size() == NumArgs)
8834       break;
8835   }
8836 }
8837 
8838 /// Try to elide argument copies from memory into a local alloca. Succeeds if
8839 /// ArgVal is a load from a suitable fixed stack object.
8840 static void tryToElideArgumentCopy(
8841     FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
8842     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
8843     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
8844     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
8845     SDValue ArgVal, bool &ArgHasUses) {
8846   // Check if this is a load from a fixed stack object.
8847   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
8848   if (!LNode)
8849     return;
8850   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
8851   if (!FINode)
8852     return;
8853 
8854   // Check that the fixed stack object is the right size and alignment.
8855   // Look at the alignment that the user wrote on the alloca instead of looking
8856   // at the stack object.
8857   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
8858   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
8859   const AllocaInst *AI = ArgCopyIter->second.first;
8860   int FixedIndex = FINode->getIndex();
8861   int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
8862   int OldIndex = AllocaIndex;
8863   MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
8864   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
8865     LLVM_DEBUG(
8866         dbgs() << "  argument copy elision failed due to bad fixed stack "
8867                   "object size\n");
8868     return;
8869   }
8870   unsigned RequiredAlignment = AI->getAlignment();
8871   if (!RequiredAlignment) {
8872     RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
8873         AI->getAllocatedType());
8874   }
8875   if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
8876     LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
8877                          "greater than stack argument alignment ("
8878                       << RequiredAlignment << " vs "
8879                       << MFI.getObjectAlignment(FixedIndex) << ")\n");
8880     return;
8881   }
8882 
8883   // Perform the elision. Delete the old stack object and replace its only use
8884   // in the variable info map. Mark the stack object as mutable.
8885   LLVM_DEBUG({
8886     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
8887            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
8888            << '\n';
8889   });
8890   MFI.RemoveStackObject(OldIndex);
8891   MFI.setIsImmutableObjectIndex(FixedIndex, false);
8892   AllocaIndex = FixedIndex;
8893   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
8894   Chains.push_back(ArgVal.getValue(1));
8895 
8896   // Avoid emitting code for the store implementing the copy.
8897   const StoreInst *SI = ArgCopyIter->second.second;
8898   ElidedArgCopyInstrs.insert(SI);
8899 
8900   // Check for uses of the argument again so that we can avoid exporting ArgVal
8901   // if it isn't used by anything other than the store.
8902   for (const Value *U : Arg.users()) {
8903     if (U != SI) {
8904       ArgHasUses = true;
8905       break;
8906     }
8907   }
8908 }
8909 
8910 void SelectionDAGISel::LowerArguments(const Function &F) {
8911   SelectionDAG &DAG = SDB->DAG;
8912   SDLoc dl = SDB->getCurSDLoc();
8913   const DataLayout &DL = DAG.getDataLayout();
8914   SmallVector<ISD::InputArg, 16> Ins;
8915 
8916   if (!FuncInfo->CanLowerReturn) {
8917     // Put in an sret pointer parameter before all the other parameters.
8918     SmallVector<EVT, 1> ValueVTs;
8919     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8920                     F.getReturnType()->getPointerTo(
8921                         DAG.getDataLayout().getAllocaAddrSpace()),
8922                     ValueVTs);
8923 
8924     // NOTE: Assuming that a pointer will never break down to more than one VT
8925     // or one register.
8926     ISD::ArgFlagsTy Flags;
8927     Flags.setSRet();
8928     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
8929     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
8930                          ISD::InputArg::NoArgIndex, 0);
8931     Ins.push_back(RetArg);
8932   }
8933 
8934   // Look for stores of arguments to static allocas. Mark such arguments with a
8935   // flag to ask the target to give us the memory location of that argument if
8936   // available.
8937   ArgCopyElisionMapTy ArgCopyElisionCandidates;
8938   findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
8939 
8940   // Set up the incoming argument description vector.
8941   for (const Argument &Arg : F.args()) {
8942     unsigned ArgNo = Arg.getArgNo();
8943     SmallVector<EVT, 4> ValueVTs;
8944     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8945     bool isArgValueUsed = !Arg.use_empty();
8946     unsigned PartBase = 0;
8947     Type *FinalType = Arg.getType();
8948     if (Arg.hasAttribute(Attribute::ByVal))
8949       FinalType = cast<PointerType>(FinalType)->getElementType();
8950     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
8951         FinalType, F.getCallingConv(), F.isVarArg());
8952     for (unsigned Value = 0, NumValues = ValueVTs.size();
8953          Value != NumValues; ++Value) {
8954       EVT VT = ValueVTs[Value];
8955       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8956       ISD::ArgFlagsTy Flags;
8957 
8958       // Certain targets (such as MIPS) may have a different ABI alignment
8959       // for a type depending on the context. Give the target a chance to
8960       // specify the alignment it wants.
8961       unsigned OriginalAlignment =
8962           TLI->getABIAlignmentForCallingConv(ArgTy, DL);
8963 
8964       if (Arg.hasAttribute(Attribute::ZExt))
8965         Flags.setZExt();
8966       if (Arg.hasAttribute(Attribute::SExt))
8967         Flags.setSExt();
8968       if (Arg.hasAttribute(Attribute::InReg)) {
8969         // If we are using the vectorcall calling convention, a structure that
8970         // is passed InReg is surely an HVA (homogeneous vector aggregate).
8971         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
8972             isa<StructType>(Arg.getType())) {
8973           // The first value of a structure is marked as the HVA start.
8974           if (0 == Value)
8975             Flags.setHvaStart();
8976           Flags.setHva();
8977         }
8978         // Set InReg Flag
8979         Flags.setInReg();
8980       }
8981       if (Arg.hasAttribute(Attribute::StructRet))
8982         Flags.setSRet();
8983       if (Arg.hasAttribute(Attribute::SwiftSelf))
8984         Flags.setSwiftSelf();
8985       if (Arg.hasAttribute(Attribute::SwiftError))
8986         Flags.setSwiftError();
8987       if (Arg.hasAttribute(Attribute::ByVal))
8988         Flags.setByVal();
8989       if (Arg.hasAttribute(Attribute::InAlloca)) {
8990         Flags.setInAlloca();
8991         // Set the byval flag for CCAssignFn callbacks that don't know about
8992         // inalloca.  This way we can know how many bytes we should've allocated
8993         // and how many bytes a callee cleanup function will pop.  If we port
8994         // inalloca to more targets, we'll have to add custom inalloca handling
8995         // in the various CC lowering callbacks.
8996         Flags.setByVal();
8997       }
8998       if (F.getCallingConv() == CallingConv::X86_INTR) {
8999         // IA interrupts pass the frame (1st parameter) by value on the stack.
9000         if (ArgNo == 0)
9001           Flags.setByVal();
9002       }
9003       if (Flags.isByVal() || Flags.isInAlloca()) {
9004         PointerType *Ty = cast<PointerType>(Arg.getType());
9005         Type *ElementTy = Ty->getElementType();
9006         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
9007         // For ByVal, the alignment should be passed from the frontend. The
9008         // backend will guess when it is missing, but can get some cases wrong.
9009         unsigned FrameAlign;
9010         if (Arg.getParamAlignment())
9011           FrameAlign = Arg.getParamAlignment();
9012         else
9013           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9014         Flags.setByValAlign(FrameAlign);
9015       }
9016       if (Arg.hasAttribute(Attribute::Nest))
9017         Flags.setNest();
9018       if (NeedsRegBlock)
9019         Flags.setInConsecutiveRegs();
9020       Flags.setOrigAlign(OriginalAlignment);
9021       if (ArgCopyElisionCandidates.count(&Arg))
9022         Flags.setCopyElisionCandidate();
9023 
9024       MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9025           *CurDAG->getContext(), F.getCallingConv(), VT);
9026       unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9027           *CurDAG->getContext(), F.getCallingConv(), VT);
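      // Each value is lowered as NumRegs parts of RegisterVT. E.g., an i64
      // argument on a 32-bit target becomes two i32 parts: the first is
      // tagged Split and keeps the original alignment, later parts get
      // alignment 1, and the last part is tagged SplitEnd.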
9028       for (unsigned i = 0; i != NumRegs; ++i) {
9029         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9030                               ArgNo, PartBase+i*RegisterVT.getStoreSize());
9031         if (NumRegs > 1 && i == 0)
9032           MyFlags.Flags.setSplit();
9033         // If it isn't the first piece, the alignment must be 1.
9034         else if (i > 0) {
9035           MyFlags.Flags.setOrigAlign(1);
9036           if (i == NumRegs - 1)
9037             MyFlags.Flags.setSplitEnd();
9038         }
9039         Ins.push_back(MyFlags);
9040       }
9041       if (NeedsRegBlock && Value == NumValues - 1)
9042         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9043       PartBase += VT.getStoreSize();
9044     }
9045   }
9046 
9047   // Call the target to set up the argument values.
9048   SmallVector<SDValue, 8> InVals;
9049   SDValue NewRoot = TLI->LowerFormalArguments(
9050       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9051 
9052   // Verify that the target's LowerFormalArguments behaved as expected.
9053   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9054          "LowerFormalArguments didn't return a valid chain!");
9055   assert(InVals.size() == Ins.size() &&
9056          "LowerFormalArguments didn't emit the correct number of values!");
9057   LLVM_DEBUG({
9058     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9059       assert(InVals[i].getNode() &&
9060              "LowerFormalArguments emitted a null value!");
9061       assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9062              "LowerFormalArguments emitted a value with the wrong type!");
9063     }
9064   });
9065 
9066   // Update the DAG with the new chain value resulting from argument lowering.
9067   DAG.setRoot(NewRoot);
9068 
9069   // Set up the argument values.
9070   unsigned i = 0;
9071   if (!FuncInfo->CanLowerReturn) {
9072     // Create a virtual register for the sret pointer, and put in a copy
9073     // from the sret argument into it.
9074     SmallVector<EVT, 1> ValueVTs;
9075     ComputeValueVTs(*TLI, DAG.getDataLayout(),
9076                     F.getReturnType()->getPointerTo(
9077                         DAG.getDataLayout().getAllocaAddrSpace()),
9078                     ValueVTs);
9079     MVT VT = ValueVTs[0].getSimpleVT();
9080     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9081     Optional<ISD::NodeType> AssertOp = None;
9082     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9083                                         nullptr, F.getCallingConv(), AssertOp);
9084 
9085     MachineFunction& MF = SDB->DAG.getMachineFunction();
9086     MachineRegisterInfo& RegInfo = MF.getRegInfo();
9087     unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9088     FuncInfo->DemoteRegister = SRetReg;
9089     NewRoot =
9090         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9091     DAG.setRoot(NewRoot);
9092 
9093     // i indexes lowered arguments.  Bump it past the hidden sret argument.
9094     ++i;
9095   }
9096 
9097   SmallVector<SDValue, 4> Chains;
9098   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9099   for (const Argument &Arg : F.args()) {
9100     SmallVector<SDValue, 4> ArgValues;
9101     SmallVector<EVT, 4> ValueVTs;
9102     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9103     unsigned NumValues = ValueVTs.size();
9104     if (NumValues == 0)
9105       continue;
9106 
9107     bool ArgHasUses = !Arg.use_empty();
9108 
9109     // Elide the copying store if the target loaded this argument from a
9110     // suitable fixed stack object.
9111     if (Ins[i].Flags.isCopyElisionCandidate()) {
9112       tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9113                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
9114                              InVals[i], ArgHasUses);
9115     }
9116 
9117     // If this argument is unused, remember its value; it is used to generate
9118     // debugging information.
9119     bool isSwiftErrorArg =
9120         TLI->supportSwiftError() &&
9121         Arg.hasAttribute(Attribute::SwiftError);
9122     if (!ArgHasUses && !isSwiftErrorArg) {
9123       SDB->setUnusedArgValue(&Arg, InVals[i]);
9124 
9125       // Also remember any frame index for use in FastISel.
9126       if (FrameIndexSDNode *FI =
9127           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9128         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9129     }
9130 
9131     for (unsigned Val = 0; Val != NumValues; ++Val) {
9132       EVT VT = ValueVTs[Val];
9133       MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9134                                                       F.getCallingConv(), VT);
9135       unsigned NumParts = TLI->getNumRegistersForCallingConv(
9136           *CurDAG->getContext(), F.getCallingConv(), VT);
9137 
9138       // Even an apparent 'unused' swifterror argument needs to be returned. So
9139       // we do generate a copy for it that can be used on return from the
9140       // function.
9141       if (ArgHasUses || isSwiftErrorArg) {
9142         Optional<ISD::NodeType> AssertOp;
9143         if (Arg.hasAttribute(Attribute::SExt))
9144           AssertOp = ISD::AssertSext;
9145         else if (Arg.hasAttribute(Attribute::ZExt))
9146           AssertOp = ISD::AssertZext;
9147 
9148         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9149                                              PartVT, VT, nullptr,
9150                                              F.getCallingConv(), AssertOp));
9151       }
9152 
9153       i += NumParts;
9154     }
9155 
9156     // We don't need to do anything else for unused arguments.
9157     if (ArgValues.empty())
9158       continue;
9159 
9160     // Note down frame index.
9161     if (FrameIndexSDNode *FI =
9162         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9163       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9164 
9165     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9166                                      SDB->getCurSDLoc());
9167 
9168     SDB->setValue(&Arg, Res);
9169     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9170       // We want to associate the argument with the frame index, among
9171       // involved operands, that correspond to the lowest address. The
9172       // getCopyFromParts function, called earlier, is swapping the order of
9173       // the operands to BUILD_PAIR depending on endianness. The result of
9174       // that swapping is that the least significant bits of the argument will
9175       // be in the first operand of the BUILD_PAIR node, and the most
9176       // significant bits will be in the second operand.
9177       unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9178       if (LoadSDNode *LNode =
9179           dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9180         if (FrameIndexSDNode *FI =
9181             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9182           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9183     }
9184 
9185     // Update the SwiftErrorVRegDefMap.
9186     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9187       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9188       if (TargetRegisterInfo::isVirtualRegister(Reg))
9189         FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
9190                                            FuncInfo->SwiftErrorArg, Reg);
9191     }
9192 
9193     // If this argument is live outside of the entry block, insert a copy from
9194     // wherever we got it to the vreg that other BB's will reference it as.
9195     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
9196       // If we can, though, try to skip creating an unnecessary vreg.
9197       // FIXME: This isn't very clean... it would be nice to make this more
9198       // general.  It's also subtly incompatible with the hacks FastISel
9199       // uses with vregs.
9200       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9201       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
9202         FuncInfo->ValueMap[&Arg] = Reg;
9203         continue;
9204       }
9205     }
9206     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9207       FuncInfo->InitializeRegForValue(&Arg);
9208       SDB->CopyToExportRegsIfNeeded(&Arg);
9209     }
9210   }
9211 
9212   if (!Chains.empty()) {
9213     Chains.push_back(NewRoot);
9214     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
9215   }
9216 
9217   DAG.setRoot(NewRoot);
9218 
9219   assert(i == InVals.size() && "Argument register count mismatch!");
9220 
9221   // If any argument copy elisions occurred and we have debug info, update the
9222   // stale frame indices used in the dbg.declare variable info table.
9223   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9224   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9225     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9226       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9227       if (I != ArgCopyElisionFrameIndexMap.end())
9228         VI.Slot = I->second;
9229     }
9230   }
9231 
9232   // Finally, if the target has anything special to do, allow it to do so.
9233   EmitFunctionEntryCode();
9234 }
9235 
9236 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
9237 /// ensure constants are generated when needed.  Remember the virtual registers
9238 /// that need to be added to the Machine PHI nodes as input.  We cannot just
9239 /// directly add them, because expansion might result in multiple MBB's for one
9240 /// BB.  As such, the start of the BB might correspond to a different MBB than
9241 /// the end.
9242 void
9243 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
9244   const TerminatorInst *TI = LLVMBB->getTerminator();
9245 
9246   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
9247 
9248   // Check PHI nodes in successors that expect a value to be available from this
9249   // block.
9250   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
9251     const BasicBlock *SuccBB = TI->getSuccessor(succ);
9252     if (!isa<PHINode>(SuccBB->begin())) continue;
9253     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
9254 
9255     // If this terminator has multiple identical successors (common for
9256     // switches), only handle each succ once.
9257     if (!SuccsHandled.insert(SuccMBB).second)
9258       continue;
9259 
9260     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
9261 
9262     // At this point we know that there is a 1-1 correspondence between LLVM PHI
9263     // nodes and Machine PHI nodes, but the incoming operands have not been
9264     // emitted yet.
9265     for (const PHINode &PN : SuccBB->phis()) {
9266       // Ignore dead PHI nodes.
9267       if (PN.use_empty())
9268         continue;
9269 
9270       // Skip empty types
9271       if (PN.getType()->isEmptyTy())
9272         continue;
9273 
9274       unsigned Reg;
9275       const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
9276 
9277       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
9278         unsigned &RegOut = ConstantsOut[C];
9279         if (RegOut == 0) {
9280           RegOut = FuncInfo.CreateRegs(C->getType());
9281           CopyValueToVirtualRegister(C, RegOut);
9282         }
9283         Reg = RegOut;
9284       } else {
9285         DenseMap<const Value *, unsigned>::iterator I =
9286           FuncInfo.ValueMap.find(PHIOp);
9287         if (I != FuncInfo.ValueMap.end())
9288           Reg = I->second;
9289         else {
9290           assert(isa<AllocaInst>(PHIOp) &&
9291                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
9292                  "Didn't codegen value into a register!??");
9293           Reg = FuncInfo.CreateRegs(PHIOp->getType());
9294           CopyValueToVirtualRegister(PHIOp, Reg);
9295         }
9296       }
9297 
9298       // Remember that this register needs to be added to the machine PHI node
9299       // as the input for this MBB.
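      // A value that needs several registers (e.g. an i64 on a 32-bit
      // target) has one machine PHI node per register; each node gets the
      // virtual register holding the corresponding part.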
9300       SmallVector<EVT, 4> ValueVTs;
9301       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9302       ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
9303       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
9304         EVT VT = ValueVTs[vti];
9305         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
9306         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
9307           FuncInfo.PHINodesToUpdate.push_back(
9308               std::make_pair(&*MBBI++, Reg + i));
9309         Reg += NumRegisters;
9310       }
9311     }
9312   }
9313 
9314   ConstantsOut.clear();
9315 }
9316 
9317 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
9318 /// if SuccMBB is null.
9319 MachineBasicBlock *
9320 SelectionDAGBuilder::StackProtectorDescriptor::
9321 AddSuccessorMBB(const BasicBlock *BB,
9322                 MachineBasicBlock *ParentMBB,
9323                 bool IsLikely,
9324                 MachineBasicBlock *SuccMBB) {
9325   // If SuccBB has not been created yet, create it.
9326   if (!SuccMBB) {
9327     MachineFunction *MF = ParentMBB->getParent();
9328     MachineFunction::iterator BBI(ParentMBB);
9329     SuccMBB = MF->CreateMachineBasicBlock(BB);
9330     MF->insert(++BBI, SuccMBB);
9331   }
9332   // Add it as a successor of ParentMBB.
9333   ParentMBB->addSuccessor(
9334       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
9335   return SuccMBB;
9336 }
9337 
9338 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
9339   MachineFunction::iterator I(MBB);
9340   if (++I == FuncInfo.MF->end())
9341     return nullptr;
9342   return &*I;
9343 }
9344 
9345 /// During lowering new call nodes can be created (such as memset, etc.).
9346 /// Those will become new roots of the current DAG, but complications arise
9347 /// when they are tail calls. In such cases, the call lowering will update
9348 /// the root, but the builder still needs to know that a tail call has been
9349 /// lowered in order to avoid generating an additional return.
9350 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
9351   // If the node is null, we do have a tail call.
9352   if (MaybeTC.getNode() != nullptr)
9353     DAG.setRoot(MaybeTC);
9354   else
9355     HasTailCall = true;
9356 }
9357 
9358 uint64_t
9359 SelectionDAGBuilder::getJumpTableRange(const CaseClusterVector &Clusters,
9360                                        unsigned First, unsigned Last) const {
9361   assert(Last >= First);
9362   const APInt &LowCase = Clusters[First].Low->getValue();
9363   const APInt &HighCase = Clusters[Last].High->getValue();
9364   assert(LowCase.getBitWidth() == HighCase.getBitWidth());
9365 
9366   // FIXME: A range of consecutive cases has 100% density, but only requires one
9367   // comparison to lower. We should discriminate against such consecutive ranges
9368   // in jump tables.
9369 
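  // The result counts every value between the lowest and highest case,
  // inclusive; e.g., cases {1, 2, 9} give a range of 9. getLimitedValue
  // clamps the range so that density comparisons scaling the range by a
  // percentage do not overflow uint64_t.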
9370   return (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100) + 1;
9371 }
9372 
9373 uint64_t SelectionDAGBuilder::getJumpTableNumCases(
9374     const SmallVectorImpl<unsigned> &TotalCases, unsigned First,
9375     unsigned Last) const {
9376   assert(Last >= First);
9377   assert(TotalCases[Last] >= TotalCases[First]);
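  // TotalCases is a prefix-sum array, so this is a difference of two entries;
  // e.g., with per-cluster case counts {2, 1, 3}, TotalCases == {2, 3, 6} and
  // the number of cases in clusters 1..2 is 6 - 2 == 4.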
9378   uint64_t NumCases =
9379       TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
9380   return NumCases;
9381 }
9382 
9383 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters,
9384                                          unsigned First, unsigned Last,
9385                                          const SwitchInst *SI,
9386                                          MachineBasicBlock *DefaultMBB,
9387                                          CaseCluster &JTCluster) {
9388   assert(First <= Last);
9389 
9390   auto Prob = BranchProbability::getZero();
9391   unsigned NumCmps = 0;
9392   std::vector<MachineBasicBlock*> Table;
9393   DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;
9394 
9395   // Initialize probabilities in JTProbs.
9396   for (unsigned I = First; I <= Last; ++I)
9397     JTProbs[Clusters[I].MBB] = BranchProbability::getZero();
9398 
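  // Build the table: one entry for every value between Low(First) and
  // High(Last), inclusive, with gaps mapped to DefaultMBB. E.g., clusters
  // [0,1] -> B0 and [4,4] -> B4 produce {B0, B0, Default, Default, B4}.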
9399   for (unsigned I = First; I <= Last; ++I) {
9400     assert(Clusters[I].Kind == CC_Range);
9401     Prob += Clusters[I].Prob;
9402     const APInt &Low = Clusters[I].Low->getValue();
9403     const APInt &High = Clusters[I].High->getValue();
9404     NumCmps += (Low == High) ? 1 : 2;
9405     if (I != First) {
9406       // Fill the gap between this and the previous cluster.
9407       const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
9408       assert(PreviousHigh.slt(Low));
9409       uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
9410       for (uint64_t J = 0; J < Gap; J++)
9411         Table.push_back(DefaultMBB);
9412     }
9413     uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
9414     for (uint64_t J = 0; J < ClusterSize; ++J)
9415       Table.push_back(Clusters[I].MBB);
9416     JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
9417   }
9418 
9419   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9420   unsigned NumDests = JTProbs.size();
9421   if (TLI.isSuitableForBitTests(
9422           NumDests, NumCmps, Clusters[First].Low->getValue(),
9423           Clusters[Last].High->getValue(), DAG.getDataLayout())) {
9424     // Clusters[First..Last] should be lowered as bit tests instead.
9425     return false;
9426   }
9427 
9428   // Create the MBB that will load from and jump through the table.
9429   // Note: We create it here, but it's not inserted into the function yet.
9430   MachineFunction *CurMF = FuncInfo.MF;
9431   MachineBasicBlock *JumpTableMBB =
9432       CurMF->CreateMachineBasicBlock(SI->getParent());
9433 
9434   // Add successors. Note: use table order for determinism.
9435   SmallPtrSet<MachineBasicBlock *, 8> Done;
9436   for (MachineBasicBlock *Succ : Table) {
9437     if (Done.count(Succ))
9438       continue;
9439     addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
9440     Done.insert(Succ);
9441   }
9442   JumpTableMBB->normalizeSuccProbs();
9443 
9444   unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
9445                      ->createJumpTableIndex(Table);
9446 
9447   // Set up the jump table info.
9448   JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
9449   JumpTableHeader JTH(Clusters[First].Low->getValue(),
9450                       Clusters[Last].High->getValue(), SI->getCondition(),
9451                       nullptr, false);
9452   JTCases.emplace_back(std::move(JTH), std::move(JT));
9453 
9454   JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
9455                                      JTCases.size() - 1, Prob);
9456   return true;
9457 }
9458 
9459 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
9460                                          const SwitchInst *SI,
9461                                          MachineBasicBlock *DefaultMBB) {
9462 #ifndef NDEBUG
9463   // Clusters must be non-empty, sorted, and only contain Range clusters.
9464   assert(!Clusters.empty());
9465   for (CaseCluster &C : Clusters)
9466     assert(C.Kind == CC_Range);
9467   for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
9468     assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
9469 #endif
9470 
9471   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9472   if (!TLI.areJTsAllowed(SI->getParent()->getParent()))
9473     return;
9474 
9475   const int64_t N = Clusters.size();
9476   const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
9477   const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
9478 
9479   if (N < 2 || N < MinJumpTableEntries)
9480     return;
9481 
9482   // TotalCases[i]: Total number of cases in Clusters[0..i].
9483   SmallVector<unsigned, 8> TotalCases(N);
9484   for (unsigned i = 0; i < N; ++i) {
9485     const APInt &Hi = Clusters[i].High->getValue();
9486     const APInt &Lo = Clusters[i].Low->getValue();
9487     TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
9488     if (i != 0)
9489       TotalCases[i] += TotalCases[i - 1];
9490   }
9491 
9492   // Cheap case: the whole range may be suitable for a single jump table.
9493   uint64_t Range = getJumpTableRange(Clusters, 0, N - 1);
9494   uint64_t NumCases = getJumpTableNumCases(TotalCases, 0, N - 1);
9495   assert(NumCases < UINT64_MAX / 100);
9496   assert(Range >= NumCases);
9497   if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9498     CaseCluster JTCluster;
9499     if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
9500       Clusters[0] = JTCluster;
9501       Clusters.resize(1);
9502       return;
9503     }
9504   }
9505 
9506   // The algorithm below is not suitable for -O0.
9507   if (TM.getOptLevel() == CodeGenOpt::None)
9508     return;
9509 
9510   // Split Clusters into minimum number of dense partitions. The algorithm uses
9511   // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
9512   // for the Case Statement'" (1994), but builds the MinPartitions array in
9513   // reverse order to make it easier to reconstruct the partitions in ascending
9514   // order. In the choice between two optimal partitionings, it picks the one
9515   // which yields more jump tables.
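  //
  // For illustration, with MinJumpTableEntries == 4 and case values
  // {0, 1, 2, 3, 100}, the full range 0..100 is far too sparse for a single
  // table, so the partitioning below keeps {0..3} together as a jump table
  // candidate and leaves {100} as a lone range: two partitions in total.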
9516 
9517   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
9518   SmallVector<unsigned, 8> MinPartitions(N);
9519   // LastElement[i] is the last element of the partition starting at i.
9520   SmallVector<unsigned, 8> LastElement(N);
9521   // PartitionsScore[i] is used to break ties when choosing between two
9522   // partitionings resulting in the same number of partitions.
9523   SmallVector<unsigned, 8> PartitionsScore(N);
9524   // For PartitionsScore, a small number of comparisons is considered as good as
9525   // a jump table and a single comparison is considered better than a jump
9526   // table.
9527   enum PartitionScores : unsigned {
9528     NoTable = 0,
9529     Table = 1,
9530     FewCases = 1,
9531     SingleCase = 2
9532   };
9533 
9534   // Base case: There is only one way to partition Clusters[N-1].
9535   MinPartitions[N - 1] = 1;
9536   LastElement[N - 1] = N - 1;
9537   PartitionsScore[N - 1] = PartitionScores::SingleCase;
9538 
9539   // Note: loop indexes are signed to avoid underflow.
9540   for (int64_t i = N - 2; i >= 0; i--) {
9541     // Find optimal partitioning of Clusters[i..N-1].
9542     // Baseline: Put Clusters[i] into a partition on its own.
9543     MinPartitions[i] = MinPartitions[i + 1] + 1;
9544     LastElement[i] = i;
9545     PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
9546 
9547     // Search for a solution that results in fewer partitions.
9548     for (int64_t j = N - 1; j > i; j--) {
9549       // Try building a partition from Clusters[i..j].
9550       uint64_t Range = getJumpTableRange(Clusters, i, j);
9551       uint64_t NumCases = getJumpTableNumCases(TotalCases, i, j);
9552       assert(NumCases < UINT64_MAX / 100);
9553       assert(Range >= NumCases);
9554       if (TLI.isSuitableForJumpTable(SI, NumCases, Range)) {
9555         unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9556         unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
9557         int64_t NumEntries = j - i + 1;
9558 
9559         if (NumEntries == 1)
9560           Score += PartitionScores::SingleCase;
9561         else if (NumEntries <= SmallNumberOfEntries)
9562           Score += PartitionScores::FewCases;
9563         else if (NumEntries >= MinJumpTableEntries)
9564           Score += PartitionScores::Table;
9565 
9566         // If this leads to fewer partitions, or to the same number of
9567         // partitions with better score, it is a better partitioning.
9568         if (NumPartitions < MinPartitions[i] ||
9569             (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
9570           MinPartitions[i] = NumPartitions;
9571           LastElement[i] = j;
9572           PartitionsScore[i] = Score;
9573         }
9574       }
9575     }
9576   }
9577 
9578   // Iterate over the partitions, replacing some with jump tables in-place.
9579   unsigned DstIndex = 0;
9580   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9581     Last = LastElement[First];
9582     assert(Last >= First);
9583     assert(DstIndex <= First);
9584     unsigned NumClusters = Last - First + 1;
9585 
9586     CaseCluster JTCluster;
9587     if (NumClusters >= MinJumpTableEntries &&
9588         buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
9589       Clusters[DstIndex++] = JTCluster;
9590     } else {
9591       for (unsigned I = First; I <= Last; ++I)
9592         std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
9593     }
9594   }
9595   Clusters.resize(DstIndex);
9596 }
9597 
9598 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
9599                                         unsigned First, unsigned Last,
9600                                         const SwitchInst *SI,
9601                                         CaseCluster &BTCluster) {
9602   assert(First <= Last);
9603   if (First == Last)
9604     return false;
9605 
9606   BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9607   unsigned NumCmps = 0;
9608   for (int64_t I = First; I <= Last; ++I) {
9609     assert(Clusters[I].Kind == CC_Range);
9610     Dests.set(Clusters[I].MBB->getNumber());
9611     NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
9612   }
9613   unsigned NumDests = Dests.count();
9614 
9615   APInt Low = Clusters[First].Low->getValue();
9616   APInt High = Clusters[Last].High->getValue();
9617   assert(Low.slt(High));
9618 
9619   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9620   const DataLayout &DL = DAG.getDataLayout();
9621   if (!TLI.isSuitableForBitTests(NumDests, NumCmps, Low, High, DL))
9622     return false;
9623 
9624   APInt LowBound;
9625   APInt CmpRange;
9626 
9627   const int BitWidth = TLI.getPointerTy(DL).getSizeInBits();
9628   assert(TLI.rangeFitsInWord(Low, High, DL) &&
9629          "Case range must fit in bit mask!");
9630 
9631   // Check if the clusters cover a contiguous range such that no value in the
9632   // range will jump to the default statement.
9633   bool ContiguousRange = true;
9634   for (int64_t I = First + 1; I <= Last; ++I) {
9635     if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
9636       ContiguousRange = false;
9637       break;
9638     }
9639   }
9640 
9641   if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
9642     // Optimize the case where all the case values fit in a word without having
9643     // to subtract minValue. In this case, we can optimize away the subtraction.
9644     LowBound = APInt::getNullValue(Low.getBitWidth());
9645     CmpRange = High;
9646     ContiguousRange = false;
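    // Note: widening the checked range to [0, High] pulls in values below Low
    // that belong to the default destination, so the range can no longer be
    // treated as contiguous.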
9647   } else {
9648     LowBound = Low;
9649     CmpRange = High - Low;
9650   }
9651 
9652   CaseBitsVector CBV;
9653   auto TotalProb = BranchProbability::getZero();
9654   for (unsigned i = First; i <= Last; ++i) {
9655     // Find the CaseBits for this destination.
9656     unsigned j;
9657     for (j = 0; j < CBV.size(); ++j)
9658       if (CBV[j].BB == Clusters[i].MBB)
9659         break;
9660     if (j == CBV.size())
9661       CBV.push_back(
9662           CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
9663     CaseBits *CB = &CBV[j];
9664 
9665     // Update Mask, Bits and ExtraProb.
9666     uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
9667     uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
9668     assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
9669     CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
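    // E.g., with LowBound == 0, a cluster covering [2, 4] yields Lo == 2 and
    // Hi == 4, ORing 0b11100 (bits 2 through 4) into the mask.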
9670     CB->Bits += Hi - Lo + 1;
9671     CB->ExtraProb += Clusters[i].Prob;
9672     TotalProb += Clusters[i].Prob;
9673   }
9674 
9675   BitTestInfo BTI;
9676   llvm::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
9677     // Sort by probability first, number of bits second, bit mask third.
9678     if (a.ExtraProb != b.ExtraProb)
9679       return a.ExtraProb > b.ExtraProb;
9680     if (a.Bits != b.Bits)
9681       return a.Bits > b.Bits;
9682     return a.Mask < b.Mask;
9683   });
9684 
9685   for (auto &CB : CBV) {
9686     MachineBasicBlock *BitTestBB =
9687         FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
9688     BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
9689   }
9690   BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
9691                             SI->getCondition(), -1U, MVT::Other, false,
9692                             ContiguousRange, nullptr, nullptr, std::move(BTI),
9693                             TotalProb);
9694 
9695   BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
9696                                     BitTestCases.size() - 1, TotalProb);
9697   return true;
9698 }
9699 
9700 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
9701                                               const SwitchInst *SI) {
9702 // Partition Clusters into as few subsets as possible, where each subset has a
9703 // range that fits in a machine word and has <= 3 unique destinations.
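// E.g., on a 64-bit target, case values {0, 3, 12, 61} targeting two
// destinations span 62 values, fit in one word, and can form a single
// bit-test cluster.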
9704 
9705 #ifndef NDEBUG
9706   // Clusters must be sorted and contain Range or JumpTable clusters.
9707   assert(!Clusters.empty());
9708   assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
9709   for (const CaseCluster &C : Clusters)
9710     assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
9711   for (unsigned i = 1; i < Clusters.size(); ++i)
9712     assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
9713 #endif
9714 
9715   // The algorithm below is not suitable for -O0.
9716   if (TM.getOptLevel() == CodeGenOpt::None)
9717     return;
9718 
9719   // If target does not have legal shift left, do not emit bit tests at all.
9720   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9721   const DataLayout &DL = DAG.getDataLayout();
9722 
9723   EVT PTy = TLI.getPointerTy(DL);
9724   if (!TLI.isOperationLegal(ISD::SHL, PTy))
9725     return;
9726 
9727   int BitWidth = PTy.getSizeInBits();
9728   const int64_t N = Clusters.size();
9729 
9730   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
9731   SmallVector<unsigned, 8> MinPartitions(N);
9732   // LastElement[i] is the last element of the partition starting at i.
9733   SmallVector<unsigned, 8> LastElement(N);
9734 
9735   // FIXME: This might not be the best algorithm for finding bit test clusters.
9736 
9737   // Base case: There is only one way to partition Clusters[N-1].
9738   MinPartitions[N - 1] = 1;
9739   LastElement[N - 1] = N - 1;
9740 
9741   // Note: loop indexes are signed to avoid underflow.
9742   for (int64_t i = N - 2; i >= 0; --i) {
9743     // Find optimal partitioning of Clusters[i..N-1].
9744     // Baseline: Put Clusters[i] into a partition on its own.
9745     MinPartitions[i] = MinPartitions[i + 1] + 1;
9746     LastElement[i] = i;
9747 
9748     // Search for a solution that results in fewer partitions.
9749     // Note: the search is limited by BitWidth, reducing time complexity.
9750     for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
9751       // Try building a partition from Clusters[i..j].
9752 
9753       // Check the range.
9754       if (!TLI.rangeFitsInWord(Clusters[i].Low->getValue(),
9755                                Clusters[j].High->getValue(), DL))
9756         continue;
9757 
9758       // Check the number of destinations and cluster types.
9759       // FIXME: This works, but doesn't seem very efficient.
9760       bool RangesOnly = true;
9761       BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9762       for (int64_t k = i; k <= j; k++) {
9763         if (Clusters[k].Kind != CC_Range) {
9764           RangesOnly = false;
9765           break;
9766         }
9767         Dests.set(Clusters[k].MBB->getNumber());
9768       }
9769       if (!RangesOnly || Dests.count() > 3)
9770         break;
9771 
9772       // Check if it's a better partition.
9773       unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9774       if (NumPartitions < MinPartitions[i]) {
9775         // Found a better partition.
9776         MinPartitions[i] = NumPartitions;
9777         LastElement[i] = j;
9778       }
9779     }
9780   }
9781 
9782   // Iterate over the partitions, replacing with bit-test clusters in-place.
9783   unsigned DstIndex = 0;
9784   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9785     Last = LastElement[First];
9786     assert(First <= Last);
9787     assert(DstIndex <= First);
9788 
9789     CaseCluster BitTestCluster;
9790     if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9791       Clusters[DstIndex++] = BitTestCluster;
9792     } else {
9793       size_t NumClusters = Last - First + 1;
9794       std::memmove(&Clusters[DstIndex], &Clusters[First],
9795                    sizeof(Clusters[0]) * NumClusters);
9796       DstIndex += NumClusters;
9797     }
9798   }
9799   Clusters.resize(DstIndex);
9800 }
9801 
9802 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
9803                                         MachineBasicBlock *SwitchMBB,
9804                                         MachineBasicBlock *DefaultMBB) {
9805   MachineFunction *CurMF = FuncInfo.MF;
9806   MachineBasicBlock *NextMBB = nullptr;
9807   MachineFunction::iterator BBI(W.MBB);
9808   if (++BBI != FuncInfo.MF->end())
9809     NextMBB = &*BBI;
9810 
9811   unsigned Size = W.LastCluster - W.FirstCluster + 1;
9812 
9813   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9814 
9815   if (Size == 2 && W.MBB == SwitchMBB) {
9816     // If any two of the cases have the same destination, and if one value
9817     // is the same as the other, but has one bit unset that the other has set,
9818     // use bit manipulation to do two compares at once.  For example:
9819     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
9820     // TODO: This could be extended to merge any 2 cases in switches with 3
9821     // cases.
9822     // TODO: Handle cases where W.CaseBB != SwitchBB.
9823     CaseCluster &Small = *W.FirstCluster;
9824     CaseCluster &Big = *W.LastCluster;
9825 
9826     if (Small.Low == Small.High && Big.Low == Big.High &&
9827         Small.MBB == Big.MBB) {
9828       const APInt &SmallValue = Small.Low->getValue();
9829       const APInt &BigValue = Big.Low->getValue();
9830 
9831       // Check that there is only one bit different.
9832       APInt CommonBit = BigValue ^ SmallValue;
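      // E.g., 4 (0b100) and 6 (0b110) differ only in bit 1, so CommonBit ==
      // 0b010; ORing it into X maps both values to BigValue | SmallValue == 6.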
9833       if (CommonBit.isPowerOf2()) {
9834         SDValue CondLHS = getValue(Cond);
9835         EVT VT = CondLHS.getValueType();
9836         SDLoc DL = getCurSDLoc();
9837 
9838         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
9839                                  DAG.getConstant(CommonBit, DL, VT));
9840         SDValue Cond = DAG.getSetCC(
9841             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9842             ISD::SETEQ);
9843 
9844         // Update successor info.
9845         // Both Small and Big will jump to Small.BB, so we sum up the
9846         // probabilities.
9847         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
9848         if (BPI)
9849           addSuccessorWithProb(
9850               SwitchMBB, DefaultMBB,
9851               // The default destination is the first successor in IR.
9852               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
9853         else
9854           addSuccessorWithProb(SwitchMBB, DefaultMBB);
9855 
9856         // Insert the true branch.
9857         SDValue BrCond =
9858             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
9859                         DAG.getBasicBlock(Small.MBB));
9860         // Insert the false branch.
9861         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
9862                              DAG.getBasicBlock(DefaultMBB));
9863 
9864         DAG.setRoot(BrCond);
9865         return;
9866       }
9867     }
9868   }
9869 
9870   if (TM.getOptLevel() != CodeGenOpt::None) {
9871     // Here, we order cases by probability so the most likely case will be
9872     // checked first. However, two clusters can have the same probability in
9873     // which case their relative ordering is non-deterministic. So we use Low
9874     // as a tie-breaker as clusters are guaranteed to never overlap.
9875     llvm::sort(W.FirstCluster, W.LastCluster + 1,
9876                [](const CaseCluster &a, const CaseCluster &b) {
9877       return a.Prob != b.Prob ?
9878              a.Prob > b.Prob :
9879              a.Low->getValue().slt(b.Low->getValue());
9880     });
9881 
9882     // Rearrange the case blocks so that the last one falls through if possible
9883     // without changing the order of probabilities.
9884     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
9885       --I;
9886       if (I->Prob > W.LastCluster->Prob)
9887         break;
9888       if (I->Kind == CC_Range && I->MBB == NextMBB) {
9889         std::swap(*I, *W.LastCluster);
9890         break;
9891       }
9892     }
9893   }
9894 
9895   // Compute total probability.
9896   BranchProbability DefaultProb = W.DefaultProb;
9897   BranchProbability UnhandledProbs = DefaultProb;
9898   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
9899     UnhandledProbs += I->Prob;
9900 
9901   MachineBasicBlock *CurMBB = W.MBB;
9902   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
9903     MachineBasicBlock *Fallthrough;
9904     if (I == W.LastCluster) {
9905       // For the last cluster, fall through to the default destination.
9906       Fallthrough = DefaultMBB;
9907     } else {
9908       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
9909       CurMF->insert(BBI, Fallthrough);
9910       // Put Cond in a virtual register to make it available from the new blocks.
9911       ExportFromCurrentBlock(Cond);
9912     }
9913     UnhandledProbs -= I->Prob;
9914 
9915     switch (I->Kind) {
9916       case CC_JumpTable: {
9917         // FIXME: Optimize away range check based on pivot comparisons.
9918         JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
9919         JumpTable *JT = &JTCases[I->JTCasesIndex].second;
9920 
9921         // The jump block hasn't been inserted yet; insert it here.
9922         MachineBasicBlock *JumpMBB = JT->MBB;
9923         CurMF->insert(BBI, JumpMBB);
9924 
9925         auto JumpProb = I->Prob;
9926         auto FallthroughProb = UnhandledProbs;
9927 
9928         // If the default statement is a target of the jump table, we evenly
9929         // distribute the default probability to successors of CurMBB. Also
9930         // update the probability on the edge from JumpMBB to Fallthrough.
9931         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
9932                                               SE = JumpMBB->succ_end();
9933              SI != SE; ++SI) {
9934           if (*SI == DefaultMBB) {
9935             JumpProb += DefaultProb / 2;
9936             FallthroughProb -= DefaultProb / 2;
9937             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
9938             JumpMBB->normalizeSuccProbs();
9939             break;
9940           }
9941         }
9942 
9943         addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
9944         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
9945         CurMBB->normalizeSuccProbs();
9946 
9947         // The jump table header will be inserted in our current block, do the
9948         // range check, and fall through to our fallthrough block.
9949         JTH->HeaderBB = CurMBB;
9950         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
9951 
9952         // If we're in the right place, emit the jump table header right now.
9953         if (CurMBB == SwitchMBB) {
9954           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
9955           JTH->Emitted = true;
9956         }
9957         break;
9958       }
9959       case CC_BitTests: {
9960         // FIXME: Optimize away range check based on pivot comparisons.
9961         BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
9962 
9963         // The bit test blocks haven't been inserted yet; insert them here.
9964         for (BitTestCase &BTC : BTB->Cases)
9965           CurMF->insert(BBI, BTC.ThisBB);
9966 
9967         // Fill in fields of the BitTestBlock.
9968         BTB->Parent = CurMBB;
9969         BTB->Default = Fallthrough;
9970 
9971         BTB->DefaultProb = UnhandledProbs;
9972         // If the cases in bit test don't form a contiguous range, we evenly
9973         // distribute the probability on the edge to Fallthrough to two
9974         // successors of CurMBB.
9975         if (!BTB->ContiguousRange) {
9976           BTB->Prob += DefaultProb / 2;
9977           BTB->DefaultProb -= DefaultProb / 2;
9978         }
9979 
9980         // If we're in the right place, emit the bit test header right now.
9981         if (CurMBB == SwitchMBB) {
9982           visitBitTestHeader(*BTB, SwitchMBB);
9983           BTB->Emitted = true;
9984         }
9985         break;
9986       }
9987       case CC_Range: {
9988         const Value *RHS, *LHS, *MHS;
9989         ISD::CondCode CC;
9990         if (I->Low == I->High) {
9991           // Check Cond == I->Low.
9992           CC = ISD::SETEQ;
9993           LHS = Cond;
9994           RHS = I->Low;
9995           MHS = nullptr;
9996         } else {
9997           // Check I->Low <= Cond <= I->High.
9998           CC = ISD::SETLE;
9999           LHS = I->Low;
10000           MHS = Cond;
10001           RHS = I->High;
10002         }
10003 
10004         // The false probability is the sum of all unhandled cases.
10005         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10006                      getCurSDLoc(), I->Prob, UnhandledProbs);
10007 
10008         if (CurMBB == SwitchMBB)
10009           visitSwitchCase(CB, SwitchMBB);
10010         else
10011           SwitchCases.push_back(CB);
10012 
10013         break;
10014       }
10015     }
10016     CurMBB = Fallthrough;
10017   }
10018 }
10019 
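/// Return the number of clusters in [First, Last] that would be ordered ahead
/// of CC by the probability-first, case-value-second ordering used for case
/// clusters, i.e. CC's rank within that range.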
10020 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10021                                               CaseClusterIt First,
10022                                               CaseClusterIt Last) {
10023   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10024     if (X.Prob != CC.Prob)
10025       return X.Prob > CC.Prob;
10026 
10027     // Ties are broken by comparing the case value.
10028     return X.Low->getValue().slt(CC.Low->getValue());
10029   });
10030 }
10031 
10032 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10033                                         const SwitchWorkListItem &W,
10034                                         Value *Cond,
10035                                         MachineBasicBlock *SwitchMBB) {
10036   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10037          "Clusters not sorted?");
10038 
10039   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10040 
10041   // Balance the tree based on branch probabilities to create a near-optimal (in
10042   // terms of search time given key frequency) binary search tree. See e.g. Kurt
10043   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10044   CaseClusterIt LastLeft = W.FirstCluster;
10045   CaseClusterIt FirstRight = W.LastCluster;
10046   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10047   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10048 
10049   // Move LastLeft and FirstRight towards each other from opposite directions to
10050   // find a partitioning of the clusters which balances the probability on both
10051   // sides. If LeftProb and RightProb are equal, alternate which side is
10052   // taken to ensure 0-probability nodes are distributed evenly.
10053   unsigned I = 0;
10054   while (LastLeft + 1 < FirstRight) {
10055     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10056       LeftProb += (++LastLeft)->Prob;
10057     else
10058       RightProb += (--FirstRight)->Prob;
10059     I++;
10060   }
10061 
10062   while (true) {
10063     // Our binary search tree differs from a typical BST in that ours can have up
10064     // to three values in each leaf. The pivot selection above doesn't take that
10065     // into account, which means the tree might require more nodes and be less
10066     // efficient. We compensate for this here.
10067 
10068     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10069     unsigned NumRight = W.LastCluster - FirstRight + 1;
10070 
10071     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10072       // If one side has less than 3 clusters, and the other has more than 3,
10073       // consider taking a cluster from the other side.
10074 
10075       if (NumLeft < NumRight) {
10076         // Consider moving the first cluster on the right to the left side.
10077         CaseCluster &CC = *FirstRight;
10078         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10079         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10080         if (LeftSideRank <= RightSideRank) {
10081           // Moving the cluster to the left does not demote it.
10082           ++LastLeft;
10083           ++FirstRight;
10084           continue;
10085         }
10086       } else {
10087         assert(NumRight < NumLeft);
10088         // Consider moving the last element on the left to the right side.
10089         CaseCluster &CC = *LastLeft;
10090         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10091         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10092         if (RightSideRank <= LeftSideRank) {
10093           // Moving the cluster to the right does not demote it.
10094           --LastLeft;
10095           --FirstRight;
10096           continue;
10097         }
10098       }
10099     }
10100     break;
10101   }
10102 
10103   assert(LastLeft + 1 == FirstRight);
10104   assert(LastLeft >= W.FirstCluster);
10105   assert(FirstRight <= W.LastCluster);
10106 
10107   // Use the first element on the right as pivot since we will make less-than
10108   // comparisons against it.
10109   CaseClusterIt PivotCluster = FirstRight;
10110   assert(PivotCluster > W.FirstCluster);
10111   assert(PivotCluster <= W.LastCluster);
10112 
10113   CaseClusterIt FirstLeft = W.FirstCluster;
10114   CaseClusterIt LastRight = W.LastCluster;
10115 
10116   const ConstantInt *Pivot = PivotCluster->Low;
10117 
10118   // New blocks will be inserted immediately after the current one.
10119   MachineFunction::iterator BBI(W.MBB);
10120   ++BBI;
10121 
10122   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10123   // we can branch to its destination directly if it's squeezed exactly in
10124   // between the known lower bound and Pivot - 1.
10125   MachineBasicBlock *LeftMBB;
10126   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10127       FirstLeft->Low == W.GE &&
10128       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
10129     LeftMBB = FirstLeft->MBB;
10130   } else {
10131     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10132     FuncInfo.MF->insert(BBI, LeftMBB);
10133     WorkList.push_back(
10134         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10135     // Put Cond in a virtual register to make it available from the new blocks.
10136     ExportFromCurrentBlock(Cond);
10137   }
10138 
10139   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10140   // single cluster, RHS.Low == Pivot, and we can branch to its destination
10141   // directly if RHS.High equals the current upper bound.
10142   MachineBasicBlock *RightMBB;
10143   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10144       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10145     RightMBB = FirstRight->MBB;
10146   } else {
10147     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10148     FuncInfo.MF->insert(BBI, RightMBB);
10149     WorkList.push_back(
10150         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10151     // Put Cond in a virtual register to make it available from the new blocks.
10152     ExportFromCurrentBlock(Cond);
10153   }
10154 
10155   // Create the CaseBlock record that will be used to lower the branch.
10156   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10157                getCurSDLoc(), LeftProb, RightProb);
10158 
10159   if (W.MBB == SwitchMBB)
10160     visitSwitchCase(CB, SwitchMBB);
10161   else
10162     SwitchCases.push_back(CB);
10163 }
10164 
10165 // Scale CaseProb after peeling a case with probability PeeledCaseProb from
10166 // the switch statement.
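// E.g., peeling a case with probability 1/2 leaves a switch with total
// probability 1/2, so each remaining case's probability doubles: a 20% case
// becomes a 40% case of the peeled switch.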
10167 static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
10168                                               BranchProbability PeeledCaseProb) {
10169   if (PeeledCaseProb == BranchProbability::getOne())
10170     return BranchProbability::getZero();
10171   BranchProbability SwitchProb = PeeledCaseProb.getCompl();
10172 
10173   uint32_t Numerator = CaseProb.getNumerator();
10174   uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
10175   return BranchProbability(Numerator, std::max(Numerator, Denominator));
10176 }

// Try to peel the top probability case if it exceeds the threshold.
// Return the current MachineBasicBlock for the switch statement if peeling
// does not occur.
// If peeling is performed, return the newly created MachineBasicBlock
// for the peeled switch statement. Also update Clusters to remove the peeled
// case. PeeledCaseProb is the BranchProbability for the peeled case.
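// For example, if profile data shows one case taken often enough to exceed
// SwitchPeelThreshold (interpreted as a percentage below), that case gets an
// explicit compare-and-branch up front, and the remaining clusters are
// lowered in the newly created block.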
MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
    const SwitchInst &SI, CaseClusterVector &Clusters,
    BranchProbability &PeeledCaseProb) {
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't peel if peeling is disabled (threshold above 100%), profile info
  // is unavailable, there is only one cluster, we are not optimizing, or we
  // are optimizing for size.
  if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
      TM.getOptLevel() == CodeGenOpt::None ||
      SwitchMBB->getParent()->getFunction().optForMinSize())
    return SwitchMBB;

  BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
  unsigned PeeledCaseIndex = 0;
  bool SwitchPeeled = false;
  for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
    CaseCluster &CC = Clusters[Index];
    if (CC.Prob < TopCaseProb)
      continue;
    TopCaseProb = CC.Prob;
    PeeledCaseIndex = Index;
    SwitchPeeled = true;
  }
  if (!SwitchPeeled)
    return SwitchMBB;

  LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
                    << TopCaseProb << "\n");

  // Record the MBB for the peeled switch statement.
  MachineFunction::iterator BBI(SwitchMBB);
  ++BBI;
  MachineBasicBlock *PeeledSwitchMBB =
      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);

  ExportFromCurrentBlock(SI.getCondition());
  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
                          nullptr,   nullptr,      TopCaseProb.getCompl()};
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);

  Clusters.erase(PeeledCaseIt);
  for (CaseCluster &CC : Clusters) {
    LLVM_DEBUG(
        dbgs() << "Scale the probability for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
  }
  PeeledCaseProb = TopCaseProb;
  return PeeledSwitchMBB;
}

void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
  // Extract cases from the switch.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
    const ConstantInt *CaseVal = I.getCaseValue();
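    // Without BranchProbabilityInfo, assume the N cases and the default
    // destination are equally likely: each edge gets probability 1 / (N + 1).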
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
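  // For example, adjacent cases 0, 1 and 2 that all branch to the same block
  // become a single CC_Range cluster covering [0, 2].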
  sortAndRangeify(Clusters);

  if (TM.getOptLevel() != CodeGenOpt::None) {
    // Replace an unreachable default with the most popular destination.
    // FIXME: Exploit unreachable default more aggressively.
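    // This commonly arises from a fully-covered switch over an enum whose
    // default block contains nothing but an unreachable instruction.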
    bool UnreachableDefault =
        isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
    if (UnreachableDefault && !Clusters.empty()) {
      DenseMap<const BasicBlock *, unsigned> Popularity;
      unsigned MaxPop = 0;
      const BasicBlock *MaxBB = nullptr;
      for (auto I : SI.cases()) {
        const BasicBlock *BB = I.getCaseSuccessor();
        if (++Popularity[BB] > MaxPop) {
          MaxPop = Popularity[BB];
          MaxBB = BB;
        }
      }
      // Set new default.
      assert(MaxPop > 0 && MaxBB);
      DefaultMBB = FuncInfo.MBBMap[MaxBB];

      // Remove cases that were pointing to the destination that is now the
      // default.
      CaseClusterVector New;
      New.reserve(Clusters.size());
      for (CaseCluster &CC : Clusters) {
        if (CC.MBB != DefaultMBB)
          New.push_back(CC);
      }
      Clusters = std::move(New);
    }
  }

  // The branch probability of the peeled case.
  BranchProbability PeeledCaseProb = BranchProbability::getZero();
  MachineBasicBlock *PeeledSwitchMBB =
      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);

  // If there is only the default destination, jump there directly.
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  if (Clusters.empty()) {
    assert(PeeledSwitchMBB == SwitchMBB);
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
    }
    return;
  }

  findJumpTables(Clusters, &SI, DefaultMBB);
  findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if peeling occurred and
  // DefaultMBB was not replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
    DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
        !DefaultMBB->getParent()->getFunction().optForMinSize()) {
      // For optimized builds, lower large range as a balanced binary tree.
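      // splitWorkItem picks a pivot near the probability-weighted middle of
      // the range, emits the pivot comparison, and pushes both halves back
      // onto WorkList to be partitioned further or lowered directly.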
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
      continue;
    }

    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
  }
}
10355