//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));

static cl::opt<bool>
EnableFMFInDAG("enable-fmf-dag", cl::init(true), cl::Hidden,
               cl::desc("Enable fast-math-flags for DAG nodes"));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                 cl::desc("Minimum density for building a jump table in "
                          "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned>
OptsizeJumpTableDensity("optsize-jump-table-density", cl::init(40), cl::Hidden,
                        cl::desc("Minimum density for building a jump table in "
                                 "an optsize function"));


// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  PartVT, ValueVT, V);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  llvm_unreachable("Unknown mismatch!");
}
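
// Illustrative sketch (editor's example, not part of the original source):
// on a hypothetical 32-bit target where i64 is illegal, the call
//
//   SDValue Parts[] = {Lo, Hi};                    // PartVT = MVT::i32
//   getCopyFromParts(DAG, DL, Parts, 2, MVT::i32, MVT::i64, V)
//
// reassembles the value as (build_pair:i64 Lo, Hi), swapping Lo and Hi
// first on big-endian data layouts.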

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, ValueVT, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle cases such as i8 -> <1 x i1>
  if (ValueVT.getVectorNumElements() != 1) {
    diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                      "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  if (ValueVT.getVectorNumElements() == 1 &&
      ValueVT.getVectorElementType() != PartEVT)
    Val = DAG.getAnyExtOrTrunc(Val, DL, ValueVT.getScalarType());

  return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}
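
// Illustrative sketch (editor's example): if the target widens <2 x float>
// to <4 x float>, the single widened part is narrowed back above with
//
//   (extract_subvector:v2f32 (v4f32 Part), 0)
//
// while same-size mismatches (e.g. v2i32 vs. v4i16) take the plain BITCAST
// path instead.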

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits, DL));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}
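
// Illustrative sketch (hypothetical values): splitting an i96 into three i32
// parts first peels off the non-power-of-2 tail, then bisects the remaining
// power-of-2 chunk with EXTRACT_ELEMENT:
//
//   OddVal   = (srl:i96 Val, 64)            // RoundParts = 2, RoundBits = 64
//   Parts[2] = (trunc:i32 OddVal)           // recursive call for the tail
//   Val      = (trunc:i64 Val)
//   Parts[1] = (extract_element:i32 Val, 1) // high half
//   Parts[0] = (extract_element:i32 Val, 0) // low half
//
// The final std::reverse restores memory order on big-endian layouts.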


/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, ElementVT, Val,
            DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))));

      for (unsigned i = ValueVT.getVectorNumElements(),
           e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      // Vector -> scalar conversion.
      assert(ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));

      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getConstant(i * (NumElements / NumIntermediates), DL,
                                      TLI.getVectorIdxTy(DAG.getDataLayout())));
    else
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}
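
// Illustrative sketch (editor's example): for an <8 x i16> value whose
// breakdown is two v4i16 intermediates in two registers, the loop above emits
//
//   Ops[0] = (extract_subvector:v4i16 Val, 0)
//   Ops[1] = (extract_subvector:v4i16 Val, 4)
//
// and each intermediate is then copied one-to-one into its part register.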

RegsForValue::RegsForValue() {}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    Reg += NumRegs;
  }
}
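
// Illustrative sketch (hypothetical 32-bit target): for Ty = {i64, i32} and a
// first virtual register number Reg = 5, the constructor above produces
//
//   ValueVTs = {i64, i32}
//   RegVTs   = {i32, i32}   // i64 needs two i32 registers
//   Regs     = {5, 6, 7}    // consecutive registers, grouped per value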

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize) {
        isSExt = true;   // ASSERT SEXT 1
        FromVT = MVT::i1;
      } else if (NumZeroBits >= RegSize - 1) {
        isSExt = false;  // ASSERT ZEXT 1
        FromVT = MVT::i1;
      } else if (NumSignBits > RegSize - 8) {
        isSExt = true;   // ASSERT SEXT 8
        FromVT = MVT::i8;
      } else if (NumZeroBits >= RegSize - 8) {
        isSExt = false;  // ASSERT ZEXT 8
        FromVT = MVT::i8;
      } else if (NumSignBits > RegSize - 16) {
        isSExt = true;   // ASSERT SEXT 16
        FromVT = MVT::i16;
      } else if (NumZeroBits >= RegSize - 16) {
        isSExt = false;  // ASSERT ZEXT 16
        FromVT = MVT::i16;
      } else if (NumSignBits > RegSize - 32) {
        isSExt = true;   // ASSERT SEXT 32
        FromVT = MVT::i32;
      } else if (NumZeroBits >= RegSize - 32) {
        isSExt = false;  // ASSERT ZEXT 32
        FromVT = MVT::i32;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}
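
// Illustrative sketch (editor's example): if live-out analysis proved that a
// virtual i32 register has its upper 24 bits known zero, the chain above
// picks NumZeroBits >= RegSize - 8 and wraps the copy as
//
//   (AssertZext:i32 (CopyFromReg ...), ValueType:i8)
//
// letting later combines rely on the upper bits being zero.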

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));

      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
        // If we clobbered the stack pointer, MFI should know about it.
        assert(DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment());
      }
    }
  }
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
                               const TargetLibraryInfo *li) {
  AA = &aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}
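
// Illustrative sketch: with two independent pending loads L1 and L2, getRoot
// merges their chains as
//
//   Root = (TokenFactor L1.chain, L2.chain)
//
// so neither load is ordered after the other, only before users of Root.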

SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I)) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
  if (DDI.getDI()) {
    const DbgValueInst *DI = DDI.getDI();
    DebugLoc dl = DDI.getdl();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    uint64_t Offset = DI->getOffset();
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, Offset, false,
                                    Val)) {
        SDV = getDbgValue(Val, Variable, Expr, Offset, dl, DbgSDNodeOrder);
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      }
    } else
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    DanglingDebugInfoMap[V] = DanglingDebugInfo();
  }
}

/// getCopyFromRegs - If there was a virtual register allocated for the value
/// V, emit CopyFromReg of the specified type Ty. Otherwise return an empty
/// SDValue().
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;
    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty);
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue already exists for the given Value.
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
    (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location.  This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Ops list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
                                      VT, Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getPointerTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType());
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}
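
// Illustrative sketch (editor's example): for an IR constant such as
// <2 x i32> <i32 1, i32 2>, the vector-constant paths above build
//
//   (BUILD_VECTOR:v2i32 (Constant:i32<1>), (Constant:i32<2>))
//
// and cache it in NodeMap so later uses reuse the same node.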

void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  // In MSVC C++ and CoreCLR, catch blocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();

  DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
                          getControlRoot()));
}

void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BB's.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of a funclet.
  FuncInfo.MBB->setIsEHFuncletEntry();
  FuncInfo.MBB->setIsCleanupFuncletEntry();
}

/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine basic block destinations.
/// As those destinations may not be successors of EHPadBB, here we also
/// calculate the edge probability to those destinations. The passed-in Prob
/// is the edge probability to EHPadBB.
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      break;
    } else if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        // For MSVC++ and the CLR, catch blocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
}
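
// Illustrative sketch (editor's example): under MSVC C++ EH, an unwind edge
// to a catchswitch with two handlers whose own unwind edge leads to a
// cleanup pad yields three UnwindDests entries -- one per catchpad handler
// at probability Prob, plus the cleanup pad at Prob scaled by the
// catchswitch-to-cleanup edge probability.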

void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
  // Update successor info.
  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  auto UnwindDest = I.getUnwindDest();
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability UnwindDestProb =
      (BPI && UnwindDest)
          ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
  }
  FuncInfo.MBB->normalizeSuccProbs();

  // Create the terminator node.
  SDValue Ret =
      DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
  report_fatal_error("visitCatchSwitch not yet implemented!");
}

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  // Calls to @llvm.experimental.deoptimize don't generate a return value, so
  // lower
  //
  //   %val = call <ty> @llvm.experimental.deoptimize()
  //   ret <ty> %val
  //
  // differently.
  if (I.getParent()->getTerminatingDeoptimizeCall()) {
    LowerDeoptimizingReturn();
    return;
  }

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
                                        DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
    unsigned NumValues = ValueVTs.size();

    // An aggregate return value cannot wrap around the address space, so
    // offsets to its parts don't wrap either.
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
                                RetPtr.getValueType(), RetPtr,
                                DAG.getIntPtrConstant(Offsets[i],
                                                      getCurSDLoc()),
                                &Flags);
      Chains[i] = DAG.getStore(Chain, getCurSDLoc(),
                               SDValue(RetOp.getNode(), RetOp.getResNo() + i),
                               // FIXME: better loc info would be nice.
                               Add, MachinePointerInfo());
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));

      const Function *F = I.getParent()->getParent();

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1377                                           Attribute::SExt))
1378         ExtendKind = ISD::SIGN_EXTEND;
1379       else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1380                                                Attribute::ZExt))
1381         ExtendKind = ISD::ZERO_EXTEND;
1382 
1383       LLVMContext &Context = F->getContext();
1384       bool RetInReg = F->getAttributes().hasAttribute(
1385           AttributeList::ReturnIndex, Attribute::InReg);
1386 
1387       for (unsigned j = 0; j != NumValues; ++j) {
1388         EVT VT = ValueVTs[j];
1389 
1390         if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1391           VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1392 
1393         unsigned NumParts = TLI.getNumRegisters(Context, VT);
1394         MVT PartVT = TLI.getRegisterType(Context, VT);
1395         SmallVector<SDValue, 4> Parts(NumParts);
1396         getCopyToParts(DAG, getCurSDLoc(),
1397                        SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1398                        &Parts[0], NumParts, PartVT, &I, ExtendKind);
1399 
1400         // 'inreg' on function refers to return value
1401         ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1402         if (RetInReg)
1403           Flags.setInReg();
1404 
1405         // Propagate extension type if any
1406         if (ExtendKind == ISD::SIGN_EXTEND)
1407           Flags.setSExt();
1408         else if (ExtendKind == ISD::ZERO_EXTEND)
1409           Flags.setZExt();
1410 
1411         for (unsigned i = 0; i < NumParts; ++i) {
1412           Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1413                                         VT, /*isfixed=*/true, 0, 0));
1414           OutVals.push_back(Parts[i]);
1415         }
1416       }
1417     }
1418   }
1419 
1420   // Push the swifterror virtual register as the last element of Outs. This
1421   // makes sure the swifterror virtual register will be returned in the
1422   // swifterror physical register.
1423   const Function *F = I.getParent()->getParent();
1424   if (TLI.supportSwiftError() &&
1425       F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1426     assert(FuncInfo.SwiftErrorArg && "Need a swift error argument");
1427     ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1428     Flags.setSwiftError();
1429     Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1430                                   EVT(TLI.getPointerTy(DL)) /*argvt*/,
1431                                   true /*isfixed*/, 1 /*origidx*/,
1432                                   0 /*partOffs*/));
1433     // Create SDNode for the swifterror virtual register.
1434     OutVals.push_back(DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(
1435                                           FuncInfo.MBB, FuncInfo.SwiftErrorArg),
1436                                       EVT(TLI.getPointerTy(DL))));
1437   }
1438 
1439   bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
1440   CallingConv::ID CallConv =
1441     DAG.getMachineFunction().getFunction()->getCallingConv();
1442   Chain = DAG.getTargetLoweringInfo().LowerReturn(
1443       Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1444 
1445   // Verify that the target's LowerReturn behaved as expected.
1446   assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1447          "LowerReturn didn't return a valid chain!");
1448 
1449   // Update the DAG with the new chain value resulting from return lowering.
1450   DAG.setRoot(Chain);
1451 }
1452 
1453 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1454 /// created for it, emit nodes to copy the value into the virtual
1455 /// registers.
1456 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1457   // Skip empty types
1458   if (V->getType()->isEmptyTy())
1459     return;
1460 
1461   DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1462   if (VMI != FuncInfo.ValueMap.end()) {
1463     assert(!V->use_empty() && "Unused value assigned virtual registers!");
1464     CopyValueToVirtualRegister(V, VMI->second);
1465   }
1466 }
1467 
1468 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1469 /// the current basic block, add it to ValueMap now so that we'll get a
1470 /// CopyTo/FromReg.
1471 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1472   // No need to export constants.
1473   if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1474 
1475   // Already exported?
1476   if (FuncInfo.isExportedInst(V)) return;
1477 
1478   unsigned Reg = FuncInfo.InitializeRegForValue(V);
1479   CopyValueToVirtualRegister(V, Reg);
1480 }
1481 
1482 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1483                                                      const BasicBlock *FromBB) {
1484   // The operands of the setcc have to be in this block.  We don't know
1485   // how to export them from some other block.
1486   if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1487     // Can export from current BB.
1488     if (VI->getParent() == FromBB)
1489       return true;
1490 
1491     // Is already exported, noop.
1492     return FuncInfo.isExportedInst(V);
1493   }
1494 
1495   // If this is an argument, we can export it if the BB is the entry block or
1496   // if it is already exported.
1497   if (isa<Argument>(V)) {
1498     if (FromBB == &FromBB->getParent()->getEntryBlock())
1499       return true;
1500 
1501     // Otherwise, can only export this if it is already exported.
1502     return FuncInfo.isExportedInst(V);
1503   }
1504 
1505   // Otherwise, constants can always be exported.
1506   return true;
1507 }
1508 
1509 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1510 BranchProbability
1511 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
1512                                         const MachineBasicBlock *Dst) const {
1513   BranchProbabilityInfo *BPI = FuncInfo.BPI;
1514   const BasicBlock *SrcBB = Src->getBasicBlock();
1515   const BasicBlock *DstBB = Dst->getBasicBlock();
1516   if (!BPI) {
1517     // If BPI is not available, set the default probability as 1 / N, where N is
1518     // the number of successors.
1519     auto SuccSize = std::max<uint32_t>(
1520         std::distance(succ_begin(SrcBB), succ_end(SrcBB)), 1);
1521     return BranchProbability(1, SuccSize);
1522   }
1523   return BPI->getEdgeProbability(SrcBB, DstBB);
1524 }
1525 
1526 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
1527                                                MachineBasicBlock *Dst,
1528                                                BranchProbability Prob) {
1529   if (!FuncInfo.BPI)
1530     Src->addSuccessorWithoutProb(Dst);
1531   else {
1532     if (Prob.isUnknown())
1533       Prob = getEdgeProbability(Src, Dst);
1534     Src->addSuccessor(Dst, Prob);
1535   }
1536 }
1537 
1538 static bool InBlock(const Value *V, const BasicBlock *BB) {
1539   if (const Instruction *I = dyn_cast<Instruction>(V))
1540     return I->getParent() == BB;
1541   return true;
1542 }
1543 
1544 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1545 /// This function emits a branch and is used at the leaves of an OR or an
1546 /// AND operator tree.
1547 ///
1548 void
1549 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1550                                                   MachineBasicBlock *TBB,
1551                                                   MachineBasicBlock *FBB,
1552                                                   MachineBasicBlock *CurBB,
1553                                                   MachineBasicBlock *SwitchBB,
1554                                                   BranchProbability TProb,
1555                                                   BranchProbability FProb,
1556                                                   bool InvertCond) {
1557   const BasicBlock *BB = CurBB->getBasicBlock();
1558 
1559   // If the leaf of the tree is a comparison, merge the condition into
1560   // the caseblock.
1561   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1562     // The operands of the cmp have to be in this block.  We don't know
1563     // how to export them from some other block.  If this is the first block
1564     // of the sequence, no exporting is needed.
1565     if (CurBB == SwitchBB ||
1566         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1567          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1568       ISD::CondCode Condition;
1569       if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1570         ICmpInst::Predicate Pred =
1571             InvertCond ? IC->getInversePredicate() : IC->getPredicate();
1572         Condition = getICmpCondCode(Pred);
1573       } else {
1574         const FCmpInst *FC = cast<FCmpInst>(Cond);
1575         FCmpInst::Predicate Pred =
1576             InvertCond ? FC->getInversePredicate() : FC->getPredicate();
1577         Condition = getFCmpCondCode(Pred);
1578         if (TM.Options.NoNaNsFPMath)
1579           Condition = getFCmpCodeWithoutNaN(Condition);
1580       }
1581 
1582       CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1583                    TBB, FBB, CurBB, TProb, FProb);
1584       SwitchCases.push_back(CB);
1585       return;
1586     }
1587   }
1588 
1589   // Create a CaseBlock record representing this branch.
1590   ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
1591   CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
1592                nullptr, TBB, FBB, CurBB, TProb, FProb);
1593   SwitchCases.push_back(CB);
1594 }
1595 
1596 /// FindMergedConditions - If Cond is an expression like
1597 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1598                                                MachineBasicBlock *TBB,
1599                                                MachineBasicBlock *FBB,
1600                                                MachineBasicBlock *CurBB,
1601                                                MachineBasicBlock *SwitchBB,
1602                                                Instruction::BinaryOps Opc,
1603                                                BranchProbability TProb,
1604                                                BranchProbability FProb,
1605                                                bool InvertCond) {
1606   // Skip over a 'not' that is not itself part of the tree, and remember to
1607   // invert the opcode and operands at the next level.
1608   if (BinaryOperator::isNot(Cond) && Cond->hasOneUse()) {
1609     const Value *CondOp = BinaryOperator::getNotArgument(Cond);
1610     if (InBlock(CondOp, CurBB->getBasicBlock())) {
1611       FindMergedConditions(CondOp, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
1612                            !InvertCond);
1613       return;
1614     }
1615   }
1616 
1617   const Instruction *BOp = dyn_cast<Instruction>(Cond);
1618   // Compute the effective opcode for Cond, taking into account whether it needs
1619   // to be inverted, e.g.
1620   //   and (not (or A, B)), C
1621   // gets lowered as
1622   //   and (and (not A, not B), C)
1623   unsigned BOpc = 0;
1624   if (BOp) {
1625     BOpc = BOp->getOpcode();
1626     if (InvertCond) {
1627       if (BOpc == Instruction::And)
1628         BOpc = Instruction::Or;
1629       else if (BOpc == Instruction::Or)
1630         BOpc = Instruction::And;
1631     }
1632   }
1633 
1634   // If this node is not part of the or/and tree, emit it as a branch.
1635   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1636       BOpc != Opc || !BOp->hasOneUse() ||
1637       BOp->getParent() != CurBB->getBasicBlock() ||
1638       !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1639       !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1640     EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1641                                  TProb, FProb, InvertCond);
1642     return;
1643   }
1644 
1645   //  Create TmpBB after CurBB.
1646   MachineFunction::iterator BBI(CurBB);
1647   MachineFunction &MF = DAG.getMachineFunction();
1648   MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1649   CurBB->getParent()->insert(++BBI, TmpBB);
1650 
1651   if (Opc == Instruction::Or) {
1652     // Codegen X | Y as:
1653     // BB1:
1654     //   jmp_if_X TBB
1655     //   jmp TmpBB
1656     // TmpBB:
1657     //   jmp_if_Y TBB
1658     //   jmp FBB
1659     //
1660 
1661     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1662     // The requirement is that
1663     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1664     //     = TrueProb for original BB.
1665     // Assuming the original probabilities are A and B, one choice is to set
1666     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
1667     // A/(1+B) and 2B/(1+B). This choice assumes that
1668     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1669     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1670     // TmpBB, but the math is more complicated.
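    //
    // As a concrete check with illustrative numbers A = 1/4 and B = 3/4:
    // BB1 gets probabilities 1/8 and 7/8; normalizing {A/2, B} = {1/8, 6/8}
    // gives TmpBB 1/7 and 6/7, and indeed 1/8 + (7/8)*(1/7) = 1/4 = A, as
    // required.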
1671 
1672     auto NewTrueProb = TProb / 2;
1673     auto NewFalseProb = TProb / 2 + FProb;
1674     // Emit the LHS condition.
1675     FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1676                          NewTrueProb, NewFalseProb, InvertCond);
1677 
1678     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
1679     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
1680     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1681     // Emit the RHS condition into TmpBB.
1682     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1683                          Probs[0], Probs[1], InvertCond);
1684   } else {
1685     assert(Opc == Instruction::And && "Unknown merge op!");
1686     // Codegen X & Y as:
1687     // BB1:
1688     //   jmp_if_X TmpBB
1689     //   jmp FBB
1690     // TmpBB:
1691     //   jmp_if_Y TBB
1692     //   jmp FBB
1693     //
1694     //  This requires creation of TmpBB after CurBB.
1695 
1696     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1697     // The requirement is that
1698     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1699     //     = FalseProb for original BB.
1700     // Assuming the original probabilities are A and B, one choice is to set
1701     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
1702     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
1703     // TrueProb for BB1 * FalseProb for TmpBB.
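    //
    // As a concrete check with illustrative numbers A = 1/2 and B = 1/2:
    // BB1 gets probabilities 3/4 and 1/4; normalizing {A, B/2} = {2/4, 1/4}
    // gives TmpBB 2/3 and 1/3, and indeed 1/4 + (3/4)*(1/3) = 1/2 = B, as
    // required.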
1704 
1705     auto NewTrueProb = TProb + FProb / 2;
1706     auto NewFalseProb = FProb / 2;
1707     // Emit the LHS condition.
1708     FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1709                          NewTrueProb, NewFalseProb, InvertCond);
1710 
1711     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
1712     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
1713     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
1714     // Emit the RHS condition into TmpBB.
1715     FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1716                          Probs[0], Probs[1], InvertCond);
1717   }
1718 }
1719 
1720 /// If the set of cases should be emitted as a series of branches, return true.
1721 /// If we should emit this as a bunch of and/or'd together conditions, return
1722 /// false.
1723 bool
1724 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1725   if (Cases.size() != 2) return true;
1726 
1727   // If this is two comparisons of the same values or'd or and'd together, they
1728   // will get folded into a single comparison, so don't emit two blocks.
1729   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1730        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1731       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1732        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1733     return false;
1734   }
1735 
1736   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1737   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1738   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1739       Cases[0].CC == Cases[1].CC &&
1740       isa<Constant>(Cases[0].CmpRHS) &&
1741       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1742     if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1743       return false;
1744     if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1745       return false;
1746   }
1747 
1748   return true;
1749 }
1750 
1751 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1752   MachineBasicBlock *BrMBB = FuncInfo.MBB;
1753 
1754   // Update machine-CFG edges.
1755   MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1756 
1757   if (I.isUnconditional()) {
1758     // Update machine-CFG edges.
1759     BrMBB->addSuccessor(Succ0MBB);
1760 
1761     // If this is not a fall-through branch or optimizations are switched off,
1762     // emit the branch.
1763     if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
1764       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1765                               MVT::Other, getControlRoot(),
1766                               DAG.getBasicBlock(Succ0MBB)));
1767 
1768     return;
1769   }
1770 
1771   // If this condition is one of the special cases we handle, emit the
1772   // specialized branch sequence for it now.
1773   const Value *CondVal = I.getCondition();
1774   MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1775 
1776   // If this is a series of conditions that are or'd or and'd together, emit
1777   // this as a sequence of branches instead of setcc's with and/or operations.
1778   // As long as jumps are not expensive, this should improve performance.
1779   // For example, instead of something like:
1780   //     cmp A, B
1781   //     C = seteq
1782   //     cmp D, E
1783   //     F = setle
1784   //     or C, F
1785   //     jnz foo
1786   // Emit:
1787   //     cmp A, B
1788   //     je foo
1789   //     cmp D, E
1790   //     jle foo
1791   //
1792   if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1793     Instruction::BinaryOps Opcode = BOp->getOpcode();
1794     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
1795         !I.getMetadata(LLVMContext::MD_unpredictable) &&
1796         (Opcode == Instruction::And || Opcode == Instruction::Or)) {
1797       FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
1798                            Opcode,
1799                            getEdgeProbability(BrMBB, Succ0MBB),
1800                            getEdgeProbability(BrMBB, Succ1MBB),
1801                            /*InvertCond=*/false);
1802       // If the compares in later blocks need to use values not currently
1803       // exported from this block, export them now.  This block should always
1804       // be the first entry.
1805       assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
1806 
1807       // Allow some cases to be rejected.
1808       if (ShouldEmitAsBranches(SwitchCases)) {
1809         for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1810           ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1811           ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1812         }
1813 
1814         // Emit the branch for this block.
1815         visitSwitchCase(SwitchCases[0], BrMBB);
1816         SwitchCases.erase(SwitchCases.begin());
1817         return;
1818       }
1819 
1820       // Okay, we decided not to do this, remove any inserted MBB's and clear
1821       // SwitchCases.
1822       for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1823         FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1824 
1825       SwitchCases.clear();
1826     }
1827   }
1828 
1829   // Create a CaseBlock record representing this branch.
1830   CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1831                nullptr, Succ0MBB, Succ1MBB, BrMBB);
1832 
1833   // Use visitSwitchCase to actually insert the fast branch sequence for this
1834   // cond branch.
1835   visitSwitchCase(CB, BrMBB);
1836 }
1837 
1838 /// visitSwitchCase - Emits the necessary code to represent a single node in
1839 /// the binary search tree resulting from lowering a switch instruction.
1840 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
1841                                           MachineBasicBlock *SwitchBB) {
1842   SDValue Cond;
1843   SDValue CondLHS = getValue(CB.CmpLHS);
1844   SDLoc dl = getCurSDLoc();
1845 
1846   // Build the setcc now.
1847   if (!CB.CmpMHS) {
1848     // Fold "(X == true)" to X and "(X == false)" to !X to
1849     // handle common cases produced by branch lowering.
1850     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1851         CB.CC == ISD::SETEQ)
1852       Cond = CondLHS;
1853     else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1854              CB.CC == ISD::SETEQ) {
1855       SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
1856       Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1857     } else
1858       Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1859   } else {
1860     assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1861 
1862     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1863     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1864 
1865     SDValue CmpOp = getValue(CB.CmpMHS);
1866     EVT VT = CmpOp.getValueType();
1867 
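    // The range check Low <= X <= High is emitted as a single unsigned
    // comparison (X - Low) u<= (High - Low). For example (illustrative
    // values), the range [2, 5] becomes (X - 2) u<= 3; any X < 2 wraps
    // around to a huge unsigned value and is rejected too. When Low is the
    // minimum (signed) value the subtraction is skipped and a plain signed
    // X <= High suffices.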
1868     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1869       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
1870                           ISD::SETLE);
1871     } else {
1872       SDValue SUB = DAG.getNode(ISD::SUB, dl,
1873                                 VT, CmpOp, DAG.getConstant(Low, dl, VT));
1874       Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1875                           DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
1876     }
1877   }
1878 
1879   // Update successor info
1880   addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
1881   // TrueBB and FalseBB are always different unless the incoming IR is
1882   // degenerate. This only happens when running llc on weird IR.
1883   if (CB.TrueBB != CB.FalseBB)
1884     addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
1885   SwitchBB->normalizeSuccProbs();
1886 
1887   // If the lhs block is the next block, invert the condition so that we can
1888   // fall through to the lhs instead of the rhs block.
1889   if (CB.TrueBB == NextBlock(SwitchBB)) {
1890     std::swap(CB.TrueBB, CB.FalseBB);
1891     SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
1892     Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1893   }
1894 
1895   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1896                                MVT::Other, getControlRoot(), Cond,
1897                                DAG.getBasicBlock(CB.TrueBB));
1898 
1899   // Insert the false branch. Do this even if it's a fall through branch,
1900   // this makes it easier to do DAG optimizations which require inverting
1901   // the branch condition.
1902   BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1903                        DAG.getBasicBlock(CB.FalseBB));
1904 
1905   DAG.setRoot(BrCond);
1906 }
1907 
1908 /// visitJumpTable - Emit the JumpTable node in the current MBB.
1909 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1910   // Emit the code for the jump table
1911   assert(JT.Reg != -1U && "Should lower JT Header first!");
1912   EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1913   SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1914                                      JT.Reg, PTy);
1915   SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1916   SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
1917                                     MVT::Other, Index.getValue(1),
1918                                     Table, Index);
1919   DAG.setRoot(BrJumpTable);
1920 }
1921 
1922 /// visitJumpTableHeader - This function emits the code needed to produce an
1923 /// index into the JumpTable from the value being switched on.
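///
/// For example (illustrative case values), a jump table over cases 10..14
/// emits roughly:
///
///   %index = sub %switchval, 10        ; bias by the smallest case value
///   brcond (setugt %index, 4), %default
///
/// and %index is copied into a virtual register to be consumed by the BR_JT
/// node that visitJumpTable emits in the successor block.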
1924 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1925                                                JumpTableHeader &JTH,
1926                                                MachineBasicBlock *SwitchBB) {
1927   SDLoc dl = getCurSDLoc();
1928 
1929   // Subtract the lowest switch case value from the value being switched on and
1930   // conditionally branch to the default mbb if the result is greater than the
1931   // difference between the smallest and largest cases.
1932   SDValue SwitchOp = getValue(JTH.SValue);
1933   EVT VT = SwitchOp.getValueType();
1934   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
1935                             DAG.getConstant(JTH.First, dl, VT));
1936 
1937   // The SDNode we just created, which holds the value being switched on minus
1938   // the smallest case value, needs to be copied to a virtual register so it
1939   // can be used as an index into the jump table in a subsequent basic block.
1940   // This value may be smaller or larger than the target's pointer type, and
1941   // may therefore require extension or truncation.
1942   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1943   SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
1944 
1945   unsigned JumpTableReg =
1946       FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
1947   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
1948                                     JumpTableReg, SwitchOp);
1949   JT.Reg = JumpTableReg;
1950 
1951   // Emit the range check for the jump table, and branch to the default block
1952   // for the switch statement if the value being switched on exceeds the largest
1953   // case in the switch.
1954   SDValue CMP = DAG.getSetCC(
1955       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
1956                                  Sub.getValueType()),
1957       Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
1958 
1959   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1960                                MVT::Other, CopyTo, CMP,
1961                                DAG.getBasicBlock(JT.Default));
1962 
1963   // Avoid emitting unnecessary branches to the next block.
1964   if (JT.MBB != NextBlock(SwitchBB))
1965     BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1966                          DAG.getBasicBlock(JT.MBB));
1967 
1968   DAG.setRoot(BrCond);
1969 }
1970 
1971 /// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
1972 /// variable if there exists one.
1973 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
1974                                  SDValue &Chain) {
1975   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1976   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
1977   MachineFunction &MF = DAG.getMachineFunction();
1978   Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
1979   MachineSDNode *Node =
1980       DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
1981   if (Global) {
1982     MachinePointerInfo MPInfo(Global);
1983     MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
1984     auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1985                  MachineMemOperand::MODereferenceable;
1986     *MemRefs = MF.getMachineMemOperand(MPInfo, Flags, PtrTy.getSizeInBits() / 8,
1987                                        DAG.getEVTAlignment(PtrTy));
1988     Node->setMemRefs(MemRefs, MemRefs + 1);
1989   }
1990   return SDValue(Node, 0);
1991 }
1992 
1993 /// Codegen a new tail for a stack protector check ParentMBB which has had its
1994 /// tail spliced into a stack protector check success bb.
1995 ///
1996 /// For a high level explanation of how this fits into the stack protector
1997 /// generation see the comment on the declaration of class
1998 /// StackProtectorDescriptor.
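///
/// Roughly, the emitted tail looks like (a sketch, not verbatim output):
///
///   %slot  = volatile load of the stack protector stack slot
///   %guard = LOAD_STACK_GUARD, or a volatile load of the IR stack guard
///   brcond (setne (sub %guard, %slot), 0), %failureMBB
///   br %successMBB
///
/// unless the target supplies a guard check function, in which case that
/// function is simply called with the loaded slot value.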
1999 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2000                                                   MachineBasicBlock *ParentBB) {
2001 
2002   // First create the loads to the guard/stack slot for the comparison.
2003   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2004   EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2005 
2006   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2007   int FI = MFI.getStackProtectorIndex();
2008 
2009   SDValue Guard;
2010   SDLoc dl = getCurSDLoc();
2011   SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2012   const Module &M = *ParentBB->getParent()->getFunction()->getParent();
2013   unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2014 
2015   // Generate code to load the content of the guard slot.
2016   SDValue StackSlot = DAG.getLoad(
2017       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
2018       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2019       MachineMemOperand::MOVolatile);
2020 
2021   // Retrieve guard check function, nullptr if instrumentation is inlined.
2022   if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
2023     // The target provides a guard check function to validate the guard value.
2024     // Generate a call to that function with the content of the guard slot as
2025     // argument.
2026     auto *Fn = cast<Function>(GuardCheck);
2027     FunctionType *FnTy = Fn->getFunctionType();
2028     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2029 
2030     TargetLowering::ArgListTy Args;
2031     TargetLowering::ArgListEntry Entry;
2032     Entry.Node = StackSlot;
2033     Entry.Ty = FnTy->getParamType(0);
2034     if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
2035       Entry.IsInReg = true;
2036     Args.push_back(Entry);
2037 
2038     TargetLowering::CallLoweringInfo CLI(DAG);
2039     CLI.setDebugLoc(getCurSDLoc())
2040       .setChain(DAG.getEntryNode())
2041       .setCallee(Fn->getCallingConv(), FnTy->getReturnType(),
2042                  getValue(GuardCheck), std::move(Args));
2043 
2044     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2045     DAG.setRoot(Result.second);
2046     return;
2047   }
2048 
2049   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2050   // Otherwise, emit a volatile load to retrieve the stack guard value.
2051   SDValue Chain = DAG.getEntryNode();
2052   if (TLI.useLoadStackGuardNode()) {
2053     Guard = getLoadStackGuard(DAG, dl, Chain);
2054   } else {
2055     const Value *IRGuard = TLI.getSDagStackGuard(M);
2056     SDValue GuardPtr = getValue(IRGuard);
2057 
2058     Guard =
2059         DAG.getLoad(PtrTy, dl, Chain, GuardPtr, MachinePointerInfo(IRGuard, 0),
2060                     Align, MachineMemOperand::MOVolatile);
2061   }
2062 
2063   // Perform the comparison via a subtract/getsetcc.
2064   EVT VT = Guard.getValueType();
2065   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
2066 
2067   SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2068                                                         *DAG.getContext(),
2069                                                         Sub.getValueType()),
2070                              Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
2071 
2072   // If the sub is not 0, then we know the guard and the stack slot are not
2073   // equal, so branch to the failure MBB.
2074   SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2075                                MVT::Other, StackSlot.getOperand(0),
2076                                Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2077   // Otherwise branch to success MBB.
2078   SDValue Br = DAG.getNode(ISD::BR, dl,
2079                            MVT::Other, BrCond,
2080                            DAG.getBasicBlock(SPD.getSuccessMBB()));
2081 
2082   DAG.setRoot(Br);
2083 }
2084 
2085 /// Codegen the failure basic block for a stack protector check.
2086 ///
2087 /// A failure stack protector machine basic block consists simply of a call to
2088 /// __stack_chk_fail().
2089 ///
2090 /// For a high level explanation of how this fits into the stack protector
2091 /// generation see the comment on the declaration of class
2092 /// StackProtectorDescriptor.
2093 void
2094 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2095   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2096   SDValue Chain =
2097       TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2098                       None, false, getCurSDLoc(), false, false).second;
2099   DAG.setRoot(Chain);
2100 }
2101 
2102 /// visitBitTestHeader - This function emits the code needed to produce a
2103 /// value suitable for "bit tests".
2104 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2105                                              MachineBasicBlock *SwitchBB) {
2106   SDLoc dl = getCurSDLoc();
2107 
2108   // Subtract the minimum value
2109   SDValue SwitchOp = getValue(B.SValue);
2110   EVT VT = SwitchOp.getValueType();
2111   SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2112                             DAG.getConstant(B.First, dl, VT));
2113 
2114   // Check range
2115   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2116   SDValue RangeCmp = DAG.getSetCC(
2117       dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2118                                  Sub.getValueType()),
2119       Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
2120 
2121   // Determine the type of the test operands.
2122   bool UsePtrType = false;
2123   if (!TLI.isTypeLegal(VT))
2124     UsePtrType = true;
2125   else {
2126     for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2127       if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2128         // Switch table case ranges are encoded as a series of masks.
2129         // Just use pointer type, it's guaranteed to fit.
2130         UsePtrType = true;
2131         break;
2132       }
2133   }
2134   if (UsePtrType) {
2135     VT = TLI.getPointerTy(DAG.getDataLayout());
2136     Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2137   }
2138 
2139   B.RegVT = VT.getSimpleVT();
2140   B.Reg = FuncInfo.CreateReg(B.RegVT);
2141   SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2142 
2143   MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2144 
2145   addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2146   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2147   SwitchBB->normalizeSuccProbs();
2148 
2149   SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
2150                                 MVT::Other, CopyTo, RangeCmp,
2151                                 DAG.getBasicBlock(B.Default));
2152 
2153   // Avoid emitting unnecessary branches to the next block.
2154   if (MBB != NextBlock(SwitchBB))
2155     BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
2156                           DAG.getBasicBlock(MBB));
2157 
2158   DAG.setRoot(BrRange);
2159 }
2160 
2161 /// visitBitTestCase - This function produces one "bit test" and its branches.
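///
/// For example (illustrative masks): with Mask = 0x20 the test degenerates
/// into "ShiftOp == 5"; with a Mask covering all but one value of the range
/// it becomes a setne against the single missing position; otherwise it
/// emits ((1 << ShiftOp) & Mask) != 0.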
2162 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2163                                            MachineBasicBlock* NextMBB,
2164                                            BranchProbability BranchProbToNext,
2165                                            unsigned Reg,
2166                                            BitTestCase &B,
2167                                            MachineBasicBlock *SwitchBB) {
2168   SDLoc dl = getCurSDLoc();
2169   MVT VT = BB.RegVT;
2170   SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2171   SDValue Cmp;
2172   unsigned PopCount = countPopulation(B.Mask);
2173   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2174   if (PopCount == 1) {
2175     // Testing for a single bit; just compare the shift count with what it
2176     // would need to be to shift a 1 bit in that position.
2177     Cmp = DAG.getSetCC(
2178         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2179         ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2180         ISD::SETEQ);
2181   } else if (PopCount == BB.Range) {
2182     // There is only one zero bit in the range, test for it directly.
2183     Cmp = DAG.getSetCC(
2184         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2185         ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2186         ISD::SETNE);
2187   } else {
2188     // Make desired shift
2189     SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2190                                     DAG.getConstant(1, dl, VT), ShiftOp);
2191 
2192     // Emit bit tests and jumps
2193     SDValue AndOp = DAG.getNode(ISD::AND, dl,
2194                                 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2195     Cmp = DAG.getSetCC(
2196         dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2197         AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2198   }
2199 
2200   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
2201   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
2202   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
2203   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
2204   // The sum of B.ExtraProb and BranchProbToNext is not guaranteed to be one,
2205   // as they are relative probabilities (and thus work more like weights), so
2206   // we normalize them here to make them sum to one.
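  // For example (illustrative weights), B.ExtraProb = 1/3 and
  // BranchProbToNext = 1/3 normalize to 1/2 each.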
2207   SwitchBB->normalizeSuccProbs();
2208 
2209   SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
2210                               MVT::Other, getControlRoot(),
2211                               Cmp, DAG.getBasicBlock(B.TargetBB));
2212 
2213   // Avoid emitting unnecessary branches to the next block.
2214   if (NextMBB != NextBlock(SwitchBB))
2215     BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
2216                         DAG.getBasicBlock(NextMBB));
2217 
2218   DAG.setRoot(BrAnd);
2219 }
2220 
2221 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2222   MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2223 
2224   // Retrieve successors. Look through artificial IR level blocks like
2225   // catchswitch for successors.
2226   MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2227   const BasicBlock *EHPadBB = I.getSuccessor(1);
2228 
2229   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
2230   // have to do anything here to lower funclet bundles.
2231   assert(!I.hasOperandBundlesOtherThan(
2232              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
2233          "Cannot lower invokes with arbitrary operand bundles yet!");
2234 
2235   const Value *Callee(I.getCalledValue());
2236   const Function *Fn = dyn_cast<Function>(Callee);
2237   if (isa<InlineAsm>(Callee))
2238     visitInlineAsm(&I);
2239   else if (Fn && Fn->isIntrinsic()) {
2240     switch (Fn->getIntrinsicID()) {
2241     default:
2242       llvm_unreachable("Cannot invoke this intrinsic");
2243     case Intrinsic::donothing:
2244       // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2245       break;
2246     case Intrinsic::experimental_patchpoint_void:
2247     case Intrinsic::experimental_patchpoint_i64:
2248       visitPatchpoint(&I, EHPadBB);
2249       break;
2250     case Intrinsic::experimental_gc_statepoint:
2251       LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
2252       break;
2253     }
2254   } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
2255     // Currently we do not lower any intrinsic calls with deopt operand bundles.
2256     // Eventually we will support lowering the @llvm.experimental.deoptimize
2257     // intrinsic, and right now there are no plans to support other intrinsics
2258     // with deopt state.
2259     LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
2260   } else {
2261     LowerCallTo(&I, getValue(Callee), false, EHPadBB);
2262   }
2263 
2264   // If the value of the invoke is used outside of its defining block, make it
2265   // available as a virtual register.
2266   // We already took care of the exported value for the statepoint instruction
2267   // during the call to LowerStatepoint.
2268   if (!isStatepoint(I)) {
2269     CopyToExportRegsIfNeeded(&I);
2270   }
2271 
2272   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2273   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2274   BranchProbability EHPadBBProb =
2275       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2276           : BranchProbability::getZero();
2277   findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
2278 
2279   // Update successor info.
2280   addSuccessorWithProb(InvokeMBB, Return);
2281   for (auto &UnwindDest : UnwindDests) {
2282     UnwindDest.first->setIsEHPad();
2283     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2284   }
2285   InvokeMBB->normalizeSuccProbs();
2286 
2287   // Drop into normal successor.
2288   DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2289                           MVT::Other, getControlRoot(),
2290                           DAG.getBasicBlock(Return)));
2291 }
2292 
2293 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2294   llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2295 }
2296 
2297 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2298   assert(FuncInfo.MBB->isEHPad() &&
2299          "Call to landingpad not in landing pad!");
2300 
2301   MachineBasicBlock *MBB = FuncInfo.MBB;
2302   addLandingPadInfo(LP, *MBB);
2303 
2304   // If there aren't registers to copy the values into (e.g., during SjLj
2305   // exceptions), then don't bother to create these DAG nodes.
2306   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2307   const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
2308   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2309       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2310     return;
2311 
2312   // If the landingpad's return type is a token type, we don't create DAG nodes
2313   // for its exception pointer and selector value. The extraction of exception
2314   // pointer or selector value from token type landingpads is not currently
2315   // supported.
2316   if (LP.getType()->isTokenTy())
2317     return;
2318 
2319   SmallVector<EVT, 2> ValueVTs;
2320   SDLoc dl = getCurSDLoc();
2321   ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2322   assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2323 
2324   // Get the two live-in registers as SDValues. The physregs have already been
2325   // copied into virtual registers.
2326   SDValue Ops[2];
2327   if (FuncInfo.ExceptionPointerVirtReg) {
2328     Ops[0] = DAG.getZExtOrTrunc(
2329         DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2330                            FuncInfo.ExceptionPointerVirtReg,
2331                            TLI.getPointerTy(DAG.getDataLayout())),
2332         dl, ValueVTs[0]);
2333   } else {
2334     Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
2335   }
2336   Ops[1] = DAG.getZExtOrTrunc(
2337       DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2338                          FuncInfo.ExceptionSelectorVirtReg,
2339                          TLI.getPointerTy(DAG.getDataLayout())),
2340       dl, ValueVTs[1]);
2341 
2342   // Merge into one.
2343   SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2344                             DAG.getVTList(ValueVTs), Ops);
2345   setValue(&LP, Res);
2346 }
2347 
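/// Sort the given single-case clusters and merge adjacent cases that share a
/// destination into case ranges. For example (illustrative cases), the
/// clusters
///
///   {1 -> A}, {3 -> B}, {2 -> A}, {4 -> B}, {6 -> A}
///
/// sort and merge into
///
///   {[1,2] -> A}, {[3,4] -> B}, {[6,6] -> A}
///
/// with the probabilities of merged cases summed.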
2348 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2349 #ifndef NDEBUG
2350   for (const CaseCluster &CC : Clusters)
2351     assert(CC.Low == CC.High && "Input clusters must be single-case");
2352 #endif
2353 
2354   std::sort(Clusters.begin(), Clusters.end(),
2355             [](const CaseCluster &a, const CaseCluster &b) {
2356     return a.Low->getValue().slt(b.Low->getValue());
2357   });
2358 
2359   // Merge adjacent clusters with the same destination.
2360   const unsigned N = Clusters.size();
2361   unsigned DstIndex = 0;
2362   for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2363     CaseCluster &CC = Clusters[SrcIndex];
2364     const ConstantInt *CaseVal = CC.Low;
2365     MachineBasicBlock *Succ = CC.MBB;
2366 
2367     if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2368         (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2369       // If this case has the same successor and is a neighbour, merge it into
2370       // the previous cluster.
2371       Clusters[DstIndex - 1].High = CaseVal;
2372       Clusters[DstIndex - 1].Prob += CC.Prob;
2373     } else {
2374       std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2375                    sizeof(Clusters[SrcIndex]));
2376     }
2377   }
2378   Clusters.resize(DstIndex);
2379 }
2380 
2381 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2382                                            MachineBasicBlock *Last) {
2383   // Update JTCases.
2384   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2385     if (JTCases[i].first.HeaderBB == First)
2386       JTCases[i].first.HeaderBB = Last;
2387 
2388   // Update BitTestCases.
2389   for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2390     if (BitTestCases[i].Parent == First)
2391       BitTestCases[i].Parent = Last;
2392 }
2393 
2394 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2395   MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2396 
2397   // Update machine-CFG edges with unique successors.
2398   SmallSet<BasicBlock*, 32> Done;
2399   for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2400     BasicBlock *BB = I.getSuccessor(i);
2401     bool Inserted = Done.insert(BB).second;
2402     if (!Inserted)
2403       continue;
2404 
2405     MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2406     addSuccessorWithProb(IndirectBrMBB, Succ);
2407   }
2408   IndirectBrMBB->normalizeSuccProbs();
2409 
2410   DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2411                           MVT::Other, getControlRoot(),
2412                           getValue(I.getAddress())));
2413 }
2414 
2415 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2416   if (DAG.getTarget().Options.TrapUnreachable)
2417     DAG.setRoot(
2418         DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2419 }
2420 
2421 void SelectionDAGBuilder::visitFSub(const User &I) {
2422   // -0.0 - X --> fneg
2423   Type *Ty = I.getType();
2424   if (isa<Constant>(I.getOperand(0)) &&
2425       I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2426     SDValue Op2 = getValue(I.getOperand(1));
2427     setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2428                              Op2.getValueType(), Op2));
2429     return;
2430   }
2431 
2432   visitBinary(I, ISD::FSUB);
2433 }
2434 
2435 /// Checks if the given instruction performs a vector reduction, in which case
2436 /// we have the freedom to alter the elements in the result as long as the
2437 /// reduction over them stays unchanged.
2438 static bool isVectorReductionOp(const User *I) {
2439   const Instruction *Inst = dyn_cast<Instruction>(I);
2440   if (!Inst || !Inst->getType()->isVectorTy())
2441     return false;
2442 
2443   auto OpCode = Inst->getOpcode();
2444   switch (OpCode) {
2445   case Instruction::Add:
2446   case Instruction::Mul:
2447   case Instruction::And:
2448   case Instruction::Or:
2449   case Instruction::Xor:
2450     break;
2451   case Instruction::FAdd:
2452   case Instruction::FMul:
2453     if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2454       if (FPOp->getFastMathFlags().unsafeAlgebra())
2455         break;
2456     LLVM_FALLTHROUGH;
2457   default:
2458     return false;
2459   }
2460 
2461   unsigned ElemNum = Inst->getType()->getVectorNumElements();
2462   unsigned ElemNumToReduce = ElemNum;
2463 
2464   // Do a DFS over the def-use chain from the given instruction. We only
2465   // allow four kinds of operations during the search until we reach the
2466   // instruction that extracts the first element from the vector:
2467   //
2468   //   1. The reduction operation of the same opcode as the given instruction.
2469   //
2470   //   2. PHI node.
2471   //
2472   //   3. ShuffleVector instruction together with a reduction operation that
2473   //      does a partial reduction.
2474   //
2475   //   4. ExtractElement that extracts the first element from the vector, and we
2476   //      stop searching the def-use chain here.
2477   //
2478   // 3 & 4 above perform a reduction on all elements of the vector. We push defs
2479   // from 1-3 to the stack to continue the DFS. The given instruction is not
2480   // a reduction operation if we meet any instruction other than those
2481   // listed above.
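  //
  // For example (a sketch of the common log2 shuffle-reduction idiom):
  //
  //   %r0 = fadd fast <4 x float> %a, %b               ; the given instruction
  //   %s1 = shufflevector <4 x float> %r0, <4 x float> undef,
  //                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  //   %r1 = fadd fast <4 x float> %r0, %s1             ; reduce 4 -> 2
  //   %s2 = shufflevector <4 x float> %r1, <4 x float> undef,
  //                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  //   %r2 = fadd fast <4 x float> %r1, %s2             ; reduce 2 -> 1
  //   %red = extractelement <4 x float> %r2, i32 0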
2482 
2483   SmallVector<const User *, 16> UsersToVisit{Inst};
2484   SmallPtrSet<const User *, 16> Visited;
2485   bool ReduxExtracted = false;
2486 
2487   while (!UsersToVisit.empty()) {
2488     auto User = UsersToVisit.back();
2489     UsersToVisit.pop_back();
2490     if (!Visited.insert(User).second)
2491       continue;
2492 
2493     for (const auto &U : User->users()) {
2494       auto Inst = dyn_cast<Instruction>(U);
2495       if (!Inst)
2496         return false;
2497 
2498       if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
2499         if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
2500           if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().unsafeAlgebra())
2501             return false;
2502         UsersToVisit.push_back(U);
2503       } else if (const ShuffleVectorInst *ShufInst =
2504                      dyn_cast<ShuffleVectorInst>(U)) {
2505         // Detect the following pattern: a ShuffleVector instruction together
2506         // with a reduction that does a partial reduction on the first and second
2507         // ElemNumToReduce / 2 elements, and stores the result in the first
2508         // ElemNumToReduce / 2 elements of another vector.
2509 
2510         unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
2511         if (ResultElements < ElemNum)
2512           return false;
2513 
2514         if (ElemNumToReduce == 1)
2515           return false;
2516         if (!isa<UndefValue>(U->getOperand(1)))
2517           return false;
2518         for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
2519           if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
2520             return false;
2521         for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
2522           if (ShufInst->getMaskValue(i) != -1)
2523             return false;
2524 
2525         // There is only one user of this ShuffleVector instruction, which
2526         // must be a reduction operation.
2527         if (!U->hasOneUse())
2528           return false;
2529 
2530         auto U2 = dyn_cast<Instruction>(*U->user_begin());
2531         if (!U2 || U2->getOpcode() != OpCode)
2532           return false;
2533 
2534         // Check operands of the reduction operation.
2535         if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
2536             (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
2537           UsersToVisit.push_back(U2);
2538           ElemNumToReduce /= 2;
2539         } else
2540           return false;
2541       } else if (isa<ExtractElementInst>(U)) {
2542         // At this moment we should have reduced all elements in the vector.
2543         if (ElemNumToReduce != 1)
2544           return false;
2545 
2546         const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
2547         if (!Val || Val->getZExtValue() != 0)
2548           return false;
2549 
2550         ReduxExtracted = true;
2551       } else
2552         return false;
2553     }
2554   }
2555   return ReduxExtracted;
2556 }
2557 
2558 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
2559   SDValue Op1 = getValue(I.getOperand(0));
2560   SDValue Op2 = getValue(I.getOperand(1));
2561 
2562   bool nuw = false;
2563   bool nsw = false;
2564   bool exact = false;
2565   bool vec_redux = false;
2566   FastMathFlags FMF;
2567 
2568   if (const OverflowingBinaryOperator *OFBinOp =
2569           dyn_cast<const OverflowingBinaryOperator>(&I)) {
2570     nuw = OFBinOp->hasNoUnsignedWrap();
2571     nsw = OFBinOp->hasNoSignedWrap();
2572   }
2573   if (const PossiblyExactOperator *ExactOp =
2574           dyn_cast<const PossiblyExactOperator>(&I))
2575     exact = ExactOp->isExact();
2576   if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I))
2577     FMF = FPOp->getFastMathFlags();
2578 
2579   if (isVectorReductionOp(&I)) {
2580     vec_redux = true;
2581     DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");
2582   }
2583 
2584   SDNodeFlags Flags;
2585   Flags.setExact(exact);
2586   Flags.setNoSignedWrap(nsw);
2587   Flags.setNoUnsignedWrap(nuw);
2588   Flags.setVectorReduction(vec_redux);
2589   if (EnableFMFInDAG) {
2590     Flags.setAllowReciprocal(FMF.allowReciprocal());
2591     Flags.setAllowContract(FMF.allowContract());
2592     Flags.setNoInfs(FMF.noInfs());
2593     Flags.setNoNaNs(FMF.noNaNs());
2594     Flags.setNoSignedZeros(FMF.noSignedZeros());
2595     Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
2596   }
2597   SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
2598                                      Op1, Op2, &Flags);
2599   setValue(&I, BinNodeValue);
2600 }
2601 
2602 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2603   SDValue Op1 = getValue(I.getOperand(0));
2604   SDValue Op2 = getValue(I.getOperand(1));
2605 
2606   EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
2607       Op2.getValueType(), DAG.getDataLayout());
2608 
2609   // Coerce the shift amount to the right type if we can.
2610   if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2611     unsigned ShiftSize = ShiftTy.getSizeInBits();
2612     unsigned Op2Size = Op2.getValueSizeInBits();
2613     SDLoc DL = getCurSDLoc();
2614 
2615     // If the operand is smaller than the shift count type, promote it.
2616     if (ShiftSize > Op2Size)
2617       Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2618 
2619     // If the operand is larger than the shift count type but the shift
2620     // count type has enough bits to represent any shift value, truncate
2621     // it now. This is a common case and it exposes the truncate to
2622     // optimization early.
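         // For example, on a target whose shift-amount type is i8, an i64
         // shift amount satisfies Log2_32_Ceil(64) == 6 <= 8, so any valid
         // amount fits and it can be truncated to i8 here.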
2623     else if (ShiftSize >= Log2_32_Ceil(Op2Size))
2624       Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2625     // Otherwise we'll need to temporarily settle for some other convenient
2626     // type.  Type legalization will make adjustments once the shiftee is split.
2627     else
2628       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2629   }
2630 
2631   bool nuw = false;
2632   bool nsw = false;
2633   bool exact = false;
2634 
2635   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2637     if (const OverflowingBinaryOperator *OFBinOp =
2638             dyn_cast<const OverflowingBinaryOperator>(&I)) {
2639       nuw = OFBinOp->hasNoUnsignedWrap();
2640       nsw = OFBinOp->hasNoSignedWrap();
2641     }
2642     if (const PossiblyExactOperator *ExactOp =
2643             dyn_cast<const PossiblyExactOperator>(&I))
2644       exact = ExactOp->isExact();
2645   }
2646   SDNodeFlags Flags;
2647   Flags.setExact(exact);
2648   Flags.setNoSignedWrap(nsw);
2649   Flags.setNoUnsignedWrap(nuw);
2650   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2651                             &Flags);
2652   setValue(&I, Res);
2653 }
2654 
2655 void SelectionDAGBuilder::visitSDiv(const User &I) {
2656   SDValue Op1 = getValue(I.getOperand(0));
2657   SDValue Op2 = getValue(I.getOperand(1));
2658 
2659   SDNodeFlags Flags;
2660   Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2661                  cast<PossiblyExactOperator>(&I)->isExact());
2662   setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2663                            Op2, &Flags));
2664 }
2665 
2666 void SelectionDAGBuilder::visitICmp(const User &I) {
2667   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2668   if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2669     predicate = IC->getPredicate();
2670   else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2671     predicate = ICmpInst::Predicate(IC->getPredicate());
2672   SDValue Op1 = getValue(I.getOperand(0));
2673   SDValue Op2 = getValue(I.getOperand(1));
2674   ISD::CondCode Opcode = getICmpCondCode(predicate);
2675 
2676   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2677                                                         I.getType());
2678   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2679 }
2680 
2681 void SelectionDAGBuilder::visitFCmp(const User &I) {
2682   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2683   if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2684     predicate = FC->getPredicate();
2685   else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2686     predicate = FCmpInst::Predicate(FC->getPredicate());
2687   SDValue Op1 = getValue(I.getOperand(0));
2688   SDValue Op2 = getValue(I.getOperand(1));
2689   ISD::CondCode Condition = getFCmpCondCode(predicate);
2690 
2691   // FIXME: Fcmp instructions have fast-math-flags in IR, so we should use them.
2692   // FIXME: We should propagate the fast-math-flags to the DAG node itself for
2693   // further optimization, but currently FMF is only applicable to binary nodes.
2694   if (TM.Options.NoNaNsFPMath)
2695     Condition = getFCmpCodeWithoutNaN(Condition);
2696   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2697                                                         I.getType());
2698   setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2699 }
2700 
2701 // Check that every user of the select's condition is itself a select, e.g.
2702 // the condition has a single use or feeds a min/max pair of selects.
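     // For example (value names illustrative), a min/max pair sharing one
     // compare qualifies:
     //   %c   = icmp slt i32 %a, %b
     //   %min = select i1 %c, i32 %a, i32 %b
     //   %max = select i1 %c, i32 %b, i32 %a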
2703 static bool hasOnlySelectUsers(const Value *Cond) {
2704   return all_of(Cond->users(), [](const Value *V) {
2705     return isa<SelectInst>(V);
2706   });
2707 }
2708 
2709 void SelectionDAGBuilder::visitSelect(const User &I) {
2710   SmallVector<EVT, 4> ValueVTs;
2711   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2712                   ValueVTs);
2713   unsigned NumValues = ValueVTs.size();
2714   if (NumValues == 0) return;
2715 
2716   SmallVector<SDValue, 4> Values(NumValues);
2717   SDValue Cond     = getValue(I.getOperand(0));
2718   SDValue LHSVal   = getValue(I.getOperand(1));
2719   SDValue RHSVal   = getValue(I.getOperand(2));
2720   auto BaseOps = {Cond};
2721   ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2722     ISD::VSELECT : ISD::SELECT;
2723 
2724   // Min/max matching is only viable if all output VTs are the same.
2725   if (all_of(ValueVTs, [&](EVT VT) { return VT == ValueVTs[0]; })) {
2726     EVT VT = ValueVTs[0];
2727     LLVMContext &Ctx = *DAG.getContext();
2728     auto &TLI = DAG.getTargetLoweringInfo();
2729 
2730     // We care about the legality of the operation after it has been type
2731     // legalized.
2732     while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal &&
2733            VT != TLI.getTypeToTransformTo(Ctx, VT))
2734       VT = TLI.getTypeToTransformTo(Ctx, VT);
2735 
2736     // If the vselect is legal, assume we want to leave this as a vector setcc +
2737     // vselect. Otherwise, if this is going to be scalarized, we want to see if
2738     // min/max is legal on the scalar type.
2739     bool UseScalarMinMax = VT.isVector() &&
2740       !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
2741 
2742     Value *LHS, *RHS;
2743     auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2744     ISD::NodeType Opc = ISD::DELETED_NODE;
2745     switch (SPR.Flavor) {
2746     case SPF_UMAX:    Opc = ISD::UMAX; break;
2747     case SPF_UMIN:    Opc = ISD::UMIN; break;
2748     case SPF_SMAX:    Opc = ISD::SMAX; break;
2749     case SPF_SMIN:    Opc = ISD::SMIN; break;
2750     case SPF_FMINNUM:
2751       switch (SPR.NaNBehavior) {
2752       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2753       case SPNB_RETURNS_NAN:   Opc = ISD::FMINNAN; break;
2754       case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
2755       case SPNB_RETURNS_ANY: {
2756         if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
2757           Opc = ISD::FMINNUM;
2758         else if (TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT))
2759           Opc = ISD::FMINNAN;
2760         else if (UseScalarMinMax)
2761           Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
2762             ISD::FMINNUM : ISD::FMINNAN;
2763         break;
2764       }
2765       }
2766       break;
2767     case SPF_FMAXNUM:
2768       switch (SPR.NaNBehavior) {
2769       case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
2770       case SPNB_RETURNS_NAN:   Opc = ISD::FMAXNAN; break;
2771       case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
2772       case SPNB_RETURNS_ANY:
2774         if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
2775           Opc = ISD::FMAXNUM;
2776         else if (TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT))
2777           Opc = ISD::FMAXNAN;
2778         else if (UseScalarMinMax)
2779           Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
2780             ISD::FMAXNUM : ISD::FMAXNAN;
2781         break;
2782       }
2783       break;
2784     default: break;
2785     }
2786 
2787     if (Opc != ISD::DELETED_NODE &&
2788         (TLI.isOperationLegalOrCustom(Opc, VT) ||
2789          (UseScalarMinMax &&
2790           TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
2791         // If the underlying comparison instruction is used by any other
2792         // instruction, the consumed instructions won't be destroyed, so it is
2793         // not profitable to convert to a min/max.
2794         hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
2795       OpCode = Opc;
2796       LHSVal = getValue(LHS);
2797       RHSVal = getValue(RHS);
2798       BaseOps = {};
2799     }
2800   }
2801 
2802   for (unsigned i = 0; i != NumValues; ++i) {
2803     SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
2804     Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
2805     Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
2806     Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
2807                             LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
2808                             Ops);
2809   }
2810 
2811   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2812                            DAG.getVTList(ValueVTs), Values));
2813 }
2814 
2815 void SelectionDAGBuilder::visitTrunc(const User &I) {
2816   // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2817   SDValue N = getValue(I.getOperand(0));
2818   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2819                                                         I.getType());
2820   setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
2821 }
2822 
2823 void SelectionDAGBuilder::visitZExt(const User &I) {
2824   // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2825   // ZExt also can't be a cast to bool for the same reason, so nothing to do.
2826   SDValue N = getValue(I.getOperand(0));
2827   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2828                                                         I.getType());
2829   setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
2830 }
2831 
2832 void SelectionDAGBuilder::visitSExt(const User &I) {
2833   // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2834   // SExt also can't be a cast to bool for the same reason, so nothing to do.
2835   SDValue N = getValue(I.getOperand(0));
2836   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2837                                                         I.getType());
2838   setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
2839 }
2840 
2841 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
2842   // FPTrunc is never a no-op cast, no need to check
2843   SDValue N = getValue(I.getOperand(0));
2844   SDLoc dl = getCurSDLoc();
2845   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2846   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
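       // The trailing constant 0 operand of FP_ROUND means the truncation may
       // change the value; a 1 would assert that the value is unchanged.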
2847   setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
2848                            DAG.getTargetConstant(
2849                                0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
2850 }
2851 
2852 void SelectionDAGBuilder::visitFPExt(const User &I) {
2853   // FPExt is never a no-op cast, no need to check
2854   SDValue N = getValue(I.getOperand(0));
2855   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2856                                                         I.getType());
2857   setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
2858 }
2859 
2860 void SelectionDAGBuilder::visitFPToUI(const User &I) {
2861   // FPToUI is never a no-op cast, no need to check
2862   SDValue N = getValue(I.getOperand(0));
2863   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2864                                                         I.getType());
2865   setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
2866 }
2867 
2868 void SelectionDAGBuilder::visitFPToSI(const User &I) {
2869   // FPToSI is never a no-op cast, no need to check
2870   SDValue N = getValue(I.getOperand(0));
2871   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2872                                                         I.getType());
2873   setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
2874 }
2875 
2876 void SelectionDAGBuilder::visitUIToFP(const User &I) {
2877   // UIToFP is never a no-op cast, no need to check
2878   SDValue N = getValue(I.getOperand(0));
2879   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2880                                                         I.getType());
2881   setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
2882 }
2883 
2884 void SelectionDAGBuilder::visitSIToFP(const User &I) {
2885   // SIToFP is never a no-op cast, no need to check
2886   SDValue N = getValue(I.getOperand(0));
2887   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2888                                                         I.getType());
2889   setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
2890 }
2891 
2892 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
2893   // What to do depends on the size of the integer and the size of the pointer.
2894   // We can either truncate, zero extend, or no-op, accordingly.
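       // For example, on a 32-bit target, ptrtoint to i64 zero-extends the
       // pointer, ptrtoint to i16 truncates it, and ptrtoint to i32 is a no-op.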
2895   SDValue N = getValue(I.getOperand(0));
2896   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2897                                                         I.getType());
2898   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2899 }
2900 
2901 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
2902   // What to do depends on the size of the integer and the size of the pointer.
2903   // We can either truncate, zero extend, or no-op, accordingly.
2904   SDValue N = getValue(I.getOperand(0));
2905   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2906                                                         I.getType());
2907   setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2908 }
2909 
2910 void SelectionDAGBuilder::visitBitCast(const User &I) {
2911   SDValue N = getValue(I.getOperand(0));
2912   SDLoc dl = getCurSDLoc();
2913   EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
2914                                                         I.getType());
2915 
2916   // BitCast assures us that source and destination are the same size so this is
2917   // either a BITCAST or a no-op.
2918   if (DestVT != N.getValueType())
2919     setValue(&I, DAG.getNode(ISD::BITCAST, dl,
2920                              DestVT, N)); // convert types.
2921   // Check if the original LLVM IR operand was a ConstantInt, because getValue()
2922   // might fold any kind of constant expression to an integer constant and that
2923   // is not what we are looking for. Only recognize a bitcast of a genuine
2924   // constant integer as an opaque constant.
2925   else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
2926     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
2927                                  /*isOpaque=*/true));
2928   else
2929     setValue(&I, N);            // noop cast.
2930 }
2931 
2932 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
2933   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2934   const Value *SV = I.getOperand(0);
2935   SDValue N = getValue(SV);
2936   EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2937 
2938   unsigned SrcAS = SV->getType()->getPointerAddressSpace();
2939   unsigned DestAS = I.getType()->getPointerAddressSpace();
2940 
2941   if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
2942     N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
2943 
2944   setValue(&I, N);
2945 }
2946 
2947 void SelectionDAGBuilder::visitInsertElement(const User &I) {
2948   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2949   SDValue InVec = getValue(I.getOperand(0));
2950   SDValue InVal = getValue(I.getOperand(1));
2951   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
2952                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
2953   setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
2954                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
2955                            InVec, InVal, InIdx));
2956 }
2957 
2958 void SelectionDAGBuilder::visitExtractElement(const User &I) {
2959   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2960   SDValue InVec = getValue(I.getOperand(0));
2961   SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
2962                                      TLI.getVectorIdxTy(DAG.getDataLayout()));
2963   setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
2964                            TLI.getValueType(DAG.getDataLayout(), I.getType()),
2965                            InVec, InIdx));
2966 }
2967 
2968 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
2969   SDValue Src1 = getValue(I.getOperand(0));
2970   SDValue Src2 = getValue(I.getOperand(1));
2971   SDLoc DL = getCurSDLoc();
2972 
2973   SmallVector<int, 8> Mask;
2974   ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
2975   unsigned MaskNumElts = Mask.size();
2976 
2977   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2978   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
2979   EVT SrcVT = Src1.getValueType();
2980   unsigned SrcNumElts = SrcVT.getVectorNumElements();
2981 
2982   if (SrcNumElts == MaskNumElts) {
2983     setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
2984     return;
2985   }
2986 
2987   // Normalize the shuffle vector since the mask and vector lengths don't match.
2988   if (SrcNumElts < MaskNumElts) {
2989     // The mask is longer than the source vectors. We can concatenate the
2990     // source vectors to make their lengths match the mask.
2991 
2992     if (MaskNumElts % SrcNumElts == 0) {
2993       // Mask length is a multiple of the source vector length.
2994       // Check if the shuffle is some kind of concatenation of the input
2995       // vectors.
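           // For example, with <2 x i32> sources the mask <0, 1, 2, 3> is
           // exactly concat(Src1, Src2) and lowers to one CONCAT_VECTORS.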
2996       unsigned NumConcat = MaskNumElts / SrcNumElts;
2997       bool IsConcat = true;
2998       SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
2999       for (unsigned i = 0; i != MaskNumElts; ++i) {
3000         int Idx = Mask[i];
3001         if (Idx < 0)
3002           continue;
3003         // Ensure the indices in each SrcVT sized piece are sequential and that
3004         // the same source is used for the whole piece.
3005         if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3006             (ConcatSrcs[i / SrcNumElts] >= 0 &&
3007              ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3008           IsConcat = false;
3009           break;
3010         }
3011         // Remember which source this index came from.
3012         ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3013       }
3014 
3015       // If the shuffle is concatenating multiple vectors together, just
3016       // emit a CONCAT_VECTORS operation.
3017       if (IsConcat) {
3018         SmallVector<SDValue, 8> ConcatOps;
3019         for (auto Src : ConcatSrcs) {
3020           if (Src < 0)
3021             ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3022           else if (Src == 0)
3023             ConcatOps.push_back(Src1);
3024           else
3025             ConcatOps.push_back(Src2);
3026         }
3027         setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3028         return;
3029       }
3030     }
3031 
3032     unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3033     unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3034     EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3035                                     PaddedMaskNumElts);
3036 
3037     // Pad both vectors with undefs to make them the same length as the mask.
3038     SDValue UndefVal = DAG.getUNDEF(SrcVT);
3039 
3040     SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3041     SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3042     MOps1[0] = Src1;
3043     MOps2[0] = Src2;
3044 
3045     Src1 = Src1.isUndef()
3046                ? DAG.getUNDEF(PaddedVT)
3047                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3048     Src2 = Src2.isUndef()
3049                ? DAG.getUNDEF(PaddedVT)
3050                : DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3051 
3052     // Readjust mask for new input vector length.
3053     SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3054     for (unsigned i = 0; i != MaskNumElts; ++i) {
3055       int Idx = Mask[i];
3056       if (Idx >= (int)SrcNumElts)
3057         Idx -= SrcNumElts - PaddedMaskNumElts;
3058       MappedOps[i] = Idx;
3059     }
3060 
3061     SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3062 
3063     // If the concatenated vector was padded, extract a subvector with the
3064     // correct number of elements.
3065     if (MaskNumElts != PaddedMaskNumElts)
3066       Result = DAG.getNode(
3067           ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3068           DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3069 
3070     setValue(&I, Result);
3071     return;
3072   }
3073 
3074   if (SrcNumElts > MaskNumElts) {
3075     // Analyze the access pattern of the vector to see if we can extract
3076     // two subvectors and do the shuffle.
3077     int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3078     bool CanExtract = true;
3079     for (int Idx : Mask) {
3080       unsigned Input = 0;
3081       if (Idx < 0)
3082         continue;
3083 
3084       if (Idx >= (int)SrcNumElts) {
3085         Input = 1;
3086         Idx -= SrcNumElts;
3087       }
3088 
3089       // If all the indices come from the same MaskNumElts sized portion of
3090       // the sources we can use extract. Also make sure the extract wouldn't
3091       // extract past the end of the source.
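           // For example, an <8 x i32> source with the 4-element mask
           // <4, 5, 6, 7> is just the upper half of Src1: an EXTRACT_SUBVECTOR
           // at start index 4 followed by an identity shuffle.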
3092       int NewStartIdx = alignDown(Idx, MaskNumElts);
3093       if (NewStartIdx + MaskNumElts > SrcNumElts ||
3094           (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3095         CanExtract = false;
3096       // Make sure we always update StartIdx as we use it to track if all
3097       // elements are undef.
3098       StartIdx[Input] = NewStartIdx;
3099     }
3100 
3101     if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3102       setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3103       return;
3104     }
3105     if (CanExtract) {
3106       // Extract the appropriate subvectors and generate a vector shuffle.
3107       for (unsigned Input = 0; Input < 2; ++Input) {
3108         SDValue &Src = Input == 0 ? Src1 : Src2;
3109         if (StartIdx[Input] < 0)
3110           Src = DAG.getUNDEF(VT);
3111         else {
3112           Src = DAG.getNode(
3113               ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3114               DAG.getConstant(StartIdx[Input], DL,
3115                               TLI.getVectorIdxTy(DAG.getDataLayout())));
3116         }
3117       }
3118 
3119       // Calculate new mask.
3120       SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3121       for (int &Idx : MappedOps) {
3122         if (Idx >= (int)SrcNumElts)
3123           Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3124         else if (Idx >= 0)
3125           Idx -= StartIdx[0];
3126       }
3127 
3128       setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3129       return;
3130     }
3131   }
3132 
3133   // We can't use either concat vectors or extract subvectors, so fall back
3134   // to replacing the shuffle with per-element extracts and a build vector.
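       // For example, an irregular <4 x i32> mask such as <0, 5, 1, 7> becomes
       // four EXTRACT_VECTOR_ELTs feeding a single BUILD_VECTOR.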
3136   EVT EltVT = VT.getVectorElementType();
3137   EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3138   SmallVector<SDValue,8> Ops;
3139   for (int Idx : Mask) {
3140     SDValue Res;
3141 
3142     if (Idx < 0) {
3143       Res = DAG.getUNDEF(EltVT);
3144     } else {
3145       SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3146       if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3147 
3148       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3149                         EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3150     }
3151 
3152     Ops.push_back(Res);
3153   }
3154 
3155   setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Ops));
3156 }
3157 
3158 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3159   const Value *Op0 = I.getOperand(0);
3160   const Value *Op1 = I.getOperand(1);
3161   Type *AggTy = I.getType();
3162   Type *ValTy = Op1->getType();
3163   bool IntoUndef = isa<UndefValue>(Op0);
3164   bool FromUndef = isa<UndefValue>(Op1);
3165 
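       // For example, inserting into { i32, { float, double } } at indices
       // [1, 1] gives LinearIndex == 2, the flattened position of the double.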
3166   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3167 
3168   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3169   SmallVector<EVT, 4> AggValueVTs;
3170   ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3171   SmallVector<EVT, 4> ValValueVTs;
3172   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3173 
3174   unsigned NumAggValues = AggValueVTs.size();
3175   unsigned NumValValues = ValValueVTs.size();
3176   SmallVector<SDValue, 4> Values(NumAggValues);
3177 
3178   // Ignore an insertvalue that produces an empty object
3179   if (!NumAggValues) {
3180     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3181     return;
3182   }
3183 
3184   SDValue Agg = getValue(Op0);
3185   unsigned i = 0;
3186   // Copy the beginning value(s) from the original aggregate.
3187   for (; i != LinearIndex; ++i)
3188     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3189                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3190   // Copy values from the inserted value(s).
3191   if (NumValValues) {
3192     SDValue Val = getValue(Op1);
3193     for (; i != LinearIndex + NumValValues; ++i)
3194       Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3195                   SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3196   }
3197   // Copy remaining value(s) from the original aggregate.
3198   for (; i != NumAggValues; ++i)
3199     Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3200                 SDValue(Agg.getNode(), Agg.getResNo() + i);
3201 
3202   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3203                            DAG.getVTList(AggValueVTs), Values));
3204 }
3205 
3206 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3207   const Value *Op0 = I.getOperand(0);
3208   Type *AggTy = Op0->getType();
3209   Type *ValTy = I.getType();
3210   bool OutOfUndef = isa<UndefValue>(Op0);
3211 
3212   unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3213 
3214   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3215   SmallVector<EVT, 4> ValValueVTs;
3216   ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3217 
3218   unsigned NumValValues = ValValueVTs.size();
3219 
3220   // Ignore an extractvalue that produces an empty object.
3221   if (!NumValValues) {
3222     setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3223     return;
3224   }
3225 
3226   SmallVector<SDValue, 4> Values(NumValValues);
3227 
3228   SDValue Agg = getValue(Op0);
3229   // Copy out the selected value(s).
3230   for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3231     Values[i - LinearIndex] =
3232       OutOfUndef ?
3233         DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3234         SDValue(Agg.getNode(), Agg.getResNo() + i);
3235 
3236   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3237                            DAG.getVTList(ValValueVTs), Values));
3238 }
3239 
3240 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3241   Value *Op0 = I.getOperand(0);
3242   // Note that the pointer operand may be a vector of pointers. Take the scalar
3243   // element which holds a pointer.
3244   unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3245   SDValue N = getValue(Op0);
3246   SDLoc dl = getCurSDLoc();
3247 
3248   // Normalize a vector GEP: all scalar operands must be broadcast into
3249   // splat vectors.
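       // For example (names illustrative):
       //   %gep = getelementptr i32, i32* %ptr, <4 x i64> %idx
       // requires %ptr to be splatted to a <4 x i32*> vector so the address
       // arithmetic below stays entirely in vector form.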
3250   unsigned VectorWidth = I.getType()->isVectorTy() ?
3251     cast<VectorType>(I.getType())->getVectorNumElements() : 0;
3252 
3253   if (VectorWidth && !N.getValueType().isVector()) {
3254     LLVMContext &Context = *DAG.getContext();
3255     EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3256     N = DAG.getSplatBuildVector(VT, dl, N);
3257   }
3258 
3259   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3260        GTI != E; ++GTI) {
3261     const Value *Idx = GTI.getOperand();
3262     if (StructType *StTy = GTI.getStructTypeOrNull()) {
3263       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3264       if (Field) {
3265         // N = N + Offset
3266         uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3267 
3268         // In an inbounds GEP with an offset that is nonnegative even when
3269         // interpreted as signed, assume there is no unsigned overflow.
3270         SDNodeFlags Flags;
3271         if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3272           Flags.setNoUnsignedWrap(true);
3273 
3274         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3275                         DAG.getConstant(Offset, dl, N.getValueType()), &Flags);
3276       }
3277     } else {
3278       MVT PtrTy =
3279           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout(), AS);
3280       unsigned PtrSize = PtrTy.getSizeInBits();
3281       APInt ElementSize(PtrSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3282 
3283       // If this is a scalar constant or a splat vector of constants,
3284       // handle it quickly.
3285       const auto *CI = dyn_cast<ConstantInt>(Idx);
3286       if (!CI && isa<ConstantDataVector>(Idx) &&
3287           cast<ConstantDataVector>(Idx)->getSplatValue())
3288         CI = cast<ConstantInt>(cast<ConstantDataVector>(Idx)->getSplatValue());
3289 
3290       if (CI) {
3291         if (CI->isZero())
3292           continue;
3293         APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
3294         LLVMContext &Context = *DAG.getContext();
3295         SDValue OffsVal = VectorWidth ?
3296           DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, PtrTy, VectorWidth)) :
3297           DAG.getConstant(Offs, dl, PtrTy);
3298 
3299         // In an inbounds GEP with an offset that is nonnegative even when
3300         // interpreted as signed, assume there is no unsigned overflow.
3301         SDNodeFlags Flags;
3302         if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3303           Flags.setNoUnsignedWrap(true);
3304 
3305         N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, &Flags);
3306         continue;
3307       }
3308 
3309       // N = N + Idx * ElementSize;
3310       SDValue IdxN = getValue(Idx);
3311 
3312       if (!IdxN.getValueType().isVector() && VectorWidth) {
3313         MVT VT = MVT::getVectorVT(IdxN.getValueType().getSimpleVT(), VectorWidth);
3314         IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
3315       }
3316 
3317       // If the index is smaller or larger than intptr_t, truncate or extend
3318       // it.
3319       IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3320 
3321       // If this is a multiply by a power of two, turn it into a shl
3322       // immediately.  This is a very common case.
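           // For example, indexing an array of i64 gives ElementSize == 8,
           // so Idx * 8 is emitted directly as Idx << 3.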
3323       if (ElementSize != 1) {
3324         if (ElementSize.isPowerOf2()) {
3325           unsigned Amt = ElementSize.logBase2();
3326           IdxN = DAG.getNode(ISD::SHL, dl,
3327                              N.getValueType(), IdxN,
3328                              DAG.getConstant(Amt, dl, IdxN.getValueType()));
3329         } else {
3330           SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
3331           IdxN = DAG.getNode(ISD::MUL, dl,
3332                              N.getValueType(), IdxN, Scale);
3333         }
3334       }
3335 
3336       N = DAG.getNode(ISD::ADD, dl,
3337                       N.getValueType(), N, IdxN);
3338     }
3339   }
3340 
3341   setValue(&I, N);
3342 }
3343 
3344 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3345   // If this is a fixed sized alloca in the entry block of the function,
3346   // allocate it statically on the stack.
3347   if (FuncInfo.StaticAllocaMap.count(&I))
3348     return;   // getValue will auto-populate this.
3349 
3350   SDLoc dl = getCurSDLoc();
3351   Type *Ty = I.getAllocatedType();
3352   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3353   auto &DL = DAG.getDataLayout();
3354   uint64_t TySize = DL.getTypeAllocSize(Ty);
3355   unsigned Align =
3356       std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3357 
3358   SDValue AllocSize = getValue(I.getArraySize());
3359 
3360   EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout());
3361   if (AllocSize.getValueType() != IntPtr)
3362     AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
3363 
3364   AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
3365                           AllocSize,
3366                           DAG.getConstant(TySize, dl, IntPtr));
3367 
3368   // Handle alignment.  If the requested alignment is less than or equal to
3369   // the stack alignment, ignore it.  If it is greater than the stack
3370   // alignment, we note this in the DYNAMIC_STACKALLOC node.
3371   unsigned StackAlign =
3372       DAG.getSubtarget().getFrameLowering()->getStackAlignment();
3373   if (Align <= StackAlign)
3374     Align = 0;
3375 
3376   // Round the size of the allocation up to the stack alignment size
3377   // by adding StackAlign - 1 to the size. This doesn't overflow because
3378   // we're computing an address inside an alloca.
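       // For example, with StackAlign == 16 a 20-byte request becomes
       // (20 + 15) & ~15 == 32 bytes.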
3379   SDNodeFlags Flags;
3380   Flags.setNoUnsignedWrap(true);
3381   AllocSize = DAG.getNode(ISD::ADD, dl,
3382                           AllocSize.getValueType(), AllocSize,
3383                           DAG.getIntPtrConstant(StackAlign - 1, dl), &Flags);
3384 
3385   // Mask out the low bits for alignment purposes.
3386   AllocSize = DAG.getNode(ISD::AND, dl,
3387                           AllocSize.getValueType(), AllocSize,
3388                           DAG.getIntPtrConstant(~(uint64_t)(StackAlign - 1),
3389                                                 dl));
3390 
3391   SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align, dl) };
3392   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3393   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
3394   setValue(&I, DSA);
3395   DAG.setRoot(DSA.getValue(1));
3396 
3397   assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
3398 }
3399 
3400 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3401   if (I.isAtomic())
3402     return visitAtomicLoad(I);
3403 
3404   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3405   const Value *SV = I.getOperand(0);
3406   if (TLI.supportSwiftError()) {
3407     // Swifterror values can come from either a function parameter with
3408     // swifterror attribute or an alloca with swifterror attribute.
3409     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
3410       if (Arg->hasSwiftErrorAttr())
3411         return visitLoadFromSwiftError(I);
3412     }
3413 
3414     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
3415       if (Alloca->isSwiftError())
3416         return visitLoadFromSwiftError(I);
3417     }
3418   }
3419 
3420   SDValue Ptr = getValue(SV);
3421 
3422   Type *Ty = I.getType();
3423 
3424   bool isVolatile = I.isVolatile();
3425   bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
3426   bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr;
3427   bool isDereferenceable = isDereferenceablePointer(SV, DAG.getDataLayout());
3428   unsigned Alignment = I.getAlignment();
3429 
3430   AAMDNodes AAInfo;
3431   I.getAAMetadata(AAInfo);
3432   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3433 
3434   SmallVector<EVT, 4> ValueVTs;
3435   SmallVector<uint64_t, 4> Offsets;
3436   ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
3437   unsigned NumValues = ValueVTs.size();
3438   if (NumValues == 0)
3439     return;
3440 
3441   SDValue Root;
3442   bool ConstantMemory = false;
3443   if (isVolatile || NumValues > MaxParallelChains)
3444     // Serialize volatile loads with other side effects.
3445     Root = getRoot();
3446   else if (AA->pointsToConstantMemory(MemoryLocation(
3447                SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo))) {
3448     // Do not serialize (non-volatile) loads of constant memory with anything.
3449     Root = DAG.getEntryNode();
3450     ConstantMemory = true;
3451   } else {
3452     // Do not serialize non-volatile loads against each other.
3453     Root = DAG.getRoot();
3454   }
3455 
3456   SDLoc dl = getCurSDLoc();
3457 
3458   if (isVolatile)
3459     Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
3460 
3461   // An aggregate load cannot wrap around the address space, so offsets to its
3462   // parts don't wrap either.
3463   SDNodeFlags Flags;
3464   Flags.setNoUnsignedWrap(true);
3465 
3466   SmallVector<SDValue, 4> Values(NumValues);
3467   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3468   EVT PtrVT = Ptr.getValueType();
3469   unsigned ChainI = 0;
3470   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3471     // Serializing loads here may result in excessive register pressure, and
3472     // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3473     // could recover a bit by hoisting nodes upward in the chain by recognizing
3474     // they are side-effect free or do not alias. The optimizer should really
3475     // avoid this case by converting large object/array copies to llvm.memcpy
3476     // (MaxParallelChains should always remain as a failsafe).
3477     if (ChainI == MaxParallelChains) {
3478       assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3479       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3480                                   makeArrayRef(Chains.data(), ChainI));
3481       Root = Chain;
3482       ChainI = 0;
3483     }
3484     SDValue A = DAG.getNode(ISD::ADD, dl,
3485                             PtrVT, Ptr,
3486                             DAG.getConstant(Offsets[i], dl, PtrVT),
3487                             &Flags);
3488     auto MMOFlags = MachineMemOperand::MONone;
3489     if (isVolatile)
3490       MMOFlags |= MachineMemOperand::MOVolatile;
3491     if (isNonTemporal)
3492       MMOFlags |= MachineMemOperand::MONonTemporal;
3493     if (isInvariant)
3494       MMOFlags |= MachineMemOperand::MOInvariant;
3495     if (isDereferenceable)
3496       MMOFlags |= MachineMemOperand::MODereferenceable;
3497 
3498     SDValue L = DAG.getLoad(ValueVTs[i], dl, Root, A,
3499                             MachinePointerInfo(SV, Offsets[i]), Alignment,
3500                             MMOFlags, AAInfo, Ranges);
3501 
3502     Values[i] = L;
3503     Chains[ChainI] = L.getValue(1);
3504   }
3505 
3506   if (!ConstantMemory) {
3507     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3508                                 makeArrayRef(Chains.data(), ChainI));
3509     if (isVolatile)
3510       DAG.setRoot(Chain);
3511     else
3512       PendingLoads.push_back(Chain);
3513   }
3514 
3515   setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
3516                            DAG.getVTList(ValueVTs), Values));
3517 }
3518 
3519 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
3520   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3521   assert(TLI.supportSwiftError() &&
3522          "call visitStoreToSwiftError when backend supports swifterror");
3523 
3524   SmallVector<EVT, 4> ValueVTs;
3525   SmallVector<uint64_t, 4> Offsets;
3526   const Value *SrcV = I.getOperand(0);
3527   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3528                   SrcV->getType(), ValueVTs, &Offsets);
3529   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3530          "expect a single EVT for swifterror");
3531 
3532   SDValue Src = getValue(SrcV);
3533   // Create a virtual register, then copy the swifterror value into it.
3534   auto &DL = DAG.getDataLayout();
3535   const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
3536   unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
3537   // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
3538   // Chain can be getRoot or getControlRoot.
3539   SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
3540                                       SDValue(Src.getNode(), Src.getResNo()));
3541   DAG.setRoot(CopyNode);
3542   FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
3543 }
3544 
3545 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
3546   assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
3547          "call visitLoadFromSwiftError when backend supports swifterror");
3548 
3549   assert(!I.isVolatile() &&
3550          I.getMetadata(LLVMContext::MD_nontemporal) == nullptr &&
3551          I.getMetadata(LLVMContext::MD_invariant_load) == nullptr &&
3552          "Support volatile, non temporal, invariant for load_from_swift_error");
3553 
3554   const Value *SV = I.getOperand(0);
3555   Type *Ty = I.getType();
3556   AAMDNodes AAInfo;
3557   I.getAAMetadata(AAInfo);
3558   assert(!AA->pointsToConstantMemory(MemoryLocation(
3559              SV, DAG.getDataLayout().getTypeStoreSize(Ty), AAInfo)) &&
3560          "load_from_swift_error should not be constant memory");
3561 
3562   SmallVector<EVT, 4> ValueVTs;
3563   SmallVector<uint64_t, 4> Offsets;
3564   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
3565                   ValueVTs, &Offsets);
3566   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
3567          "expect a single EVT for swifterror");
3568 
3569   // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
3570   SDValue L = DAG.getCopyFromReg(
3571       getRoot(), getCurSDLoc(),
3572       FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, SV), ValueVTs[0]);
3573 
3574   setValue(&I, L);
3575 }
3576 
3577 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3578   if (I.isAtomic())
3579     return visitAtomicStore(I);
3580 
3581   const Value *SrcV = I.getOperand(0);
3582   const Value *PtrV = I.getOperand(1);
3583 
3584   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3585   if (TLI.supportSwiftError()) {
3586     // Swifterror values can come from either a function parameter with
3587     // swifterror attribute or an alloca with swifterror attribute.
3588     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
3589       if (Arg->hasSwiftErrorAttr())
3590         return visitStoreToSwiftError(I);
3591     }
3592 
3593     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
3594       if (Alloca->isSwiftError())
3595         return visitStoreToSwiftError(I);
3596     }
3597   }
3598 
3599   SmallVector<EVT, 4> ValueVTs;
3600   SmallVector<uint64_t, 4> Offsets;
3601   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
3602                   SrcV->getType(), ValueVTs, &Offsets);
3603   unsigned NumValues = ValueVTs.size();
3604   if (NumValues == 0)
3605     return;
3606 
3607   // Get the lowered operands. Note that we do this after
3608   // checking if NumValues is zero, because with zero values
3609   // the operands won't have entries in the map.
3610   SDValue Src = getValue(SrcV);
3611   SDValue Ptr = getValue(PtrV);
3612 
3613   SDValue Root = getRoot();
3614   SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
3615   SDLoc dl = getCurSDLoc();
3616   EVT PtrVT = Ptr.getValueType();
3617   unsigned Alignment = I.getAlignment();
3618   AAMDNodes AAInfo;
3619   I.getAAMetadata(AAInfo);
3620 
3621   auto MMOFlags = MachineMemOperand::MONone;
3622   if (I.isVolatile())
3623     MMOFlags |= MachineMemOperand::MOVolatile;
3624   if (I.getMetadata(LLVMContext::MD_nontemporal) != nullptr)
3625     MMOFlags |= MachineMemOperand::MONonTemporal;
3626 
3627   // An aggregate store cannot wrap around the address space, so offsets to its
3628   // parts don't wrap either.
3629   SDNodeFlags Flags;
3630   Flags.setNoUnsignedWrap(true);
3631 
3632   unsigned ChainI = 0;
3633   for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3634     // See visitLoad comments.
3635     if (ChainI == MaxParallelChains) {
3636       SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3637                                   makeArrayRef(Chains.data(), ChainI));
3638       Root = Chain;
3639       ChainI = 0;
3640     }
3641     SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3642                               DAG.getConstant(Offsets[i], dl, PtrVT), &Flags);
3643     SDValue St = DAG.getStore(
3644         Root, dl, SDValue(Src.getNode(), Src.getResNo() + i), Add,
3645         MachinePointerInfo(PtrV, Offsets[i]), Alignment, MMOFlags, AAInfo);
3646     Chains[ChainI] = St;
3647   }
3648 
3649   SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3650                                   makeArrayRef(Chains.data(), ChainI));
3651   DAG.setRoot(StoreNode);
3652 }
3653 
3654 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
3655                                            bool IsCompressing) {
3656   SDLoc sdl = getCurSDLoc();
3657 
3658   auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3659                            unsigned& Alignment) {
3660     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3661     Src0 = I.getArgOperand(0);
3662     Ptr = I.getArgOperand(1);
3663     Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3664     Mask = I.getArgOperand(3);
3665   };
3666   auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3667                            unsigned& Alignment) {
3668     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
3669     Src0 = I.getArgOperand(0);
3670     Ptr = I.getArgOperand(1);
3671     Mask = I.getArgOperand(2);
3672     Alignment = 0;
3673   };
3674 
3675   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3676   unsigned Alignment;
3677   if (IsCompressing)
3678     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3679   else
3680     getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3681 
3682   SDValue Ptr = getValue(PtrOperand);
3683   SDValue Src0 = getValue(Src0Operand);
3684   SDValue Mask = getValue(MaskOperand);
3685 
3686   EVT VT = Src0.getValueType();
3687   if (!Alignment)
3688     Alignment = DAG.getEVTAlignment(VT);
3689 
3690   AAMDNodes AAInfo;
3691   I.getAAMetadata(AAInfo);
3692 
3693   MachineMemOperand *MMO =
3694     DAG.getMachineFunction().
3695     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3696                           MachineMemOperand::MOStore,  VT.getStoreSize(),
3697                           Alignment, AAInfo);
3698   SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3699                                          MMO, false /* Truncating */,
3700                                          IsCompressing);
3701   DAG.setRoot(StoreNode);
3702   setValue(&I, StoreNode);
3703 }
3704 
3705 // Get a uniform base for the Gather/Scatter intrinsic.
3706 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
3707 // We try to represent it as a base pointer + vector of indices.
3708 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
3709 // The first operand of the GEP may be a single pointer or a vector of pointers
3710 // Example:
3711 //   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
3712 //  or
3713 //   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
3714 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
3715 //
3716 // When the first GEP operand is a single pointer - it is the uniform base we
3717 // are looking for. If first operand of the GEP is a splat vector - we
3718 // extract the spalt value and use it as a uniform base.
3719 // In all other cases the function returns 'false'.
3720 //
3721 static bool getUniformBase(const Value* &Ptr, SDValue& Base, SDValue& Index,
3722                            SelectionDAGBuilder* SDB) {
3723 
3724   SelectionDAG& DAG = SDB->DAG;
3725   LLVMContext &Context = *DAG.getContext();
3726 
3727   assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
3728   const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
3729   if (!GEP || GEP->getNumOperands() > 2)
3730     return false;
3731 
3732   const Value *GEPPtr = GEP->getPointerOperand();
3733   if (!GEPPtr->getType()->isVectorTy())
3734     Ptr = GEPPtr;
3735   else if (!(Ptr = getSplatValue(GEPPtr)))
3736     return false;
3737 
3738   Value *IndexVal = GEP->getOperand(1);
3739 
3740   // The operands of the GEP may be defined in another basic block.
3741   // In this case we won't find SDNodes for the operands.
3742   if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal))
3743     return false;
3744 
3745   Base = SDB->getValue(Ptr);
3746   Index = SDB->getValue(IndexVal);
3747 
3748   // Suppress sign extension.
3749   if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) {
3750     if (SDB->findValue(Sext->getOperand(0))) {
3751       IndexVal = Sext->getOperand(0);
3752       Index = SDB->getValue(IndexVal);
3753     }
3754   }
3755   if (!Index.getValueType().isVector()) {
3756     unsigned GEPWidth = GEP->getType()->getVectorNumElements();
3757     EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
3758     Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
3759   }
3760   return true;
3761 }
3762 
3763 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
3764   SDLoc sdl = getCurSDLoc();
3765 
3766   // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
3767   const Value *Ptr = I.getArgOperand(1);
3768   SDValue Src0 = getValue(I.getArgOperand(0));
3769   SDValue Mask = getValue(I.getArgOperand(3));
3770   EVT VT = Src0.getValueType();
3771   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
3772   if (!Alignment)
3773     Alignment = DAG.getEVTAlignment(VT);
3774   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3775 
3776   AAMDNodes AAInfo;
3777   I.getAAMetadata(AAInfo);
3778 
3779   SDValue Base;
3780   SDValue Index;
3781   const Value *BasePtr = Ptr;
3782   bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3783 
3784   const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
3785   MachineMemOperand *MMO = DAG.getMachineFunction().
3786     getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
3787                          MachineMemOperand::MOStore,  VT.getStoreSize(),
3788                          Alignment, AAInfo);
3789   if (!UniformBase) {
3790     Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
3791     Index = getValue(Ptr);
3792   }
3793   SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index };
3794   SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
3795                                          Ops, MMO);
3796   DAG.setRoot(Scatter);
3797   setValue(&I, Scatter);
3798 }
3799 
3800 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
3801   SDLoc sdl = getCurSDLoc();
3802 
3803   auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3804                            unsigned& Alignment) {
3805     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
3806     Ptr = I.getArgOperand(0);
3807     Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
3808     Mask = I.getArgOperand(2);
3809     Src0 = I.getArgOperand(3);
3810   };
3811   auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
3812                            unsigned& Alignment) {
3813     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
3814     Ptr = I.getArgOperand(0);
3815     Alignment = 0;
3816     Mask = I.getArgOperand(1);
3817     Src0 = I.getArgOperand(2);
3818   };
3819 
3820   Value  *PtrOperand, *MaskOperand, *Src0Operand;
3821   unsigned Alignment;
3822   if (IsExpanding)
3823     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3824   else
3825     getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
3826 
3827   SDValue Ptr = getValue(PtrOperand);
3828   SDValue Src0 = getValue(Src0Operand);
3829   SDValue Mask = getValue(MaskOperand);
3830 
3831   EVT VT = Src0.getValueType();
3832   if (!Alignment)
3833     Alignment = DAG.getEVTAlignment(VT);
3834 
3835   AAMDNodes AAInfo;
3836   I.getAAMetadata(AAInfo);
3837   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3838 
3839   // Do not serialize masked loads of constant memory with anything.
3840   bool AddToChain = !AA->pointsToConstantMemory(MemoryLocation(
3841       PtrOperand, DAG.getDataLayout().getTypeStoreSize(I.getType()), AAInfo));
3842   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
3843 
3844   MachineMemOperand *MMO =
3845     DAG.getMachineFunction().
3846     getMachineMemOperand(MachinePointerInfo(PtrOperand),
3847                           MachineMemOperand::MOLoad,  VT.getStoreSize(),
3848                           Alignment, AAInfo, Ranges);
3849 
3850   SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
3851                                    ISD::NON_EXTLOAD, IsExpanding);
3852   if (AddToChain) {
3853     SDValue OutChain = Load.getValue(1);
3854     DAG.setRoot(OutChain);
3855   }
3856   setValue(&I, Load);
3857 }
3858 
3859 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
3860   SDLoc sdl = getCurSDLoc();
3861 
3862   // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
3863   const Value *Ptr = I.getArgOperand(0);
3864   SDValue Src0 = getValue(I.getArgOperand(3));
3865   SDValue Mask = getValue(I.getArgOperand(2));
3866 
3867   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3868   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3869   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
3870   if (!Alignment)
3871     Alignment = DAG.getEVTAlignment(VT);
3872 
3873   AAMDNodes AAInfo;
3874   I.getAAMetadata(AAInfo);
3875   const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3876 
3877   SDValue Root = DAG.getRoot();
3878   SDValue Base;
3879   SDValue Index;
3880   const Value *BasePtr = Ptr;
3881   bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3882   bool ConstantMemory = false;
3883   if (UniformBase &&
3884       AA->pointsToConstantMemory(MemoryLocation(
3885           BasePtr, DAG.getDataLayout().getTypeStoreSize(I.getType()),
3886           AAInfo))) {
3887     // Do not serialize (non-volatile) loads of constant memory with anything.
3888     Root = DAG.getEntryNode();
3889     ConstantMemory = true;
3890   }
3891 
3892   MachineMemOperand *MMO =
3893     DAG.getMachineFunction().
3894     getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
3895                          MachineMemOperand::MOLoad,  VT.getStoreSize(),
3896                          Alignment, AAInfo, Ranges);
3897 
3898   if (!UniformBase) {
3899     Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
3900     Index = getValue(Ptr);
3901   }
3902   SDValue Ops[] = { Root, Src0, Mask, Base, Index };
3903   SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
3904                                        Ops, MMO);
3905 
3906   SDValue OutChain = Gather.getValue(1);
3907   if (!ConstantMemory)
3908     PendingLoads.push_back(OutChain);
3909   setValue(&I, Gather);
3910 }
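// Illustrative note on the base/index split above (editor's addition): when
// every lane address is "BasePtr + Index[i]" with a single scalar BasePtr
// (getUniformBase succeeded), targets can select a scaled-index addressing
// mode. When the base is not uniform, the fallback encodes Base = 0 and
// Index = the full vector of pointers, which is always correct but less
// analyzable.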
3911 
3912 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
3913   SDLoc dl = getCurSDLoc();
3914   AtomicOrdering SuccessOrder = I.getSuccessOrdering();
3915   AtomicOrdering FailureOrder = I.getFailureOrdering();
3916   SynchronizationScope Scope = I.getSynchScope();
3917 
3918   SDValue InChain = getRoot();
3919 
3920   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
3921   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
3922   SDValue L = DAG.getAtomicCmpSwap(
3923       ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
3924       getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
3925       getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
3926       /*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope);
3927 
3928   SDValue OutChain = L.getValue(2);
3929 
3930   setValue(&I, L);
3931   DAG.setRoot(OutChain);
3932 }
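// Example mapping (editor's illustration): "%r = cmpxchg i32* %p, i32 %old,
// i32 %new seq_cst seq_cst" becomes ATOMIC_CMP_SWAP_WITH_SUCCESS with three
// results -- the loaded i32, an i1 success flag, and the chain -- mirroring
// the { i32, i1 } aggregate the IR instruction yields.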
3933 
3934 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
3935   SDLoc dl = getCurSDLoc();
3936   ISD::NodeType NT;
3937   switch (I.getOperation()) {
3938   default: llvm_unreachable("Unknown atomicrmw operation");
3939   case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
3940   case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
3941   case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
3942   case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
3943   case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
3944   case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
3945   case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
3946   case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
3947   case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
3948   case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
3949   case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
3950   }
3951   AtomicOrdering Order = I.getOrdering();
3952   SynchronizationScope Scope = I.getSynchScope();
3953 
3954   SDValue InChain = getRoot();
3955 
3956   SDValue L =
3957     DAG.getAtomic(NT, dl,
3958                   getValue(I.getValOperand()).getSimpleValueType(),
3959                   InChain,
3960                   getValue(I.getPointerOperand()),
3961                   getValue(I.getValOperand()),
3962                   I.getPointerOperand(),
3963                   /* Alignment=*/ 0, Order, Scope);
3964 
3965   SDValue OutChain = L.getValue(1);
3966 
3967   setValue(&I, L);
3968   DAG.setRoot(OutChain);
3969 }
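// Example mapping (editor's illustration): "%old = atomicrmw add i32* %p,
// i32 1 seq_cst" lowers to ISD::ATOMIC_LOAD_ADD; result 0 is the value the
// memory held before the add, and result 1 is the output chain installed as
// the new root above.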
3970 
3971 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
3972   SDLoc dl = getCurSDLoc();
3973   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3974   SDValue Ops[3];
3975   Ops[0] = getRoot();
3976   Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
3977                            TLI.getPointerTy(DAG.getDataLayout()));
3978   Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
3979                            TLI.getPointerTy(DAG.getDataLayout()));
3980   DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
3981 }
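// Illustrative mapping (editor's note): "fence seq_cst" becomes an
// ISD::ATOMIC_FENCE node carrying the ordering and synchronization scope as
// pointer-width constant operands; it produces only a chain, since a fence
// orders memory without computing a value.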
3982 
3983 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
3984   SDLoc dl = getCurSDLoc();
3985   AtomicOrdering Order = I.getOrdering();
3986   SynchronizationScope Scope = I.getSynchScope();
3987 
3988   SDValue InChain = getRoot();
3989 
3990   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3991   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3992 
3993   if (I.getAlignment() < VT.getSizeInBits() / 8)
3994     report_fatal_error("Cannot generate unaligned atomic load");
3995 
3996   MachineMemOperand *MMO =
3997       DAG.getMachineFunction().
3998       getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3999                            MachineMemOperand::MOVolatile |
4000                            MachineMemOperand::MOLoad,
4001                            VT.getStoreSize(),
4002                            I.getAlignment() ? I.getAlignment() :
4003                                               DAG.getEVTAlignment(VT),
4004                            AAMDNodes(), nullptr, Scope, Order);
4005 
4006   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4007   SDValue L =
4008       DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
4009                     getValue(I.getPointerOperand()), MMO);
4010 
4011   SDValue OutChain = L.getValue(1);
4012 
4013   setValue(&I, L);
4014   DAG.setRoot(OutChain);
4015 }
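// Note (editor's example): the alignment check above would reject, e.g., an
// atomic i32 load at a 2-byte boundary -- atomics must be naturally aligned
// here, and no misaligned expansion is attempted.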
4016 
4017 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4018   SDLoc dl = getCurSDLoc();
4019 
4020   AtomicOrdering Order = I.getOrdering();
4021   SynchronizationScope Scope = I.getSynchScope();
4022 
4023   SDValue InChain = getRoot();
4024 
4025   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4026   EVT VT =
4027       TLI.getValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4028 
4029   if (I.getAlignment() < VT.getSizeInBits() / 8)
4030     report_fatal_error("Cannot generate unaligned atomic store");
4031 
4032   SDValue OutChain =
4033     DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
4034                   InChain,
4035                   getValue(I.getPointerOperand()),
4036                   getValue(I.getValueOperand()),
4037                   I.getPointerOperand(), I.getAlignment(),
4038                   Order, Scope);
4039 
4040   DAG.setRoot(OutChain);
4041 }
4042 
4043 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4044 /// node.
4045 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4046                                                unsigned Intrinsic) {
4047   // Ignore the callsite's attributes. A specific call site may be marked with
4048   // readnone, but the lowering code will expect the chain based on the
4049   // definition.
4050   const Function *F = I.getCalledFunction();
4051   bool HasChain = !F->doesNotAccessMemory();
4052   bool OnlyLoad = HasChain && F->onlyReadsMemory();
4053 
4054   // Build the operand list.
4055   SmallVector<SDValue, 8> Ops;
4056   if (HasChain) {  // If this intrinsic has side-effects, chainify it.
4057     if (OnlyLoad) {
4058       // We don't need to serialize loads against other loads.
4059       Ops.push_back(DAG.getRoot());
4060     } else {
4061       Ops.push_back(getRoot());
4062     }
4063   }
4064 
4065   // Info is set by getTgtMemIntrinsic.
4066   TargetLowering::IntrinsicInfo Info;
4067   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4068   bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
4069 
4070   // Add the intrinsic ID as an integer operand, unless the node's opcode already implies it (target-specific memory intrinsics).
4071   if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4072       Info.opc == ISD::INTRINSIC_W_CHAIN)
4073     Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4074                                         TLI.getPointerTy(DAG.getDataLayout())));
4075 
4076   // Add all operands of the call to the operand list.
4077   for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4078     SDValue Op = getValue(I.getArgOperand(i));
4079     Ops.push_back(Op);
4080   }
4081 
4082   SmallVector<EVT, 4> ValueVTs;
4083   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4084 
4085   if (HasChain)
4086     ValueVTs.push_back(MVT::Other);
4087 
4088   SDVTList VTs = DAG.getVTList(ValueVTs);
4089 
4090   // Create the node.
4091   SDValue Result;
4092   if (IsTgtIntrinsic) {
4093     // This is a target intrinsic that touches memory.
4094     Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
4095                                      VTs, Ops, Info.memVT,
4096                                    MachinePointerInfo(Info.ptrVal, Info.offset),
4097                                      Info.align, Info.vol,
4098                                      Info.readMem, Info.writeMem, Info.size);
4099   } else if (!HasChain) {
4100     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4101   } else if (!I.getType()->isVoidTy()) {
4102     Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4103   } else {
4104     Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4105   }
4106 
4107   if (HasChain) {
4108     SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4109     if (OnlyLoad)
4110       PendingLoads.push_back(Chain);
4111     else
4112       DAG.setRoot(Chain);
4113   }
4114 
4115   if (!I.getType()->isVoidTy()) {
4116     if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4117       EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4118       Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4119     } else
4120       Result = lowerRangeToAssertZExt(DAG, I, Result);
4121 
4122     setValue(&I, Result);
4123   }
4124 }
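// Chain discipline recap (editor's note): a read-only intrinsic's output
// chain goes into PendingLoads, so unrelated loads are not serialized
// against it, while a writing intrinsic's chain becomes the DAG root and
// orders everything emitted afterwards.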
4125 
4126 /// GetSignificand - Get the significand and build it into a floating-point
4127 /// number with exponent of 1:
4128 ///
4129 ///   Op = (Op & 0x007fffff) | 0x3f800000;
4130 ///
4131 /// where Op is the i32 bit pattern of the floating-point value.
4132 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4133   SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4134                            DAG.getConstant(0x007fffff, dl, MVT::i32));
4135   SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4136                            DAG.getConstant(0x3f800000, dl, MVT::i32));
4137   return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4138 }
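// Worked example (editor's illustration): for Op = bitcast(3.0f) =
// 0x40400000, (0x40400000 & 0x007fffff) | 0x3f800000 == 0x3fc00000 == 1.5f;
// the significand of 3.0 = 1.5 * 2^1 is rebuilt with a biased exponent of
// 127 (i.e. 2^0).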
4139 
4140 /// GetExponent - Get the exponent:
4141 ///
4142 ///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4143 ///
4144 /// where Op is the i32 bit pattern of the floating-point value.
4145 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4146                            const TargetLowering &TLI, const SDLoc &dl) {
4147   SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4148                            DAG.getConstant(0x7f800000, dl, MVT::i32));
4149   SDValue t1 = DAG.getNode(
4150       ISD::SRL, dl, MVT::i32, t0,
4151       DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4152   SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4153                            DAG.getConstant(127, dl, MVT::i32));
4154   return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4155 }
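// Worked example (editor's illustration): for Op = bitcast(3.0f) =
// 0x40400000, the field (0x40400000 & 0x7f800000) >> 23 is 128; subtracting
// the bias 127 gives 1, converted to 1.0f -- consistent with 3.0 = 1.5 * 2^1.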
4156 
4157 /// getF32Constant - Get 32-bit floating point constant.
4158 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4159                               const SDLoc &dl) {
4160   return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4161                            MVT::f32);
4162 }
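// Worked example (editor's illustration): getF32Constant(DAG, 0x3f800000, dl)
// materializes 1.0f, since 0x3f800000 is the IEEE-754 single-precision bit
// pattern for 1.0. The hex literals in the expansions below are polynomial
// coefficients encoded the same way.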
4163 
4164 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4165                                        SelectionDAG &DAG) {
4166   // TODO: What fast-math-flags should be set on the floating-point nodes?
4167 
4168   //   IntegerPartOfX = (int32_t)t0;
4169   SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4170 
4171   //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
4172   SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4173   SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4174 
4175   //   IntegerPartOfX <<= 23;
4176   IntegerPartOfX = DAG.getNode(
4177       ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4178       DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4179                                   DAG.getDataLayout())));
4180 
4181   SDValue TwoToFractionalPartOfX;
4182   if (LimitFloatPrecision <= 6) {
4183     // For floating-point precision of 6:
4184     //
4185     //   TwoToFractionalPartOfX =
4186     //     0.997535578f +
4187     //       (0.735607626f + 0.252464424f * x) * x;
4188     //
4189     // error 0.0144103317, which is 6 bits
4190     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4191                              getF32Constant(DAG, 0x3e814304, dl));
4192     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4193                              getF32Constant(DAG, 0x3f3c50c8, dl));
4194     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4195     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4196                                          getF32Constant(DAG, 0x3f7f5e7e, dl));
4197   } else if (LimitFloatPrecision <= 12) {
4198     // For floating-point precision of 12:
4199     //
4200     //   TwoToFractionalPartOfX =
4201     //     0.999892986f +
4202     //       (0.696457318f +
4203     //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4204     //
4205     // error 0.000107046256, which is 13 to 14 bits
4206     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4207                              getF32Constant(DAG, 0x3da235e3, dl));
4208     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4209                              getF32Constant(DAG, 0x3e65b8f3, dl));
4210     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4211     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4212                              getF32Constant(DAG, 0x3f324b07, dl));
4213     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4214     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4215                                          getF32Constant(DAG, 0x3f7ff8fd, dl));
4216   } else { // LimitFloatPrecision <= 18
4217     // For floating-point precision of 18:
4218     //
4219     //   TwoToFractionalPartOfX =
4220     //     0.999999982f +
4221     //       (0.693148872f +
4222     //         (0.240227044f +
4223     //           (0.554906021e-1f +
4224     //             (0.961591928e-2f +
4225     //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4226     // error 2.47208000*10^(-7), which is better than 18 bits
4227     SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4228                              getF32Constant(DAG, 0x3924b03e, dl));
4229     SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4230                              getF32Constant(DAG, 0x3ab24b87, dl));
4231     SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4232     SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4233                              getF32Constant(DAG, 0x3c1d8c17, dl));
4234     SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4235     SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4236                              getF32Constant(DAG, 0x3d634a1d, dl));
4237     SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4238     SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4239                              getF32Constant(DAG, 0x3e75fe14, dl));
4240     SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4241     SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4242                               getF32Constant(DAG, 0x3f317234, dl));
4243     SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4244     TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4245                                          getF32Constant(DAG, 0x3f800000, dl));
4246   }
4247 
4248   // Add the exponent into the result in integer domain.
4249   SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4250   return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4251                      DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4252 }
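// Why the final ADD is done on integers (editor's note): for a finite
// positive f32, adding (IntegerPartOfX << 23) to its bit pattern bumps the
// biased exponent field by IntegerPartOfX, i.e. scales the value by
// 2^IntegerPartOfX with no floating-point operation. The result is therefore
// 2^frac(x) * 2^int(x) == 2^x, up to the polynomial's error bound.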
4253 
4254 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4255 /// limited-precision mode.
4256 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4257                          const TargetLowering &TLI) {
4258   if (Op.getValueType() == MVT::f32 &&
4259       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4260 
4261     // Put the exponent in the right bit position for later addition to the
4262     // final result:
4263     //
4264     //   #define LOG2OFe 1.4426950f
4265     //   t0 = Op * LOG2OFe
4266 
4267     // TODO: What fast-math-flags should be set here?
4268     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4269                              getF32Constant(DAG, 0x3fb8aa3b, dl));
4270     return getLimitedPrecisionExp2(t0, dl, DAG);
4271   }
4272 
4273   // No special expansion.
4274   return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4275 }
4276 
4277 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4278 /// limited-precision mode.
4279 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4280                          const TargetLowering &TLI) {
4281 
4282   // TODO: What fast-math-flags should be set on the floating-point nodes?
4283 
4284   if (Op.getValueType() == MVT::f32 &&
4285       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4286     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4287 
4288     // Scale the exponent by log(2) [0.69314718f].
4289     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4290     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4291                                         getF32Constant(DAG, 0x3f317218, dl));
4292 
4293     // Get the significand and build it into a floating-point number with
4294     // exponent of 1.
4295     SDValue X = GetSignificand(DAG, Op1, dl);
4296 
4297     SDValue LogOfMantissa;
4298     if (LimitFloatPrecision <= 6) {
4299       // For floating-point precision of 6:
4300       //
4301       //   LogOfMantissa =
4302       //     -1.1609546f +
4303       //       (1.4034025f - 0.23903021f * x) * x;
4304       //
4305       // error 0.0034276066, which is better than 8 bits
4306       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4307                                getF32Constant(DAG, 0xbe74c456, dl));
4308       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4309                                getF32Constant(DAG, 0x3fb3a2b1, dl));
4310       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4311       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4312                                   getF32Constant(DAG, 0x3f949a29, dl));
4313     } else if (LimitFloatPrecision <= 12) {
4314       // For floating-point precision of 12:
4315       //
4316       //   LogOfMantissa =
4317       //     -1.7417939f +
4318       //       (2.8212026f +
4319       //         (-1.4699568f +
4320       //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4321       //
4322       // error 0.000061011436, which is 14 bits
4323       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4324                                getF32Constant(DAG, 0xbd67b6d6, dl));
4325       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4326                                getF32Constant(DAG, 0x3ee4f4b8, dl));
4327       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4328       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4329                                getF32Constant(DAG, 0x3fbc278b, dl));
4330       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4331       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4332                                getF32Constant(DAG, 0x40348e95, dl));
4333       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4334       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4335                                   getF32Constant(DAG, 0x3fdef31a, dl));
4336     } else { // LimitFloatPrecision <= 18
4337       // For floating-point precision of 18:
4338       //
4339       //   LogOfMantissa =
4340       //     -2.1072184f +
4341       //       (4.2372794f +
4342       //         (-3.7029485f +
4343       //           (2.2781945f +
4344       //             (-0.87823314f +
4345       //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4346       //
4347       // error 0.0000023660568, which is better than 18 bits
4348       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4349                                getF32Constant(DAG, 0xbc91e5ac, dl));
4350       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4351                                getF32Constant(DAG, 0x3e4350aa, dl));
4352       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4353       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4354                                getF32Constant(DAG, 0x3f60d3e3, dl));
4355       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4356       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4357                                getF32Constant(DAG, 0x4011cdf0, dl));
4358       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4359       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4360                                getF32Constant(DAG, 0x406cfd1c, dl));
4361       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4362       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4363                                getF32Constant(DAG, 0x408797cb, dl));
4364       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4365       LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4366                                   getF32Constant(DAG, 0x4006dcab, dl));
4367     }
4368 
4369     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4370   }
4371 
4372   // No special expansion.
4373   return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4374 }
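// Identity used above (editor's note): writing x = 2^e * m with m in [1,2),
// ln(x) = e * ln(2) + ln(m); the exponent term is scaled by ln(2) ~=
// 0.69314718f (0x3f317218), and the polynomials approximate ln(m) on [1,2).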
4375 
4376 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4377 /// limited-precision mode.
4378 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4379                           const TargetLowering &TLI) {
4380 
4381   // TODO: What fast-math-flags should be set on the floating-point nodes?
4382 
4383   if (Op.getValueType() == MVT::f32 &&
4384       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4385     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4386 
4387     // Get the exponent.
4388     SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4389 
4390     // Get the significand and build it into a floating-point number with
4391     // exponent of 1.
4392     SDValue X = GetSignificand(DAG, Op1, dl);
4393 
4394     // Different possible minimax polynomial approximations for log2 of the
4395     // significand, at various degrees of accuracy over [1,2].
4396     SDValue Log2ofMantissa;
4397     if (LimitFloatPrecision <= 6) {
4398       // For floating-point precision of 6:
4399       //
4400       //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4401       //
4402       // error 0.0049451742, which is more than 7 bits
4403       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4404                                getF32Constant(DAG, 0xbeb08fe0, dl));
4405       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4406                                getF32Constant(DAG, 0x40019463, dl));
4407       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4408       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4409                                    getF32Constant(DAG, 0x3fd6633d, dl));
4410     } else if (LimitFloatPrecision <= 12) {
4411       // For floating-point precision of 12:
4412       //
4413       //   Log2ofMantissa =
4414       //     -2.51285454f +
4415       //       (4.07009056f +
4416       //         (-2.12067489f +
4417       //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4418       //
4419       // error 0.0000876136000, which is better than 13 bits
4420       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4421                                getF32Constant(DAG, 0xbda7262e, dl));
4422       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4423                                getF32Constant(DAG, 0x3f25280b, dl));
4424       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4425       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4426                                getF32Constant(DAG, 0x4007b923, dl));
4427       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4428       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4429                                getF32Constant(DAG, 0x40823e2f, dl));
4430       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4431       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4432                                    getF32Constant(DAG, 0x4020d29c, dl));
4433     } else { // LimitFloatPrecision <= 18
4434       // For floating-point precision of 18:
4435       //
4436       //   Log2ofMantissa =
4437       //     -3.0400495f +
4438       //       (6.1129976f +
4439       //         (-5.3420409f +
4440       //           (3.2865683f +
4441       //             (-1.2669343f +
4442       //               (0.27515199f -
4443       //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4444       //
4445       // error 0.0000018516, which is better than 18 bits
4446       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4447                                getF32Constant(DAG, 0xbcd2769e, dl));
4448       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4449                                getF32Constant(DAG, 0x3e8ce0b9, dl));
4450       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4451       SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4452                                getF32Constant(DAG, 0x3fa22ae7, dl));
4453       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4454       SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4455                                getF32Constant(DAG, 0x40525723, dl));
4456       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4457       SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4458                                getF32Constant(DAG, 0x40aaf200, dl));
4459       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4460       SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4461                                getF32Constant(DAG, 0x40c39dad, dl));
4462       SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4463       Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4464                                    getF32Constant(DAG, 0x4042902c, dl));
4465     }
4466 
4467     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4468   }
4469 
4470   // No special expansion.
4471   return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4472 }
4473 
4474 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4475 /// limited-precision mode.
4476 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4477                            const TargetLowering &TLI) {
4478 
4479   // TODO: What fast-math-flags should be set on the floating-point nodes?
4480 
4481   if (Op.getValueType() == MVT::f32 &&
4482       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4483     SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4484 
4485     // Scale the exponent by log10(2) [0.30102999f].
4486     SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4487     SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4488                                         getF32Constant(DAG, 0x3e9a209a, dl));
4489 
4490     // Get the significand and build it into a floating-point number with
4491     // exponent of 1.
4492     SDValue X = GetSignificand(DAG, Op1, dl);
4493 
4494     SDValue Log10ofMantissa;
4495     if (LimitFloatPrecision <= 6) {
4496       // For floating-point precision of 6:
4497       //
4498       //   Log10ofMantissa =
4499       //     -0.50419619f +
4500       //       (0.60948995f - 0.10380950f * x) * x;
4501       //
4502       // error 0.0014886165, which is 6 bits
4503       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4504                                getF32Constant(DAG, 0xbdd49a13, dl));
4505       SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4506                                getF32Constant(DAG, 0x3f1c0789, dl));
4507       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4508       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4509                                     getF32Constant(DAG, 0x3f011300, dl));
4510     } else if (LimitFloatPrecision <= 12) {
4511       // For floating-point precision of 12:
4512       //
4513       //   Log10ofMantissa =
4514       //     -0.64831180f +
4515       //       (0.91751397f +
4516       //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4517       //
4518       // error 0.00019228036, which is better than 12 bits
4519       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4520                                getF32Constant(DAG, 0x3d431f31, dl));
4521       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4522                                getF32Constant(DAG, 0x3ea21fb2, dl));
4523       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4524       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4525                                getF32Constant(DAG, 0x3f6ae232, dl));
4526       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4527       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4528                                     getF32Constant(DAG, 0x3f25f7c3, dl));
4529     } else { // LimitFloatPrecision <= 18
4530       // For floating-point precision of 18:
4531       //
4532       //   Log10ofMantissa =
4533       //     -0.84299375f +
4534       //       (1.5327582f +
4535       //         (-1.0688956f +
4536       //           (0.49102474f +
4537       //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4538       //
4539       // error 0.0000037995730, which is better than 18 bits
4540       SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4541                                getF32Constant(DAG, 0x3c5d51ce, dl));
4542       SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4543                                getF32Constant(DAG, 0x3e00685a, dl));
4544       SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4545       SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4546                                getF32Constant(DAG, 0x3efb6798, dl));
4547       SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4548       SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4549                                getF32Constant(DAG, 0x3f88d192, dl));
4550       SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4551       SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4552                                getF32Constant(DAG, 0x3fc4316c, dl));
4553       SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4554       Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4555                                     getF32Constant(DAG, 0x3f57ce70, dl));
4556     }
4557 
4558     return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4559   }
4560 
4561   // No special expansion.
4562   return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4563 }
4564 
4565 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4566 /// limited-precision mode.
4567 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4568                           const TargetLowering &TLI) {
4569   if (Op.getValueType() == MVT::f32 &&
4570       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
4571     return getLimitedPrecisionExp2(Op, dl, DAG);
4572 
4573   // No special expansion.
4574   return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4575 }
4576 
4577 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
4578 /// limited-precision mode with x == 10.0f.
4579 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
4580                          SelectionDAG &DAG, const TargetLowering &TLI) {
4581   bool IsExp10 = false;
4582   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4583       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4584     if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4585       APFloat Ten(10.0f);
4586       IsExp10 = LHSC->isExactlyValue(Ten);
4587     }
4588   }
4589 
4590   // TODO: What fast-math-flags should be set on the FMUL node?
4591   if (IsExp10) {
4592     // Put the exponent in the right bit position for later addition to the
4593     // final result:
4594     //
4595     //   #define LOG2OF10 3.3219281f
4596     //   t0 = Op * LOG2OF10;
4597     SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4598                              getF32Constant(DAG, 0x40549a78, dl));
4599     return getLimitedPrecisionExp2(t0, dl, DAG);
4600   }
4601 
4602   // No special expansion.
4603   return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4604 }
4605 
4606 
4607 /// ExpandPowI - Expand a llvm.powi intrinsic.
4608 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
4609                           SelectionDAG &DAG) {
4610   // If RHS is a constant, we can expand this out to a multiplication tree,
4611   // otherwise we end up lowering to a call to __powidf2 (for example).  When
4612   // optimizing for size, we only do this if the expansion would produce a
4613   // small number of multiplies; otherwise we emit the libcall instead.
4614   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4615     // Get the exponent as a positive value.
4616     unsigned Val = RHSC->getSExtValue();
4617     if ((int)Val < 0) Val = -Val;
4618 
4619     // powi(x, 0) -> 1.0
4620     if (Val == 0)
4621       return DAG.getConstantFP(1.0, DL, LHS.getValueType());
4622 
4623     const Function *F = DAG.getMachineFunction().getFunction();
4624     if (!F->optForSize() ||
4625         // If optimizing for size, don't insert too many multiplies.
4626         // This inserts up to 5 multiplies.
4627         countPopulation(Val) + Log2_32(Val) < 7) {
4628       // We use the simple binary decomposition method to generate the multiply
4629       // sequence.  There are more optimal ways to do this (for example,
4630       // powi(x,15) generates one more multiply than it should), but this has
4631       // the benefit of being both really simple and much better than a libcall.
4632       SDValue Res;  // Logically starts equal to 1.0
4633       SDValue CurSquare = LHS;
4634       // TODO: Intrinsics should have fast-math-flags that propagate to these
4635       // nodes.
4636       while (Val) {
4637         if (Val & 1) {
4638           if (Res.getNode())
4639             Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
4640           else
4641             Res = CurSquare;  // 1.0*CurSquare.
4642         }
4643 
4644         CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4645                                 CurSquare, CurSquare);
4646         Val >>= 1;
4647       }
4648 
4649       // If the original was negative, invert the result, producing 1/(x*x*x).
4650       if (RHSC->getSExtValue() < 0)
4651         Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4652                           DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
4653       return Res;
4654     }
4655   }
4656 
4657   // Otherwise, expand to a libcall.
4658   return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4659 }
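// Worked example of the decomposition (editor's illustration): powi(x, 13),
// with 13 = 0b1101, folds CurSquare into Res at bits 0, 2 and 3, giving
// Res = x * x^4 * x^8 = x^13; since popcount(13) + log2(13) = 3 + 3 = 6 < 7,
// this expansion is used even when optimizing for size. A negative exponent
// is handled by the final FDIV, e.g. powi(x, -3) = 1.0 / x^3.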
4660 
4661 // getUnderlyingArgReg - Find underlying register used for a truncated or
4662 // bitcasted argument.
4663 static unsigned getUnderlyingArgReg(const SDValue &N) {
4664   switch (N.getOpcode()) {
4665   case ISD::CopyFromReg:
4666     return cast<RegisterSDNode>(N.getOperand(1))->getReg();
4667   case ISD::BITCAST:
4668   case ISD::AssertZext:
4669   case ISD::AssertSext:
4670   case ISD::TRUNCATE:
4671     return getUnderlyingArgReg(N.getOperand(0));
4672   default:
4673     return 0;
4674   }
4675 }
4676 
4677 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
4678 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
4679 /// At the end of instruction selection, they will be inserted into the entry BB.
4680 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
4681     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
4682     DILocation *DL, int64_t Offset, bool IsIndirect, const SDValue &N) {
4683   const Argument *Arg = dyn_cast<Argument>(V);
4684   if (!Arg)
4685     return false;
4686 
4687   MachineFunction &MF = DAG.getMachineFunction();
4688   const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4689 
4690   // Ignore inlined function arguments here.
4691   //
4692   // FIXME: Should we be checking DL->inlinedAt() to determine this?
4693   if (!Variable->getScope()->getSubprogram()->describes(MF.getFunction()))
4694     return false;
4695 
4696   Optional<MachineOperand> Op;
4697   // Some arguments' frame indices are recorded during argument lowering.
4698   if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
4699     Op = MachineOperand::CreateFI(FI);
4700 
4701   if (!Op && N.getNode()) {
4702     unsigned Reg = getUnderlyingArgReg(N);
4703     if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4704       MachineRegisterInfo &RegInfo = MF.getRegInfo();
4705       unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4706       if (PR)
4707         Reg = PR;
4708     }
4709     if (Reg)
4710       Op = MachineOperand::CreateReg(Reg, false);
4711   }
4712 
4713   if (!Op) {
4714     // Check if ValueMap has reg number.
4715     DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4716     if (VMI != FuncInfo.ValueMap.end())
4717       Op = MachineOperand::CreateReg(VMI->second, false);
4718   }
4719 
4720   if (!Op && N.getNode())
4721     // Check if frame index is available.
4722     if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4723       if (FrameIndexSDNode *FINode =
4724           dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4725         Op = MachineOperand::CreateFI(FINode->getIndex());
4726 
4727   if (!Op)
4728     return false;
4729 
4730   assert(Variable->isValidLocationForIntrinsic(DL) &&
4731          "Expected inlined-at fields to agree");
4732   if (Op->isReg())
4733     FuncInfo.ArgDbgValues.push_back(
4734         BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
4735                 Op->getReg(), Offset, Variable, Expr));
4736   else
4737     FuncInfo.ArgDbgValues.push_back(
4738         BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE))
4739             .add(*Op)
4740             .addImm(Offset)
4741             .addMetadata(Variable)
4742             .addMetadata(Expr));
4743 
4744   return true;
4745 }
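// Location search order above (editor's recap): a frame index recorded
// during argument lowering, then the register underlying N (mapped to its
// live-in physical register if there is one), then the FuncInfo.ValueMap
// entry, and finally a frame index feeding a load. If none matches, the
// caller falls back to the generic dbg_value handling.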
4746 
4747 /// Return the appropriate SDDbgValue based on N.
4748 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
4749                                              DILocalVariable *Variable,
4750                                              DIExpression *Expr, int64_t Offset,
4751                                              const DebugLoc &dl,
4752                                              unsigned DbgSDNodeOrder) {
4753   SDDbgValue *SDV;
4754   auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode());
4755   if (FISDN && Expr->startsWithDeref()) {
4756     // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
4757     // stack slot locations as such instead of as indirectly addressed
4758     // locations.
4759     ArrayRef<uint64_t> TrailingElements(Expr->elements_begin() + 1,
4760                                         Expr->elements_end());
4761     DIExpression *DerefedDIExpr =
4762         DIExpression::get(*DAG.getContext(), TrailingElements);
4763     int FI = FISDN->getIndex();
4764     SDV = DAG.getFrameIndexDbgValue(Variable, DerefedDIExpr, FI, 0, dl,
4765                                     DbgSDNodeOrder);
4766   } else {
4767     SDV = DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(), false,
4768                           Offset, dl, DbgSDNodeOrder);
4769   }
4770   return SDV;
4771 }
4772 
4773 // VisualStudio defines setjmp as _setjmp
4774 #if defined(_MSC_VER) && defined(setjmp) && \
4775                          !defined(setjmp_undefined_for_msvc)
4776 #  pragma push_macro("setjmp")
4777 #  undef setjmp
4778 #  define setjmp_undefined_for_msvc
4779 #endif
4780 
4781 /// Lower the call to the specified intrinsic function. If we want to emit this
4782 /// as a call to a named external function, return the name. Otherwise, lower it
4783 /// and return null.
4784 const char *
4785 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
4786   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4787   SDLoc sdl = getCurSDLoc();
4788   DebugLoc dl = getCurDebugLoc();
4789   SDValue Res;
4790 
4791   switch (Intrinsic) {
4792   default:
4793     // By default, turn this into a target intrinsic node.
4794     visitTargetIntrinsic(I, Intrinsic);
4795     return nullptr;
4796   case Intrinsic::vastart:  visitVAStart(I); return nullptr;
4797   case Intrinsic::vaend:    visitVAEnd(I); return nullptr;
4798   case Intrinsic::vacopy:   visitVACopy(I); return nullptr;
4799   case Intrinsic::returnaddress:
4800     setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
4801                              TLI.getPointerTy(DAG.getDataLayout()),
4802                              getValue(I.getArgOperand(0))));
4803     return nullptr;
4804   case Intrinsic::addressofreturnaddress:
4805     setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
4806                              TLI.getPointerTy(DAG.getDataLayout())));
4807     return nullptr;
4808   case Intrinsic::frameaddress:
4809     setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
4810                              TLI.getPointerTy(DAG.getDataLayout()),
4811                              getValue(I.getArgOperand(0))));
4812     return nullptr;
4813   case Intrinsic::read_register: {
4814     Value *Reg = I.getArgOperand(0);
4815     SDValue Chain = getRoot();
4816     SDValue RegName =
4817         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4818     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4819     Res = DAG.getNode(ISD::READ_REGISTER, sdl,
4820       DAG.getVTList(VT, MVT::Other), Chain, RegName);
4821     setValue(&I, Res);
4822     DAG.setRoot(Res.getValue(1));
4823     return nullptr;
4824   }
4825   case Intrinsic::write_register: {
4826     Value *Reg = I.getArgOperand(0);
4827     Value *RegValue = I.getArgOperand(1);
4828     SDValue Chain = getRoot();
4829     SDValue RegName =
4830         DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
4831     DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
4832                             RegName, getValue(RegValue)));
4833     return nullptr;
4834   }
4835   case Intrinsic::setjmp:
4836     return &"_setjmp"[!TLI.usesUnderscoreSetJmp()];
4837   case Intrinsic::longjmp:
4838     return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
4839   case Intrinsic::memcpy: {
4840     SDValue Op1 = getValue(I.getArgOperand(0));
4841     SDValue Op2 = getValue(I.getArgOperand(1));
4842     SDValue Op3 = getValue(I.getArgOperand(2));
4843     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4844     if (!Align)
4845       Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
4846     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4847     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4848     SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4849                                false, isTC,
4850                                MachinePointerInfo(I.getArgOperand(0)),
4851                                MachinePointerInfo(I.getArgOperand(1)));
4852     updateDAGForMaybeTailCall(MC);
4853     return nullptr;
4854   }
4855   case Intrinsic::memset: {
4856     SDValue Op1 = getValue(I.getArgOperand(0));
4857     SDValue Op2 = getValue(I.getArgOperand(1));
4858     SDValue Op3 = getValue(I.getArgOperand(2));
4859     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4860     if (!Align)
4861       Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
4862     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4863     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4864     SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4865                                isTC, MachinePointerInfo(I.getArgOperand(0)));
4866     updateDAGForMaybeTailCall(MS);
4867     return nullptr;
4868   }
4869   case Intrinsic::memmove: {
4870     SDValue Op1 = getValue(I.getArgOperand(0));
4871     SDValue Op2 = getValue(I.getArgOperand(1));
4872     SDValue Op3 = getValue(I.getArgOperand(2));
4873     unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4874     if (!Align)
4875       Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
4876     bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4877     bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
4878     SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4879                                 isTC, MachinePointerInfo(I.getArgOperand(0)),
4880                                 MachinePointerInfo(I.getArgOperand(1)));
4881     updateDAGForMaybeTailCall(MM);
4882     return nullptr;
4883   }
4884   case Intrinsic::memcpy_element_atomic: {
4885     SDValue Dst = getValue(I.getArgOperand(0));
4886     SDValue Src = getValue(I.getArgOperand(1));
4887     SDValue NumElements = getValue(I.getArgOperand(2));
4888     SDValue ElementSize = getValue(I.getArgOperand(3));
4889 
4890     // Emit a library call.
4891     TargetLowering::ArgListTy Args;
4892     TargetLowering::ArgListEntry Entry;
4893     Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
4894     Entry.Node = Dst;
4895     Args.push_back(Entry);
4896 
4897     Entry.Node = Src;
4898     Args.push_back(Entry);
4899 
4900     Entry.Ty = I.getArgOperand(2)->getType();
4901     Entry.Node = NumElements;
4902     Args.push_back(Entry);
4903 
4904     Entry.Ty = Type::getInt32Ty(*DAG.getContext());
4905     Entry.Node = ElementSize;
4906     Args.push_back(Entry);
4907 
4908     uint64_t ElementSizeConstant =
4909         cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4910     RTLIB::Libcall LibraryCall =
4911         RTLIB::getMEMCPY_ELEMENT_ATOMIC(ElementSizeConstant);
4912     if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
4913       report_fatal_error("Unsupported element size");
4914 
4915     TargetLowering::CallLoweringInfo CLI(DAG);
4916     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
4917         TLI.getLibcallCallingConv(LibraryCall),
4918         Type::getVoidTy(*DAG.getContext()),
4919         DAG.getExternalSymbol(TLI.getLibcallName(LibraryCall),
4920                               TLI.getPointerTy(DAG.getDataLayout())),
4921         std::move(Args));
4922 
4923     std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
4924     DAG.setRoot(CallResult.second);
4925     return nullptr;
4926   }
4927   case Intrinsic::dbg_declare: {
4928     const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4929     DILocalVariable *Variable = DI.getVariable();
4930     DIExpression *Expression = DI.getExpression();
4931     const Value *Address = DI.getAddress();
4932     assert(Variable && "Missing variable");
4933     if (!Address) {
4934       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4935       return nullptr;
4936     }
4937 
4938     // Check if address has undef value.
4939     if (isa<UndefValue>(Address) ||
4940         (Address->use_empty() && !isa<Argument>(Address))) {
4941       DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4942       return nullptr;
4943     }
4944 
4945     SDValue &N = NodeMap[Address];
4946     if (!N.getNode() && isa<Argument>(Address))
4947       // Check unused arguments map.
4948       N = UnusedArgNodeMap[Address];
4949     SDDbgValue *SDV;
4950     if (N.getNode()) {
4951       if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4952         Address = BCI->getOperand(0);
4953       // Parameters are handled specially.
4954       bool isParameter = Variable->isParameter() || isa<Argument>(Address);
4955       auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
4956       if (isParameter && FINode) {
4957         // Byval parameter. We have a frame index at this point.
4958         SDV = DAG.getFrameIndexDbgValue(Variable, Expression,
4959                                         FINode->getIndex(), 0, dl, SDNodeOrder);
4960       } else if (isa<Argument>(Address)) {
4961         // Address is an argument, so try to emit its dbg value using
4962         // virtual register info from the FuncInfo.ValueMap.
4963         EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
4964                                  N);
4965         return nullptr;
4966       } else {
4967         SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
4968                               true, 0, dl, SDNodeOrder);
4969       }
4970       DAG.AddDbgValue(SDV, N.getNode(), isParameter);
4971     } else {
4972       // If Address is an argument then try to emit its dbg value using
4973       // virtual register info from the FuncInfo.ValueMap.
4974       if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, 0, false,
4975                                     N)) {
4976         // If the variable is pinned by an alloca in a dominating basic
4977         // block, use the StaticAllocaMap.
4978         if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
4979           if (AI->getParent() != DI.getParent()) {
4980             DenseMap<const AllocaInst*, int>::iterator SI =
4981               FuncInfo.StaticAllocaMap.find(AI);
4982             if (SI != FuncInfo.StaticAllocaMap.end()) {
4983               SDV = DAG.getFrameIndexDbgValue(Variable, Expression, SI->second,
4984                                               0, dl, SDNodeOrder);
4985               DAG.AddDbgValue(SDV, nullptr, false);
4986               return nullptr;
4987             }
4988           }
4989         }
4990         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4991       }
4992     }
4993     return nullptr;
4994   }
4995   case Intrinsic::dbg_value: {
4996     const DbgValueInst &DI = cast<DbgValueInst>(I);
4997     assert(DI.getVariable() && "Missing variable");
4998 
4999     DILocalVariable *Variable = DI.getVariable();
5000     DIExpression *Expression = DI.getExpression();
5001     uint64_t Offset = DI.getOffset();
5002     const Value *V = DI.getValue();
5003     if (!V)
5004       return nullptr;
5005 
5006     SDDbgValue *SDV;
5007     if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
5008       SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
5009                                     SDNodeOrder);
5010       DAG.AddDbgValue(SDV, nullptr, false);
5011     } else {
5012       // Do not use getValue() in here; we don't want to generate code at
5013       // this point if it hasn't been done yet.
5014       SDValue N = NodeMap[V];
5015       if (!N.getNode() && isa<Argument>(V))
5016         // Check unused arguments map.
5017         N = UnusedArgNodeMap[V];
5018       if (N.getNode()) {
5019         if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset,
5020                                       false, N)) {
5021           SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
5022           DAG.AddDbgValue(SDV, N.getNode(), false);
5023         }
5024       } else if (!V->use_empty()) {
5025         // Do not call getValue(V) yet, as we don't want to generate code.
5026         // Remember it for later.
5027         DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
5028         DanglingDebugInfoMap[V] = DDI;
5029       } else {
5030         // We may expand this to cover more cases.  One case where we have no
5031         // data available is an unreferenced parameter.
5032         DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
5033       }
5034     }
5035 
5036     // Build a debug info table entry.
5037     if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
5038       V = BCI->getOperand(0);
5039     const AllocaInst *AI = dyn_cast<AllocaInst>(V);
5040     // Don't handle byval struct arguments or VLAs, for example.
5041     if (!AI) {
5042       DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
5043       DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
5044       return nullptr;
5045     }
5046     DenseMap<const AllocaInst*, int>::iterator SI =
5047       FuncInfo.StaticAllocaMap.find(AI);
5048     if (SI == FuncInfo.StaticAllocaMap.end())
5049       return nullptr; // VLAs.
5050     return nullptr;
5051   }
5052 
5053   case Intrinsic::eh_typeid_for: {
5054     // Find the type id for the given typeinfo.
5055     GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5056     unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5057     Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5058     setValue(&I, Res);
5059     return nullptr;
5060   }
5061 
5062   case Intrinsic::eh_return_i32:
5063   case Intrinsic::eh_return_i64:
5064     DAG.getMachineFunction().setCallsEHReturn(true);
5065     DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5066                             MVT::Other,
5067                             getControlRoot(),
5068                             getValue(I.getArgOperand(0)),
5069                             getValue(I.getArgOperand(1))));
5070     return nullptr;
5071   case Intrinsic::eh_unwind_init:
5072     DAG.getMachineFunction().setCallsUnwindInit(true);
5073     return nullptr;
5074   case Intrinsic::eh_dwarf_cfa: {
5075     setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5076                              TLI.getPointerTy(DAG.getDataLayout()),
5077                              getValue(I.getArgOperand(0))));
5078     return nullptr;
5079   }
5080   case Intrinsic::eh_sjlj_callsite: {
5081     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5082     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
5083     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
5084     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
5085 
5086     MMI.setCurrentCallSite(CI->getZExtValue());
5087     return nullptr;
5088   }
5089   case Intrinsic::eh_sjlj_functioncontext: {
5090     // Get and store the index of the function context.
5091     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
5092     AllocaInst *FnCtx =
5093       cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
5094     int FI = FuncInfo.StaticAllocaMap[FnCtx];
5095     MFI.setFunctionContextIndex(FI);
5096     return nullptr;
5097   }
5098   case Intrinsic::eh_sjlj_setjmp: {
5099     SDValue Ops[2];
5100     Ops[0] = getRoot();
5101     Ops[1] = getValue(I.getArgOperand(0));
5102     SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5103                              DAG.getVTList(MVT::i32, MVT::Other), Ops);
5104     setValue(&I, Op.getValue(0));
5105     DAG.setRoot(Op.getValue(1));
5106     return nullptr;
5107   }
5108   case Intrinsic::eh_sjlj_longjmp: {
5109     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
5110                             getRoot(), getValue(I.getArgOperand(0))));
5111     return nullptr;
5112   }
5113   case Intrinsic::eh_sjlj_setup_dispatch: {
5114     DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
5115                             getRoot()));
5116     return nullptr;
5117   }
5118 
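       // Masked vector memory intrinsics are lowered by dedicated visitors
       // that build the corresponding masked load/store/gather/scatter nodes.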
5119   case Intrinsic::masked_gather:
5120     visitMaskedGather(I);
5121     return nullptr;
5122   case Intrinsic::masked_load:
5123     visitMaskedLoad(I);
5124     return nullptr;
5125   case Intrinsic::masked_scatter:
5126     visitMaskedScatter(I);
5127     return nullptr;
5128   case Intrinsic::masked_store:
5129     visitMaskedStore(I);
5130     return nullptr;
5131   case Intrinsic::masked_expandload:
5132     visitMaskedLoad(I, true /* IsExpanding */);
5133     return nullptr;
5134   case Intrinsic::masked_compressstore:
5135     visitMaskedStore(I, true /* IsCompressing */);
5136     return nullptr;
5137   case Intrinsic::x86_mmx_pslli_w:
5138   case Intrinsic::x86_mmx_pslli_d:
5139   case Intrinsic::x86_mmx_pslli_q:
5140   case Intrinsic::x86_mmx_psrli_w:
5141   case Intrinsic::x86_mmx_psrli_d:
5142   case Intrinsic::x86_mmx_psrli_q:
5143   case Intrinsic::x86_mmx_psrai_w:
5144   case Intrinsic::x86_mmx_psrai_d: {
5145     SDValue ShAmt = getValue(I.getArgOperand(1));
5146     if (isa<ConstantSDNode>(ShAmt)) {
5147       visitTargetIntrinsic(I, Intrinsic);
5148       return nullptr;
5149     }
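         // The shift amount is not constant, so the immediate-shift intrinsic
         // cannot be selected directly; rewrite it to the equivalent
         // vector-shift intrinsic.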
5150     unsigned NewIntrinsic = 0;
5151     EVT ShAmtVT = MVT::v2i32;
5152     switch (Intrinsic) {
5153     case Intrinsic::x86_mmx_pslli_w:
5154       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
5155       break;
5156     case Intrinsic::x86_mmx_pslli_d:
5157       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
5158       break;
5159     case Intrinsic::x86_mmx_pslli_q:
5160       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
5161       break;
5162     case Intrinsic::x86_mmx_psrli_w:
5163       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
5164       break;
5165     case Intrinsic::x86_mmx_psrli_d:
5166       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
5167       break;
5168     case Intrinsic::x86_mmx_psrli_q:
5169       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
5170       break;
5171     case Intrinsic::x86_mmx_psrai_w:
5172       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
5173       break;
5174     case Intrinsic::x86_mmx_psrai_d:
5175       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
5176       break;
5177     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5178     }
5179 
5180     // The vector shift intrinsics with scalars use 32-bit shift amounts, but
5181     // the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits
5182     // to be zero.
5183     // We must do this early because v2i32 is not a legal type.
5184     SDValue ShOps[2];
5185     ShOps[0] = ShAmt;
5186     ShOps[1] = DAG.getConstant(0, sdl, MVT::i32);
5187     ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps);
5188     EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5189     ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5190     Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5191                        DAG.getConstant(NewIntrinsic, sdl, MVT::i32),
5192                        getValue(I.getArgOperand(0)), ShAmt);
5193     setValue(&I, Res);
5194     return nullptr;
5195   }
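       // The FP intrinsics below are expanded by helper routines into
       // equivalent DAG node sequences; legalization may still turn those
       // into libcalls.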
5196   case Intrinsic::powi:
5197     setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5198                             getValue(I.getArgOperand(1)), DAG));
5199     return nullptr;
5200   case Intrinsic::log:
5201     setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5202     return nullptr;
5203   case Intrinsic::log2:
5204     setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5205     return nullptr;
5206   case Intrinsic::log10:
5207     setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5208     return nullptr;
5209   case Intrinsic::exp:
5210     setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5211     return nullptr;
5212   case Intrinsic::exp2:
5213     setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
5214     return nullptr;
5215   case Intrinsic::pow:
5216     setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5217                            getValue(I.getArgOperand(1)), DAG, TLI));
5218     return nullptr;
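       // Unary FP intrinsics that map directly onto a single ISD opcode.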
5219   case Intrinsic::sqrt:
5220   case Intrinsic::fabs:
5221   case Intrinsic::sin:
5222   case Intrinsic::cos:
5223   case Intrinsic::floor:
5224   case Intrinsic::ceil:
5225   case Intrinsic::trunc:
5226   case Intrinsic::rint:
5227   case Intrinsic::nearbyint:
5228   case Intrinsic::round:
5229   case Intrinsic::canonicalize: {
5230     unsigned Opcode;
5231     switch (Intrinsic) {
5232     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5233     case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
5234     case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
5235     case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
5236     case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
5237     case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
5238     case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
5239     case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
5240     case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
5241     case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5242     case Intrinsic::round:     Opcode = ISD::FROUND;     break;
5243     case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
5244     }
5245 
5246     setValue(&I, DAG.getNode(Opcode, sdl,
5247                              getValue(I.getArgOperand(0)).getValueType(),
5248                              getValue(I.getArgOperand(0))));
5249     return nullptr;
5250   }
5251   case Intrinsic::minnum: {
5252     auto VT = getValue(I.getArgOperand(0)).getValueType();
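         // With no-NaNs, the NaN-propagating FMINNAN is equivalent to
         // FMINNUM; prefer it when the target supports it for this type.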
5253     unsigned Opc =
5254         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMINNAN, VT)
5255             ? ISD::FMINNAN
5256             : ISD::FMINNUM;
5257     setValue(&I, DAG.getNode(Opc, sdl, VT,
5258                              getValue(I.getArgOperand(0)),
5259                              getValue(I.getArgOperand(1))));
5260     return nullptr;
5261   }
5262   case Intrinsic::maxnum: {
5263     auto VT = getValue(I.getArgOperand(0)).getValueType();
5264     unsigned Opc =
5265         I.hasNoNaNs() && TLI.isOperationLegalOrCustom(ISD::FMAXNAN, VT)
5266             ? ISD::FMAXNAN
5267             : ISD::FMAXNUM;
5268     setValue(&I, DAG.getNode(Opc, sdl, VT,
5269                              getValue(I.getArgOperand(0)),
5270                              getValue(I.getArgOperand(1))));
5271     return nullptr;
5272   }
5273   case Intrinsic::copysign:
5274     setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5275                              getValue(I.getArgOperand(0)).getValueType(),
5276                              getValue(I.getArgOperand(0)),
5277                              getValue(I.getArgOperand(1))));
5278     return nullptr;
5279   case Intrinsic::fma:
5280     setValue(&I, DAG.getNode(ISD::FMA, sdl,
5281                              getValue(I.getArgOperand(0)).getValueType(),
5282                              getValue(I.getArgOperand(0)),
5283                              getValue(I.getArgOperand(1)),
5284                              getValue(I.getArgOperand(2))));
5285     return nullptr;
5286   case Intrinsic::experimental_constrained_fadd:
5287   case Intrinsic::experimental_constrained_fsub:
5288   case Intrinsic::experimental_constrained_fmul:
5289   case Intrinsic::experimental_constrained_fdiv:
5290   case Intrinsic::experimental_constrained_frem:
5291     visitConstrainedFPIntrinsic(I, Intrinsic);
5292     return nullptr;
5293   case Intrinsic::fmuladd: {
5294     EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
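         // Emit a single fused multiply-add when FP op fusion is allowed and
         // profitable; otherwise lower to a separate multiply and add.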
5295     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5296         TLI.isFMAFasterThanFMulAndFAdd(VT)) {
5297       setValue(&I, DAG.getNode(ISD::FMA, sdl,
5298                                getValue(I.getArgOperand(0)).getValueType(),
5299                                getValue(I.getArgOperand(0)),
5300                                getValue(I.getArgOperand(1)),
5301                                getValue(I.getArgOperand(2))));
5302     } else {
5303       // TODO: Intrinsic calls should have fast-math-flags.
5304       SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5305                                 getValue(I.getArgOperand(0)).getValueType(),
5306                                 getValue(I.getArgOperand(0)),
5307                                 getValue(I.getArgOperand(1)));
5308       SDValue Add = DAG.getNode(ISD::FADD, sdl,
5309                                 getValue(I.getArgOperand(0)).getValueType(),
5310                                 Mul,
5311                                 getValue(I.getArgOperand(2)));
5312       setValue(&I, Add);
5313     }
5314     return nullptr;
5315   }
5316   case Intrinsic::convert_to_fp16:
5317     setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5318                              DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5319                                          getValue(I.getArgOperand(0)),
5320                                          DAG.getTargetConstant(0, sdl,
5321                                                                MVT::i32))));
5322     return nullptr;
5323   case Intrinsic::convert_from_fp16:
5324     setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
5325                              TLI.getValueType(DAG.getDataLayout(), I.getType()),
5326                              DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
5327                                          getValue(I.getArgOperand(0)))));
5328     return nullptr;
5329   case Intrinsic::pcmarker: {
5330     SDValue Tmp = getValue(I.getArgOperand(0));
5331     DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5332     return nullptr;
5333   }
5334   case Intrinsic::readcyclecounter: {
5335     SDValue Op = getRoot();
5336     Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5337                       DAG.getVTList(MVT::i64, MVT::Other), Op);
5338     setValue(&I, Res);
5339     DAG.setRoot(Res.getValue(1));
5340     return nullptr;
5341   }
5342   case Intrinsic::bitreverse:
5343     setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
5344                              getValue(I.getArgOperand(0)).getValueType(),
5345                              getValue(I.getArgOperand(0))));
5346     return nullptr;
5347   case Intrinsic::bswap:
5348     setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5349                              getValue(I.getArgOperand(0)).getValueType(),
5350                              getValue(I.getArgOperand(0))));
5351     return nullptr;
5352   case Intrinsic::cttz: {
5353     SDValue Arg = getValue(I.getArgOperand(0));
5354     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5355     EVT Ty = Arg.getValueType();
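         // Operand 1 is the is_zero_undef flag; it selects between the
         // zero-defined and zero-undef forms of the count operation.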
5356     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5357                              sdl, Ty, Arg));
5358     return nullptr;
5359   }
5360   case Intrinsic::ctlz: {
5361     SDValue Arg = getValue(I.getArgOperand(0));
5362     ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5363     EVT Ty = Arg.getValueType();
5364     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5365                              sdl, Ty, Arg));
5366     return nullptr;
5367   }
5368   case Intrinsic::ctpop: {
5369     SDValue Arg = getValue(I.getArgOperand(0));
5370     EVT Ty = Arg.getValueType();
5371     setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
5372     return nullptr;
5373   }
5374   case Intrinsic::stacksave: {
5375     SDValue Op = getRoot();
5376     Res = DAG.getNode(
5377         ISD::STACKSAVE, sdl,
5378         DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
5379     setValue(&I, Res);
5380     DAG.setRoot(Res.getValue(1));
5381     return nullptr;
5382   }
5383   case Intrinsic::stackrestore: {
5384     Res = getValue(I.getArgOperand(0));
5385     DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5386     return nullptr;
5387   }
5388   case Intrinsic::get_dynamic_area_offset: {
5389     SDValue Op = getRoot();
5390     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5391     EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
5392     // The result type of @llvm.get.dynamic.area.offset must match the
5393     // target's pointer type.
5394     if (PtrTy != ResTy)
5395       report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
5396                          " intrinsic!");
5397     Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
5398                       Op);
5399     DAG.setRoot(Op);
5400     setValue(&I, Res);
5401     return nullptr;
5402   }
5403   case Intrinsic::stackguard: {
5404     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5405     MachineFunction &MF = DAG.getMachineFunction();
5406     const Module &M = *MF.getFunction()->getParent();
5407     SDValue Chain = getRoot();
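         // Prefer the target's LOAD_STACK_GUARD pseudo when available;
         // otherwise emit a volatile load of the stack guard global.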
5408     if (TLI.useLoadStackGuardNode()) {
5409       Res = getLoadStackGuard(DAG, sdl, Chain);
5410     } else {
5411       const Value *Global = TLI.getSDagStackGuard(M);
5412       unsigned Align = DL->getPrefTypeAlignment(Global->getType());
5413       Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
5414                         MachinePointerInfo(Global, 0), Align,
5415                         MachineMemOperand::MOVolatile);
5416     }
5417     DAG.setRoot(Chain);
5418     setValue(&I, Res);
5419     return nullptr;
5420   }
5421   case Intrinsic::stackprotector: {
5422     // Emit code into the DAG to store the stack guard onto the stack.
5423     MachineFunction &MF = DAG.getMachineFunction();
5424     MachineFrameInfo &MFI = MF.getFrameInfo();
5425     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
5426     SDValue Src, Chain = getRoot();
5427 
5428     if (TLI.useLoadStackGuardNode())
5429       Src = getLoadStackGuard(DAG, sdl, Chain);
5430     else
5431       Src = getValue(I.getArgOperand(0));   // The guard's value.
5432 
5433     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5434 
5435     int FI = FuncInfo.StaticAllocaMap[Slot];
5436     MFI.setStackProtectorIndex(FI);
5437 
5438     SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5439 
5440     // Store the stack protector onto the stack.
5441     Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
5442                                                  DAG.getMachineFunction(), FI),
5443                        /* Alignment = */ 0, MachineMemOperand::MOVolatile);
5444     setValue(&I, Res);
5445     DAG.setRoot(Res);
5446     return nullptr;
5447   }
5448   case Intrinsic::objectsize: {
5449     // If we don't know by now, we're never going to know.
5450     ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5451 
5452     assert(CI && "Non-constant type in __builtin_object_size?");
5453 
5454     SDValue Arg = getValue(I.getCalledValue());
5455     EVT Ty = Arg.getValueType();
5456 
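         // The size could not be folded, so give the conservative answer:
         // -1 for a maximum-size query (flag clear), 0 for a minimum-size
         // query.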
5457     if (CI->isZero())
5458       Res = DAG.getConstant(-1ULL, sdl, Ty);
5459     else
5460       Res = DAG.getConstant(0, sdl, Ty);
5461 
5462     setValue(&I, Res);
5463     return nullptr;
5464   }
5465   case Intrinsic::annotation:
5466   case Intrinsic::ptr_annotation:
5467   case Intrinsic::invariant_group_barrier:
5468     // Drop the intrinsic, but forward the value
5469     setValue(&I, getValue(I.getOperand(0)));
5470     return nullptr;
5471   case Intrinsic::assume:
5472   case Intrinsic::var_annotation:
5473     // Discard annotate attributes and assumptions
5474     return nullptr;
5475 
5476   case Intrinsic::init_trampoline: {
5477     const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5478 
5479     SDValue Ops[6];
5480     Ops[0] = getRoot();
5481     Ops[1] = getValue(I.getArgOperand(0));
5482     Ops[2] = getValue(I.getArgOperand(1));
5483     Ops[3] = getValue(I.getArgOperand(2));
5484     Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5485     Ops[5] = DAG.getSrcValue(F);
5486 
5487     Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5488 
5489     DAG.setRoot(Res);
5490     return nullptr;
5491   }
5492   case Intrinsic::adjust_trampoline: {
5493     setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5494                              TLI.getPointerTy(DAG.getDataLayout()),
5495                              getValue(I.getArgOperand(0))));
5496     return nullptr;
5497   }
5498   case Intrinsic::gcroot: {
5499     MachineFunction &MF = DAG.getMachineFunction();
5500     const Function *F = MF.getFunction();
5501     (void)F;
5502     assert(F->hasGC() &&
5503            "only valid in functions with gc specified, enforced by Verifier");
5504     assert(GFI && "implied by previous");
5505     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5506     const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5507 
5508     FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5509     GFI->addStackRoot(FI->getIndex(), TypeMap);
5510     return nullptr;
5511   }
5512   case Intrinsic::gcread:
5513   case Intrinsic::gcwrite:
5514     llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5515   case Intrinsic::flt_rounds:
5516     setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
5517     return nullptr;
5518 
5519   case Intrinsic::expect: {
5520     // Just replace __builtin_expect(exp, c) with EXP.
5521     setValue(&I, getValue(I.getArgOperand(0)));
5522     return nullptr;
5523   }
5524 
5525   case Intrinsic::debugtrap:
5526   case Intrinsic::trap: {
5527     StringRef TrapFuncName =
5528         I.getAttributes()
5529             .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
5530             .getValueAsString();
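         // With no "trap-func-name" attribute, emit the target's trap node;
         // otherwise lower to a call to the named trap function.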
5531     if (TrapFuncName.empty()) {
5532       ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5533         ISD::TRAP : ISD::DEBUGTRAP;
5534       DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
5535       return nullptr;
5536     }
5537     TargetLowering::ArgListTy Args;
5538 
5539     TargetLowering::CallLoweringInfo CLI(DAG);
5540     CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
5541         CallingConv::C, I.getType(),
5542         DAG.getExternalSymbol(TrapFuncName.data(),
5543                               TLI.getPointerTy(DAG.getDataLayout())),
5544         std::move(Args));
5545 
5546     std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5547     DAG.setRoot(Result.second);
5548     return nullptr;
5549   }
5550 
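       // Arithmetic-with-overflow intrinsics map onto ISD nodes that produce
       // both the result value and an i1 overflow flag.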
5551   case Intrinsic::uadd_with_overflow:
5552   case Intrinsic::sadd_with_overflow:
5553   case Intrinsic::usub_with_overflow:
5554   case Intrinsic::ssub_with_overflow:
5555   case Intrinsic::umul_with_overflow:
5556   case Intrinsic::smul_with_overflow: {
5557     ISD::NodeType Op;
5558     switch (Intrinsic) {
5559     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5560     case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5561     case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5562     case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5563     case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5564     case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5565     case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5566     }
5567     SDValue Op1 = getValue(I.getArgOperand(0));
5568     SDValue Op2 = getValue(I.getArgOperand(1));
5569 
5570     SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
5571     setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
5572     return nullptr;
5573   }
5574   case Intrinsic::prefetch: {
5575     SDValue Ops[5];
5576     unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
5577     Ops[0] = getRoot();
5578     Ops[1] = getValue(I.getArgOperand(0));
5579     Ops[2] = getValue(I.getArgOperand(1));
5580     Ops[3] = getValue(I.getArgOperand(2));
5581     Ops[4] = getValue(I.getArgOperand(3));
5582     DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5583                                         DAG.getVTList(MVT::Other), Ops,
5584                                         EVT::getIntegerVT(*Context, 8),
5585                                         MachinePointerInfo(I.getArgOperand(0)),
5586                                         0, /* align */
5587                                         false, /* volatile */
5588                                         rw==0, /* read */
5589                                         rw==1)); /* write */
5590     return nullptr;
5591   }
5592   case Intrinsic::lifetime_start:
5593   case Intrinsic::lifetime_end: {
5594     bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
5595     // Stack coloring is not enabled at -O0; discard region information.
5596     if (TM.getOptLevel() == CodeGenOpt::None)
5597       return nullptr;
5598 
5599     SmallVector<Value *, 4> Allocas;
5600     GetUnderlyingObjects(I.getArgOperand(1), Allocas, *DL);
5601 
5602     for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
5603            E = Allocas.end(); Object != E; ++Object) {
5604       AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
5605 
5606       // Could not find an Alloca.
5607       if (!LifetimeObject)
5608         continue;
5609 
5610       // First check that the Alloca is static, otherwise it won't have a
5611       // valid frame index.
5612       auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
5613       if (SI == FuncInfo.StaticAllocaMap.end())
5614         return nullptr;
5615 
5616       int FI = SI->second;
5617 
5618       SDValue Ops[2];
5619       Ops[0] = getRoot();
5620       Ops[1] =
5621           DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()), true);
5622       unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
5623 
5624       Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
5625       DAG.setRoot(Res);
5626     }
5627     return nullptr;
5628   }
5629   case Intrinsic::invariant_start:
5630     // Discard region information.
5631     setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
5632     return nullptr;
5633   case Intrinsic::invariant_end:
5634     // Discard region information.
5635     return nullptr;
5636   case Intrinsic::clear_cache:
5637     return TLI.getClearCacheBuiltinName();
5638   case Intrinsic::donothing:
5639     // ignore
5640     return nullptr;
5641   case Intrinsic::experimental_stackmap: {
5642     visitStackmap(I);
5643     return nullptr;
5644   }
5645   case Intrinsic::experimental_patchpoint_void:
5646   case Intrinsic::experimental_patchpoint_i64: {
5647     visitPatchpoint(&I);
5648     return nullptr;
5649   }
5650   case Intrinsic::experimental_gc_statepoint: {
5651     LowerStatepoint(ImmutableStatepoint(&I));
5652     return nullptr;
5653   }
5654   case Intrinsic::experimental_gc_result: {
5655     visitGCResult(cast<GCResultInst>(I));
5656     return nullptr;
5657   }
5658   case Intrinsic::experimental_gc_relocate: {
5659     visitGCRelocate(cast<GCRelocateInst>(I));
5660     return nullptr;
5661   }
5662   case Intrinsic::instrprof_increment:
5663     llvm_unreachable("instrprof failed to lower an increment");
5664   case Intrinsic::instrprof_value_profile:
5665     llvm_unreachable("instrprof failed to lower a value profiling call");
5666   case Intrinsic::localescape: {
5667     MachineFunction &MF = DAG.getMachineFunction();
5668     const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5669 
5670     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
5671     // is the same on all targets.
5672     for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
5673       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
5674       if (isa<ConstantPointerNull>(Arg))
5675         continue; // Skip null pointers. They represent a hole in index space.
5676       AllocaInst *Slot = cast<AllocaInst>(Arg);
5677       assert(FuncInfo.StaticAllocaMap.count(Slot) &&
5678              "can only escape static allocas");
5679       int FI = FuncInfo.StaticAllocaMap[Slot];
5680       MCSymbol *FrameAllocSym =
5681           MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
5682               GlobalValue::getRealLinkageName(MF.getName()), Idx);
5683       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
5684               TII->get(TargetOpcode::LOCAL_ESCAPE))
5685           .addSym(FrameAllocSym)
5686           .addFrameIndex(FI);
5687     }
5688 
5689     return nullptr;
5690   }
5691 
5692   case Intrinsic::localrecover: {
5693     // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
5694     MachineFunction &MF = DAG.getMachineFunction();
5695     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);
5696 
5697     // Get the symbol that defines the frame offset.
5698     auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
5699     auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
5700     unsigned IdxVal = unsigned(Idx->getLimitedValue(INT_MAX));
5701     MCSymbol *FrameAllocSym =
5702         MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
5703             GlobalValue::getRealLinkageName(Fn->getName()), IdxVal);
5704 
5705     // Create an MCSymbol for the label, to avoid any target lowering
5706     // that would make this PC-relative.
5707     SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
5708     SDValue OffsetVal =
5709         DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
5710 
5711     // Add the offset to the FP.
5712     Value *FP = I.getArgOperand(1);
5713     SDValue FPVal = getValue(FP);
5714     SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal);
5715     setValue(&I, Add);
5716 
5717     return nullptr;
5718   }
5719 
5720   case Intrinsic::eh_exceptionpointer:
5721   case Intrinsic::eh_exceptioncode: {
5722     // Get the exception pointer vreg, copy from it, and resize it to fit.
5723     const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
5724     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
5725     const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
5726     unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
5727     SDValue N =
5728         DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
5729     if (Intrinsic == Intrinsic::eh_exceptioncode)
5730       N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
5731     setValue(&I, N);
5732     return nullptr;
5733   }
5734 
5735   case Intrinsic::experimental_deoptimize:
5736     LowerDeoptimizeCall(&I);
5737     return nullptr;
5738   }
5739 }
5740 
5741 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(const CallInst &I,
5742                                                       unsigned Intrinsic) {
5743   SDLoc sdl = getCurSDLoc();
5744   unsigned Opcode;
5745   switch (Intrinsic) {
5746   default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
5747   case Intrinsic::experimental_constrained_fadd:
5748     Opcode = ISD::STRICT_FADD;
5749     break;
5750   case Intrinsic::experimental_constrained_fsub:
5751     Opcode = ISD::STRICT_FSUB;
5752     break;
5753   case Intrinsic::experimental_constrained_fmul:
5754     Opcode = ISD::STRICT_FMUL;
5755     break;
5756   case Intrinsic::experimental_constrained_fdiv:
5757     Opcode = ISD::STRICT_FDIV;
5758     break;
5759   case Intrinsic::experimental_constrained_frem:
5760     Opcode = ISD::STRICT_FREM;
5761     break;
5762   }
5763   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
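       // Constrained FP operations carry a chain so they are not reordered
       // with respect to other side-effecting operations.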
5764   SDValue Chain = getRoot();
5765   SDValue Ops[3] = { Chain, getValue(I.getArgOperand(0)),
5766                      getValue(I.getArgOperand(1)) };
5767   SmallVector<EVT, 4> ValueVTs;
5768   ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5769   ValueVTs.push_back(MVT::Other); // Out chain
5770 
5771   SDVTList VTs = DAG.getVTList(ValueVTs);
5772   SDValue Result = DAG.getNode(Opcode, sdl, VTs, Ops);
5773 
5774   assert(Result.getNode()->getNumValues() == 2);
5775   SDValue OutChain = Result.getValue(1);
5776   DAG.setRoot(OutChain);
5777   SDValue FPResult = Result.getValue(0);
5778   setValue(&I, FPResult);
5779 }
5780 
5781 std::pair<SDValue, SDValue>
5782 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
5783                                     const BasicBlock *EHPadBB) {
5784   MachineFunction &MF = DAG.getMachineFunction();
5785   MachineModuleInfo &MMI = MF.getMMI();
5786   MCSymbol *BeginLabel = nullptr;
5787 
5788   if (EHPadBB) {
5789     // Insert a label before the invoke call to mark the try range.  This can be
5790     // used to detect deletion of the invoke via the MachineModuleInfo.
5791     BeginLabel = MMI.getContext().createTempSymbol();
5792 
5793     // For SjLj, keep track of which landing pads go with which invokes
5794     // so as to maintain the ordering of pads in the LSDA.
5795     unsigned CallSiteIndex = MMI.getCurrentCallSite();
5796     if (CallSiteIndex) {
5797       MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
5798       LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
5799 
5800       // Now that the call site is handled, stop tracking it.
5801       MMI.setCurrentCallSite(0);
5802     }
5803 
5804     // Both PendingLoads and PendingExports must be flushed here;
5805     // this call might not return.
5806     (void)getRoot();
5807     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
5808 
5809     CLI.setChain(getRoot());
5810   }
5811   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5812   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
5813 
5814   assert((CLI.IsTailCall || Result.second.getNode()) &&
5815          "Non-null chain expected with non-tail call!");
5816   assert((Result.second.getNode() || !Result.first.getNode()) &&
5817          "Null value expected with tail call!");
5818 
5819   if (!Result.second.getNode()) {
5820     // As a special case, a null chain means that a tail call has been emitted
5821     // and the DAG root is already updated.
5822     HasTailCall = true;
5823 
5824     // Since there's no actual continuation from this block, nothing can be
5825     // relying on us setting vregs for them.
5826     PendingExports.clear();
5827   } else {
5828     DAG.setRoot(Result.second);
5829   }
5830 
5831   if (EHPadBB) {
5832     // Insert a label at the end of the invoke call to mark the try range.  This
5833     // can be used to detect deletion of the invoke via the MachineModuleInfo.
5834     MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
5835     DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
5836 
5837     // Inform MachineModuleInfo of range.
5838     if (MF.hasEHFunclets()) {
5839       assert(CLI.CS);
5840       WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
5841       EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS->getInstruction()),
5842                                 BeginLabel, EndLabel);
5843     } else {
5844       MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
5845     }
5846   }
5847 
5848   return Result;
5849 }
5850 
5851 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
5852                                       bool isTailCall,
5853                                       const BasicBlock *EHPadBB) {
5854   auto &DL = DAG.getDataLayout();
5855   FunctionType *FTy = CS.getFunctionType();
5856   Type *RetTy = CS.getType();
5857 
5858   TargetLowering::ArgListTy Args;
5859   Args.reserve(CS.arg_size());
5860 
5861   const Value *SwiftErrorVal = nullptr;
5862   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5863 
5864   // We can't tail call inside a function with a swifterror argument: lowering
5865   // does not support this yet. The value would have to be moved into the
5866   // swifterror register before the call.
5867   auto *Caller = CS.getInstruction()->getParent()->getParent();
5868   if (TLI.supportSwiftError() &&
5869       Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
5870     isTailCall = false;
5871 
5872   for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
5873        i != e; ++i) {
5874     TargetLowering::ArgListEntry Entry;
5875     const Value *V = *i;
5876 
5877     // Skip empty types
5878     if (V->getType()->isEmptyTy())
5879       continue;
5880 
5881     SDValue ArgNode = getValue(V);
5882     Entry.Node = ArgNode; Entry.Ty = V->getType();
5883 
5884     // Skip the first return-type Attribute to get to params.
5885     Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
5886 
5887     // Use swifterror virtual register as input to the call.
5888     if (Entry.IsSwiftError && TLI.supportSwiftError()) {
5889       SwiftErrorVal = V;
5890       // Find the virtual register for the actual swifterror argument and
5891       // use it in place of the Value.
5892       Entry.Node =
5893           DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, V),
5894                           EVT(TLI.getPointerTy(DL)));
5895     }
5896 
5897     Args.push_back(Entry);
5898 
5899     // If we have an explicit sret argument that is an Instruction (i.e., it
5900     // might point to function-local memory), we can't meaningfully tail-call.
5901     if (Entry.IsSRet && isa<Instruction>(V))
5902       isTailCall = false;
5903   }
5904 
5905   // Check if target-independent constraints permit a tail call here.
5906   // Target-dependent constraints are checked within TLI->LowerCallTo.
5907   if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
5908     isTailCall = false;
5909 
5910   // Disable tail calls if there is a swifterror argument. Targets have not
5911   // been updated to support tail calls.
5912   if (TLI.supportSwiftError() && SwiftErrorVal)
5913     isTailCall = false;
5914 
5915   TargetLowering::CallLoweringInfo CLI(DAG);
5916   CLI.setDebugLoc(getCurSDLoc())
5917       .setChain(getRoot())
5918       .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
5919       .setTailCall(isTailCall)
5920       .setConvergent(CS.isConvergent());
5921   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
5922 
5923   if (Result.first.getNode()) {
5924     const Instruction *Inst = CS.getInstruction();
5925     Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
5926     setValue(Inst, Result.first);
5927   }
5928 
5929   // The last element of CLI.InVals holds the SDValue for the swifterror
5930   // return. Copy it to a virtual register and update SwiftErrorMap for
5931   // book-keeping.
5932   if (SwiftErrorVal && TLI.supportSwiftError()) {
5933     // Get the last element of InVals.
5934     SDValue Src = CLI.InVals.back();
5935     const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
5936     unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
5937     SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
5938     // We update the virtual register for the actual swifterror argument.
5939     FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
5940     DAG.setRoot(CopyNode);
5941   }
5942 }
5943 
5944 /// Return true if it only matters that the value is equal or not-equal to zero.
5945 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
5946   for (const User *U : V->users()) {
5947     if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
5948       if (IC->isEquality())
5949         if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
5950           if (C->isNullValue())
5951             continue;
5952     // Unknown instruction.
5953     return false;
5954   }
5955   return true;
5956 }
5957 
5958 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
5959                              SelectionDAGBuilder &Builder) {
5960 
5961   // Check to see if this load can be trivially constant folded, e.g. if the
5962   // input is from a string literal.
5963   if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
5964     // Cast pointer to the type we really want to load.
5965     Type *LoadTy =
5966         Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
5967     if (LoadVT.isVector())
5968       LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());
5969 
5970     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
5971                                          PointerType::getUnqual(LoadTy));
5972 
5973     if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
5974             const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
5975       return Builder.getValue(LoadCst);
5976   }
5977 
5978   // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
5979   // still constant memory, the input chain can be the entry node.
5980   SDValue Root;
5981   bool ConstantMemory = false;
5982 
5983   // Do not serialize (non-volatile) loads of constant memory with anything.
5984   if (Builder.AA->pointsToConstantMemory(PtrVal)) {
5985     Root = Builder.DAG.getEntryNode();
5986     ConstantMemory = true;
5987   } else {
5988     // Do not serialize non-volatile loads against each other.
5989     Root = Builder.DAG.getRoot();
5990   }
5991 
5992   SDValue Ptr = Builder.getValue(PtrVal);
5993   SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
5994                                         Ptr, MachinePointerInfo(PtrVal),
5995                                         /* Alignment = */ 1);
5996 
5997   if (!ConstantMemory)
5998     Builder.PendingLoads.push_back(LoadVal.getValue(1));
5999   return LoadVal;
6000 }
6001 
6002 /// Record the value for an instruction that produces an integer result,
6003 /// converting the type where necessary.
6004 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
6005                                                   SDValue Value,
6006                                                   bool IsSigned) {
6007   EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6008                                                     I.getType(), true);
6009   if (IsSigned)
6010     Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
6011   else
6012     Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
6013   setValue(&I, Value);
6014 }
6015 
6016 /// See if we can lower a memcmp call into an optimized form. If so, return
6017 /// true and lower it. Otherwise return false, and it will be lowered like a
6018 /// normal call.
6019 /// The caller already checked that \p I calls the appropriate LibFunc with a
6020 /// correct prototype.
6021 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
6022   const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
6023   const Value *Size = I.getArgOperand(2);
6024   const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
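       // memcmp with a constant size of zero always compares equal; fold the
       // call to the constant 0.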
6025   if (CSize && CSize->getZExtValue() == 0) {
6026     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
6027                                                           I.getType(), true);
6028     setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
6029     return true;
6030   }
6031 
6032   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6033   std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
6034       DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
6035       getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
6036   if (Res.first.getNode()) {
6037     processIntegerCallValue(I, Res.first, true);
6038     PendingLoads.push_back(Res.second);
6039     return true;
6040   }
6041 
6042   // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
6043   // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
6044   if (!CSize || !IsOnlyUsedInZeroEqualityComparison(&I))
6045     return false;
6046 
6047   // If the target has a fast compare for the given size, it will return a
6048   // preferred load type for that size. Require that the load VT is legal and
6049   // that the target supports unaligned loads of that type. Otherwise, return
6050   // INVALID.
6051   auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
6052     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6053     MVT LVT = TLI.hasFastEqualityCompare(NumBits);
6054     if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
6055       // TODO: Handle 5 byte compare as 4-byte + 1 byte.
6056       // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
6057       // TODO: Check alignment of src and dest ptrs.
6058       unsigned DstAS = LHS->getType()->getPointerAddressSpace();
6059       unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
6060       if (!TLI.isTypeLegal(LVT) ||
6061           !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
6062           !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
6063         LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
6064     }
6065 
6066     return LVT;
6067   };
6068 
6069   // This turns into unaligned loads. We only do this if the target natively
6070   // supports the MVT we'll be loading or if it is small enough (<= 4) that
6071   // we'll only produce a small number of byte loads.
6072   MVT LoadVT;
6073   unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
6074   switch (NumBitsToCompare) {
6075   default:
6076     return false;
6077   case 16:
6078     LoadVT = MVT::i16;
6079     break;
6080   case 32:
6081     LoadVT = MVT::i32;
6082     break;
6083   case 64:
6084   case 128:
6085   case 256:
6086     LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
6087     break;
6088   }
6089 
6090   if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
6091     return false;
6092 
6093   SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
6094   SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
6095 
6096   // Bitcast to a wide integer type if the loads are vectors.
6097   if (LoadVT.isVector()) {
6098     EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
6099     LoadL = DAG.getBitcast(CmpVT, LoadL);
6100     LoadR = DAG.getBitcast(CmpVT, LoadR);
6101   }
6102 
6103   SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
6104   processIntegerCallValue(I, Cmp, false);
6105   return true;
6106 }
6107 
6108 /// See if we can lower a memchr call into an optimized form. If so, return
6109 /// true and lower it. Otherwise return false, and it will be lowered like a
6110 /// normal call.
6111 /// The caller already checked that \p I calls the appropriate LibFunc with a
6112 /// correct prototype.
6113 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
6114   const Value *Src = I.getArgOperand(0);
6115   const Value *Char = I.getArgOperand(1);
6116   const Value *Length = I.getArgOperand(2);
6117 
6118   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6119   std::pair<SDValue, SDValue> Res =
6120     TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
6121                                 getValue(Src), getValue(Char), getValue(Length),
6122                                 MachinePointerInfo(Src));
6123   if (Res.first.getNode()) {
6124     setValue(&I, Res.first);
6125     PendingLoads.push_back(Res.second);
6126     return true;
6127   }
6128 
6129   return false;
6130 }
6131 
6132 /// See if we can lower a mempcpy call into an optimized form. If so, return
6133 /// true and lower it. Otherwise return false, and it will be lowered like a
6134 /// normal call.
6135 /// The caller already checked that \p I calls the appropriate LibFunc with a
6136 /// correct prototype.
6137 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
6138   SDValue Dst = getValue(I.getArgOperand(0));
6139   SDValue Src = getValue(I.getArgOperand(1));
6140   SDValue Size = getValue(I.getArgOperand(2));
6141 
6142   unsigned DstAlign = DAG.InferPtrAlignment(Dst);
6143   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
6144   unsigned Align = std::min(DstAlign, SrcAlign);
6145   if (Align == 0) // Alignment of one or both could not be inferred.
6146     Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved.
6147 
6148   bool isVol = false;
6149   SDLoc sdl = getCurSDLoc();
6150 
6151   // In the mempcpy context we need to pass in a false value for isTailCall
6152   // because the return pointer needs to be adjusted by the size of
6153   // the copied memory.
6154   SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
6155                              false, /*isTailCall=*/false,
6156                              MachinePointerInfo(I.getArgOperand(0)),
6157                              MachinePointerInfo(I.getArgOperand(1)));
6158   assert(MC.getNode() != nullptr &&
6159          "** memcpy should not be lowered as TailCall in mempcpy context **");
6160   DAG.setRoot(MC);
6161 
6162   // Check if Size needs to be truncated or extended.
6163   Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
6164 
6165   // Adjust return pointer to point just past the last dst byte.
6166   SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
6167                                     Dst, Size);
6168   setValue(&I, DstPlusSize);
6169   return true;
6170 }
6171 
6172 /// See if we can lower a strcpy call into an optimized form.  If so, return
6173 /// true and lower it, otherwise return false and it will be lowered like a
6174 /// normal call.
6175 /// The caller already checked that \p I calls the appropriate LibFunc with a
6176 /// correct prototype.
6177 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
6178   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6179 
6180   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6181   std::pair<SDValue, SDValue> Res =
6182     TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
6183                                 getValue(Arg0), getValue(Arg1),
6184                                 MachinePointerInfo(Arg0),
6185                                 MachinePointerInfo(Arg1), isStpcpy);
6186   if (Res.first.getNode()) {
6187     setValue(&I, Res.first);
6188     DAG.setRoot(Res.second);
6189     return true;
6190   }
6191 
6192   return false;
6193 }
6194 
6195 /// See if we can lower a strcmp call into an optimized form.  If so, return
6196 /// true and lower it, otherwise return false and it will be lowered like a
6197 /// normal call.
6198 /// The caller already checked that \p I calls the appropriate LibFunc with a
6199 /// correct prototype.
6200 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
6201   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6202 
6203   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6204   std::pair<SDValue, SDValue> Res =
6205     TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
6206                                 getValue(Arg0), getValue(Arg1),
6207                                 MachinePointerInfo(Arg0),
6208                                 MachinePointerInfo(Arg1));
6209   if (Res.first.getNode()) {
6210     processIntegerCallValue(I, Res.first, true);
6211     PendingLoads.push_back(Res.second);
6212     return true;
6213   }
6214 
6215   return false;
6216 }
6217 
6218 /// See if we can lower a strlen call into an optimized form.  If so, return
6219 /// true and lower it, otherwise return false and it will be lowered like a
6220 /// normal call.
6221 /// The caller already checked that \p I calls the appropriate LibFunc with a
6222 /// correct prototype.
6223 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
6224   const Value *Arg0 = I.getArgOperand(0);
6225 
6226   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6227   std::pair<SDValue, SDValue> Res =
6228     TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
6229                                 getValue(Arg0), MachinePointerInfo(Arg0));
6230   if (Res.first.getNode()) {
6231     processIntegerCallValue(I, Res.first, false);
6232     PendingLoads.push_back(Res.second);
6233     return true;
6234   }
6235 
6236   return false;
6237 }
6238 
6239 /// See if we can lower a strnlen call into an optimized form.  If so, return
6240 /// true and lower it, otherwise return false and it will be lowered like a
6241 /// normal call.
6242 /// The caller already checked that \p I calls the appropriate LibFunc with a
6243 /// correct prototype.
6244 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
6245   const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
6246 
6247   const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6248   std::pair<SDValue, SDValue> Res =
6249     TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
6250                                  getValue(Arg0), getValue(Arg1),
6251                                  MachinePointerInfo(Arg0));
6252   if (Res.first.getNode()) {
6253     processIntegerCallValue(I, Res.first, false);
6254     PendingLoads.push_back(Res.second);
6255     return true;
6256   }
6257 
6258   return false;
6259 }
6260 
6261 /// See if we can lower a unary floating-point operation into an SDNode with
6262 /// the specified Opcode.  If so, return true and lower it, otherwise return
6263 /// false and it will be lowered like a normal call.
6264 /// The caller already checked that \p I calls the appropriate LibFunc with a
6265 /// correct prototype.
6266 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
6267                                               unsigned Opcode) {
6268   // We already checked this call's prototype; verify it doesn't modify errno.
6269   if (!I.onlyReadsMemory())
6270     return false;
6271 
6272   SDValue Tmp = getValue(I.getArgOperand(0));
6273   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
6274   return true;
6275 }
6276 
6277 /// See if we can lower a binary floating-point operation into an SDNode with
6278 /// the specified Opcode. If so, return true and lower it. Otherwise return
6279 /// false, and it will be lowered like a normal call.
6280 /// The caller already checked that \p I calls the appropriate LibFunc with a
6281 /// correct prototype.
6282 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
6283                                                unsigned Opcode) {
6284   // We already checked this call's prototype; verify it doesn't modify errno.
6285   if (!I.onlyReadsMemory())
6286     return false;
6287 
6288   SDValue Tmp0 = getValue(I.getArgOperand(0));
6289   SDValue Tmp1 = getValue(I.getArgOperand(1));
6290   EVT VT = Tmp0.getValueType();
6291   setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
6292   return true;
6293 }
6294 
6295 void SelectionDAGBuilder::visitCall(const CallInst &I) {
6296   // Handle inline assembly differently.
6297   if (isa<InlineAsm>(I.getCalledValue())) {
6298     visitInlineAsm(&I);
6299     return;
6300   }
6301 
6302   MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6303   computeUsesVAFloatArgument(I, MMI);
6304 
6305   const char *RenameFn = nullptr;
6306   if (Function *F = I.getCalledFunction()) {
6307     if (F->isDeclaration()) {
6308       if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
6309         if (unsigned IID = II->getIntrinsicID(F)) {
6310           RenameFn = visitIntrinsicCall(I, IID);
6311           if (!RenameFn)
6312             return;
6313         }
6314       }
6315       if (Intrinsic::ID IID = F->getIntrinsicID()) {
6316         RenameFn = visitIntrinsicCall(I, IID);
6317         if (!RenameFn)
6318           return;
6319       }
6320     }
6321 
6322     // Check for well-known libc/libm calls.  If the function is internal, it
6323     // can't be a library call.  Don't do the check if marked as nobuiltin for
6324     // some reason.
6325     LibFunc Func;
6326     if (!I.isNoBuiltin() && !F->hasLocalLinkage() && F->hasName() &&
6327         LibInfo->getLibFunc(*F, Func) &&
6328         LibInfo->hasOptimizedCodeGen(Func)) {
6329       switch (Func) {
6330       default: break;
6331       case LibFunc_copysign:
6332       case LibFunc_copysignf:
6333       case LibFunc_copysignl:
6334         // We already checked this call's prototype; verify it doesn't modify
6335         // errno.
6336         if (I.onlyReadsMemory()) {
6337           SDValue LHS = getValue(I.getArgOperand(0));
6338           SDValue RHS = getValue(I.getArgOperand(1));
6339           setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
6340                                    LHS.getValueType(), LHS, RHS));
6341           return;
6342         }
6343         break;
6344       case LibFunc_fabs:
6345       case LibFunc_fabsf:
6346       case LibFunc_fabsl:
6347         if (visitUnaryFloatCall(I, ISD::FABS))
6348           return;
6349         break;
6350       case LibFunc_fmin:
6351       case LibFunc_fminf:
6352       case LibFunc_fminl:
6353         if (visitBinaryFloatCall(I, ISD::FMINNUM))
6354           return;
6355         break;
6356       case LibFunc_fmax:
6357       case LibFunc_fmaxf:
6358       case LibFunc_fmaxl:
6359         if (visitBinaryFloatCall(I, ISD::FMAXNUM))
6360           return;
6361         break;
6362       case LibFunc_sin:
6363       case LibFunc_sinf:
6364       case LibFunc_sinl:
6365         if (visitUnaryFloatCall(I, ISD::FSIN))
6366           return;
6367         break;
6368       case LibFunc_cos:
6369       case LibFunc_cosf:
6370       case LibFunc_cosl:
6371         if (visitUnaryFloatCall(I, ISD::FCOS))
6372           return;
6373         break;
6374       case LibFunc_sqrt:
6375       case LibFunc_sqrtf:
6376       case LibFunc_sqrtl:
6377       case LibFunc_sqrt_finite:
6378       case LibFunc_sqrtf_finite:
6379       case LibFunc_sqrtl_finite:
6380         if (visitUnaryFloatCall(I, ISD::FSQRT))
6381           return;
6382         break;
6383       case LibFunc_floor:
6384       case LibFunc_floorf:
6385       case LibFunc_floorl:
6386         if (visitUnaryFloatCall(I, ISD::FFLOOR))
6387           return;
6388         break;
6389       case LibFunc_nearbyint:
6390       case LibFunc_nearbyintf:
6391       case LibFunc_nearbyintl:
6392         if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
6393           return;
6394         break;
6395       case LibFunc_ceil:
6396       case LibFunc_ceilf:
6397       case LibFunc_ceill:
6398         if (visitUnaryFloatCall(I, ISD::FCEIL))
6399           return;
6400         break;
6401       case LibFunc_rint:
6402       case LibFunc_rintf:
6403       case LibFunc_rintl:
6404         if (visitUnaryFloatCall(I, ISD::FRINT))
6405           return;
6406         break;
6407       case LibFunc_round:
6408       case LibFunc_roundf:
6409       case LibFunc_roundl:
6410         if (visitUnaryFloatCall(I, ISD::FROUND))
6411           return;
6412         break;
6413       case LibFunc_trunc:
6414       case LibFunc_truncf:
6415       case LibFunc_truncl:
6416         if (visitUnaryFloatCall(I, ISD::FTRUNC))
6417           return;
6418         break;
6419       case LibFunc_log2:
6420       case LibFunc_log2f:
6421       case LibFunc_log2l:
6422         if (visitUnaryFloatCall(I, ISD::FLOG2))
6423           return;
6424         break;
6425       case LibFunc_exp2:
6426       case LibFunc_exp2f:
6427       case LibFunc_exp2l:
6428         if (visitUnaryFloatCall(I, ISD::FEXP2))
6429           return;
6430         break;
6431       case LibFunc_memcmp:
6432         if (visitMemCmpCall(I))
6433           return;
6434         break;
6435       case LibFunc_mempcpy:
6436         if (visitMemPCpyCall(I))
6437           return;
6438         break;
6439       case LibFunc_memchr:
6440         if (visitMemChrCall(I))
6441           return;
6442         break;
6443       case LibFunc_strcpy:
6444         if (visitStrCpyCall(I, false))
6445           return;
6446         break;
6447       case LibFunc_stpcpy:
6448         if (visitStrCpyCall(I, true))
6449           return;
6450         break;
6451       case LibFunc_strcmp:
6452         if (visitStrCmpCall(I))
6453           return;
6454         break;
6455       case LibFunc_strlen:
6456         if (visitStrLenCall(I))
6457           return;
6458         break;
6459       case LibFunc_strnlen:
6460         if (visitStrNLenCall(I))
6461           return;
6462         break;
6463       }
6464     }
6465   }
6466 
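  // Illustrative example (assumed, not from this file): a readonly call like
  //   %r = call double @copysign(double %m, double %s)
  // is matched by the switch above and lowered directly to ISD::FCOPYSIGN,
  // so the generic call lowering below is never reached for it.
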
6467   SDValue Callee;
6468   if (!RenameFn)
6469     Callee = getValue(I.getCalledValue());
6470   else
6471     Callee = DAG.getExternalSymbol(
6472         RenameFn,
6473         DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6474 
6475   // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
6476   // have to do anything here to lower funclet bundles.
6477   assert(!I.hasOperandBundlesOtherThan(
6478              {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
6479          "Cannot lower calls with arbitrary operand bundles!");
6480 
6481   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
6482     LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
6483   else
6484     // Check if we can potentially perform a tail call. More detailed checking
6485     // is done within LowerCallTo, after more information about the call is
6486     // known.
6487     LowerCallTo(&I, Callee, I.isTailCall());
6488 }
6489 
6490 namespace {
6491 
6492 /// AsmOperandInfo - This contains information for each constraint that we are
6493 /// lowering.
6494 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
6495 public:
6496   /// CallOperand - If this is the result output operand or a clobber
6497   /// this is null, otherwise it is the incoming operand to the CallInst.
6498   /// This gets modified as the asm is processed.
6499   SDValue CallOperand;
6500 
6501   /// AssignedRegs - If this is a register or register class operand, this
6502   /// contains the set of registers corresponding to the operand.
6503   RegsForValue AssignedRegs;
6504 
6505   explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
6506     : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
6507   }
6508 
6509   /// Whether or not this operand accesses memory
6510   bool hasMemory(const TargetLowering &TLI) const {
6511     // Indirect operands access memory.
6512     if (isIndirect)
6513       return true;
6514 
6515     for (const auto &Code : Codes)
6516       if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
6517         return true;
6518 
6519     return false;
6520   }
6521 
6522   /// getCallOperandValEVT - Return the EVT of the Value* that this operand
6523   /// corresponds to.  If there is no Value* for this operand, it returns
6524   /// MVT::Other.
6525   EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
6526                            const DataLayout &DL) const {
6527     if (!CallOperandVal) return MVT::Other;
6528 
6529     if (isa<BasicBlock>(CallOperandVal))
6530       return TLI.getPointerTy(DL);
6531 
6532     llvm::Type *OpTy = CallOperandVal->getType();
6533 
6534     // FIXME: code duplicated from TargetLowering::ParseConstraints().
6535     // If this is an indirect operand, the operand is a pointer to the
6536     // accessed type.
6537     if (isIndirect) {
6538       llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
6539       if (!PtrTy)
6540         report_fatal_error("Indirect operand for inline asm not a pointer!");
6541       OpTy = PtrTy->getElementType();
6542     }
6543 
6544     // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
6545     if (StructType *STy = dyn_cast<StructType>(OpTy))
6546       if (STy->getNumElements() == 1)
6547         OpTy = STy->getElementType(0);
6548 
6549     // If OpTy is not a single value, it may be a struct/union that we
6550     // can tile with integers.
6551     if (!OpTy->isSingleValueType() && OpTy->isSized()) {
6552       unsigned BitSize = DL.getTypeSizeInBits(OpTy);
6553       switch (BitSize) {
6554       default: break;
6555       case 1:
6556       case 8:
6557       case 16:
6558       case 32:
6559       case 64:
6560       case 128:
6561         OpTy = IntegerType::get(Context, BitSize);
6562         break;
6563       }
6564     }
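    // e.g. (illustrative) an operand of struct type { i32, i32 } would be
    // tiled as a single i64 here.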
6565 
6566     return TLI.getValueType(DL, OpTy, true);
6567   }
6568 };
6569 
6570 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
6571 
6572 } // end anonymous namespace
6573 
6574 /// Make sure that the output operand \p OpInfo and its corresponding input
6575 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
6576 /// out).
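///
/// Illustrative example (assumed, not from this file): in
///   asm("" : "=r"(Out) : "0"(In))
/// the "0" input is tied to output 0, so both operands must fit the same
/// register class with compatible types.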
6577 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
6578                                SDISelAsmOperandInfo &MatchingOpInfo,
6579                                SelectionDAG &DAG) {
6580   if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
6581     return;
6582 
6583   const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
6584   const auto &TLI = DAG.getTargetLoweringInfo();
6585 
6586   std::pair<unsigned, const TargetRegisterClass *> MatchRC =
6587       TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
6588                                        OpInfo.ConstraintVT);
6589   std::pair<unsigned, const TargetRegisterClass *> InputRC =
6590       TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
6591                                        MatchingOpInfo.ConstraintVT);
6592   if ((OpInfo.ConstraintVT.isInteger() !=
6593        MatchingOpInfo.ConstraintVT.isInteger()) ||
6594       (MatchRC.second != InputRC.second)) {
6595     // FIXME: error out in a more elegant fashion
6596     report_fatal_error("Unsupported asm: input constraint"
6597                        " with a matching output constraint of"
6598                        " incompatible type!");
6599   }
6600   MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
6601 }
6602 
6603 /// Rewrite a direct memory input so it can be used as an indirect operand.
6604 /// This may introduce stores, hence the need for a \p Chain.
6605 /// \return The (possibly updated) chain.
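///
/// For example (illustrative): a constant operand to an "m" constraint is
/// placed in the constant pool and its address used instead, while a
/// non-constant value is spilled to a fresh stack slot before the asm.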
6606 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
6607                                         SDISelAsmOperandInfo &OpInfo,
6608                                         SelectionDAG &DAG) {
6609   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6610 
6611   // If we don't have an indirect input, put it in the constant pool if we
6612   // can; otherwise spill it to a stack slot.
6613   // TODO: This isn't quite right. We need to handle these according to
6614   // the addressing mode that the constraint wants. Also, this may take
6615   // an additional register for the computation and we don't want that
6616   // either.
6617 
6618   // If the operand is a float, integer, or vector constant, spill to a
6619   // constant pool entry to get its address.
6620   const Value *OpVal = OpInfo.CallOperandVal;
6621   if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
6622       isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
6623     OpInfo.CallOperand = DAG.getConstantPool(
6624         cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
6625     return Chain;
6626   }
6627 
6628   // Otherwise, create a stack slot and emit a store to it before the asm.
6629   Type *Ty = OpVal->getType();
6630   auto &DL = DAG.getDataLayout();
6631   uint64_t TySize = DL.getTypeAllocSize(Ty);
6632   unsigned Align = DL.getPrefTypeAlignment(Ty);
6633   MachineFunction &MF = DAG.getMachineFunction();
6634   int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
6635   SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy(DL));
6636   Chain = DAG.getStore(Chain, Location, OpInfo.CallOperand, StackSlot,
6637                        MachinePointerInfo::getFixedStack(MF, SSFI));
6638   OpInfo.CallOperand = StackSlot;
6639 
6640   return Chain;
6641 }
6642 
6643 /// GetRegistersForValue - Assign registers (virtual or physical) for the
6644 /// specified operand.  We prefer to assign virtual registers, to allow the
6645 /// register allocator to handle the assignment process.  However, if the asm
6646 /// uses features that we can't model on machineinstrs, we have SDISel do the
6647 /// allocation.  This produces generally horrible, but correct, code.
6648 ///
6649 ///   OpInfo describes the operand.
6650 ///
6651 static void GetRegistersForValue(SelectionDAG &DAG, const TargetLowering &TLI,
6652                                  const SDLoc &DL,
6653                                  SDISelAsmOperandInfo &OpInfo) {
6654   LLVMContext &Context = *DAG.getContext();
6655 
6656   MachineFunction &MF = DAG.getMachineFunction();
6657   SmallVector<unsigned, 4> Regs;
6658 
6659   // If this is a constraint for a single physreg, or a constraint for a
6660   // register class, find it.
6661   std::pair<unsigned, const TargetRegisterClass *> PhysReg =
6662       TLI.getRegForInlineAsmConstraint(MF.getSubtarget().getRegisterInfo(),
6663                                        OpInfo.ConstraintCode,
6664                                        OpInfo.ConstraintVT);
6665 
6666   unsigned NumRegs = 1;
6667   if (OpInfo.ConstraintVT != MVT::Other) {
6668     // If this is an FP input in an integer register (or vice versa), insert a
6669     // bitcast of the input value.  More generally, handle any case where the
6670     // input value disagrees with the register class we plan to stick it in.
6671     if (OpInfo.Type == InlineAsm::isInput &&
6672         PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
6673       // Try to convert to the first EVT that the reg class contains.  If the
6674       // types are identical size, use a bitcast to convert (e.g. two differing
6675       // vector types).
6676       MVT RegVT = *PhysReg.second->vt_begin();
6677       if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
6678         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6679                                          RegVT, OpInfo.CallOperand);
6680         OpInfo.ConstraintVT = RegVT;
6681       } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
6682         // If the input is a FP value and we want it in FP registers, do a
6683         // bitcast to the corresponding integer type.  This turns an f64 value
6684         // into i64, which can be passed with two i32 values on a 32-bit
6685         // machine.
6686         RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
6687         OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6688                                          RegVT, OpInfo.CallOperand);
6689         OpInfo.ConstraintVT = RegVT;
6690       }
6691     }
6692 
6693     NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
6694   }
6695 
6696   MVT RegVT;
6697   EVT ValueVT = OpInfo.ConstraintVT;
6698 
6699   // If this is a constraint for a specific physical register, like {r17},
6700   // assign it now.
6701   if (unsigned AssignedReg = PhysReg.first) {
6702     const TargetRegisterClass *RC = PhysReg.second;
6703     if (OpInfo.ConstraintVT == MVT::Other)
6704       ValueVT = *RC->vt_begin();
6705 
6706     // Get the actual register value type.  This is important, because the user
6707     // may have asked for (e.g.) the AX register in i32 type.  We need to
6708     // remember that AX is actually i16 to get the right extension.
6709     RegVT = *RC->vt_begin();
6710 
6711     // This is an explicit reference to a physical register.
6712     Regs.push_back(AssignedReg);
6713 
6714     // If this is an expanded reference, add the rest of the regs to Regs.
6715     if (NumRegs != 1) {
6716       TargetRegisterClass::iterator I = RC->begin();
6717       for (; *I != AssignedReg; ++I)
6718         assert(I != RC->end() && "Didn't find reg!");
6719 
6720       // Already added the first reg.
6721       --NumRegs; ++I;
6722       for (; NumRegs; --NumRegs, ++I) {
6723         assert(I != RC->end() && "Ran out of registers to allocate!");
6724         Regs.push_back(*I);
6725       }
6726     }
6727 
6728     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6729     return;
6730   }
6731 
6732   // Otherwise, if this was a reference to an LLVM register class, create vregs
6733   // for this reference.
6734   if (const TargetRegisterClass *RC = PhysReg.second) {
6735     RegVT = *RC->vt_begin();
6736     if (OpInfo.ConstraintVT == MVT::Other)
6737       ValueVT = RegVT;
6738 
6739     // Create the appropriate number of virtual registers.
6740     MachineRegisterInfo &RegInfo = MF.getRegInfo();
6741     for (; NumRegs; --NumRegs)
6742       Regs.push_back(RegInfo.createVirtualRegister(RC));
6743 
6744     OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6745     return;
6746   }
6747 
6748   // Otherwise, we couldn't allocate enough registers for this.
6749 }
6750 
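/// Locate the group of INLINEASM node operands that was already emitted for
/// inline asm operand number \p OperandNo: walk the per-operand flag words,
/// each of which records how many register operands follow it.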
6751 static unsigned
6752 findMatchingInlineAsmOperand(unsigned OperandNo,
6753                              const std::vector<SDValue> &AsmNodeOperands) {
6754   // Scan until we find the definition we already emitted of this operand.
6755   unsigned CurOp = InlineAsm::Op_FirstOperand;
6756   for (; OperandNo; --OperandNo) {
6757     // Advance to the next operand.
6758     unsigned OpFlag =
6759         cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6760     assert((InlineAsm::isRegDefKind(OpFlag) ||
6761             InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
6762             InlineAsm::isMemKind(OpFlag)) &&
6763            "Skipped past definitions?");
6764     CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
6765   }
6766   return CurOp;
6767 }
6768 
6769 /// Fill \p Regs with \p NumRegs new virtual registers of type \p RegVT
6770 /// \return true on success, false otherwise.
6771 static bool createVirtualRegs(SmallVector<unsigned, 4> &Regs, unsigned NumRegs,
6772                               MVT RegVT, SelectionDAG &DAG) {
6773   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6774   MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6775   for (unsigned i = 0, e = NumRegs; i != e; ++i) {
6776     if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT))
6777       Regs.push_back(RegInfo.createVirtualRegister(RC));
6778     else
6779       return false;
6780   }
6781   return true;
6782 }
6783 
6784 class ExtraFlags {
6785   unsigned Flags = 0;
6786 
6787 public:
6788   explicit ExtraFlags(ImmutableCallSite CS) {
6789     const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6790     if (IA->hasSideEffects())
6791       Flags |= InlineAsm::Extra_HasSideEffects;
6792     if (IA->isAlignStack())
6793       Flags |= InlineAsm::Extra_IsAlignStack;
6794     if (CS.isConvergent())
6795       Flags |= InlineAsm::Extra_IsConvergent;
6796     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
6797   }
6798 
6799   void update(const llvm::TargetLowering::AsmOperandInfo &OpInfo) {
6800     // Ideally, we would only check against memory constraints.  However, the
6801     // meaning of an Other constraint can be target-specific and we can't easily
6802     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
6803     // for Other constraints as well.
6804     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
6805         OpInfo.ConstraintType == TargetLowering::C_Other) {
6806       if (OpInfo.Type == InlineAsm::isInput)
6807         Flags |= InlineAsm::Extra_MayLoad;
6808       else if (OpInfo.Type == InlineAsm::isOutput)
6809         Flags |= InlineAsm::Extra_MayStore;
6810       else if (OpInfo.Type == InlineAsm::isClobber)
6811         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
6812     }
6813   }
6814 
6815   unsigned get() const { return Flags; }
6816 };
6817 
6818 /// visitInlineAsm - Handle a call to an InlineAsm object.
6819 ///
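/// Lowering proceeds in several passes over the constraints: compute the
/// value type of each operand, pick a constraint option and grab any
/// requested physical registers, assign virtual registers to the remaining
/// register-class operands, and finally build the operand list of the
/// ISD::INLINEASM node.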
6820 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
6821   const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6822 
6823   /// ConstraintOperands - Information about all of the constraints.
6824   SDISelAsmOperandInfoVector ConstraintOperands;
6825 
6826   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6827   TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
6828       DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);
6829 
6830   bool hasMemory = false;
6831 
6832   // Remember HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore bits.
6833   ExtraFlags ExtraInfo(CS);
6834 
6835   unsigned ArgNo = 0;   // ArgNo - The index of the next CallInst argument.
6836   unsigned ResNo = 0;   // ResNo - The result number of the next output.
6837   for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6838     ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
6839     SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
6840 
6841     MVT OpVT = MVT::Other;
6842 
6843     // Compute the value type for each operand.
6844     if (OpInfo.Type == InlineAsm::isInput ||
6845         (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
6846       OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
6847 
6848       // Process the call argument. BasicBlocks are labels, currently appearing
6849       // only in asm's.
6850       if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
6851         OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
6852       } else {
6853         OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
6854       }
6855 
6856       OpVT =
6857           OpInfo
6858               .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
6859               .getSimpleVT();
6860     }
6861 
6862     if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
6863       // The return value of the call is this value.  As such, there is no
6864       // corresponding argument.
6865       assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6866       if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
6867         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(),
6868                                       STy->getElementType(ResNo));
6869       } else {
6870         assert(ResNo == 0 && "Asm only has one result!");
6871         OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
6872       }
6873       ++ResNo;
6874     }
6875 
6876     OpInfo.ConstraintVT = OpVT;
6877 
6878     if (!hasMemory)
6879       hasMemory = OpInfo.hasMemory(TLI);
6880 
6881     // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
6882     // FIXME: Could we compute this on OpInfo rather than TargetConstraints[i]?
6883     auto TargetConstraint = TargetConstraints[i];
6884 
6885     // Compute the constraint code and ConstraintType to use.
6886     TLI.ComputeConstraintToUse(TargetConstraint, SDValue());
6887 
6888     ExtraInfo.update(TargetConstraint);
6889   }
6890 
6891   SDValue Chain, Flag;
6892 
6893   // We won't need to flush pending loads if this asm doesn't touch
6894   // memory and has no side effects.
6895   if (hasMemory || IA->hasSideEffects())
6896     Chain = getRoot();
6897   else
6898     Chain = DAG.getRoot();
6899 
6900   // Second pass over the constraints: compute which constraint option to use
6901   // and assign registers to constraints that want a specific physreg.
6902   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6903     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6904 
6905     // If this is an output operand with a matching input operand, look up the
6906     // matching input. If their types mismatch, e.g. one is an integer, the
6907     // other is floating point, or their sizes are different, flag it as an
6908     // error.
6909     if (OpInfo.hasMatchingInput()) {
6910       SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
6911       patchMatchingInput(OpInfo, Input, DAG);
6912     }
6913 
6914     // Compute the constraint code and ConstraintType to use.
6915     TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
6916 
6917     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6918         OpInfo.Type == InlineAsm::isClobber)
6919       continue;
6920 
6921     // If this is a memory input, and if the operand is not indirect, do what we
6922     // need to do to provide an address for the memory input.
6923     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6924         !OpInfo.isIndirect) {
6925       assert((OpInfo.isMultipleAlternative ||
6926               (OpInfo.Type == InlineAsm::isInput)) &&
6927              "Can only indirectify direct input operands!");
6928 
6929       // Memory operands really want the address of the value.
6930       Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
6931 
6932       // There is no longer a Value* corresponding to this operand.
6933       OpInfo.CallOperandVal = nullptr;
6934 
6935       // It is now an indirect operand.
6936       OpInfo.isIndirect = true;
6937     }
6938 
6939     // If this constraint is for a specific register, allocate it before
6940     // anything else.
6941     if (OpInfo.ConstraintType == TargetLowering::C_Register)
6942       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6943   }
6944 
6945   // Third pass - Loop over all of the operands, assigning virtual or physregs
6946   // to register class operands.
6947   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6948     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6949 
6950     // C_Register operands have already been allocated, Other/Memory don't need
6951     // to be.
6952     if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
6953       GetRegistersForValue(DAG, TLI, getCurSDLoc(), OpInfo);
6954   }
6955 
6956   // AsmNodeOperands - The operands for the ISD::INLINEASM node.
6957   std::vector<SDValue> AsmNodeOperands;
6958   AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
6959   AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
6960       IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
6961 
6962   // If we have a !srcloc metadata node associated with it, we want to attach
6963   // this to the ultimately generated inline asm machineinstr.  To do this, we
6964   // pass in the third operand as this (potentially null) inline asm MDNode.
6965   const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
6966   AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
6967 
6968   // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
6969   // bits as operand 3.
6970   AsmNodeOperands.push_back(DAG.getTargetConstant(
6971       ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
6972 
6973   // Loop over all of the inputs, copying the operand values into the
6974   // appropriate registers and processing the output regs.
6975   RegsForValue RetValRegs;
6976 
6977   // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
6978   std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
6979 
6980   for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6981     SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6982 
6983     switch (OpInfo.Type) {
6984     case InlineAsm::isOutput: {
6985       if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
6986           OpInfo.ConstraintType != TargetLowering::C_Register) {
6987         // Memory output, or 'other' output (e.g. 'X' constraint).
6988         assert(OpInfo.isIndirect && "Memory output must be indirect operand");
6989 
6990         unsigned ConstraintID =
6991             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
6992         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
6993                "Failed to convert memory constraint code to constraint id.");
6994 
6995         // Add information to the INLINEASM node to know about this output.
6996         unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6997         OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
6998         AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
6999                                                         MVT::i32));
7000         AsmNodeOperands.push_back(OpInfo.CallOperand);
7001         break;
7002       }
7003 
7004       // Otherwise, this is a register or register class output.
7005 
7006       // Copy the output from the appropriate register.  Find a register that
7007       // we can use.
7008       if (OpInfo.AssignedRegs.Regs.empty()) {
7009         emitInlineAsmError(
7010             CS, "couldn't allocate output register for constraint '" +
7011                     Twine(OpInfo.ConstraintCode) + "'");
7012         return;
7013       }
7014 
7015       // If this is an indirect operand, store through the pointer after the
7016       // asm.
7017       if (OpInfo.isIndirect) {
7018         IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
7019                                                       OpInfo.CallOperandVal));
7020       } else {
7021         // This is the result value of the call.
7022         assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
7023         // Concatenate this output onto the outputs list.
7024         RetValRegs.append(OpInfo.AssignedRegs);
7025       }
7026 
7027       // Add information to the INLINEASM node to know that this register is
7028       // set.
7029       OpInfo.AssignedRegs
7030           .AddInlineAsmOperands(OpInfo.isEarlyClobber
7031                                     ? InlineAsm::Kind_RegDefEarlyClobber
7032                                     : InlineAsm::Kind_RegDef,
7033                                 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
7034       break;
7035     }
7036     case InlineAsm::isInput: {
7037       SDValue InOperandVal = OpInfo.CallOperand;
7038 
7039       if (OpInfo.isMatchingInputConstraint()) {
7040         // If this is required to match an output register we have already set,
7041         // just use its register.
7042         auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
7043                                                   AsmNodeOperands);
7044         unsigned OpFlag =
7045           cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
7046         if (InlineAsm::isRegDefKind(OpFlag) ||
7047             InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
7048           // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
7049           if (OpInfo.isIndirect) {
7050             // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
7051             emitInlineAsmError(CS, "inline asm not supported yet:"
7052                                    " don't know how to handle tied "
7053                                    "indirect register inputs");
7054             return;
7055           }
7056 
7057           MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
7058           SmallVector<unsigned, 4> Regs;
7059 
7060           if (!createVirtualRegs(Regs,
7061                                  InlineAsm::getNumOperandRegisters(OpFlag),
7062                                  RegVT, DAG)) {
7063             emitInlineAsmError(CS, "inline asm error: This value type register "
7064                                    "class is not natively supported!");
7065             return;
7066           }
7067 
7068           RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
7069 
7070           SDLoc dl = getCurSDLoc();
7071           // Use MatchedRegs to copy the input value into the new virtual regs.
7072           MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7073                                     Chain, &Flag, CS.getInstruction());
7074           MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
7075                                            true, OpInfo.getMatchedOperand(), dl,
7076                                            DAG, AsmNodeOperands);
7077           break;
7078         }
7079 
7080         assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
7081         assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
7082                "Unexpected number of operands");
7083         // Add information to the INLINEASM node to know about this input.
7084         // See InlineAsm.h isUseOperandTiedToDef.
7085         OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
7086         OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
7087                                                     OpInfo.getMatchedOperand());
7088         AsmNodeOperands.push_back(DAG.getTargetConstant(
7089             OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7090         AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
7091         break;
7092       }
7093 
7094       // Treat indirect 'X' constraint as memory.
7095       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
7096           OpInfo.isIndirect)
7097         OpInfo.ConstraintType = TargetLowering::C_Memory;
7098 
7099       if (OpInfo.ConstraintType == TargetLowering::C_Other) {
7100         std::vector<SDValue> Ops;
7101         TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
7102                                           Ops, DAG);
7103         if (Ops.empty()) {
7104           emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
7105                                      Twine(OpInfo.ConstraintCode) + "'");
7106           return;
7107         }
7108 
7109         // Add information to the INLINEASM node to know about this input.
7110         unsigned ResOpType =
7111           InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
7112         AsmNodeOperands.push_back(DAG.getTargetConstant(
7113             ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
7114         AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
7115         break;
7116       }
7117 
7118       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
7119         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
7120         assert(InOperandVal.getValueType() ==
7121                    TLI.getPointerTy(DAG.getDataLayout()) &&
7122                "Memory operands expect pointer values");
7123 
7124         unsigned ConstraintID =
7125             TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
7126         assert(ConstraintID != InlineAsm::Constraint_Unknown &&
7127                "Failed to convert memory constraint code to constraint id.");
7128 
7129         // Add information to the INLINEASM node to know about this input.
7130         unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
7131         ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
7132         AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
7133                                                         getCurSDLoc(),
7134                                                         MVT::i32));
7135         AsmNodeOperands.push_back(InOperandVal);
7136         break;
7137       }
7138 
7139       assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
7140               OpInfo.ConstraintType == TargetLowering::C_Register) &&
7141              "Unknown constraint type!");
7142 
7143       // TODO: Support this.
7144       if (OpInfo.isIndirect) {
7145         emitInlineAsmError(
7146             CS, "Don't know how to handle indirect register inputs yet "
7147                 "for constraint '" +
7148                     Twine(OpInfo.ConstraintCode) + "'");
7149         return;
7150       }
7151 
7152       // Copy the input into the appropriate registers.
7153       if (OpInfo.AssignedRegs.Regs.empty()) {
7154         emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
7155                                    Twine(OpInfo.ConstraintCode) + "'");
7156         return;
7157       }
7158 
7159       SDLoc dl = getCurSDLoc();
7160 
7161       OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
7162                                         Chain, &Flag, CS.getInstruction());
7163 
7164       OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
7165                                                dl, DAG, AsmNodeOperands);
7166       break;
7167     }
7168     case InlineAsm::isClobber: {
7169       // Add the clobbered value to the operand list, so that the register
7170       // allocator is aware that the physreg got clobbered.
7171       if (!OpInfo.AssignedRegs.Regs.empty())
7172         OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
7173                                                  false, 0, getCurSDLoc(), DAG,
7174                                                  AsmNodeOperands);
7175       break;
7176     }
7177     }
7178   }
7179 
7180   // Finish up input operands.  Set the input chain and add the flag last.
7181   AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
7182   if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
7183 
7184   Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
7185                       DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
7186   Flag = Chain.getValue(1);
7187 
7188   // If this asm returns a register value, copy the result from that register
7189   // and set it as the value of the call.
7190   if (!RetValRegs.Regs.empty()) {
7191     SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7192                                              Chain, &Flag, CS.getInstruction());
7193 
7194     // FIXME: Why don't we do this for inline asms with MRVs?
7195     if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
7196       EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType());
7197 
7198       // If any of the results of the inline asm is a vector, it may have the
7199       // wrong width/num elts.  This can happen for register classes that can
7200       // contain multiple different value types.  The preg or vreg allocated may
7201       // not have the same VT as was expected.  Convert it to the right type
7202       // with bit_convert.
7203       if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
7204         Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
7205                           ResultType, Val);
7206 
7207       } else if (ResultType != Val.getValueType() &&
7208                  ResultType.isInteger() && Val.getValueType().isInteger()) {
7209         // If a result value was tied to an input value, the computed result may
7210         // have a wider width than the expected result.  Extract the relevant
7211         // portion.
7212         Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
7213       }
7214 
7215       assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
7216     }
7217 
7218     setValue(CS.getInstruction(), Val);
7219     // Don't need to use this as a chain in this case.
7220     if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
7221       return;
7222   }
7223 
7224   std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
7225 
7226   // Process indirect outputs, first output all of the flagged copies out of
7227   // physregs.
7228   for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
7229     RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
7230     const Value *Ptr = IndirectStoresToEmit[i].second;
7231     SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
7232                                              Chain, &Flag, IA);
7233     StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
7234   }
7235 
7236   // Emit the non-flagged stores from the physregs.
7237   SmallVector<SDValue, 8> OutChains;
7238   for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
7239     SDValue Val = DAG.getStore(Chain, getCurSDLoc(), StoresToEmit[i].first,
7240                                getValue(StoresToEmit[i].second),
7241                                MachinePointerInfo(StoresToEmit[i].second));
7242     OutChains.push_back(Val);
7243   }
7244 
7245   if (!OutChains.empty())
7246     Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
7247 
7248   DAG.setRoot(Chain);
7249 }
7250 
7251 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
7252                                              const Twine &Message) {
7253   LLVMContext &Ctx = *DAG.getContext();
7254   Ctx.emitError(CS.getInstruction(), Message);
7255 
7256   // Make sure we leave the DAG in a valid state
7257   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7258   auto VT = TLI.getValueType(DAG.getDataLayout(), CS.getType());
7259   setValue(CS.getInstruction(), DAG.getUNDEF(VT));
7260 }
7261 
7262 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
7263   DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
7264                           MVT::Other, getRoot(),
7265                           getValue(I.getArgOperand(0)),
7266                           DAG.getSrcValue(I.getArgOperand(0))));
7267 }
7268 
7269 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
7270   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7271   const DataLayout &DL = DAG.getDataLayout();
7272   SDValue V = DAG.getVAArg(TLI.getValueType(DAG.getDataLayout(), I.getType()),
7273                            getCurSDLoc(), getRoot(), getValue(I.getOperand(0)),
7274                            DAG.getSrcValue(I.getOperand(0)),
7275                            DL.getABITypeAlignment(I.getType()));
7276   setValue(&I, V);
7277   DAG.setRoot(V.getValue(1));
7278 }
7279 
7280 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
7281   DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
7282                           MVT::Other, getRoot(),
7283                           getValue(I.getArgOperand(0)),
7284                           DAG.getSrcValue(I.getArgOperand(0))));
7285 }
7286 
7287 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
7288   DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
7289                           MVT::Other, getRoot(),
7290                           getValue(I.getArgOperand(0)),
7291                           getValue(I.getArgOperand(1)),
7292                           DAG.getSrcValue(I.getArgOperand(0)),
7293                           DAG.getSrcValue(I.getArgOperand(1))));
7294 }
7295 
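// Illustrative example (assumed): an intrinsic result carrying
//   !range !{ i64 0, i64 256 }
// is wrapped below as (AssertZext Op, ValueType:i8), recording that all bits
// above the low 8 are known to be zero.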
7296 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
7297                                                     const Instruction &I,
7298                                                     SDValue Op) {
7299   const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
7300   if (!Range)
7301     return Op;
7302 
7303   ConstantRange CR = getConstantRangeFromMetadata(*Range);
7304   if (CR.isFullSet() || CR.isEmptySet() || CR.isWrappedSet())
7305     return Op;
7306 
7307   APInt Lo = CR.getUnsignedMin();
7308   if (!Lo.isMinValue())
7309     return Op;
7310 
7311   APInt Hi = CR.getUnsignedMax();
7312   unsigned Bits = Hi.getActiveBits();
7313 
7314   EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
7315 
7316   SDLoc SL = getCurSDLoc();
7317 
7318   SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
7319                              DAG.getValueType(SmallVT));
7320   unsigned NumVals = Op.getNode()->getNumValues();
7321   if (NumVals == 1)
7322     return ZExt;
7323 
7324   SmallVector<SDValue, 4> Ops;
7325 
7326   Ops.push_back(ZExt);
7327   for (unsigned I = 1; I != NumVals; ++I)
7328     Ops.push_back(Op.getValue(I));
7329 
7330   return DAG.getMergeValues(Ops, SL);
7331 }
7332 
7333 /// \brief Populate a CallLoweringInfo (into \p CLI) based on the properties of
7334 /// the call being lowered.
7335 ///
7336 /// This is a helper for lowering intrinsics that follow a target calling
7337 /// convention or require stack pointer adjustment. Only a subset of the
7338 /// intrinsic's operands need to participate in the calling convention.
7339 void SelectionDAGBuilder::populateCallLoweringInfo(
7340     TargetLowering::CallLoweringInfo &CLI, ImmutableCallSite CS,
7341     unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
7342     bool IsPatchPoint) {
7343   TargetLowering::ArgListTy Args;
7344   Args.reserve(NumArgs);
7345 
7346   // Populate the argument list.
7347   // Attributes for args start at offset 1, after the return attribute.
7348   for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
7349        ArgI != ArgE; ++ArgI) {
7350     const Value *V = CS->getOperand(ArgI);
7351 
7352     assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
7353 
7354     TargetLowering::ArgListEntry Entry;
7355     Entry.Node = getValue(V);
7356     Entry.Ty = V->getType();
7357     Entry.setAttributes(&CS, AttrI);
7358     Args.push_back(Entry);
7359   }
7360 
7361   CLI.setDebugLoc(getCurSDLoc())
7362       .setChain(getRoot())
7363       .setCallee(CS.getCallingConv(), ReturnTy, Callee, std::move(Args))
7364       .setDiscardResult(CS->use_empty())
7365       .setIsPatchPoint(IsPatchPoint);
7366 }
7367 
7368 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
7369 /// or patchpoint target node's operand list.
7370 ///
7371 /// Constants are converted to TargetConstants purely as an optimization to
7372 /// avoid constant materialization and register allocation.
7373 ///
7374 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
7375 /// generate address computation nodes, and so ExpandISelPseudo can convert the
7376 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
7377 /// address materialization and register allocation, but may also be required
7378 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
7379 /// alloca in the entry block, then the runtime may assume that the alloca's
7380 /// StackMap location can be read immediately after compilation and that the
7381 /// location is valid at any point during execution (this is similar to the
7382 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
7383 /// only available in a register, then the runtime would need to trap when
7384 /// execution reaches the StackMap in order to read the alloca's location.
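///
/// For example (illustrative), a live i64 constant 42 is pushed as the pair
/// of target constants (StackMaps::ConstantOp, 42) rather than being
/// materialized into a register.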
7385 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
7386                                 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
7387                                 SelectionDAGBuilder &Builder) {
7388   for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
7389     SDValue OpVal = Builder.getValue(CS.getArgument(i));
7390     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
7391       Ops.push_back(
7392         Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
7393       Ops.push_back(
7394         Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
7395     } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
7396       const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
7397       Ops.push_back(Builder.DAG.getTargetFrameIndex(
7398           FI->getIndex(), TLI.getPointerTy(Builder.DAG.getDataLayout())));
7399     } else
7400       Ops.push_back(OpVal);
7401   }
7402 }
7403 
7404 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
7405 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
7406   // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
7407   //                                  [live variables...])
7408 
7409   assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
7410 
7411   SDValue Chain, InFlag, Callee, NullPtr;
7412   SmallVector<SDValue, 32> Ops;
7413 
7414   SDLoc DL = getCurSDLoc();
7415   Callee = getValue(CI.getCalledValue());
7416   NullPtr = DAG.getIntPtrConstant(0, DL, true);
7417 
7418   // The stackmap intrinsic only records the live variables (the arguments
7419   // passed to it) and emits NOPs (if requested). Unlike the patchpoint
7420   // intrinsic, this won't be lowered to a function call. This means we don't
7421   // have to worry about calling conventions and target specific lowering code.
7422   // Instead we perform the call lowering right here.
7423   //
7424   // chain, flag = CALLSEQ_START(chain, 0)
7425   // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
7426   // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
7427   //
7428   Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
7429   InFlag = Chain.getValue(1);
7430 
7431   // Add the <id> and <numBytes> constants.
7432   SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
7433   Ops.push_back(DAG.getTargetConstant(
7434                   cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
7435   SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
7436   Ops.push_back(DAG.getTargetConstant(
7437                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
7438                   MVT::i32));
7439 
7440   // Push live variables for the stack map.
7441   addStackMapLiveVars(&CI, 2, DL, Ops, *this);
7442 
7443   // We are not pushing any register mask info here on the operands list,
7444   // because the stackmap doesn't clobber anything.
7445 
7446   // Push the chain and the glue flag.
7447   Ops.push_back(Chain);
7448   Ops.push_back(InFlag);
7449 
7450   // Create the STACKMAP node.
7451   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7452   SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
7453   Chain = SDValue(SM, 0);
7454   InFlag = Chain.getValue(1);
7455 
7456   Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
7457 
7458   // Stackmaps don't generate values, so nothing goes into the NodeMap.
7459 
7460   // Set the root to the target-lowered call chain.
7461   DAG.setRoot(Chain);
7462 
7463   // Inform the Frame Information that we have a stackmap in this function.
7464   FuncInfo.MF->getFrameInfo().setHasStackMap();
7465 }
7466 
7467 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
7468 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
7469                                           const BasicBlock *EHPadBB) {
7470   // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
7471   //                                                 i32 <numBytes>,
7472   //                                                 i8* <target>,
7473   //                                                 i32 <numArgs>,
7474   //                                                 [Args...],
7475   //                                                 [live variables...])
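  //
  // Illustrative example (assumed):
  //   call void @llvm.experimental.patchpoint.void(i64 1, i32 15, i8* %t,
  //                                                i32 2, i64 %a, i64 %b)
  // records patchpoint id 1, reserves 15 bytes for patching, and lowers a
  // call to %t passing %a and %b.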
7476 
7477   CallingConv::ID CC = CS.getCallingConv();
7478   bool IsAnyRegCC = CC == CallingConv::AnyReg;
7479   bool HasDef = !CS->getType()->isVoidTy();
7480   SDLoc dl = getCurSDLoc();
7481   SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
7482 
7483   // Handle immediate and symbolic callees.
7484   if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
7485     Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
7486                                    /*isTarget=*/true);
7487   else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
7488     Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
7489                                          SDLoc(SymbolicCallee),
7490                                          SymbolicCallee->getValueType(0));
7491 
7492   // Get the real number of arguments participating in the call <numArgs>
7493   SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
7494   unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
7495 
7496   // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
7497   // Intrinsics include all meta-operands up to but not including CC.
7498   unsigned NumMetaOpers = PatchPointOpers::CCPos;
7499   assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
7500          "Not enough arguments provided to the patchpoint intrinsic");
7501 
7502   // For AnyRegCC the arguments are lowered later on manually.
7503   unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
7504   Type *ReturnTy =
7505     IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
7506 
7507   TargetLowering::CallLoweringInfo CLI(DAG);
7508   populateCallLoweringInfo(CLI, CS, NumMetaOpers, NumCallArgs, Callee, ReturnTy,
7509                            true);
7510   std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7511 
7512   SDNode *CallEnd = Result.second.getNode();
7513   if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
7514     CallEnd = CallEnd->getOperand(0).getNode();
7515 
7516   // Get a call instruction from the call sequence chain.
7517   // Tail calls are not allowed.
7518   assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
7519          "Expected a callseq node.");
7520   SDNode *Call = CallEnd->getOperand(0).getNode();
7521   bool HasGlue = Call->getGluedNode();
7522 
7523   // Replace the target specific call node with the patchable intrinsic.
7524   SmallVector<SDValue, 8> Ops;
7525 
7526   // Add the <id> and <numBytes> constants.
7527   SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
7528   Ops.push_back(DAG.getTargetConstant(
7529                   cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
7530   SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
7531   Ops.push_back(DAG.getTargetConstant(
7532                   cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
7533                   MVT::i32));
7534 
7535   // Add the callee.
7536   Ops.push_back(Callee);
7537 
7538   // Adjust <numArgs> to account for any arguments that have been passed on the
7539   // stack instead.
7540   // Call Node: Chain, Target, {Args}, RegMask, [Glue]
7541   unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
7542   NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
7543   Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
7544 
7545   // Add the calling convention
7546   Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
7547 
7548   // Add the arguments we omitted previously. The register allocator should
7549   // place these in any free register.
7550   if (IsAnyRegCC)
7551     for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
7552       Ops.push_back(getValue(CS.getArgument(i)));
7553 
7554   // Push the arguments from the call instruction up to the register mask.
7555   SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
7556   Ops.append(Call->op_begin() + 2, e);
7557 
7558   // Push live variables for the stack map.
7559   addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
7560 
7561   // Push the register mask info.
7562   if (HasGlue)
7563     Ops.push_back(*(Call->op_end()-2));
7564   else
7565     Ops.push_back(*(Call->op_end()-1));
7566 
7567   // Push the chain (this is originally the first operand of the call, but
7568   // now becomes the last or second-to-last operand).
7569   Ops.push_back(*(Call->op_begin()));
7570 
7571   // Push the glue flag (last operand).
7572   if (HasGlue)
7573     Ops.push_back(*(Call->op_end()-1));
7574 
7575   SDVTList NodeTys;
7576   if (IsAnyRegCC && HasDef) {
7577     // Create the return types based on the intrinsic definition
7578     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7579     SmallVector<EVT, 3> ValueVTs;
7580     ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
7581     assert(ValueVTs.size() == 1 && "Expected only one return value type.");
7582 
7583     // There is always a chain and a glue type at the end
7584     ValueVTs.push_back(MVT::Other);
7585     ValueVTs.push_back(MVT::Glue);
7586     NodeTys = DAG.getVTList(ValueVTs);
7587   } else
7588     NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7589 
7590   // Replace the target specific call node with a PATCHPOINT node.
7591   MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
7592                                          dl, NodeTys, Ops);
7593 
7594   // Update the NodeMap.
7595   if (HasDef) {
7596     if (IsAnyRegCC)
7597       setValue(CS.getInstruction(), SDValue(MN, 0));
7598     else
7599       setValue(CS.getInstruction(), Result.first);
7600   }
7601 
7602   // Fixup the consumers of the intrinsic. The chain and glue may be used in the
7603   // call sequence. Furthermore the location of the chain and glue can change
7604   // when the AnyReg calling convention is used and the intrinsic returns a
7605   // value.
7606   if (IsAnyRegCC && HasDef) {
7607     SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
7608     SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
7609     DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
7610   } else
7611     DAG.ReplaceAllUsesWith(Call, MN);
7612   DAG.DeleteNode(Call);
7613 
7614   // Inform the Frame Information that we have a patchpoint in this function.
7615   FuncInfo.MF->getFrameInfo().setHasPatchPoint();
7616 }
7617 
7618 /// Returns an AttributeList representing the attributes applied to the return
7619 /// value of the given call.
7620 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
7621   SmallVector<Attribute::AttrKind, 2> Attrs;
7622   if (CLI.RetSExt)
7623     Attrs.push_back(Attribute::SExt);
7624   if (CLI.RetZExt)
7625     Attrs.push_back(Attribute::ZExt);
7626   if (CLI.IsInReg)
7627     Attrs.push_back(Attribute::InReg);
7628 
7629   return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
7630                             Attrs);
7631 }
7632 
7633 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
7634 /// implementation, which just calls LowerCall.
7635 /// FIXME: When all targets are
7636 /// migrated to using LowerCall, this hook should be integrated into SDISel.
7637 std::pair<SDValue, SDValue>
7638 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
7639   // Handle the incoming return values from the call.
7640   CLI.Ins.clear();
7641   Type *OrigRetTy = CLI.RetTy;
7642   SmallVector<EVT, 4> RetTys;
7643   SmallVector<uint64_t, 4> Offsets;
7644   auto &DL = CLI.DAG.getDataLayout();
7645   ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
7646 
7647   SmallVector<ISD::OutputArg, 4> Outs;
7648   GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
7649 
7650   bool CanLowerReturn =
7651       this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
7652                            CLI.IsVarArg, Outs, CLI.RetTy->getContext());
7653 
7654   SDValue DemoteStackSlot;
7655   int DemoteStackIdx = -100;
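  // Illustrative note (assumed): when the target cannot lower the return
  // value in registers (e.g. a large struct), CanLowerReturn is false and
  // the return is demoted below to a hidden sret pointer argument pointing
  // at a caller-allocated stack slot.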
7656   if (!CanLowerReturn) {
7657     // FIXME: equivalent assert?
7658     // assert(!CS.hasInAllocaArgument() &&
7659     //        "sret demotion is incompatible with inalloca");
7660     uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
7661     unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
7662     MachineFunction &MF = CLI.DAG.getMachineFunction();
7663     DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
7664     Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
7665 
7666     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy(DL));
7667     ArgListEntry Entry;
7668     Entry.Node = DemoteStackSlot;
7669     Entry.Ty = StackSlotPtrType;
7670     Entry.IsSExt = false;
7671     Entry.IsZExt = false;
7672     Entry.IsInReg = false;
7673     Entry.IsSRet = true;
7674     Entry.IsNest = false;
7675     Entry.IsByVal = false;
7676     Entry.IsReturned = false;
7677     Entry.IsSwiftSelf = false;
7678     Entry.IsSwiftError = false;
7679     Entry.Alignment = Align;
7680     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
7681     CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
7682 
7683     // sret demotion isn't compatible with tail-calls, since the sret argument
7684     // points into the caller's stack frame.
7685     CLI.IsTailCall = false;
7686   } else {
7687     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7688       EVT VT = RetTys[I];
7689       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7690       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7691       for (unsigned i = 0; i != NumRegs; ++i) {
7692         ISD::InputArg MyFlags;
7693         MyFlags.VT = RegisterVT;
7694         MyFlags.ArgVT = VT;
7695         MyFlags.Used = CLI.IsReturnValueUsed;
7696         if (CLI.RetSExt)
7697           MyFlags.Flags.setSExt();
7698         if (CLI.RetZExt)
7699           MyFlags.Flags.setZExt();
7700         if (CLI.IsInReg)
7701           MyFlags.Flags.setInReg();
7702         CLI.Ins.push_back(MyFlags);
7703       }
7704     }
7705   }
7706 
7707   // We push the swifterror return as the last element of CLI.Ins.
7708   ArgListTy &Args = CLI.getArgs();
7709   if (supportSwiftError()) {
7710     for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7711       if (Args[i].IsSwiftError) {
7712         ISD::InputArg MyFlags;
7713         MyFlags.VT = getPointerTy(DL);
7714         MyFlags.ArgVT = EVT(getPointerTy(DL));
7715         MyFlags.Flags.setSwiftError();
7716         CLI.Ins.push_back(MyFlags);
7717       }
7718     }
7719   }
7720 
7721   // Handle all of the outgoing arguments.
7722   CLI.Outs.clear();
7723   CLI.OutVals.clear();
7724   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7725     SmallVector<EVT, 4> ValueVTs;
7726     ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
7727     Type *FinalType = Args[i].Ty;
7728     if (Args[i].IsByVal)
7729       FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
7730     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
7731         FinalType, CLI.CallConv, CLI.IsVarArg);
7732     for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
7733          ++Value) {
7734       EVT VT = ValueVTs[Value];
7735       Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
7736       SDValue Op = SDValue(Args[i].Node.getNode(),
7737                            Args[i].Node.getResNo() + Value);
7738       ISD::ArgFlagsTy Flags;
7739       unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
7740 
7741       if (Args[i].IsZExt)
7742         Flags.setZExt();
7743       if (Args[i].IsSExt)
7744         Flags.setSExt();
7745       if (Args[i].IsInReg) {
7746         // Under the vectorcall calling convention, a structure that is
7747         // passed InReg must be an HVA (homogeneous vector aggregate).
7748         if (CLI.CallConv == CallingConv::X86_VectorCall &&
7749             isa<StructType>(FinalType)) {
7750           // The first value of the structure is marked with HvaStart.
7751           if (0 == Value)
7752             Flags.setHvaStart();
7753           Flags.setHva();
7754         }
7755         // Set the InReg flag.
7756         Flags.setInReg();
7757       }
7758       if (Args[i].IsSRet)
7759         Flags.setSRet();
7760       if (Args[i].IsSwiftSelf)
7761         Flags.setSwiftSelf();
7762       if (Args[i].IsSwiftError)
7763         Flags.setSwiftError();
7764       if (Args[i].IsByVal)
7765         Flags.setByVal();
7766       if (Args[i].IsInAlloca) {
7767         Flags.setInAlloca();
7768         // Set the byval flag for CCAssignFn callbacks that don't know about
7769         // inalloca.  This way we can know how many bytes we should've allocated
7770         // and how many bytes a callee cleanup function will pop.  If we port
7771         // inalloca to more targets, we'll have to add custom inalloca handling
7772         // in the various CC lowering callbacks.
7773         Flags.setByVal();
7774       }
7775       if (Args[i].IsByVal || Args[i].IsInAlloca) {
7776         PointerType *Ty = cast<PointerType>(Args[i].Ty);
7777         Type *ElementTy = Ty->getElementType();
7778         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
7779         // For ByVal, the alignment should come from the frontend. The backend
7780         // will guess if it is absent, but there are cases it cannot get right.
7781         unsigned FrameAlign;
7782         if (Args[i].Alignment)
7783           FrameAlign = Args[i].Alignment;
7784         else
7785           FrameAlign = getByValTypeAlignment(ElementTy, DL);
7786         Flags.setByValAlign(FrameAlign);
7787       }
7788       if (Args[i].IsNest)
7789         Flags.setNest();
7790       if (NeedsRegBlock)
7791         Flags.setInConsecutiveRegs();
7792       Flags.setOrigAlign(OriginalAlignment);
7793 
7794       MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
7795       unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
7796       SmallVector<SDValue, 4> Parts(NumParts);
7797       ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
7798 
7799       if (Args[i].IsSExt)
7800         ExtendKind = ISD::SIGN_EXTEND;
7801       else if (Args[i].IsZExt)
7802         ExtendKind = ISD::ZERO_EXTEND;
7803 
7804       // Conservatively only handle 'returned' on non-vectors for now
7805       if (Args[i].IsReturned && !Op.getValueType().isVector()) {
7806         assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
7807                "unexpected use of 'returned'");
7808         // Before passing 'returned' to the target lowering code, ensure that
7809         // either the register MVT and the actual EVT are the same size or that
7810         // the return value and argument are extended in the same way; in these
7811         // cases it's safe to pass the argument register value unchanged as the
7812         // return register value (although it's at the target's option whether
7813         // to do so)
7814         // TODO: allow code generation to take advantage of partially preserved
7815         // registers rather than clobbering the entire register when the
7816         // parameter extension method is not compatible with the return
7817         // extension method
7818         if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
7819             (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
7820              CLI.RetZExt == Args[i].IsZExt))
7821           Flags.setReturned();
7822       }
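      // Hypothetical 'returned' example (not from this file): given
      //   declare i8* @memset(i8* returned, i32, i64)
      // the first argument register still holds the return value when the
      // call completes, so the target may reuse it instead of copying from
      // the return register.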
7823 
7824       getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
7825                      CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
7826 
7827       for (unsigned j = 0; j != NumParts; ++j) {
7828         // If this isn't the first piece, the alignment must be 1.
7829         ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
7830                                i < CLI.NumFixedArgs,
7831                                i, j*Parts[j].getValueType().getStoreSize());
7832         if (NumParts > 1 && j == 0)
7833           MyFlags.Flags.setSplit();
7834         else if (j != 0) {
7835           MyFlags.Flags.setOrigAlign(1);
7836           if (j == NumParts - 1)
7837             MyFlags.Flags.setSplitEnd();
7838         }
7839 
7840         CLI.Outs.push_back(MyFlags);
7841         CLI.OutVals.push_back(Parts[j]);
7842       }
7843 
7844       if (NeedsRegBlock && Value == NumValues - 1)
7845         CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
7846     }
7847   }
7848 
7849   SmallVector<SDValue, 4> InVals;
7850   CLI.Chain = LowerCall(CLI, InVals);
7851 
7852   // Update CLI.InVals for use outside of this function.
7853   CLI.InVals = InVals;
7854 
7855   // Verify that the target's LowerCall behaved as expected.
7856   assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
7857          "LowerCall didn't return a valid chain!");
7858   assert((!CLI.IsTailCall || InVals.empty()) &&
7859          "LowerCall emitted a return value for a tail call!");
7860   assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
7861          "LowerCall didn't emit the correct number of values!");
7862 
7863   // For a tail call, the return value is merely live-out and there aren't
7864   // any nodes in the DAG representing it. Return a special value to
7865   // indicate that a tail call has been emitted and no more Instructions
7866   // should be processed in the current block.
7867   if (CLI.IsTailCall) {
7868     CLI.DAG.setRoot(CLI.Chain);
7869     return std::make_pair(SDValue(), SDValue());
7870   }
7871 
7872 #ifndef NDEBUG
7873   for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
7874     assert(InVals[i].getNode() && "LowerCall emitted a null value!");
7875     assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
7876            "LowerCall emitted a value with the wrong type!");
7877   }
7878 #endif
7879 
7880   SmallVector<SDValue, 4> ReturnValues;
7881   if (!CanLowerReturn) {
7882     // The instruction result is the result of loading from the
7883     // hidden sret parameter.
7884     SmallVector<EVT, 1> PVTs;
7885     Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
7886 
7887     ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
7888     assert(PVTs.size() == 1 && "Pointers should fit in one register");
7889     EVT PtrVT = PVTs[0];
7890 
7891     unsigned NumValues = RetTys.size();
7892     ReturnValues.resize(NumValues);
7893     SmallVector<SDValue, 4> Chains(NumValues);
7894 
7895     // An aggregate return value cannot wrap around the address space, so
7896     // offsets to its parts don't wrap either.
7897     SDNodeFlags Flags;
7898     Flags.setNoUnsignedWrap(true);
7899 
7900     for (unsigned i = 0; i < NumValues; ++i) {
7901       SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
7902                                     CLI.DAG.getConstant(Offsets[i], CLI.DL,
7903                                                         PtrVT), &Flags);
7904       SDValue L = CLI.DAG.getLoad(
7905           RetTys[i], CLI.DL, CLI.Chain, Add,
7906           MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
7907                                             DemoteStackIdx, Offsets[i]),
7908           /* Alignment = */ 1);
7909       ReturnValues[i] = L;
7910       Chains[i] = L.getValue(1);
7911     }
7912 
7913     CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
7914   } else {
7915     // Collect the legal value parts into potentially illegal values
7916     // that correspond to the original function's return values.
7917     Optional<ISD::NodeType> AssertOp;
7918     if (CLI.RetSExt)
7919       AssertOp = ISD::AssertSext;
7920     else if (CLI.RetZExt)
7921       AssertOp = ISD::AssertZext;
7922     unsigned CurReg = 0;
7923     for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7924       EVT VT = RetTys[I];
7925       MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7926       unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7927 
7928       ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
7929                                               NumRegs, RegisterVT, VT, nullptr,
7930                                               AssertOp));
7931       CurReg += NumRegs;
7932     }
7933 
7934     // For a function returning void, there is no return value. We can't create
7935     // such a node, so we just return a null return value; nothing will
7936     // actually look at the value.
7937     if (ReturnValues.empty())
7938       return std::make_pair(SDValue(), CLI.Chain);
7939   }
7940 
7941   SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
7942                                 CLI.DAG.getVTList(RetTys), ReturnValues);
7943   return std::make_pair(Res, CLI.Chain);
7944 }
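// A minimal sketch of how a call site typically populates CallLoweringInfo
// before invoking LowerCallTo (illustrative; the exact setters used by any
// given caller may differ):
//   TargetLowering::CallLoweringInfo CLI(DAG);
//   CLI.setDebugLoc(dl)
//       .setChain(Chain)
//       .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
//   std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
//   // Result.first: return value; Result.second: output chain.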
7945 
7946 void TargetLowering::LowerOperationWrapper(SDNode *N,
7947                                            SmallVectorImpl<SDValue> &Results,
7948                                            SelectionDAG &DAG) const {
7949   if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
7950     Results.push_back(Res);
7951 }
7952 
7953 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7954   llvm_unreachable("LowerOperation not implemented for this target!");
7955 }
7956 
7957 void
7958 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
7959   SDValue Op = getNonRegisterValue(V);
7960   assert((Op.getOpcode() != ISD::CopyFromReg ||
7961           cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
7962          "Copy from a reg to the same reg!");
7963   assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
7964 
7965   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7966   RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
7967                    V->getType());
7968   SDValue Chain = DAG.getEntryNode();
7969 
7970   ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
7971                               FuncInfo.PreferredExtendType.end())
7972                                  ? ISD::ANY_EXTEND
7973                                  : FuncInfo.PreferredExtendType[V];
7974   RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
7975   PendingExports.push_back(Chain);
7976 }
7977 
7978 #include "llvm/CodeGen/SelectionDAGISel.h"
7979 
7980 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
7981 /// entry block, return true.  This includes arguments used by switches, since
7982 /// the switch may expand into multiple basic blocks.
7983 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
7984   // With FastISel active, we may be splitting blocks, so force creation
7985   // of virtual registers for all non-dead arguments.
7986   if (FastISel)
7987     return A->use_empty();
7988 
7989   const BasicBlock &Entry = A->getParent()->front();
7990   for (const User *U : A->users())
7991     if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
7992       return false;  // Use not in entry block.
7993 
7994   return true;
7995 }
7996 
7997 typedef DenseMap<const Argument *,
7998                  std::pair<const AllocaInst *, const StoreInst *>>
7999     ArgCopyElisionMapTy;
8000 
8001 /// Scan the entry block of the function in FuncInfo for arguments that look
8002 /// like copies into a local alloca. Record any copied arguments in
8003 /// ArgCopyElisionCandidates.
8004 static void
8005 findArgumentCopyElisionCandidates(const DataLayout &DL,
8006                                   FunctionLoweringInfo *FuncInfo,
8007                                   ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
8008   // Record the state of every static alloca used in the entry block. Argument
8009   // allocas are all used in the entry block, so we need approximately as many
8010   // entries as we have arguments.
8011   enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
8012   SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
8013   unsigned NumArgs = FuncInfo->Fn->arg_size();
8014   StaticAllocas.reserve(NumArgs * 2);
8015 
8016   auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
8017     if (!V)
8018       return nullptr;
8019     V = V->stripPointerCasts();
8020     const auto *AI = dyn_cast<AllocaInst>(V);
8021     if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
8022       return nullptr;
8023     auto Iter = StaticAllocas.insert({AI, Unknown});
8024     return &Iter.first->second;
8025   };
8026 
8027   // Look for stores of arguments to static allocas. Look through bitcasts and
8028   // GEPs to handle type coercions, as long as the alloca is fully initialized
8029   // by the store. Any non-store use of an alloca escapes it and any subsequent
8030   // unanalyzed store might write it.
8031   // FIXME: Handle structs initialized with multiple stores.
8032   for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
8033     // Look for stores, and handle non-store uses conservatively.
8034     const auto *SI = dyn_cast<StoreInst>(&I);
8035     if (!SI) {
8036       // We will look through cast uses, so ignore them completely.
8037       if (I.isCast())
8038         continue;
8039       // Ignore debug info intrinsics, they don't escape or store to allocas.
8040       if (isa<DbgInfoIntrinsic>(I))
8041         continue;
8042       // This is an unknown instruction. Assume it escapes or writes to all
8043       // static alloca operands.
8044       for (const Use &U : I.operands()) {
8045         if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
8046           *Info = StaticAllocaInfo::Clobbered;
8047       }
8048       continue;
8049     }
8050 
8051     // If the stored value is a static alloca, mark it as escaped.
8052     if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
8053       *Info = StaticAllocaInfo::Clobbered;
8054 
8055     // Check if the destination is a static alloca.
8056     const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
8057     StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
8058     if (!Info)
8059       continue;
8060     const AllocaInst *AI = cast<AllocaInst>(Dst);
8061 
8062     // Skip allocas that have been initialized or clobbered.
8063     if (*Info != StaticAllocaInfo::Unknown)
8064       continue;
8065 
8066     // Check if the stored value is an argument, and that this store fully
8067     // initializes the alloca. Don't elide copies from the same argument twice.
8068     const Value *Val = SI->getValueOperand()->stripPointerCasts();
8069     const auto *Arg = dyn_cast<Argument>(Val);
8070     if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
8071         Arg->getType()->isEmptyTy() ||
8072         DL.getTypeStoreSize(Arg->getType()) !=
8073             DL.getTypeAllocSize(AI->getAllocatedType()) ||
8074         ArgCopyElisionCandidates.count(Arg)) {
8075       *Info = StaticAllocaInfo::Clobbered;
8076       continue;
8077     }
8078 
8079     DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI << '\n');
8080 
8081     // Mark this alloca and store for argument copy elision.
8082     *Info = StaticAllocaInfo::Elidable;
8083     ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
8084 
8085     // Stop scanning if we've seen all arguments. This will happen early in -O0
8086     // builds, which is useful, because -O0 builds have large entry blocks and
8087     // many allocas.
8088     if (ArgCopyElisionCandidates.size() == NumArgs)
8089       break;
8090   }
8091 }
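// An illustrative candidate (assumed IR, not from the original source): in
// unoptimized output such as
//   define void @f(i64 %x) {
//   entry:
//     %x.addr = alloca i64
//     store i64 %x, i64* %x.addr
// the store fully initializes %x.addr, so if the target passes %x on the
// stack, the argument's fixed stack slot can stand in for %x.addr and the
// store can be elided.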
8092 
8093 /// Try to elide argument copies from memory into a local alloca. Succeeds if
8094 /// ArgVal is a load from a suitable fixed stack object.
8095 static void tryToElideArgumentCopy(
8096     FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
8097     DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
8098     SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
8099     ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
8100     SDValue ArgVal, bool &ArgHasUses) {
8101   // Check if this is a load from a fixed stack object.
8102   auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
8103   if (!LNode)
8104     return;
8105   auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
8106   if (!FINode)
8107     return;
8108 
8109   // Check that the fixed stack object is the right size and alignment.
8110   // Look at the alignment that the user wrote on the alloca instead of looking
8111   // at the stack object.
8112   auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
8113   assert(ArgCopyIter != ArgCopyElisionCandidates.end());
8114   const AllocaInst *AI = ArgCopyIter->second.first;
8115   int FixedIndex = FINode->getIndex();
8116   int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
8117   int OldIndex = AllocaIndex;
8118   MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
8119   if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
8120     DEBUG(dbgs() << "  argument copy elision failed due to bad fixed stack "
8121                     "object size\n");
8122     return;
8123   }
8124   unsigned RequiredAlignment = AI->getAlignment();
8125   if (!RequiredAlignment) {
8126     RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
8127         AI->getAllocatedType());
8128   }
8129   if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
8130     DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
8131                     "greater than stack argument alignment ("
8132                  << RequiredAlignment << " vs "
8133                  << MFI.getObjectAlignment(FixedIndex) << ")\n");
8134     return;
8135   }
8136 
8137   // Perform the elision. Delete the old stack object and replace its only use
8138   // in the variable info map. Mark the stack object as mutable.
8139   DEBUG({
8140     dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
8141            << "  Replacing frame index " << OldIndex << " with " << FixedIndex
8142            << '\n';
8143   });
8144   MFI.RemoveStackObject(OldIndex);
8145   MFI.setIsImmutableObjectIndex(FixedIndex, false);
8146   AllocaIndex = FixedIndex;
8147   ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
8148   Chains.push_back(ArgVal.getValue(1));
8149 
8150   // Avoid emitting code for the store implementing the copy.
8151   const StoreInst *SI = ArgCopyIter->second.second;
8152   ElidedArgCopyInstrs.insert(SI);
8153 
8154   // Check for uses of the argument again so that we can avoid exporting ArgVal
8155   // if it isn't used by anything other than the store.
8156   for (const Value *U : Arg.users()) {
8157     if (U != SI) {
8158       ArgHasUses = true;
8159       break;
8160     }
8161   }
8162 }
8163 
8164 void SelectionDAGISel::LowerArguments(const Function &F) {
8165   SelectionDAG &DAG = SDB->DAG;
8166   SDLoc dl = SDB->getCurSDLoc();
8167   const DataLayout &DL = DAG.getDataLayout();
8168   SmallVector<ISD::InputArg, 16> Ins;
8169 
8170   if (!FuncInfo->CanLowerReturn) {
8171     // Put in an sret pointer parameter before all the other parameters.
8172     SmallVector<EVT, 1> ValueVTs;
8173     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8174                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
8175 
8176     // NOTE: We assume that a pointer never breaks down into more than one
8177     // VT or more than one register.
8178     ISD::ArgFlagsTy Flags;
8179     Flags.setSRet();
8180     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
8181     ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
8182                          ISD::InputArg::NoArgIndex, 0);
8183     Ins.push_back(RetArg);
8184   }
8185 
8186   // Look for stores of arguments to static allocas. Mark such arguments with a
8187   // flag to ask the target to give us the memory location of that argument if
8188   // available.
8189   ArgCopyElisionMapTy ArgCopyElisionCandidates;
8190   findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);
8191 
8192   // Set up the incoming argument description vector.
8193   unsigned Idx = 0;
8194   for (const Argument &Arg : F.args()) {
8195     ++Idx;
8196     SmallVector<EVT, 4> ValueVTs;
8197     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8198     bool isArgValueUsed = !Arg.use_empty();
8199     unsigned PartBase = 0;
8200     Type *FinalType = Arg.getType();
8201     if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
8202       FinalType = cast<PointerType>(FinalType)->getElementType();
8203     bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
8204         FinalType, F.getCallingConv(), F.isVarArg());
8205     for (unsigned Value = 0, NumValues = ValueVTs.size();
8206          Value != NumValues; ++Value) {
8207       EVT VT = ValueVTs[Value];
8208       Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
8209       ISD::ArgFlagsTy Flags;
8210       unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
8211 
8212       if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
8213         Flags.setZExt();
8214       if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
8215         Flags.setSExt();
8216       if (F.getAttributes().hasAttribute(Idx, Attribute::InReg)) {
8217         // Under the vectorcall calling convention, a structure that is
8218         // passed InReg must be an HVA (homogeneous vector aggregate).
8219         if (F.getCallingConv() == CallingConv::X86_VectorCall &&
8220             isa<StructType>(Arg.getType())) {
8221           // The first value of the structure is marked with HvaStart.
8222           if (0 == Value)
8223             Flags.setHvaStart();
8224           Flags.setHva();
8225         }
8226         // Set the InReg flag.
8227         Flags.setInReg();
8228       }
8229       if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
8230         Flags.setSRet();
8231       if (F.getAttributes().hasAttribute(Idx, Attribute::SwiftSelf))
8232         Flags.setSwiftSelf();
8233       if (F.getAttributes().hasAttribute(Idx, Attribute::SwiftError))
8234         Flags.setSwiftError();
8235       if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
8236         Flags.setByVal();
8237       if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
8238         Flags.setInAlloca();
8239         // Set the byval flag for CCAssignFn callbacks that don't know about
8240         // inalloca.  This way we can know how many bytes we should've allocated
8241         // and how many bytes a callee cleanup function will pop.  If we port
8242         // inalloca to more targets, we'll have to add custom inalloca handling
8243         // in the various CC lowering callbacks.
8244         Flags.setByVal();
8245       }
8246       if (F.getCallingConv() == CallingConv::X86_INTR) {
8247         // x86 interrupts pass the frame (1st parameter) by value on the stack.
8248         if (Idx == 1)
8249           Flags.setByVal();
8250       }
8251       if (Flags.isByVal() || Flags.isInAlloca()) {
8252         PointerType *Ty = cast<PointerType>(Arg.getType());
8253         Type *ElementTy = Ty->getElementType();
8254         Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
8255         // For ByVal, the alignment should be passed from the frontend. The
8256         // backend will guess if it is absent, but there are cases it cannot get right.
8257         unsigned FrameAlign;
8258         if (F.getParamAlignment(Idx))
8259           FrameAlign = F.getParamAlignment(Idx);
8260         else
8261           FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
8262         Flags.setByValAlign(FrameAlign);
8263       }
8264       if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
8265         Flags.setNest();
8266       if (NeedsRegBlock)
8267         Flags.setInConsecutiveRegs();
8268       Flags.setOrigAlign(OriginalAlignment);
8269       if (ArgCopyElisionCandidates.count(&Arg))
8270         Flags.setCopyElisionCandidate();
8271 
8272       MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8273       unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
8274       for (unsigned i = 0; i != NumRegs; ++i) {
8275         ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
8276                               Idx-1, PartBase+i*RegisterVT.getStoreSize());
8277         if (NumRegs > 1 && i == 0)
8278           MyFlags.Flags.setSplit();
8279         // If this isn't the first piece, the alignment must be 1.
8280         else if (i > 0) {
8281           MyFlags.Flags.setOrigAlign(1);
8282           if (i == NumRegs - 1)
8283             MyFlags.Flags.setSplitEnd();
8284         }
8285         Ins.push_back(MyFlags);
8286       }
8287       if (NeedsRegBlock && Value == NumValues - 1)
8288         Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
8289       PartBase += VT.getStoreSize();
8290     }
8291   }
8292 
8293   // Call the target to set up the argument values.
8294   SmallVector<SDValue, 8> InVals;
8295   SDValue NewRoot = TLI->LowerFormalArguments(
8296       DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
8297 
8298   // Verify that the target's LowerFormalArguments behaved as expected.
8299   assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
8300          "LowerFormalArguments didn't return a valid chain!");
8301   assert(InVals.size() == Ins.size() &&
8302          "LowerFormalArguments didn't emit the correct number of values!");
8303   DEBUG({
8304       for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
8305         assert(InVals[i].getNode() &&
8306                "LowerFormalArguments emitted a null value!");
8307         assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
8308                "LowerFormalArguments emitted a value with the wrong type!");
8309       }
8310     });
8311 
8312   // Update the DAG with the new chain value resulting from argument lowering.
8313   DAG.setRoot(NewRoot);
8314 
8315   // Set up the argument values.
8316   unsigned i = 0;
8317   Idx = 0;
8318   if (!FuncInfo->CanLowerReturn) {
8319     // Create a virtual register for the sret pointer, and put in a copy
8320     // from the sret argument into it.
8321     SmallVector<EVT, 1> ValueVTs;
8322     ComputeValueVTs(*TLI, DAG.getDataLayout(),
8323                     PointerType::getUnqual(F.getReturnType()), ValueVTs);
8324     MVT VT = ValueVTs[0].getSimpleVT();
8325     MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8326     Optional<ISD::NodeType> AssertOp = None;
8327     SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
8328                                         RegVT, VT, nullptr, AssertOp);
8329 
8330     MachineFunction& MF = SDB->DAG.getMachineFunction();
8331     MachineRegisterInfo& RegInfo = MF.getRegInfo();
8332     unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
8333     FuncInfo->DemoteRegister = SRetReg;
8334     NewRoot =
8335         SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
8336     DAG.setRoot(NewRoot);
8337 
8338     // i indexes lowered arguments.  Bump it past the hidden sret argument.
8339     // Idx indexes LLVM arguments.  Don't touch it.
8340     ++i;
8341   }
8342 
8343   SmallVector<SDValue, 4> Chains;
8344   DenseMap<int, int> ArgCopyElisionFrameIndexMap;
8345   for (const Argument &Arg : F.args()) {
8346     ++Idx;
8347     SmallVector<SDValue, 4> ArgValues;
8348     SmallVector<EVT, 4> ValueVTs;
8349     ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
8350     unsigned NumValues = ValueVTs.size();
8351     if (NumValues == 0)
8352       continue;
8353 
8354     bool ArgHasUses = !Arg.use_empty();
8355 
8356     // Elide the copying store if the target loaded this argument from a
8357     // suitable fixed stack object.
8358     if (Ins[i].Flags.isCopyElisionCandidate()) {
8359       tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
8360                              ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
8361                              InVals[i], ArgHasUses);
8362     }
8363 
8364     // If this argument is unused, remember its value anyway; it may be needed
8365     // to generate debugging information.
8366     bool isSwiftErrorArg =
8367         TLI->supportSwiftError() &&
8368         F.getAttributes().hasAttribute(Idx, Attribute::SwiftError);
8369     if (!ArgHasUses && !isSwiftErrorArg) {
8370       SDB->setUnusedArgValue(&Arg, InVals[i]);
8371 
8372       // Also remember any frame index for use in FastISel.
8373       if (FrameIndexSDNode *FI =
8374           dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
8375         FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8376     }
8377 
8378     for (unsigned Val = 0; Val != NumValues; ++Val) {
8379       EVT VT = ValueVTs[Val];
8380       MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
8381       unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
8382 
8383       // Even an apparently 'unused' swifterror argument needs to be returned. So
8384       // we do generate a copy for it that can be used on return from the
8385       // function.
8386       if (ArgHasUses || isSwiftErrorArg) {
8387         Optional<ISD::NodeType> AssertOp;
8388         if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
8389           AssertOp = ISD::AssertSext;
8390         else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
8391           AssertOp = ISD::AssertZext;
8392 
8393         ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
8394                                              PartVT, VT, nullptr, AssertOp));
8395       }
8396 
8397       i += NumParts;
8398     }
8399 
8400     // We don't need to do anything else for unused arguments.
8401     if (ArgValues.empty())
8402       continue;
8403 
8404     // Note down frame index.
8405     if (FrameIndexSDNode *FI =
8406         dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
8407       FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8408 
8409     SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
8410                                      SDB->getCurSDLoc());
8411 
8412     SDB->setValue(&Arg, Res);
8413     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
8414       if (LoadSDNode *LNode =
8415           dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
8416         if (FrameIndexSDNode *FI =
8417             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
8418           FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
8419     }
8420 
8421     // Update the SwiftErrorVRegDefMap.
8422     if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
8423       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
8424       if (TargetRegisterInfo::isVirtualRegister(Reg))
8425         FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB,
8426                                            FuncInfo->SwiftErrorArg, Reg);
8427     }
8428 
8429     // If this argument is live outside of the entry block, insert a copy from
8430     // wherever we got it to the vreg that other BB's will reference it as.
8431     if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
8432       // If we can, though, try to skip creating an unnecessary vreg.
8433       // FIXME: This isn't very clean... it would be nice to make this more
8434       // general.  It's also subtly incompatible with the hacks FastISel
8435       // uses with vregs.
8436       unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
8437       if (TargetRegisterInfo::isVirtualRegister(Reg)) {
8438         FuncInfo->ValueMap[&Arg] = Reg;
8439         continue;
8440       }
8441     }
8442     if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
8443       FuncInfo->InitializeRegForValue(&Arg);
8444       SDB->CopyToExportRegsIfNeeded(&Arg);
8445     }
8446   }
8447 
8448   if (!Chains.empty()) {
8449     Chains.push_back(NewRoot);
8450     NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
8451   }
8452 
8453   DAG.setRoot(NewRoot);
8454 
8455   assert(i == InVals.size() && "Argument register count mismatch!");
8456 
8457   // If any argument copy elisions occurred and we have debug info, update the
8458   // stale frame indices used in the dbg.declare variable info table.
8459   MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
8460   if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
8461     for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
8462       auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
8463       if (I != ArgCopyElisionFrameIndexMap.end())
8464         VI.Slot = I->second;
8465     }
8466   }
8467 
8468   // Finally, if the target has anything special to do, allow it to do so.
8469   EmitFunctionEntryCode();
8470 }
8471 
8472 /// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
8473 /// ensure constants are generated when needed.  Remember the virtual registers
8474 /// that need to be added to the Machine PHI nodes as input.  We cannot just
8475 /// directly add them, because expansion might result in multiple MBB's for one
8476 /// BB.  As such, the start of the BB might correspond to a different MBB than
8477 /// the end.
8478 ///
8479 void
8480 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
8481   const TerminatorInst *TI = LLVMBB->getTerminator();
8482 
8483   SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
8484 
8485   // Check PHI nodes in successors that expect a value to be available from this
8486   // block.
8487   for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
8488     const BasicBlock *SuccBB = TI->getSuccessor(succ);
8489     if (!isa<PHINode>(SuccBB->begin())) continue;
8490     MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
8491 
8492     // If this terminator has multiple identical successors (common for
8493     // switches), only handle each succ once.
8494     if (!SuccsHandled.insert(SuccMBB).second)
8495       continue;
8496 
8497     MachineBasicBlock::iterator MBBI = SuccMBB->begin();
8498 
8499     // At this point we know that there is a 1-1 correspondence between LLVM PHI
8500     // nodes and Machine PHI nodes, but the incoming operands have not been
8501     // emitted yet.
8502     for (BasicBlock::const_iterator I = SuccBB->begin();
8503          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
8504       // Ignore dead PHIs.
8505       if (PN->use_empty()) continue;
8506 
8507       // Skip empty types
8508       if (PN->getType()->isEmptyTy())
8509         continue;
8510 
8511       unsigned Reg;
8512       const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
8513 
8514       if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
8515         unsigned &RegOut = ConstantsOut[C];
8516         if (RegOut == 0) {
8517           RegOut = FuncInfo.CreateRegs(C->getType());
8518           CopyValueToVirtualRegister(C, RegOut);
8519         }
8520         Reg = RegOut;
8521       } else {
8522         DenseMap<const Value *, unsigned>::iterator I =
8523           FuncInfo.ValueMap.find(PHIOp);
8524         if (I != FuncInfo.ValueMap.end())
8525           Reg = I->second;
8526         else {
8527           assert(isa<AllocaInst>(PHIOp) &&
8528                  FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
8529                  "Didn't codegen value into a register!??");
8530           Reg = FuncInfo.CreateRegs(PHIOp->getType());
8531           CopyValueToVirtualRegister(PHIOp, Reg);
8532         }
8533       }
8534 
8535       // Remember that this register needs to be added to the machine PHI node as
8536       // the input for this MBB.
8537       SmallVector<EVT, 4> ValueVTs;
8538       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8539       ComputeValueVTs(TLI, DAG.getDataLayout(), PN->getType(), ValueVTs);
8540       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
8541         EVT VT = ValueVTs[vti];
8542         unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
8543         for (unsigned i = 0, e = NumRegisters; i != e; ++i)
8544           FuncInfo.PHINodesToUpdate.push_back(
8545               std::make_pair(&*MBBI++, Reg + i));
8546         Reg += NumRegisters;
8547       }
8548     }
8549   }
8550 
8551   ConstantsOut.clear();
8552 }
8553 
8554 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
8555 /// if SuccMBB is null.
8556 MachineBasicBlock *
8557 SelectionDAGBuilder::StackProtectorDescriptor::
8558 AddSuccessorMBB(const BasicBlock *BB,
8559                 MachineBasicBlock *ParentMBB,
8560                 bool IsLikely,
8561                 MachineBasicBlock *SuccMBB) {
8562   // If SuccBB has not been created yet, create it.
8563   if (!SuccMBB) {
8564     MachineFunction *MF = ParentMBB->getParent();
8565     MachineFunction::iterator BBI(ParentMBB);
8566     SuccMBB = MF->CreateMachineBasicBlock(BB);
8567     MF->insert(++BBI, SuccMBB);
8568   }
8569   // Add it as a successor of ParentMBB.
8570   ParentMBB->addSuccessor(
8571       SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
8572   return SuccMBB;
8573 }
8574 
8575 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
8576   MachineFunction::iterator I(MBB);
8577   if (++I == FuncInfo.MF->end())
8578     return nullptr;
8579   return &*I;
8580 }
8581 
8582 /// During lowering, new call nodes can be created (e.g. for memset).
8583 /// Those will become new roots of the current DAG, but complications arise
8584 /// when they are tail calls. In such cases, the call lowering will update
8585 /// the root, but the builder still needs to know that a tail call has been
8586 /// lowered in order to avoid generating an additional return.
8587 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
8588   // A null node indicates that the call was lowered as a tail call.
8589   if (MaybeTC.getNode() != nullptr)
8590     DAG.setRoot(MaybeTC);
8591   else
8592     HasTailCall = true;
8593 }
8594 
8595 bool SelectionDAGBuilder::isDense(const CaseClusterVector &Clusters,
8596                                   const SmallVectorImpl<unsigned> &TotalCases,
8597                                   unsigned First, unsigned Last,
8598                                   unsigned Density) const {
8599   assert(Last >= First);
8600   assert(TotalCases[Last] >= TotalCases[First]);
8601 
8602   const APInt &LowCase = Clusters[First].Low->getValue();
8603   const APInt &HighCase = Clusters[Last].High->getValue();
8604   assert(LowCase.getBitWidth() == HighCase.getBitWidth());
8605 
8606   // FIXME: A range of consecutive cases has 100% density, but only requires one
8607   // comparison to lower. We should discriminate against such consecutive ranges
8608   // in jump tables.
8609 
8610   uint64_t Diff = (HighCase - LowCase).getLimitedValue((UINT64_MAX - 1) / 100);
8611   uint64_t Range = Diff + 1;
8612 
8613   uint64_t NumCases =
8614       TotalCases[Last] - (First == 0 ? 0 : TotalCases[First - 1]);
8615 
8616   assert(NumCases < UINT64_MAX / 100);
8617   assert(Range >= NumCases);
8618 
8619   return NumCases * 100 >= Range * Density;
8620 }
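// Worked example (illustrative): for case values {0, 1, 2, 100}, Range is
// 101 and NumCases is 4, so with Density = 40 the test above reads
// 4 * 100 >= 101 * 40, i.e. 400 >= 4040, which is false: the range is too
// sparse for a single jump table at a 40% minimum density.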
8621 
8622 static inline bool areJTsAllowed(const TargetLowering &TLI,
8623                                  const SwitchInst *SI) {
8624   const Function *Fn = SI->getParent()->getParent();
8625   if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
8626     return false;
8627 
8628   return TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
8629          TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
8630 }
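// Jump tables can also be disabled per function; illustrative IR (not from
// this file):
//   define void @f(i32 %x) "no-jump-tables"="true" { ... }
// makes areJTsAllowed return false regardless of target support for BR_JT.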
8631 
8632 bool SelectionDAGBuilder::buildJumpTable(const CaseClusterVector &Clusters,
8633                                          unsigned First, unsigned Last,
8634                                          const SwitchInst *SI,
8635                                          MachineBasicBlock *DefaultMBB,
8636                                          CaseCluster &JTCluster) {
8637   assert(First <= Last);
8638 
8639   auto Prob = BranchProbability::getZero();
8640   unsigned NumCmps = 0;
8641   std::vector<MachineBasicBlock*> Table;
8642   DenseMap<MachineBasicBlock*, BranchProbability> JTProbs;
8643 
8644   // Initialize probabilities in JTProbs.
8645   for (unsigned I = First; I <= Last; ++I)
8646     JTProbs[Clusters[I].MBB] = BranchProbability::getZero();
8647 
8648   for (unsigned I = First; I <= Last; ++I) {
8649     assert(Clusters[I].Kind == CC_Range);
8650     Prob += Clusters[I].Prob;
8651     const APInt &Low = Clusters[I].Low->getValue();
8652     const APInt &High = Clusters[I].High->getValue();
8653     NumCmps += (Low == High) ? 1 : 2;
8654     if (I != First) {
8655       // Fill the gap between this and the previous cluster.
8656       const APInt &PreviousHigh = Clusters[I - 1].High->getValue();
8657       assert(PreviousHigh.slt(Low));
8658       uint64_t Gap = (Low - PreviousHigh).getLimitedValue() - 1;
8659       for (uint64_t J = 0; J < Gap; J++)
8660         Table.push_back(DefaultMBB);
8661     }
8662     uint64_t ClusterSize = (High - Low).getLimitedValue() + 1;
8663     for (uint64_t J = 0; J < ClusterSize; ++J)
8664       Table.push_back(Clusters[I].MBB);
8665     JTProbs[Clusters[I].MBB] += Clusters[I].Prob;
8666   }
8667 
8668   unsigned NumDests = JTProbs.size();
8669   if (isSuitableForBitTests(NumDests, NumCmps,
8670                             Clusters[First].Low->getValue(),
8671                             Clusters[Last].High->getValue())) {
8672     // Clusters[First..Last] should be lowered as bit tests instead.
8673     return false;
8674   }
8675 
8676   // Create the MBB that will load from and jump through the table.
8677   // Note: We create it here, but it's not inserted into the function yet.
8678   MachineFunction *CurMF = FuncInfo.MF;
8679   MachineBasicBlock *JumpTableMBB =
8680       CurMF->CreateMachineBasicBlock(SI->getParent());
8681 
8682   // Add successors. Note: use table order for determinism.
8683   SmallPtrSet<MachineBasicBlock *, 8> Done;
8684   for (MachineBasicBlock *Succ : Table) {
8685     if (Done.count(Succ))
8686       continue;
8687     addSuccessorWithProb(JumpTableMBB, Succ, JTProbs[Succ]);
8688     Done.insert(Succ);
8689   }
8690   JumpTableMBB->normalizeSuccProbs();
8691 
8692   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8693   unsigned JTI = CurMF->getOrCreateJumpTableInfo(TLI.getJumpTableEncoding())
8694                      ->createJumpTableIndex(Table);
8695 
8696   // Set up the jump table info.
8697   JumpTable JT(-1U, JTI, JumpTableMBB, nullptr);
8698   JumpTableHeader JTH(Clusters[First].Low->getValue(),
8699                       Clusters[Last].High->getValue(), SI->getCondition(),
8700                       nullptr, false);
8701   JTCases.emplace_back(std::move(JTH), std::move(JT));
8702 
8703   JTCluster = CaseCluster::jumpTable(Clusters[First].Low, Clusters[Last].High,
8704                                      JTCases.size() - 1, Prob);
8705   return true;
8706 }
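// Illustrative table layout (assumed case values, not from the original
// source): clusters for cases 1, 2 and 5 targeting %bb1, %bb2 and %bb5 yield
// the table [bb1, bb2, default, default, bb5], the two-entry gap between 2
// and 5 having been filled with DefaultMBB by the loop above.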
8707 
8708 void SelectionDAGBuilder::findJumpTables(CaseClusterVector &Clusters,
8709                                          const SwitchInst *SI,
8710                                          MachineBasicBlock *DefaultMBB) {
8711 #ifndef NDEBUG
8712   // Clusters must be non-empty, sorted, and only contain Range clusters.
8713   assert(!Clusters.empty());
8714   for (CaseCluster &C : Clusters)
8715     assert(C.Kind == CC_Range);
8716   for (unsigned i = 1, e = Clusters.size(); i < e; ++i)
8717     assert(Clusters[i - 1].High->getValue().slt(Clusters[i].Low->getValue()));
8718 #endif
8719 
8720   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8721   if (!areJTsAllowed(TLI, SI))
8722     return;
8723 
8724   const bool OptForSize = DefaultMBB->getParent()->getFunction()->optForSize();
8725 
8726   const int64_t N = Clusters.size();
8727   const unsigned MinJumpTableEntries = TLI.getMinimumJumpTableEntries();
8728   const unsigned SmallNumberOfEntries = MinJumpTableEntries / 2;
8729   const unsigned MaxJumpTableSize =
8730                    OptForSize || TLI.getMaximumJumpTableSize() == 0
8731                    ? UINT_MAX : TLI.getMaximumJumpTableSize();
8732 
8733   if (N < 2 || N < MinJumpTableEntries)
8734     return;
8735 
8736   // TotalCases[i]: Total number of cases in Clusters[0..i].
8737   SmallVector<unsigned, 8> TotalCases(N);
8738   for (unsigned i = 0; i < N; ++i) {
8739     const APInt &Hi = Clusters[i].High->getValue();
8740     const APInt &Lo = Clusters[i].Low->getValue();
8741     TotalCases[i] = (Hi - Lo).getLimitedValue() + 1;
8742     if (i != 0)
8743       TotalCases[i] += TotalCases[i - 1];
8744   }
8745 
8746   const unsigned MinDensity =
8747     OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
8748 
8749   // Cheap case: the whole range may be suitable for jump table.
8750   unsigned JumpTableSize = (Clusters[N - 1].High->getValue() -
8751                             Clusters[0].Low->getValue())
8752                            .getLimitedValue(UINT_MAX - 1) + 1;
8753   if (JumpTableSize <= MaxJumpTableSize &&
8754       isDense(Clusters, TotalCases, 0, N - 1, MinDensity)) {
8755     CaseCluster JTCluster;
8756     if (buildJumpTable(Clusters, 0, N - 1, SI, DefaultMBB, JTCluster)) {
8757       Clusters[0] = JTCluster;
8758       Clusters.resize(1);
8759       return;
8760     }
8761   }
8762 
8763   // The algorithm below is not suitable for -O0.
8764   if (TM.getOptLevel() == CodeGenOpt::None)
8765     return;
8766 
8767   // Split Clusters into minimum number of dense partitions. The algorithm uses
8768   // the same idea as Kannan & Proebsting "Correction to 'Producing Good Code
8769   // for the Case Statement'" (1994), but builds the MinPartitions array in
8770   // reverse order to make it easier to reconstruct the partitions in ascending
8771   // order. In the choice between two optimal partitionings, it picks the one
8772   // which yields more jump tables.
8773 
8774   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
8775   SmallVector<unsigned, 8> MinPartitions(N);
8776   // LastElement[i] is the last element of the partition starting at i.
8777   SmallVector<unsigned, 8> LastElement(N);
8778   // PartitionsScore[i] is used to break ties when choosing between two
8779   // partitionings resulting in the same number of partitions.
8780   SmallVector<unsigned, 8> PartitionsScore(N);
8781   // For PartitionsScore, a small number of comparisons is considered as good as
8782   // a jump table and a single comparison is considered better than a jump
8783   // table.
8784   enum PartitionScores : unsigned {
8785     NoTable = 0,
8786     Table = 1,
8787     FewCases = 1,
8788     SingleCase = 2
8789   };
8790 
8791   // Base case: There is only one way to partition Clusters[N-1].
8792   MinPartitions[N - 1] = 1;
8793   LastElement[N - 1] = N - 1;
8794   PartitionsScore[N - 1] = PartitionScores::SingleCase;
8795 
8796   // Note: loop indexes are signed to avoid underflow.
8797   for (int64_t i = N - 2; i >= 0; i--) {
8798     // Find optimal partitioning of Clusters[i..N-1].
8799     // Baseline: Put Clusters[i] into a partition on its own.
8800     MinPartitions[i] = MinPartitions[i + 1] + 1;
8801     LastElement[i] = i;
8802     PartitionsScore[i] = PartitionsScore[i + 1] + PartitionScores::SingleCase;
8803 
8804     // Search for a solution that results in fewer partitions.
8805     for (int64_t j = N - 1; j > i; j--) {
8806       // Try building a partition from Clusters[i..j].
8807       JumpTableSize = (Clusters[j].High->getValue() -
8808                        Clusters[i].Low->getValue())
8809                       .getLimitedValue(UINT_MAX - 1) + 1;
8810       if (JumpTableSize <= MaxJumpTableSize &&
8811           isDense(Clusters, TotalCases, i, j, MinDensity)) {
8812         unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
8813         unsigned Score = j == N - 1 ? 0 : PartitionsScore[j + 1];
8814         int64_t NumEntries = j - i + 1;
8815 
8816         if (NumEntries == 1)
8817           Score += PartitionScores::SingleCase;
8818         else if (NumEntries <= SmallNumberOfEntries)
8819           Score += PartitionScores::FewCases;
8820         else if (NumEntries >= MinJumpTableEntries)
8821           Score += PartitionScores::Table;
8822 
8823         // If this leads to fewer partitions, or to the same number of
8824         // partitions with better score, it is a better partitioning.
8825         if (NumPartitions < MinPartitions[i] ||
8826             (NumPartitions == MinPartitions[i] && Score > PartitionsScore[i])) {
8827           MinPartitions[i] = NumPartitions;
8828           LastElement[i] = j;
8829           PartitionsScore[i] = Score;
8830         }
8831       }
8832     }
8833   }
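  // Informally, the loops above compute the recurrence (sketch notation,
  // with MinPartitions[N] taken as 0):
  //   MinPartitions[i] = min(1 + MinPartitions[i+1],
  //                          min over j > i with Clusters[i..j] dense of
  //                              1 + MinPartitions[j+1])
  // and PartitionsScore only breaks ties between equal partition counts.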
8834 
8835   // Iterate over the partitions, replacing some with jump tables in-place.
8836   unsigned DstIndex = 0;
8837   for (unsigned First = 0, Last; First < N; First = Last + 1) {
8838     Last = LastElement[First];
8839     assert(Last >= First);
8840     assert(DstIndex <= First);
8841     unsigned NumClusters = Last - First + 1;
8842 
8843     CaseCluster JTCluster;
8844     if (NumClusters >= MinJumpTableEntries &&
8845         buildJumpTable(Clusters, First, Last, SI, DefaultMBB, JTCluster)) {
8846       Clusters[DstIndex++] = JTCluster;
8847     } else {
8848       for (unsigned I = First; I <= Last; ++I)
8849         std::memmove(&Clusters[DstIndex++], &Clusters[I], sizeof(Clusters[I]));
8850     }
8851   }
8852   Clusters.resize(DstIndex);
8853 }
8854 
8855 bool SelectionDAGBuilder::rangeFitsInWord(const APInt &Low, const APInt &High) {
8856   // FIXME: Using the pointer type doesn't seem ideal.
8857   uint64_t BW = DAG.getDataLayout().getPointerSizeInBits();
8858   uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
8859   return Range <= BW;
8860 }
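// Example (illustrative): with 64-bit pointers, BW is 64, so cases spanning
// [10, 73] (Range = 64) fit in a word while [10, 74] (Range = 65) do not.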
8861 
8862 bool SelectionDAGBuilder::isSuitableForBitTests(unsigned NumDests,
8863                                                 unsigned NumCmps,
8864                                                 const APInt &Low,
8865                                                 const APInt &High) {
8866   // FIXME: I don't think NumCmps is the correct metric: a single case and a
8867   // range of cases both require only one branch to lower. Just looking at the
8868   // number of clusters and destinations should be enough to decide whether to
8869   // build bit tests.
8870 
8871   // To lower a range with bit tests, the range must fit the bitwidth of a
8872   // machine word.
8873   if (!rangeFitsInWord(Low, High))
8874     return false;
8875 
8876   // Decide whether it's profitable to lower this range with bit tests. Each
8877   // destination requires a bit test and branch, and there is an overall range
8878   // check branch. For a small number of clusters, separate comparisons might be
8879   // cheaper, and for many destinations, splitting the range might be better.
8880   return (NumDests == 1 && NumCmps >= 3) ||
8881          (NumDests == 2 && NumCmps >= 5) ||
8882          (NumDests == 3 && NumCmps >= 6);
8883 }
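// Example (illustrative): three single-value cases that all branch to one
// destination give NumDests == 1 and NumCmps == 3, which the heuristic above
// accepts; two destinations reached by only four comparisons do not qualify.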
8884 
8885 bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
8886                                         unsigned First, unsigned Last,
8887                                         const SwitchInst *SI,
8888                                         CaseCluster &BTCluster) {
8889   assert(First <= Last);
8890   if (First == Last)
8891     return false;
8892 
8893   BitVector Dests(FuncInfo.MF->getNumBlockIDs());
8894   unsigned NumCmps = 0;
8895   for (int64_t I = First; I <= Last; ++I) {
8896     assert(Clusters[I].Kind == CC_Range);
8897     Dests.set(Clusters[I].MBB->getNumber());
8898     NumCmps += (Clusters[I].Low == Clusters[I].High) ? 1 : 2;
8899   }
8900   unsigned NumDests = Dests.count();
8901 
8902   APInt Low = Clusters[First].Low->getValue();
8903   APInt High = Clusters[Last].High->getValue();
8904   assert(Low.slt(High));
8905 
8906   if (!isSuitableForBitTests(NumDests, NumCmps, Low, High))
8907     return false;
8908 
8909   APInt LowBound;
8910   APInt CmpRange;
8911 
8912   const int BitWidth = DAG.getTargetLoweringInfo()
8913                            .getPointerTy(DAG.getDataLayout())
8914                            .getSizeInBits();
8915   assert(rangeFitsInWord(Low, High) && "Case range must fit in bit mask!");
8916 
8917   // Check if the clusters cover a contiguous range such that no value in the
8918   // range will jump to the default statement.
8919   bool ContiguousRange = true;
8920   for (int64_t I = First + 1; I <= Last; ++I) {
8921     if (Clusters[I].Low->getValue() != Clusters[I - 1].High->getValue() + 1) {
8922       ContiguousRange = false;
8923       break;
8924     }
8925   }
8926 
8927   if (Low.isStrictlyPositive() && High.slt(BitWidth)) {
8928     // Optimize for the case where all the case values fit in a word without
8929     // having to subtract minValue; the subtraction can then be elided entirely.
8930     LowBound = APInt::getNullValue(Low.getBitWidth());
8931     CmpRange = High;
8932     ContiguousRange = false;
8933   } else {
8934     LowBound = Low;
8935     CmpRange = High - Low;
8936   }
8937 
8938   CaseBitsVector CBV;
8939   auto TotalProb = BranchProbability::getZero();
8940   for (unsigned i = First; i <= Last; ++i) {
8941     // Find the CaseBits for this destination.
8942     unsigned j;
8943     for (j = 0; j < CBV.size(); ++j)
8944       if (CBV[j].BB == Clusters[i].MBB)
8945         break;
8946     if (j == CBV.size())
8947       CBV.push_back(
8948           CaseBits(0, Clusters[i].MBB, 0, BranchProbability::getZero()));
8949     CaseBits *CB = &CBV[j];
8950 
8951     // Update Mask, Bits and ExtraProb.
8952     uint64_t Lo = (Clusters[i].Low->getValue() - LowBound).getZExtValue();
8953     uint64_t Hi = (Clusters[i].High->getValue() - LowBound).getZExtValue();
8954     assert(Hi >= Lo && Hi < 64 && "Invalid bit case!");
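    // E.g. for Lo == 2 and Hi == 4: -1ULL >> 61 == 0b111, shifted left by 2
    // sets mask bits 2..4 (0b11100).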
8955     CB->Mask |= (-1ULL >> (63 - (Hi - Lo))) << Lo;
8956     CB->Bits += Hi - Lo + 1;
8957     CB->ExtraProb += Clusters[i].Prob;
8958     TotalProb += Clusters[i].Prob;
8959   }
8960 
8961   BitTestInfo BTI;
8962   std::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
8963     // Sort by probability first, number of bits second.
8964     if (a.ExtraProb != b.ExtraProb)
8965       return a.ExtraProb > b.ExtraProb;
8966     return a.Bits > b.Bits;
8967   });
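  // Emitting the most probable masks first should keep the expected number of
  // executed bit tests and branches low.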
8968 
8969   for (auto &CB : CBV) {
8970     MachineBasicBlock *BitTestBB =
8971         FuncInfo.MF->CreateMachineBasicBlock(SI->getParent());
8972     BTI.push_back(BitTestCase(CB.Mask, BitTestBB, CB.BB, CB.ExtraProb));
8973   }
8974   BitTestCases.emplace_back(std::move(LowBound), std::move(CmpRange),
8975                             SI->getCondition(), -1U, MVT::Other, false,
8976                             ContiguousRange, nullptr, nullptr, std::move(BTI),
8977                             TotalProb);
8978 
8979   BTCluster = CaseCluster::bitTests(Clusters[First].Low, Clusters[Last].High,
8980                                     BitTestCases.size() - 1, TotalProb);
8981   return true;
8982 }
8983 
8984 void SelectionDAGBuilder::findBitTestClusters(CaseClusterVector &Clusters,
8985                                               const SwitchInst *SI) {
8986 // Partition Clusters into as few subsets as possible, where each subset has a
8987 // range that fits in a machine word and has <= 3 unique destinations.
8988 
8989 #ifndef NDEBUG
8990   // Clusters must be sorted and contain Range or JumpTable clusters.
8991   assert(!Clusters.empty());
8992   assert(Clusters[0].Kind == CC_Range || Clusters[0].Kind == CC_JumpTable);
8993   for (const CaseCluster &C : Clusters)
8994     assert(C.Kind == CC_Range || C.Kind == CC_JumpTable);
8995   for (unsigned i = 1; i < Clusters.size(); ++i)
8996     assert(Clusters[i-1].High->getValue().slt(Clusters[i].Low->getValue()));
8997 #endif
8998 
8999   // The algorithm below is not suitable for -O0.
9000   if (TM.getOptLevel() == CodeGenOpt::None)
9001     return;
9002 
9003   // If the target does not have a legal shift-left, do not emit bit tests at all.
9004   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9005   EVT PTy = TLI.getPointerTy(DAG.getDataLayout());
9006   if (!TLI.isOperationLegal(ISD::SHL, PTy))
9007     return;
9008 
9009   int BitWidth = PTy.getSizeInBits();
9010   const int64_t N = Clusters.size();
9011 
9012   // MinPartitions[i] is the minimum number of partitions of Clusters[i..N-1].
9013   SmallVector<unsigned, 8> MinPartitions(N);
9014   // LastElement[i] is the last element of the partition starting at i.
9015   SmallVector<unsigned, 8> LastElement(N);
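  // E.g. if Clusters[0..2] can form one word-sized partition but Clusters[3]
  // cannot join it, then MinPartitions[0] == 2 and LastElement[0] == 2.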
9016 
9017   // FIXME: This might not be the best algorithm for finding bit test clusters.
9018 
9019   // Base case: There is only one way to partition Clusters[N-1].
9020   MinPartitions[N - 1] = 1;
9021   LastElement[N - 1] = N - 1;
9022 
9023   // Note: loop indexes are signed to avoid underflow.
9024   for (int64_t i = N - 2; i >= 0; --i) {
9025     // Find optimal partitioning of Clusters[i..N-1].
9026     // Baseline: Put Clusters[i] into a partition on its own.
9027     MinPartitions[i] = MinPartitions[i + 1] + 1;
9028     LastElement[i] = i;
9029 
9030     // Search for a solution that results in fewer partitions.
9031     // Note: the search is limited by BitWidth, reducing time complexity.
9032     for (int64_t j = std::min(N - 1, i + BitWidth - 1); j > i; --j) {
9033       // Try building a partition from Clusters[i..j].
9034 
9035       // Check the range.
9036       if (!rangeFitsInWord(Clusters[i].Low->getValue(),
9037                            Clusters[j].High->getValue()))
9038         continue;
9039 
9040       // Check number of destinations and cluster types.
9041       // FIXME: This works, but doesn't seem very efficient.
9042       bool RangesOnly = true;
9043       BitVector Dests(FuncInfo.MF->getNumBlockIDs());
9044       for (int64_t k = i; k <= j; k++) {
9045         if (Clusters[k].Kind != CC_Range) {
9046           RangesOnly = false;
9047           break;
9048         }
9049         Dests.set(Clusters[k].MBB->getNumber());
9050       }
9051       if (!RangesOnly || Dests.count() > 3)
9052         break;
9053 
9054       // Check if it's a better partition.
9055       unsigned NumPartitions = 1 + (j == N - 1 ? 0 : MinPartitions[j + 1]);
9056       if (NumPartitions < MinPartitions[i]) {
9057         // Found a better partition.
9058         MinPartitions[i] = NumPartitions;
9059         LastElement[i] = j;
9060       }
9061     }
9062   }
9063 
9064   // Iterate over the partitions, replacing with bit-test clusters in-place.
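  // Partitions lowered to bit tests shrink to a single cluster; DstIndex is
  // the write cursor for the in-place compaction.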
9065   unsigned DstIndex = 0;
9066   for (unsigned First = 0, Last; First < N; First = Last + 1) {
9067     Last = LastElement[First];
9068     assert(First <= Last);
9069     assert(DstIndex <= First);
9070 
9071     CaseCluster BitTestCluster;
9072     if (buildBitTests(Clusters, First, Last, SI, BitTestCluster)) {
9073       Clusters[DstIndex++] = BitTestCluster;
9074     } else {
9075       size_t NumClusters = Last - First + 1;
9076       std::memmove(&Clusters[DstIndex], &Clusters[First],
9077                    sizeof(Clusters[0]) * NumClusters);
9078       DstIndex += NumClusters;
9079     }
9080   }
9081   Clusters.resize(DstIndex);
9082 }
9083 
9084 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
9085                                         MachineBasicBlock *SwitchMBB,
9086                                         MachineBasicBlock *DefaultMBB) {
9087   MachineFunction *CurMF = FuncInfo.MF;
9088   MachineBasicBlock *NextMBB = nullptr;
9089   MachineFunction::iterator BBI(W.MBB);
9090   if (++BBI != FuncInfo.MF->end())
9091     NextMBB = &*BBI;
9092 
9093   unsigned Size = W.LastCluster - W.FirstCluster + 1;
9094 
9095   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9096 
9097   if (Size == 2 && W.MBB == SwitchMBB) {
9098     // If the two cases have the same destination and their values differ in
9099     // exactly one bit, use bit manipulation to do both compares at once.
9100     // For example:
9101     // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
9102     // TODO: This could be extended to merge any 2 cases in switches with 3
9103     // cases.
9104     // TODO: Handle cases where W.CaseBB != SwitchBB.
9105     CaseCluster &Small = *W.FirstCluster;
9106     CaseCluster &Big = *W.LastCluster;
9107 
9108     if (Small.Low == Small.High && Big.Low == Big.High &&
9109         Small.MBB == Big.MBB) {
9110       const APInt &SmallValue = Small.Low->getValue();
9111       const APInt &BigValue = Big.Low->getValue();
9112 
9113       // Check that exactly one bit differs (the XOR is then a power of two).
9114       APInt CommonBit = BigValue ^ SmallValue;
9115       if (CommonBit.isPowerOf2()) {
9116         SDValue CondLHS = getValue(Cond);
9117         EVT VT = CondLHS.getValueType();
9118         SDLoc DL = getCurSDLoc();
9119 
9120         SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
9121                                  DAG.getConstant(CommonBit, DL, VT));
9122         SDValue Cond = DAG.getSetCC(
9123             DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
9124             ISD::SETEQ);
9125 
9126         // Update successor info.
9127         // Both Small and Big will jump to Small.BB, so we sum up the
9128         // probabilities.
9129         addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
9130         if (BPI)
9131           addSuccessorWithProb(
9132               SwitchMBB, DefaultMBB,
9133               // The default destination is the first successor in IR.
9134               BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
9135         else
9136           addSuccessorWithProb(SwitchMBB, DefaultMBB);
9137 
9138         // Insert the true branch.
9139         SDValue BrCond =
9140             DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
9141                         DAG.getBasicBlock(Small.MBB));
9142         // Insert the false branch.
9143         BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
9144                              DAG.getBasicBlock(DefaultMBB));
9145 
9146         DAG.setRoot(BrCond);
9147         return;
9148       }
9149     }
9150   }
9151 
9152   if (TM.getOptLevel() != CodeGenOpt::None) {
9153     // Order cases by probability so the most likely case will be checked first.
9154     std::sort(W.FirstCluster, W.LastCluster + 1,
9155               [](const CaseCluster &a, const CaseCluster &b) {
9156       return a.Prob > b.Prob;
9157     });
9158 
9159     // Rearrange the case blocks so that the last one falls through if possible
9160     // without changing the order of probabilities.
9161     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
9162       --I;
9163       if (I->Prob > W.LastCluster->Prob)
9164         break;
9165       if (I->Kind == CC_Range && I->MBB == NextMBB) {
9166         std::swap(*I, *W.LastCluster);
9167         break;
9168       }
9169     }
9170   }
9171 
9172   // Compute the total probability of the default destination and all clusters.
9173   BranchProbability DefaultProb = W.DefaultProb;
9174   BranchProbability UnhandledProbs = DefaultProb;
9175   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
9176     UnhandledProbs += I->Prob;
9177 
9178   MachineBasicBlock *CurMBB = W.MBB;
9179   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
9180     MachineBasicBlock *Fallthrough;
9181     if (I == W.LastCluster) {
9182       // For the last cluster, fall through to the default destination.
9183       Fallthrough = DefaultMBB;
9184     } else {
9185       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
9186       CurMF->insert(BBI, Fallthrough);
9187       // Put Cond in a virtual register to make it available from the new blocks.
9188       ExportFromCurrentBlock(Cond);
9189     }
9190     UnhandledProbs -= I->Prob;
9191 
9192     switch (I->Kind) {
9193       case CC_JumpTable: {
9194         // FIXME: Optimize away range check based on pivot comparisons.
9195         JumpTableHeader *JTH = &JTCases[I->JTCasesIndex].first;
9196         JumpTable *JT = &JTCases[I->JTCasesIndex].second;
9197 
9198         // The jump block hasn't been inserted yet; insert it here.
9199         MachineBasicBlock *JumpMBB = JT->MBB;
9200         CurMF->insert(BBI, JumpMBB);
9201 
9202         auto JumpProb = I->Prob;
9203         auto FallthroughProb = UnhandledProbs;
9204 
9205         // If the default statement is a target of the jump table, we evenly
9206         // distribute the default probability to successors of CurMBB. Also
9207         // update the probability on the edge from JumpMBB to Fallthrough.
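        // E.g. with DefaultProb == 1/4, half of it (1/8) moves from the
        // fallthrough edge to the jump-table edge.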
9208         for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
9209                                               SE = JumpMBB->succ_end();
9210              SI != SE; ++SI) {
9211           if (*SI == DefaultMBB) {
9212             JumpProb += DefaultProb / 2;
9213             FallthroughProb -= DefaultProb / 2;
9214             JumpMBB->setSuccProbability(SI, DefaultProb / 2);
9215             JumpMBB->normalizeSuccProbs();
9216             break;
9217           }
9218         }
9219 
9220         addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
9221         addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
9222         CurMBB->normalizeSuccProbs();
9223 
9224         // The jump table header will be inserted in our current block, do the
9225         // range check, and fall through to our fallthrough block.
9226         JTH->HeaderBB = CurMBB;
9227         JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
9228 
9229         // If we're in the right place, emit the jump table header right now.
9230         if (CurMBB == SwitchMBB) {
9231           visitJumpTableHeader(*JT, *JTH, SwitchMBB);
9232           JTH->Emitted = true;
9233         }
9234         break;
9235       }
9236       case CC_BitTests: {
9237         // FIXME: Optimize away range check based on pivot comparisons.
9238         BitTestBlock *BTB = &BitTestCases[I->BTCasesIndex];
9239 
9240         // The bit test blocks haven't been inserted yet; insert them here.
9241         for (BitTestCase &BTC : BTB->Cases)
9242           CurMF->insert(BBI, BTC.ThisBB);
9243 
9244         // Fill in fields of the BitTestBlock.
9245         BTB->Parent = CurMBB;
9246         BTB->Default = Fallthrough;
9247 
9248         BTB->DefaultProb = UnhandledProbs;
9249         // If the cases in bit test don't form a contiguous range, we evenly
9250         // distribute the probability on the edge to Fallthrough to two
9251         // successors of CurMBB.
9252         if (!BTB->ContiguousRange) {
9253           BTB->Prob += DefaultProb / 2;
9254           BTB->DefaultProb -= DefaultProb / 2;
9255         }
9256 
9257         // If we're in the right place, emit the bit test header right now.
9258         if (CurMBB == SwitchMBB) {
9259           visitBitTestHeader(*BTB, SwitchMBB);
9260           BTB->Emitted = true;
9261         }
9262         break;
9263       }
9264       case CC_Range: {
9265         const Value *RHS, *LHS, *MHS;
9266         ISD::CondCode CC;
9267         if (I->Low == I->High) {
9268           // Check Cond == I->Low.
9269           CC = ISD::SETEQ;
9270           LHS = Cond;
9271           RHS = I->Low;
9272           MHS = nullptr;
9273         } else {
9274           // Check I->Low <= Cond <= I->High.
9275           CC = ISD::SETLE;
9276           LHS = I->Low;
9277           MHS = Cond;
9278           RHS = I->High;
9279         }
9280 
9281         // The false probability is the sum of all unhandled cases.
9282         CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB, I->Prob,
9283                      UnhandledProbs);
9284 
9285         if (CurMBB == SwitchMBB)
9286           visitSwitchCase(CB, SwitchMBB);
9287         else
9288           SwitchCases.push_back(CB);
9289 
9290         break;
9291       }
9292     }
9293     CurMBB = Fallthrough;
9294   }
9295 }
9296 
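/// Returns the number of clusters in [First, Last] that would precede CC when
/// ordering by decreasing probability, with ties broken by smaller low case
/// value, i.e. CC's rank within that range.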
9297 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
9298                                               CaseClusterIt First,
9299                                               CaseClusterIt Last) {
9300   return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
9301     if (X.Prob != CC.Prob)
9302       return X.Prob > CC.Prob;
9303 
9304     // Ties are broken by comparing the case value.
9305     return X.Low->getValue().slt(CC.Low->getValue());
9306   });
9307 }
9308 
9309 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
9310                                         const SwitchWorkListItem &W,
9311                                         Value *Cond,
9312                                         MachineBasicBlock *SwitchMBB) {
9313   assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
9314          "Clusters not sorted?");
9315 
9316   assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
9317 
9318   // Balance the tree based on branch probabilities to create a near-optimal (in
9319   // terms of search time given key frequency) binary search tree. See e.g. Kurt
9320   // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
9321   CaseClusterIt LastLeft = W.FirstCluster;
9322   CaseClusterIt FirstRight = W.LastCluster;
9323   auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
9324   auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
9325 
9326   // Move LastLeft and FirstRight towards each other from opposite directions to
9327   // find a partitioning of the clusters which balances the probability on both
9328   // sides. If LeftProb and RightProb are equal, alternate which side is
9329   // taken to ensure 0-probability nodes are distributed evenly.
9330   unsigned I = 0;
9331   while (LastLeft + 1 < FirstRight) {
9332     if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
9333       LeftProb += (++LastLeft)->Prob;
9334     else
9335       RightProb += (--FirstRight)->Prob;
9336     I++;
9337   }
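  // For example, with cluster probabilities {1/2, 1/4, 1/8, 1/8} and a zero
  // default probability, the loop settles on {1/2} versus {1/4, 1/8, 1/8},
  // putting probability 1/2 on each side.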
9338 
9339   for (;;) {
9340     // Our binary search tree differs from a typical BST in that ours can have up
9341     // to three values in each leaf. The pivot selection above doesn't take that
9342     // into account, which means the tree might require more nodes and be less
9343     // efficient. We compensate for this here.
9344 
9345     unsigned NumLeft = LastLeft - W.FirstCluster + 1;
9346     unsigned NumRight = W.LastCluster - FirstRight + 1;
9347 
9348     if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
9349       // If one side has fewer than 3 clusters, and the other has more than 3,
9350       // consider taking a cluster from the other side.
9351 
9352       if (NumLeft < NumRight) {
9353         // Consider moving the first cluster on the right to the left side.
9354         CaseCluster &CC = *FirstRight;
9355         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9356         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9357         if (LeftSideRank <= RightSideRank) {
9358           // Moving the cluster to the left does not demote it.
9359           ++LastLeft;
9360           ++FirstRight;
9361           continue;
9362         }
9363       } else {
9364         assert(NumRight < NumLeft);
9365         // Consider moving the last element on the left to the right side.
9366         CaseCluster &CC = *LastLeft;
9367         unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
9368         unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
9369         if (RightSideRank <= LeftSideRank) {
9370           // Moving the cluster to the right does not demote it.
9371           --LastLeft;
9372           --FirstRight;
9373           continue;
9374         }
9375       }
9376     }
9377     break;
9378   }
9379 
9380   assert(LastLeft + 1 == FirstRight);
9381   assert(LastLeft >= W.FirstCluster);
9382   assert(FirstRight <= W.LastCluster);
9383 
9384   // Use the first element on the right as pivot since we will make less-than
9385   // comparisons against it.
9386   CaseClusterIt PivotCluster = FirstRight;
9387   assert(PivotCluster > W.FirstCluster);
9388   assert(PivotCluster <= W.LastCluster);
9389 
9390   CaseClusterIt FirstLeft = W.FirstCluster;
9391   CaseClusterIt LastRight = W.LastCluster;
9392 
9393   const ConstantInt *Pivot = PivotCluster->Low;
9394 
9395   // New blocks will be inserted immediately after the current one.
9396   MachineFunction::iterator BBI(W.MBB);
9397   ++BBI;
9398 
9399   // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
9400   // we can branch to its destination directly if it's squeezed exactly in
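  // E.g. if the known lower bound is 0 and the single left-hand cluster covers
  // [0, Pivot - 1], we branch directly to its destination.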
9401   // between the known lower bound and Pivot - 1.
9402   MachineBasicBlock *LeftMBB;
9403   if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
9404       FirstLeft->Low == W.GE &&
9405       (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
9406     LeftMBB = FirstLeft->MBB;
9407   } else {
9408     LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
9409     FuncInfo.MF->insert(BBI, LeftMBB);
9410     WorkList.push_back(
9411         {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
9412     // Put Cond in a virtual register to make it available from the new blocks.
9413     ExportFromCurrentBlock(Cond);
9414   }
9415 
9416   // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
9417   // single cluster, RHS.Low == Pivot, and we can branch to its destination
9418   // directly if RHS.High equals the current upper bound.
9419   MachineBasicBlock *RightMBB;
9420   if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
9421       W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
9422     RightMBB = FirstRight->MBB;
9423   } else {
9424     RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
9425     FuncInfo.MF->insert(BBI, RightMBB);
9426     WorkList.push_back(
9427         {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
9428     // Put Cond in a virtual register to make it available from the new blocks.
9429     ExportFromCurrentBlock(Cond);
9430   }
9431 
9432   // Create the CaseBlock record that will be used to lower the branch.
9433   CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
9434                LeftProb, RightProb);
9435 
9436   if (W.MBB == SwitchMBB)
9437     visitSwitchCase(CB, SwitchMBB);
9438   else
9439     SwitchCases.push_back(CB);
9440 }
9441 
9442 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
9443   // Extract cases from the switch.
9444   BranchProbabilityInfo *BPI = FuncInfo.BPI;
9445   CaseClusterVector Clusters;
9446   Clusters.reserve(SI.getNumCases());
9447   for (auto I : SI.cases()) {
9448     MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
9449     const ConstantInt *CaseVal = I.getCaseValue();
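    // Without BPI, assume each case and the default are equally likely.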
9450     BranchProbability Prob =
9451         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
9452             : BranchProbability(1, SI.getNumCases() + 1);
9453     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
9454   }
9455 
9456   MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
9457 
9458   // Cluster adjacent cases with the same destination. We do this at all
9459   // optimization levels because it's cheap to do and will make codegen faster
9460   // if there are many clusters.
9461   sortAndRangeify(Clusters);
9462 
9463   if (TM.getOptLevel() != CodeGenOpt::None) {
9464     // Replace an unreachable default with the most popular destination.
9465     // FIXME: Exploit unreachable default more aggressively.
9466     bool UnreachableDefault =
9467         isa<UnreachableInst>(SI.getDefaultDest()->getFirstNonPHIOrDbg());
9468     if (UnreachableDefault && !Clusters.empty()) {
9469       DenseMap<const BasicBlock *, unsigned> Popularity;
9470       unsigned MaxPop = 0;
9471       const BasicBlock *MaxBB = nullptr;
9472       for (auto I : SI.cases()) {
9473         const BasicBlock *BB = I.getCaseSuccessor();
9474         if (++Popularity[BB] > MaxPop) {
9475           MaxPop = Popularity[BB];
9476           MaxBB = BB;
9477         }
9478       }
9479       // Set new default.
9480       assert(MaxPop > 0 && MaxBB);
9481       DefaultMBB = FuncInfo.MBBMap[MaxBB];
9482 
9483       // Remove cases that were pointing to the destination that is now the
9484       // default.
9485       CaseClusterVector New;
9486       New.reserve(Clusters.size());
9487       for (CaseCluster &CC : Clusters) {
9488         if (CC.MBB != DefaultMBB)
9489           New.push_back(CC);
9490       }
9491       Clusters = std::move(New);
9492     }
9493   }
9494 
9495   // If there is only the default destination, jump there directly.
9496   MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
9497   if (Clusters.empty()) {
9498     SwitchMBB->addSuccessor(DefaultMBB);
9499     if (DefaultMBB != NextBlock(SwitchMBB)) {
9500       DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
9501                               getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
9502     }
9503     return;
9504   }
9505 
9506   findJumpTables(Clusters, &SI, DefaultMBB);
9507   findBitTestClusters(Clusters, &SI);
9508 
9509   DEBUG({
9510     dbgs() << "Case clusters: ";
9511     for (const CaseCluster &C : Clusters) {
9512       if (C.Kind == CC_JumpTable) dbgs() << "JT:";
9513       if (C.Kind == CC_BitTests) dbgs() << "BT:";
9514 
9515       C.Low->getValue().print(dbgs(), true);
9516       if (C.Low != C.High) {
9517         dbgs() << '-';
9518         C.High->getValue().print(dbgs(), true);
9519       }
9520       dbgs() << ' ';
9521     }
9522     dbgs() << '\n';
9523   });
9524 
9525   assert(!Clusters.empty());
9526   SwitchWorkList WorkList;
9527   CaseClusterIt First = Clusters.begin();
9528   CaseClusterIt Last = Clusters.end() - 1;
9529   auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
9530   WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
9531 
9532   while (!WorkList.empty()) {
9533     SwitchWorkListItem W = WorkList.back();
9534     WorkList.pop_back();
9535     unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
9536 
9537     if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
9538         !DefaultMBB->getParent()->getFunction()->optForMinSize()) {
9539       // For optimized builds, lower a large range as a balanced binary tree.
9540       splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
9541       continue;
9542     }
9543 
9544     lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
9545   }
9546 }
9547